diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py
deleted file mode 100644
index 1eb191c0d57d5d14de4cf2f5b0a2ae3722311ab8..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import re
-import requests
-import hashlib
-from fake_useragent import UserAgent
-class Account:
- @staticmethod
- def create():
- r=requests.get("https://italygpt.it/",headers=Account._header)
- f=r.text
- tid=re.search('',f).group(1)
- if len(tid)==0:
- raise RuntimeError("NetworkError: failed to get id.")
- else:
- Account._tid=tid
- Account._raw="[]"
- return Account
- def next(next_id:str)->str:
- Account._tid=next_id
- return Account._tid
- def get()->str:
- return Account._tid
- _header={
- "Host": "italygpt.it",
- "Referer":"https://italygpt.it/",
- "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",#UserAgent().random,
- "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
- "Accept-Language":"zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
- "Upgrade-Insecure-Requests":"1",
- "Sec-Fetch-Dest":"document",
- "Sec-Fetch-Mode":"navigate",
- "Sec-Fetch-Site":"none",
- "Sec-Fetch-User":"?1",
- "Connection":"keep-alive",
- "Alt-Used":"italygpt.it",
- "Pragma":"no-cache",
- "Cache-Control":"no-cache",
- "TE": "trailers"
- }
- def settraw(raws:str):
- Account._raw=raws
- return Account._raw
- def gettraw():
- return Account._raw
-
-class Completion:
- @staticmethod
- def create(
- account_data,
- prompt: str,
- message=False
- ):
- param={
- "prompt":prompt.replace(" ","+"),
- "creative":"off",
- "internet":"false",
- "detailed":"off",
- "current_id":"0",
- "code":"",
- "gpt4":"false",
- "raw_messages":account_data.gettraw(),
- "hash":hashlib.sha256(account_data.get().encode()).hexdigest()
- }
- if(message):
- param["raw_messages"]=str(message)
- r = requests.get("https://italygpt.it/question",headers=account_data._header,params=param,stream=True)
- account_data.next(r.headers["Next_id"])
- account_data.settraw(r.headers["Raw_messages"])
- for chunk in r.iter_content(chunk_size=None):
- r.raise_for_status()
- yield chunk.decode()
\ No newline at end of file
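For readers skimming this removal, here is a minimal usage sketch of the client that the deleted italygpt2 module implemented. It is illustrative only: the `from gpt4free import italygpt2` import path is assumed from the deleted file's location, the italygpt.it endpoints may no longer respond, and the regex that extracted the session id in `Account.create()` is blank in the hunk above, so it would need to be restored before this could run.

```python
# Hypothetical usage of the deleted italygpt2 client (sketch only).
from gpt4free import italygpt2  # assumed import path based on the deleted file's location

account = italygpt2.Account.create()   # fetches a session id from italygpt.it
answer = ""
for chunk in italygpt2.Completion.create(account_data=account, prompt="Hello!"):
    answer += chunk                    # Completion.create streams decoded response chunks
print(answer)
```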
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md
deleted file mode 100644
index 6cdaee9c794c5b33ca386e5a9e227e2a51595a22..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-
What is Realtek ATI HDMI Audio Device 2-70 Crack?
-
If you want to enjoy high-quality sound from your PC's HDMI port, you need a reliable audio driver that can communicate with your hardware and software. One of the most popular audio drivers for HDMI devices is the Realtek ATI HDMI Audio Device Driver, which supports all Realtek HD Audio Codecs.
-
However, downloading and installing the official version of this driver may not be enough for some users who want to unlock more features and performance. That's why some people look for a crack version of this driver, which is a modified or hacked version that bypasses the license verification and activation process.
In this article, we will tell you everything you need to know about Realtek ATI HDMI Audio Device 2-70 Crack, which is one of the latest versions of this driver package. We will explain why you may need it, how to download and install it, how to fix common issues with it, and what some alternatives to it are.
-
Why do you need Realtek ATI HDMI Audio Device 2-70 Crack?
-
There are several reasons why you may want to use a crack version of Realtek ATI HDMI Audio Device Driver instead of the official one. Here are some of them:
-
-
It is free. The official version of this driver requires a license fee, which may not be affordable for some users. The crack version, on the other hand, is available for free on various websites and forums.
-
It has more features. The official version of this driver may have some limitations or restrictions on certain functions or settings. The crack version, on the other hand, may have more options and customizations that can enhance your audio experience.
-
It has better performance. The official version of this driver may have some bugs or glitches that can affect your audio quality or stability. The crack version, on the other hand, may have fixed or improved some issues that can make your audio smoother and clearer.
-
-
Of course, using a crack version also comes with some risks and drawbacks, such as:
-
-
It is illegal. The crack version violates the terms and conditions of the original software developer, which can result in legal consequences or penalties. You may also be infringing on the intellectual property rights of the software owner.
-
It is unsafe. The crack version may contain viruses, malware, spyware, or other harmful programs that can damage your computer or steal your personal information. You may also expose yourself to cyberattacks or identity theft.
-
It is unreliable. The crack version may not work properly or at all with your hardware or software configuration. You may also encounter compatibility issues or conflicts with other drivers or programs. You may also lose access to updates or support from the original software developer.
-
-
Therefore, before you decide to use a crack version of Realtek ATI HDMI Audio Device Driver, you should weigh the pros and cons carefully and be aware of the potential consequences.
-
How to download and install Realtek ATI HDMI Audio Device 2-70 Crack?
-
If you still want to try Realtek ATI HDMI Audio Device 2-70 Crack, here are the steps you need to follow:
-
-
Download the crack file. You can find various sources for downloading this file on the internet, such as torrent sites, file-sharing platforms, or online forums. However, be careful not to download any fake or malicious files that can harm your computer. You should also scan any file you download with an antivirus program before opening it.
-
Extract the crack file. After downloading the file, you need to extract it using a program like WinRAR or 7-Zip. You should see a folder containing several files, such as setup.exe, readme.txt, crack.dll, etc.
-
Run the setup file. Double-click on the setup.exe file to launch the installation wizard. Follow the instructions on the screen to install the driver package. You may need to restart your computer after the installation is complete.
-
Copy and paste the crack file. Locate the crack.dll file in the folder you extracted earlier. Copy this file and paste it into the installation directory of Realtek ATI HDMI Audio Device Driver. This is usually located in C:\Program Files\Realtek\Audio\HDA\. You may need to overwrite or replace an existing file with the same name.
-
Enjoy your cracked driver. You have successfully installed Realtek ATI HDMI Audio Device 2-70 Crack on your computer. You can now access more features and settings from your audio device manager or control panel.
-
-
How to fix common issues with Realtek ATI HDMI Audio Device 2-70 Crack?
-
Sometimes, you may encounter some problems or errors when using Realtek ATI HDMI Audio Device 2-70 Crack. Here are some tips and tricks on how to troubleshoot them:
-
-
No sound output from HDMI device. This may happen if your HDMI device is not detected by your computer or if your audio settings are incorrect. To fix this issue, you can try these solutions:
-
Check if your HDMI cable is properly connected between your PC and your monitor or TV.
-
Check if your HDMI device is turned on and set as the default playback device in your sound settings.
-
Check if your audio driver is up-to-date and compatible with your operating system and hardware configuration.
-
Check if there are any conflicts or interferences with other drivers or programs that may affect your audio output.
-
-
Poor sound quality from HDMI device. This may happen if your audio settings are not optimal for your HDMI device or if there are any background noises or distortions. To fix this issue, you can try these solutions:
-
Adjust your volume level and balance in your sound settings or control panel.
-
Select an appropriate sound mode or profile for your HDMI device in your audio device manager or control panel.
-
Tweak your equalizer settings or use a third-party software to enhance your sound quality.
-
Avoid placing any objects or devices that may cause interference near your HDMI device or cable.
-
-
HDMI device not recognized by audio driver. This may happen if your audio driver is corrupted or incompatible with your HDMI device. To fix this issue, you can try these solutions:
-
Uninstall and reinstall your audio driver using a clean installation method.
-
Update your audio driver to the latest version available from the official website or a trusted source.
-
Contact customer support from Realtek or ATI for assistance or guidance on how to resolve this issue.
-
-
-
What are the alternatives to Realtek ATI HDMI Audio Device 2-70 Crack?
- If you do not want to take the risks of using Realtek ATI HDMI Audio Device 2-70 Crack, you may want to consider some other alternatives for HDMI audio drivers. Here are some of them:
-
AMD High Definition Audio Device Driver
-
If you have an AMD graphics card or chipset, you may want to use the AMD High Definition Audio Device Driver, which is designed to work with AMD HDMI devices. This driver supports various audio formats and features, such as Dolby TrueHD, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the AMD website or use the AMD Radeon Software to update it automatically.
-
How to download Realtek ATI HDMI Audio Device 2-70 Crack for free
-Realtek ATI HDMI Audio Device 2-70 Crack full version download
-Realtek ATI HDMI Audio Device 2-70 Crack serial key generator
-Realtek ATI HDMI Audio Device 2-70 Crack activation code
-Realtek ATI HDMI Audio Device 2-70 Crack license key
-Realtek ATI HDMI Audio Device 2-70 Crack patch
-Realtek ATI HDMI Audio Device 2-70 Crack torrent
-Realtek ATI HDMI Audio Device 2-70 Crack rar file
-Realtek ATI HDMI Audio Device 2-70 Crack zip file
-Realtek ATI HDMI Audio Device 2-70 Crack iso file
-Realtek ATI HDMI Audio Device 2-70 Crack setup file
-Realtek ATI HDMI Audio Device 2-70 Crack installer
-Realtek ATI HDMI Audio Device 2-70 Crack offline installer
-Realtek ATI HDMI Audio Device 2-70 Crack portable version
-Realtek ATI HDMI Audio Device 2-70 Crack latest version
-Realtek ATI HDMI Audio Device 2-70 Crack updated version
-Realtek ATI HDMI Audio Device 2-70 Crack review
-Realtek ATI HDMI Audio Device 2-70 Crack features
-Realtek ATI HDMI Audio Device 2-70 Crack benefits
-Realtek ATI HDMI Audio Device 2-70 Crack pros and cons
-Realtek ATI HDMI Audio Device 2-70 Crack comparison
-Realtek ATI HDMI Audio Device 2-70 Crack alternatives
-Realtek ATI HDMI Audio Device 2-70 Crack competitors
-Realtek ATI HDMI Audio Device 2-70 Crack compatibility
-Realtek ATI HDMI Audio Device 2-70 Crack system requirements
-Realtek ATI HDMI Audio Device 2-70 Crack troubleshooting
-Realtek ATI HDMI Audio Device 2-70 Crack error codes
-Realtek ATI HDMI Audio Device 2-70 Crack fix
-Realtek ATI HDMI Audio Device 2-70 Crack support
-Realtek ATI HDMI Audio Device 2-70 Crack customer service
-Realtek ATI HDMI Audio Device 2-70 Crack manual
-Realtek ATI HDMI Audio Device 2-70 Crack guide
-Realtek ATI HDMI Audio Device 2-70 Crack tutorial
-Realtek ATI HDMI Audio Device 2.7.0.1 Driver Download for Windows
-
NVIDIA High Definition Audio Driver
-
If you have an NVIDIA graphics card or chipset, you may want to use the NVIDIA High Definition Audio Driver, which is designed to work with NVIDIA HDMI devices. This driver supports various audio formats and features, such as Dolby Digital Plus, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the NVIDIA website or use the NVIDIA GeForce Experience to update it automatically.
-
Intel High Definition Audio Driver
-
If you have an Intel processor or chipset, you may want to use the Intel High Definition Audio Driver, which is designed to work with Intel HDMI devices. This driver supports various audio formats and features, such as Dolby Digital Plus, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the Intel website or use the Intel Driver & Support Assistant to update it automatically.
-
Conclusion
-
In conclusion, Realtek ATI HDMI Audio Device 2-70 Crack is a crack version of a popular audio driver for HDMI devices that can offer more features and performance than the official version. However, it also comes with some risks and drawbacks that you should be aware of before using it. If you are looking for other options for HDMI audio drivers, you can try some of the alternatives we mentioned above.
-
We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
-
What is HDMI? HDMI stands for High-Definition Multimedia Interface, which is a standard for transmitting digital audio and video signals between devices, such as computers, monitors, TVs, speakers, etc.
-
What is an audio driver? An audio driver is a software program that allows your computer to communicate with your audio device and enable its functions and features.
-
What is a crack version? A crack version is a modified or hacked version of a software program that bypasses the license verification and activation process and allows you to use it for free or with more features.
-
Is Realtek ATI HDMI Audio Device 2-70 Crack safe to use? No, it is not safe to use because it is illegal, unsafe, and unreliable. It may contain viruses or malware that can harm your computer or steal your personal information. It may also not work properly or at all with your hardware or software configuration. It may also expose you to legal consequences or penalties.
-
How can I update my audio driver? You can update your audio driver by downloading the latest version from the official website or a trusted source and following the installation instructions. You can also use a software tool that can scan your computer and update your drivers automatically.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md b/spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md
deleted file mode 100644
index 54335641fb9f7fba85150a0a06cb0583299a9678..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md
+++ /dev/null
@@ -1,104 +0,0 @@
-
-
Download Buku PPDGJ III PDF Files: A Complete Guide
-
-
If you are looking for a reliable and comprehensive source of information on mental disorders, you may want to download buku ppdgj iii pdf files. Buku PPDGJ III is the Indonesian version of the Diagnostic and Statistical Manual of Mental Disorders (DSM), which is the most widely used classification system for mental disorders in the world. Buku PPDGJ III was published in 1993 by the World Health Organization (WHO) and the Indonesian Psychiatric Association (IPA), and it is based on the International Classification of Diseases (ICD-10).
In this article, we will explain what buku ppdgj iii pdf files are, why they are useful, how to access and use them, and what benefits they offer for mental health professionals and students. We will also provide some tips on how to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference.
-
-
What are Buku PPDGJ III PDF Files?
-
-
Buku PPDGJ III PDF files are digital copies of the book PPDGJ III, which stands for Pedoman Penggolongan dan Diagnosis Gangguan Jiwa di Indonesia III (Guidelines for Classification and Diagnosis of Mental Disorders in Indonesia III). This book contains the official criteria and guidelines for diagnosing and classifying mental disorders in Indonesia, according to the international standards of WHO and IPA.
-
-
Buku PPDGJ III PDF files are available online for free download from various sources, such as Scribd, Doku, and Documents and E-books. You can also find them by searching for "download buku ppdgj iii pdf files" on Google or other search engines. The PDF files are usually around 9 MB in size and have about 170 pages.
-
-
Why are Buku PPDGJ III PDF Files Useful?
-
-
Buku PPDGJ III PDF files are useful for several reasons. First, they provide a comprehensive and updated overview of the current knowledge and practice of psychiatry in Indonesia. They cover a wide range of mental disorders, such as mood disorders, anxiety disorders, personality disorders, psychotic disorders, substance-related disorders, and more. They also include diagnostic criteria, clinical features, differential diagnosis, etiology, course, prognosis, treatment, and prevention of each disorder.
-
-
Second, they help to standardize and harmonize the diagnosis and classification of mental disorders in Indonesia. By using buku ppdgj iii pdf files as a reference, mental health professionals can ensure that they are following the same criteria and guidelines as their colleagues and peers. This can improve the quality and consistency of mental health services and research in Indonesia.
-
-
Third, they facilitate communication and collaboration among mental health professionals across different settings and regions. By using buku ppdgj iii pdf files as a common language, mental health professionals can easily share information and opinions about their cases and clients. They can also compare and contrast their findings and outcomes with other professionals who use the same system.
-
-
-
How to Access and Use Buku PPDGJ III PDF Files?
-
-
To access and use buku ppdgj iii pdf files, you need to have a computer or a mobile device with an internet connection and a PDF reader software. You can download buku ppdgj iii pdf files from any of the sources mentioned above or from other websites that offer them. You can also scan or photocopy the printed version of the book if you have access to it.
-
-
To use buku ppdgj iii pdf files effectively, you need to have some basic knowledge of psychiatry and mental disorders. You also need to be familiar with the structure and format of the book. The book is divided into four parts: Part I: Introduction; Part II: General Principles of Diagnosis; Part III: Specific Disorders; Part IV: Appendices.
-
-
Part I: Introduction provides some background information on the history, development, purpose, scope, limitations, and revisions of PPDGJ III. It also explains the basic concepts and terms used in the book.
-
-
Part II: General Principles of Diagnosis outlines the general rules and guidelines for diagnosing mental disorders using PPDGJ III. It covers topics such as diagnostic criteria, diagnostic categories, diagnostic axes, multiaxial assessment, differential diagnosis, comorbidity, reliability, validity, cultural factors, ethical issues, and legal implications.
-
-
Part III: Specific Disorders describes each specific disorder in detail. It follows a uniform format that includes: name of disorder; code number; diagnostic criteria; clinical features; differential diagnosis; etiology; course; prognosis; treatment; prevention; notes.
-
-
Part IV: Appendices contains some supplementary materials that support the main text of the book. It includes: glossary of terms; list of abbreviations; list of references; index.
-
-
What Benefits do Buku PPDGJ III PDF Files Offer for Mental Health Professionals?
-
-
Buku PPDGJ III PDF files offer many benefits for mental health professionals who work or study in Indonesia or who have an interest in Indonesian psychiatry. Some of these benefits are:
-
-
-
They provide a comprehensive and updated source of information on mental disorders that is relevant to the Indonesian context.
-
They help to improve the quality and consistency of diagnosis and classification of mental disorders in Indonesia.
-
They facilitate communication and collaboration among mental health professionals across different settings and regions.
-
They enhance professional development and learning by providing opportunities for self-assessment, feedback, reflection, research, publication, teaching, training, supervision, consultation, etc.
-
They increase public awareness and understanding of mental disorders by providing accurate and reliable information that can be used for education, advocacy, prevention, intervention, etc.
-
-
-
How to Write an Effective and SEO-Optimized Article Using Buku PPDGJ III PDF Files as a Reference?
-
-
If you want to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference, here are some ways to strengthen and expand it:
-
-
-- Add more details or examples to each point or paragraph
-- Add more subheadings or sections to cover more aspects of the topic
-- Add more images or videos to illustrate the content
-- Add more quotes or testimonials from experts or users
-- Add more statistics or facts to support the claims
-
-I hope this helps you write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference. If you have any questions or feedback, please let me know.
-
How to Use Buku PPDGJ III PDF Files for Diagnosis and Classification of Mental Disorders
-
-
One of the main purposes of buku ppdgj iii pdf files is to help mental health professionals diagnose and classify mental disorders in Indonesia. To use buku ppdgj iii pdf files for this purpose, you need to follow some steps and guidelines. Here are some tips to help you use buku ppdgj iii pdf files effectively for diagnosis and classification of mental disorders.
-
-
-
Conduct a thorough assessment of the patient's symptoms, history, and context. You can use various methods and tools, such as interviews, observations, tests, scales, questionnaires, etc. You can also consult with other professionals or family members if needed.
-
Compare the patient's symptoms and features with the diagnostic criteria and clinical features of each disorder in buku ppdgj iii pdf files. You can use the index or the table of contents to find the relevant disorder or category. You can also use the notes section to find additional information or clarifications.
-
Select the most appropriate diagnosis or diagnoses for the patient based on the best fit and evidence. You can use the differential diagnosis section to rule out other possible disorders or conditions. You can also use the multiaxial assessment system to assign a diagnosis on each of the five axes: Axis I: Clinical Disorders; Axis II: Personality Disorders and Mental Retardation; Axis III: General Medical Conditions; Axis IV: Psychosocial and Environmental Problems; Axis V: Global Assessment of Functioning.
-
Document and communicate your diagnosis or diagnoses clearly and accurately. You can use the code number and the name of each disorder as they appear in buku ppdgj iii pdf files. You can also use the etiology, course, prognosis, treatment, and prevention sections to provide more information or recommendations for the patient.
-
-
-
How to Use Buku PPDGJ III PDF Files for Learning and Teaching Psychiatry
-
-
Another purpose of buku ppdgj iii pdf files is to help mental health professionals and students learn and teach psychiatry in Indonesia. To use buku ppdgj iii pdf files for this purpose, you need to follow some steps and guidelines. Here are some tips to help you use buku ppdgj iii pdf files effectively for learning and teaching psychiatry.
-
-
-
Read and study buku ppdgj iii pdf files regularly and thoroughly. You can use the introduction and the general principles of diagnosis sections to learn the basic concepts and terms of psychiatry. You can also use the specific disorders sections to learn the details and features of each disorder.
-
Practice and apply buku ppdgj iii pdf files in real or simulated situations. You can use case studies, role plays, quizzes, exams, assignments, projects, etc. to test your knowledge and skills in diagnosing and classifying mental disorders using buku ppdgj iii pdf files. You can also use feedback, reflection, supervision, consultation, etc. to improve your performance and competence.
-
Share and discuss buku ppdgj iii pdf files with other professionals or students. You can use seminars, workshops, conferences, journals, blogs, forums, etc. to exchange information and opinions about buku ppdgj iii pdf files and psychiatry in general. You can also use research, publication, teaching, training, etc. to contribute to the development and dissemination of buku ppdgj iii pdf files and psychiatry in Indonesia.
-
-
Conclusion
-
-
Buku PPDGJ III PDF files are valuable resources for mental health professionals and students who work or study in Indonesia or who have an interest in Indonesian psychiatry. They provide a comprehensive and updated source of information on mental disorders that is relevant to the Indonesian context. They also help to standardize and harmonize the diagnosis and classification of mental disorders in Indonesia. Furthermore, they facilitate communication and collaboration among mental health professionals across different settings and regions. They also enhance professional development and learning by providing opportunities for self-assessment, feedback, reflection, research, publication, teaching, training, supervision, consultation, etc. They also increase public awareness and understanding of mental disorders by providing accurate and reliable information that can be used for education, advocacy, prevention, intervention, etc.
-
-
If you want to download buku ppdgj iii pdf files or learn more about them, you can use the links and resources provided in this article. You can also use the tips and guidelines provided in this article to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference. This will help you increase your website's visibility and traffic, as well as your credibility and authority in your field.
-
-
We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us. Thank you for reading.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md b/spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md
deleted file mode 100644
index 2398f64fa213ef4e3dbdefe79b0bca3e41204b33..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md b/spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md
deleted file mode 100644
index 49d8b871a43050705a0494a02cc965b2e09524f8..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-. KDE office; Microsoft Office Modeling; open office; Other Adobe applications. app/vnd.ms-cab-compressed . application/x-apple-diskimage. Download Adobe Acrobat Reader DC for Windows in Russian without registration and SMS from the link below.
-Acrobat Reader for Windows 10 in Russian via a direct link from the official website without registration and SMS.
-Download Adobe Acrobat Reader DC for free for Windows 7 in Russian without registration and SMS using the direct link below.
-Adobe Reader DC 2019. 8a78ff9644
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md b/spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md
deleted file mode 100644
index 0efe8d0e884a958411a6eb8d4d1be0d3092fa724..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-0:00 / 5:25•Watch the full video. Live. •. Scroll for details. Install full Archicad 16. 16,834 views16K views. September 7, 2015 . •. In the ArchiCAD Video Lessons section, you can watch video tutorials on working with ArchiCAD 16, which covers the basic working methods, such as creating and editing objects, creating walls, creating windows and doors, designing a roof, facade elements, creating and editing interior walls and partitions. The lesson uses an example project in which it is necessary to build a frame house. •. Scroll for details. Video tutorials Archicad 16. 2 771 views2 thousand views. 8a78ff9644
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md b/spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md
deleted file mode 100644
index 5e6cf2f8315de4ca92ee0a156db6b0955e2dda16..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-casio fx 880p emulator
-
-Length 00:10:42 - Size 9.42 MB
-
-Convert Youtube video to mp3Download Mp3Download has been launched, thanks for supporting us.Download has been launched, thank you.DownloadedVideo is under conversion, please wait...Video is under conversion...Converting...Sorry, this video is protected, thank you for your understanding.Sorry, this video is protected.Protected video.
-
-casio fx 880p emulator free convert to mp3
-
-Length 00:10:24 - Size 9.04 MB
-
-casio fx 880p emulator master cart
-
-Length 00:25:47 - Size 23.33 MB
-
-Convert Youtube video to mp3Download Mp3Download has been launched, thanks for supporting us.Download has been launched, thank you.DownloadedVideo is under conversion, please wait...Video is under conversion...Converting...Sorry, this video is protected, thank you for your understanding.Sorry, this video is protected.Prot 4fefd39f24
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md b/spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md
deleted file mode 100644
index 3ac06d34dae107abad7c4862bf2fc4fc2aa12db9..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Coffee Crisis is an arcade-style beat 'em up full of caffeinated carnage! ... The AI codenamed DUDE lies deep in the computer core of a long forgotten laboratory. 4d29de3e1b
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md
deleted file mode 100644
index 73017f17da23fc2a79fa344705f8fb8ef4558a8f..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-16 April 2021 — Online unit converter, October 21, 2021 01:45 AM . .html]emicsoft vob convertor 4.1.20 REGISTRATION CODE.rar[/url] royarborbert .. Converters: Unit converter — Unit converter (Unit converter) ( download ) — Unit converter. .
-Unit converter.
-On this page you can download the unit converter.
-This program allows you to convert values ​​from different systems.
-Unit Converter - Download Unit Converter for free.
-Unit Converter - A program for converting values ​​from one dimensional system to another.
-Unit converter .
-Download Unit converter . (for Windows), Unit Converter (For MAC OS) . 8a78ff9644
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md b/spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md
deleted file mode 100644
index 41da83a8d470ea7b29dffa7db24bb3bb2eda6ef4..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
This link is for the Clarett interfaces with a Thunderbolt connection and the ClarettOctoPre.
The serial numbers for the Clarett range begin with a U.
The names of the different compressor and EQ patches only provide clues as to what they emulate, but see the 'Behind The Mask' box for a list of the real-life models that were researched in their creation. These range from expensive and rare outboard to popular consoles and workhorse rack gear, and the selection on offer is certainly impressive, covering many of the biggest and best-respected names in recording hardware from both the UK and the US, including a number of Focusrite's own products.
-
The Liquid Mix Manager software allows you to set the unit's sample rate, and also to specify the maximum number of channels of processing you wish to use, in case you want to conserve Firewire bandwidth.Operating the Liquid Mix is gratifyingly straightforward, and though some users might wish for photorealistic emulations of every plug-in control surface, there's a lot to be said for having a consistent control style. This being said, I personally find it less helpful when the normal layout is changed to reflect some of the oddities of the original, such as putting the HF controls on the left or having frequency controls that work backwards. While I'm making small gripes, I couldn't get the Snapshot facility to work in Logic, though the normal Save and Load menu does essentially the same thing, and does work. The compressor and EQ settings are always loaded together, though, and I feel it would make more sense to also have them available separately.
-
Sonically, I'm not able to vouch for the degree of authenticity of all the emulations, but there are some very nice-sounding EQs and compressors available covering a wide range of distinctive characters and styles. I particularly liked some of the more subtle passive EQs that seem to sweeten a mix or track with very little adjustment, and of course there are those kick-ass optical compressors that contrast nicely with the more workmanlike VCA versions. For the less experienced user, deciding which to use may present a challenge, but at the same time Liquid Mix offers a wonderful educational opportunity for any aspiring engineer to get familiar with the essential character of a number of classic compressors and equalisers that they may otherwise never come across.
-
We have had some reports of registration problems, but have been unable to reproduce the problem. Please send your username (anthonylavoie) along with your iLok account, plug-in serial number and key to support@eventide.com and we'll make sure we get you registered.
-
-
Free assistance is available for the first 60 days on new purchases, excluding internal hardware installations or networking support. Your invoice reference number will be required for free assistance.
-
Registration: 7/10 First thing was to register it. It didn't accept my information the first 2 times online when I filled out the Bundle serial number part, but the third time it took to the information and registered it as a product I own. Slightly frustrating but I stuck with it and it worked eventually. it's a confusing series of jumps and kept forcing me to re-sign in then complained I was already signed in and that my Bundle ID wasn't recognized when it was exactly right. I typed it in the first 2 times and the 3rd successful time was just a copy/paste of the second attempt.
-
The second major advantage of Thunderbolt for audio purposes is the lower levels of latency that are achievable using this protocol. Thunderbolt connects straight through to the PCIe layer, as opposed to USB which must go through a number of stages first (each stage adding additional latency).
-
What Generation of Scarlett Solo you got? The serial number of your Scarlett will be found on the underside, either on a sticker or etched on the casing. The prefix of your serial number will denote which generation Scarlett you own:
-
U. Zanghieri presented a scheme for carrying AES3 signals on the "spare" pairs (4-5 and 7-8) of Ethernet 100Base-TX in a system where an audio source and a number of destination devices (such as powered loudspeakers) are connected in a ring, so that the system can survive loss of any one of the links. The Ethernet connection is used purely for control, as the latency through each unit is much higher than on the AES3 connection. A Project Initiation Request will be submitted in due course.
-
Apple has informed its official retail stores, AppleCare employees, and authorized resellers that a small number of third-generation Apple TV units have WiFi issues. These issues surround not being able to locate a WiFi network, unable to join a network, and dropped or intermittent connections.
-
Apple has determined that a very small number of Apple TV (3rd generation) products might experience one of these Wi-Fi related connectivity issues: Cannot locate network, Unable to join network, Dropped or intermittent connection.
-
Apple, which works with suppliers to test new designs all the time, has been testing various TV prototypes for a number of years, according to people familiar with the efforts. The company generally tests and develops products internally before doing so with outside suppliers.
-
In Spain, the lack of podcasting data, and existing conflicting numbers, are holding the medium back, says a well-researched article published in TELOS Magazine. It quotes podcaster Francisco Izuzquiza:
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md b/spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md
deleted file mode 100644
index 462262c238dec20422ec17ca16e342270a592f1c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
Download Supreme Duelist Stickman APK 2022: A Fun and Crazy Stickman Game
-
If you are looking for a fun and crazy stickman game to play on your Android device, you should download Supreme Duelist Stickman APK 2022. This is a popular stickman game that lets you fight against other stickmen in various modes and maps. You can also customize your character with different weapons and outfits, and enjoy realistic ragdoll physics. Whether you want to play solo or with your friends, Supreme Duelist Stickman APK 2022 will keep you entertained for hours.
Supreme Duelist Stickman APK 2022 has many features that make it one of the best stickman games on the market. Here are some of them:
-
-
Mini game mode: You can play football with your friends on the same device or against the CPU. This is a fun way to test your skills and have some laughs.
-
Boss Fight Tournament: You can challenge yourself against powerful enemies in this mode. You will face different bosses with different abilities and weapons. You will need to use your strategy and reflexes to defeat them.
-
Ragdoll physics: You will love the realistic and hilarious animations of the stickmen in this game. You can see them fly, bounce, fall, and twist in various ways. You can also use the ragdoll button to make them flop around.
-
Customizable characters: You can choose your favorite stickman and weapon from a variety of options. You can also change the color and size of your stickman, and unlock more items with coins. You can create your own unique stickman and show it off to your opponents.
-
Various modes and maps: You can explore different scenarios and gameplay styles in this game. You can play in normal mode, survival mode, or duel mode. You can also choose from different maps, such as desert, forest, city, space, and more. Each map has its own features and challenges.
-
-
How to Download Supreme Duelist Stickman APK 2022
-
Downloading Supreme Duelist Stickman APK 2022 is very easy and fast. Just follow these simple steps:
-
-
Go to the official Google Play Store link or click here.
-
Tap on the Install button and wait for the download to finish.
-
Open the app and enjoy the game.
-
-
Tips and Tricks for Supreme Duelist Stickman APK 2022
-
If you want to master Supreme Duelist Stickman APK 2022, you should know some tips and tricks that will help you improve your performance and have more fun. Here are some of them:
-
-
Use the joystick to move and jump, and the buttons to attack and defend: The controls of this game are very simple and intuitive. You can use the joystick on the left side of the screen to move your stickman around and jump over obstacles. You can use the buttons on the right side of the screen to attack with your weapon or defend yourself from enemy attacks.
-
Try different weapons and find the one that suits your style: There are many weapons to choose from in this game, such as swords, axes, hammers, guns, bows, and more. Each weapon has its own advantages and disadvantages, such as range, speed, damage, and accuracy. You should try different weapons and find the one that matches your style and preference.
-
Use the environment to your advantage, such as traps, spikes, and explosives: The maps in this game are not just backgrounds, they are also part of the gameplay. You can use the environment to your advantage, such as traps, spikes, explosives, and other objects. You can use them to damage or kill your enemies, or to escape from dangerous situations.
-
Watch ads to get free coins and unlock more items: If you want to get more coins and unlock more items in this game, you can watch ads to get free rewards. You can watch ads after each match or from the main menu. You can use the coins to buy new weapons, outfits, colors, and sizes for your stickman.
-
Practice in single-player mode before challenging your friends or online players: If you want to improve your skills and confidence in this game, you should practice in single-player mode before challenging your friends or online players. You can play against the CPU in different difficulty levels, or play in mini game mode or boss fight tournament mode. This will help you get familiar with the game mechanics and controls.
-
-
Conclusion
-
Supreme Duelist Stickman APK 2022 is a fun and crazy stickman game that you should download and play on your Android device. It has many features that make it one of the best stickman games on the market, such as mini game mode, boss fight tournament mode, ragdoll physics, customizable characters, various modes and maps, and more. It is also easy to download and install, as long as you use the official Google Play Store link. Whether you want to play solo or with your friends, Supreme Duelist Stickman APK 2022 will keep you entertained for hours.
-
FAQs
-
Q1: Is Supreme Duelist Stickman APK 2022 free?
-
A1: Yes, it is free to download and play, but it contains ads and in-app purchases.
-
Q2: Is Supreme Duelist Stickman APK 2022 safe?
-
A2: Yes, it is safe to download and install, as long as you use the official Google Play Store link.
-
Q3: Is Supreme Duelist Stickman APK 2022 compatible with my device?
-
A3: It requires Android 4.4 or higher, and at least 40 MB of free storage space.
Q4: How can I contact the developer of Supreme Duelist Stickman APK 2022?
-
A4: You can email them at neurononfire@gmail.com or follow them on Facebook or Instagram.
-
Q5: How can I rate and review Supreme Duelist Stickman APK 2022?
-
A5: You can rate and review it on the Google Play Store page, or share your feedback on social media.
-
-
I hope you enjoyed this article and found it helpful. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have a great day!
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py b/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py
deleted file mode 100644
index aaf4fc4a10e35b85c794793424a1e1f10698838b..0000000000000000000000000000000000000000
--- a/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py
+++ /dev/null
@@ -1,259 +0,0 @@
-import copy
-from abc import ABCMeta, abstractmethod
-from typing import List, Optional
-
-import numpy as np
-
-from .. import full_context_label
-from ..full_context_label import extract_full_context_label
-from ..model import AccentPhrase, AudioQuery, Mora
-from ..mora_list import openjtalk_mora2text
-
-
-def mora_to_text(mora: str) -> str:
- if mora[-1:] in ["A", "I", "U", "E", "O"]:
- # convert devoiced vowels to lowercase
- mora = mora[:-1] + mora[-1].lower()
- if mora in openjtalk_mora2text:
- return openjtalk_mora2text[mora]
- else:
- return mora
-
-
-def adjust_interrogative_accent_phrases(
- accent_phrases: List[AccentPhrase],
-) -> List[AccentPhrase]:
- """
- When enable_interrogative_upspeak is enabled and any of the given accent_phrases are interrogative,
- make them sound like questions by raising the pitch of the question-final Mora at the end of each accent_phrase slightly above the preceding Mora.
- NOTE: move this to an appropriate place when refactoring
- """
- return [
- AccentPhrase(
- moras=adjust_interrogative_moras(accent_phrase),
- accent=accent_phrase.accent,
- pause_mora=accent_phrase.pause_mora,
- is_interrogative=accent_phrase.is_interrogative,
- )
- for accent_phrase in accent_phrases
- ]
-
-
-def adjust_interrogative_moras(accent_phrase: AccentPhrase) -> List[Mora]:
- moras = copy.deepcopy(accent_phrase.moras)
- if accent_phrase.is_interrogative and not (len(moras) == 0 or moras[-1].pitch == 0):
- interrogative_mora = make_interrogative_mora(moras[-1])
- moras.append(interrogative_mora)
- return moras
- else:
- return moras
-
-
-def make_interrogative_mora(last_mora: Mora) -> Mora:
- fix_vowel_length = 0.15
- adjust_pitch = 0.3
- max_pitch = 6.5
- return Mora(
- text=openjtalk_mora2text[last_mora.vowel],
- consonant=None,
- consonant_length=None,
- vowel=last_mora.vowel,
- vowel_length=fix_vowel_length,
- pitch=min(last_mora.pitch + adjust_pitch, max_pitch),
- )
-
-
-def full_context_label_moras_to_moras(
- full_context_moras: List[full_context_label.Mora],
-) -> List[Mora]:
- return [
- Mora(
- text=mora_to_text("".join([p.phoneme for p in mora.phonemes])),
- consonant=(mora.consonant.phoneme if mora.consonant is not None else None),
- consonant_length=0 if mora.consonant is not None else None,
- vowel=mora.vowel.phoneme,
- vowel_length=0,
- pitch=0,
- )
- for mora in full_context_moras
- ]
-
-
-class SynthesisEngineBase(metaclass=ABCMeta):
- # FIXME: return a Model instead of JSON
- @property
- @abstractmethod
- def speakers(self) -> str:
- raise NotImplementedError
-
- @property
- @abstractmethod
- def supported_devices(self) -> Optional[str]:
- raise NotImplementedError
-
- def initialize_speaker_synthesis( # noqa: B027
- self, speaker_id: int, skip_reinit: bool
- ):
-
- """
- Initialize speech synthesis for the specified speaker. Can be called any number of times.
- Does nothing if not implemented.
- Parameters
- ----------
- speaker_id : int
- speaker ID
- skip_reinit : bool
- If True, skip re-initialization of speakers that are already initialized
- """
- pass
-
- def is_initialized_speaker_synthesis(self, speaker_id: int) -> bool:
- """
- Return whether speech synthesis for the specified speaker has been initialized
- Parameters
- ----------
- speaker_id : int
- speaker ID
- Returns
- -------
- bool
- whether it has been initialized
- """
- return True
-
- @abstractmethod
- def replace_phoneme_length(
- self, accent_phrases: List[AccentPhrase], speaker_id: int
- ) -> List[AccentPhrase]:
- """
- Set the vowel and consonant lengths of accent_phrases
- Parameters
- ----------
- accent_phrases : List[AccentPhrase]
- list of accent phrase models
- speaker_id : int
- speaker ID
- Returns
- -------
- accent_phrases : List[AccentPhrase]
- list of accent phrase models with vowel and consonant lengths set
- """
- raise NotImplementedError()
-
- @abstractmethod
- def replace_mora_pitch(
- self, accent_phrases: List[AccentPhrase], speaker_id: int
- ) -> List[AccentPhrase]:
- """
- Set the pitch of accent_phrases
- Parameters
- ----------
- accent_phrases : List[AccentPhrase]
- list of accent phrase models
- speaker_id : int
- speaker ID
- Returns
- -------
- accent_phrases : List[AccentPhrase]
- list of accent phrase models with pitch set
- """
- raise NotImplementedError()
-
- def replace_mora_data(
- self,
- accent_phrases: List[AccentPhrase],
- speaker_id: int,
- ) -> List[AccentPhrase]:
- return self.replace_mora_pitch(
- accent_phrases=self.replace_phoneme_length(
- accent_phrases=accent_phrases,
- speaker_id=speaker_id,
- ),
- speaker_id=speaker_id,
- )
-
- def create_accent_phrases(self, text: str, speaker_id: int) -> List[AccentPhrase]:
- if len(text.strip()) == 0:
- return []
-
- utterance = extract_full_context_label(text)
- if len(utterance.breath_groups) == 0:
- return []
-
- accent_phrases = self.replace_mora_data(
- accent_phrases=[
- AccentPhrase(
- moras=full_context_label_moras_to_moras(accent_phrase.moras),
- accent=accent_phrase.accent,
- pause_mora=(
- Mora(
- text="、",
- consonant=None,
- consonant_length=None,
- vowel="pau",
- vowel_length=0,
- pitch=0,
- )
- if (
- i_accent_phrase == len(breath_group.accent_phrases) - 1
- and i_breath_group != len(utterance.breath_groups) - 1
- )
- else None
- ),
- is_interrogative=accent_phrase.is_interrogative,
- )
- for i_breath_group, breath_group in enumerate(utterance.breath_groups)
- for i_accent_phrase, accent_phrase in enumerate(
- breath_group.accent_phrases
- )
- ],
- speaker_id=speaker_id,
- )
- return accent_phrases
-
- def synthesis(
- self,
- query: AudioQuery,
- speaker_id: int,
- enable_interrogative_upspeak: bool = True,
- ) -> np.ndarray:
- """
- After adjusting the Moras marked as interrogative in the speech synthesis query,
- perform speech synthesis using the subclass implementation `_synthesis_impl`
- Parameters
- ----------
- query : AudioQuery
- speech synthesis query
- speaker_id : int
- speaker ID
- enable_interrogative_upspeak : bool
- whether to enable automatic adjustment of the sentence ending for interrogative text
- Returns
- -------
- wave : numpy.ndarray
- synthesized waveform
- """
- # The same query object may be passed in multiple times (e.g. during morphing), so do not destructively modify the original query argument
- query = copy.deepcopy(query)
- if enable_interrogative_upspeak:
- query.accent_phrases = adjust_interrogative_accent_phrases(
- query.accent_phrases
- )
- return self._synthesis_impl(query, speaker_id)
-
- @abstractmethod
- def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray:
- """
- Build the information needed for synthesis from the audio query and actually perform speech synthesis
- Parameters
- ----------
- query : AudioQuery
- speech synthesis query
- speaker_id : int
- speaker ID
- Returns
- -------
- wave : numpy.ndarray
- synthesized waveform
- """
- raise NotImplementedError()
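The deleted base class above leaves five members for a concrete engine to supply: the speakers and supported_devices properties, the replace_phoneme_length and replace_mora_pitch prosody hooks, and _synthesis_impl; create_accent_phrases and synthesis are built on top of them. Below is a minimal sketch of such a subclass, not VOICEVOX's real engine: MockSynthesisEngine, its constant durations and flat pitch, and the query.outputSamplingRate field access are assumptions made for illustration and may not match the actual models.

```python
# Minimal mock subclass of the deleted SynthesisEngineBase (illustrative sketch only).
from typing import List, Optional

import numpy as np

from voicevox_engine.model import AccentPhrase, AudioQuery
from voicevox_engine.synthesis_engine.synthesis_engine_base import SynthesisEngineBase


class MockSynthesisEngine(SynthesisEngineBase):
    @property
    def speakers(self) -> str:
        return "[]"  # JSON string of available speakers; empty for the mock

    @property
    def supported_devices(self) -> Optional[str]:
        return None  # no device information for the mock

    def replace_phoneme_length(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        # Assign fixed durations instead of predicting them with a model.
        for phrase in accent_phrases:
            for mora in phrase.moras:
                if mora.consonant is not None:
                    mora.consonant_length = 0.05
                mora.vowel_length = 0.10
        return accent_phrases

    def replace_mora_pitch(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        # Assign a flat pitch to voiced moras; pauses keep pitch 0.
        for phrase in accent_phrases:
            for mora in phrase.moras:
                mora.pitch = 0.0 if mora.vowel == "pau" else 5.0
        return accent_phrases

    def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray:
        # Return silence whose length roughly matches the total mora durations.
        total_sec = sum(
            (mora.consonant_length or 0.0) + mora.vowel_length
            for phrase in query.accent_phrases
            for mora in phrase.moras
        )
        sample_rate = query.outputSamplingRate  # assumed AudioQuery field name
        return np.zeros(int(total_sec * sample_rate), dtype=np.float32)
```

A mock like this is mainly useful for exercising create_accent_phrases and the interrogative-upspeak adjustment in synthesis without loading any acoustic model.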
diff --git a/spaces/801artistry/RVC801/lib/infer_pack/models_dml.py b/spaces/801artistry/RVC801/lib/infer_pack/models_dml.py
deleted file mode 100644
index 958d7b29259763d2fea94caf8ba7e314c4a77d05..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/lib/infer_pack/models_dml.py
+++ /dev/null
@@ -1,1124 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from lib.infer_pack import modules
-from lib.infer_pack import attentions
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from lib.infer_pack.commons import init_weights
-import numpy as np
-from lib.infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch == None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch == None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: whether this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv.float()
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1 ### taking % 1 here means the n_har harmonic products can no longer be optimized in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1) # % 1 ##### applying % 1 here would make the later cumsum impossible to optimize
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length, 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
- ):  # ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1] -- the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
- ):  # ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1] -- the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1] -- the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1] -- the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if not use_spectral_norm else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if not use_spectral_norm else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/__init__.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py
deleted file mode 100644
index 1aef6493c90c7cf5206ff92f7fe8831a0821664f..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-class Discriminator2DFactory(nn.Module):
- def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128,
- norm_type='bn', reduction='sum'):
- super(Discriminator2DFactory, self).__init__()
- padding = (kernel[0] // 2, kernel[1] // 2)
-
- def discriminator_block(in_filters, out_filters, first=False):
- """
- Input: (B, in, 2H, 2W)
- Output:(B, out, H, W)
- """
- conv = nn.Conv2d(in_filters, out_filters, kernel, (2, 2), padding)
- if norm_type == 'sn':
- conv = nn.utils.spectral_norm(conv)
- block = [
- conv, # padding = kernel//2
- nn.LeakyReLU(0.2, inplace=True),
- nn.Dropout2d(0.25)
- ]
- if norm_type == 'bn' and not first:
- block.append(nn.BatchNorm2d(out_filters, 0.8))
- if norm_type == 'in' and not first:
- block.append(nn.InstanceNorm2d(out_filters, affine=True))
- block = nn.Sequential(*block)
- return block
-
- self.model = nn.ModuleList([
- discriminator_block(c_in, hidden_size, first=True),
- discriminator_block(hidden_size, hidden_size),
- discriminator_block(hidden_size, hidden_size),
- ])
-
- self.reduction = reduction
- ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3)
- if reduction != 'none':
- # The height and width of downsampled image
- self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1)
- else:
- self.adv_layer = nn.Linear(hidden_size * ds_size[1], 1)
-
- def forward(self, x):
- """
-
- :param x: [B, C, T, n_bins]
- :return: validity: [B, 1], h: List of hiddens
- """
- h = []
- for l in self.model:
- x = l(x)
- h.append(x)
- if self.reduction != 'none':
- x = x.view(x.shape[0], -1)
- validity = self.adv_layer(x) # [B, 1]
- else:
- B, _, T_, _ = x.shape
- x = x.transpose(1, 2).reshape(B, T_, -1)
- validity = self.adv_layer(x)[:, :, 0] # [B, T]
- return validity, h
-
-
-class MultiWindowDiscriminator(nn.Module):
- def __init__(self, time_lengths, cond_size=0, freq_length=80, kernel=(3, 3),
- c_in=1, hidden_size=128, norm_type='bn', reduction='sum'):
- super(MultiWindowDiscriminator, self).__init__()
- self.win_lengths = time_lengths
- self.reduction = reduction
-
- self.conv_layers = nn.ModuleList()
- if cond_size > 0:
- self.cond_proj_layers = nn.ModuleList()
- self.mel_proj_layers = nn.ModuleList()
- for time_length in time_lengths:
- conv_layer = [
- Discriminator2DFactory(
- time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size,
- norm_type=norm_type, reduction=reduction)
- ]
- self.conv_layers += conv_layer
- if cond_size > 0:
- self.cond_proj_layers.append(nn.Linear(cond_size, freq_length))
- self.mel_proj_layers.append(nn.Linear(freq_length, freq_length))
-
- def forward(self, x, x_len, cond=None, start_frames_wins=None):
- '''
- Args:
- x (tensor): input mel, (B, c_in, T, n_bins).
- x_len (tensor): length of each mel, (B,).
-
- Returns:
- tensor : (B).
- '''
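- # each configured window length gets its own random clip of the input and its own
- # Discriminator2DFactory; the per-window scores are then combined according to
- # `reduction` ('sum', 'stack' or 'none')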
- validity = []
- if start_frames_wins is None:
- start_frames_wins = [None] * len(self.conv_layers)
- h = []
- for i, start_frames in zip(range(len(self.conv_layers)), start_frames_wins):
- x_clip, c_clip, start_frames = self.clip(
- x, cond, x_len, self.win_lengths[i], start_frames) # (B, win_length, C)
- start_frames_wins[i] = start_frames
- if x_clip is None:
- continue
- if cond is not None:
- x_clip = self.mel_proj_layers[i](x_clip) # (B, 1, win_length, C)
- c_clip = self.cond_proj_layers[i](c_clip)[:, None] # (B, 1, win_length, C)
- x_clip = x_clip + c_clip
- x_clip, h_ = self.conv_layers[i](x_clip)
- h += h_
- validity.append(x_clip)
- if len(validity) != len(self.conv_layers):
- return None, start_frames_wins, h
- if self.reduction == 'sum':
- validity = sum(validity) # [B]
- elif self.reduction == 'stack':
- validity = torch.stack(validity, -1) # [B, W_L]
- elif self.reduction == 'none':
- validity = torch.cat(validity, -1) # [B, W_sum]
- return validity, start_frames_wins, h
-
- def clip(self, x, cond, x_len, win_length, start_frames=None):
- '''Randomly clip x to win_length.
- Args:
- x (tensor) : (B, c_in, T, n_bins).
- cond (tensor) : (B, T, H).
- x_len (tensor) : (B,).
- win_length (int): target clip length
-
- Returns:
- (tensor) : (B, c_in, win_length, n_bins).
-
- '''
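- # the randomly drawn start frame is shared across the whole batch and handed back
- # through `start_frames` so that a later call can clip the same window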
- T_start = 0
- T_end = x_len.max() - win_length
- if T_end < 0:
- return None, None, start_frames
- T_end = T_end.item()
- if start_frames is None:
- start_frame = np.random.randint(low=T_start, high=T_end + 1)
- start_frames = [start_frame] * x.size(0)
- else:
- start_frame = start_frames[0]
- x_batch = x[:, :, start_frame: start_frame + win_length]
- c_batch = cond[:, start_frame: start_frame + win_length] if cond is not None else None
- return x_batch, c_batch, start_frames
-
-
-class Discriminator(nn.Module):
- def __init__(self, time_lengths=[32, 64, 128], freq_length=80, cond_size=0, kernel=(3, 3), c_in=1,
- hidden_size=128, norm_type='bn', reduction='sum', uncond_disc=True):
- super(Discriminator, self).__init__()
- self.time_lengths = time_lengths
- self.cond_size = cond_size
- self.reduction = reduction
- self.uncond_disc = uncond_disc
- if uncond_disc:
- self.discriminator = MultiWindowDiscriminator(
- freq_length=freq_length,
- time_lengths=time_lengths,
- kernel=kernel,
- c_in=c_in, hidden_size=hidden_size, norm_type=norm_type,
- reduction=reduction
- )
- if cond_size > 0:
- self.cond_disc = MultiWindowDiscriminator(
- freq_length=freq_length,
- time_lengths=time_lengths,
- cond_size=cond_size,
- kernel=kernel,
- c_in=c_in, hidden_size=hidden_size, norm_type=norm_type,
- reduction=reduction
- )
-
- def forward(self, x, cond=None, start_frames_wins=None):
- """
-
- :param x: [B, T, 80]
- :param cond: [B, T, cond_size]
- :param return_y_only:
- :return:
- """
- if len(x.shape) == 3:
- x = x[:, None, :, :]
- x_len = x.sum([1, -1]).ne(0).int().sum([-1])
- ret = {'y_c': None, 'y': None}
- if self.uncond_disc:
- ret['y'], start_frames_wins, ret['h'] = self.discriminator(
- x, x_len, start_frames_wins=start_frames_wins)
- if self.cond_size > 0 and cond is not None:
- ret['y_c'], start_frames_wins, ret['h_c'] = self.cond_disc(
- x, x_len, cond, start_frames_wins=start_frames_wins)
- ret['start_frames_wins'] = start_frames_wins
- return ret
\ No newline at end of file
diff --git a/spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py b/spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py
deleted file mode 100644
index 62e7b60344f5957e86a9c0de3d77985f68b52224..0000000000000000000000000000000000000000
--- a/spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import time
-import gradio as gr
-import os
-
-def load_mesh(mesh_file_name):
- return mesh_file_name, mesh_file_name
-
-demo = gr.Interface(
- fn=load_mesh,
- inputs=gr.Model3D(),
- outputs=[
- gr.Model3D(
- clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
- gr.File(label="Download 3D Model")
- ],
- examples=[
- [os.path.join(os.path.dirname(__file__), "files/Duck.glb")],
- [os.path.join(os.path.dirname(__file__), "files/rubber_duck.glb")],
- [os.path.join(os.path.dirname(__file__), "files/GroundVehicle.glb")]
- ],
-)
-
-if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
diff --git a/spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md b/spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md
deleted file mode 100644
index bfb017eaa27c3c16b5d4d4f0aa0d3ed2d3c1ec17..0000000000000000000000000000000000000000
--- a/spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Abdulkader T5 MedRepAnalyzer
-emoji: ⚡
-colorFrom: yellow
-colorTo: purple
-sdk: gradio
-sdk_version: 3.13.0
-app_file: app.py
-pinned: false
-license: cc-by-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css b/spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css
deleted file mode 100644
index 3c2010d25065fbef63b104df743ef72c00259871..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css
+++ /dev/null
@@ -1,38 +0,0 @@
-.stop-generating {
- position: absolute;
- bottom: 128px;
- left: 50%;
- transform: translateX(-50%);
- z-index: 1000000;
-}
-
-.stop-generating button {
- backdrop-filter: blur(20px);
- -webkit-backdrop-filter: blur(20px);
- background-color: var(--blur-bg);
- color: var(--colour-3);
- cursor: pointer;
- animation: show_popup 0.4s;
-}
-
-@keyframes show_popup {
- from {
- opacity: 0;
- transform: translateY(10px);
- }
-}
-
-@keyframes hide_popup {
- to {
- opacity: 0;
- transform: translateY(10px);
- }
-}
-
-.stop-generating-hiding button {
- animation: hide_popup 0.4s;
-}
-
-.stop-generating-hidden button {
- display: none;
-}
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js
deleted file mode 100644
index 7846c01e338c06be4538e532771e6f8a9fbb0fdb..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import LZString from './string/lzstring/LZString.js';
-export default LZString;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js
deleted file mode 100644
index c244780d1854d087100eb69ce3aa6dcffb119089..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import DynamicText from '../../../plugins/dynamictext.js';
-export default DynamicText;
\ No newline at end of file
diff --git a/spaces/Ajit025/Text_to_Image_conversion/text_to_image.py b/spaces/Ajit025/Text_to_Image_conversion/text_to_image.py
deleted file mode 100644
index 3e758a1a6bfd6f0a178e20fea0e8bfac04fc1f3f..0000000000000000000000000000000000000000
--- a/spaces/Ajit025/Text_to_Image_conversion/text_to_image.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from transformers.tools.base import Tool, get_default_device
-from transformers.utils import is_accelerate_available
-import torch
-
-from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
-
-
-TEXT_TO_IMAGE_DESCRIPTION = (
- "This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which "
- "contains the image description and outputs an image."
-)
-
-
-class TextToImageTool(Tool):
- default_checkpoint = "runwayml/stable-diffusion-v1-5"
- description = TEXT_TO_IMAGE_DESCRIPTION
- inputs = ['text']
- outputs = ['image']
-
- def __init__(self, device=None, **hub_kwargs) -> None:
- if not is_accelerate_available():
- raise ImportError("Accelerate should be installed in order to use tools.")
-
- super().__init__()
-
- self.device = device
- self.pipeline = None
- self.hub_kwargs = hub_kwargs
-
- def setup(self):
- if self.device is None:
- self.device = get_default_device()
-
- self.pipeline = DiffusionPipeline.from_pretrained(self.default_checkpoint)
- self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config)
- self.pipeline.to(self.device)
-
- if self.device.type == "cuda":
- self.pipeline.to(torch_dtype=torch.float16)
-
- self.is_initialized = True
-
- def __call__(self, prompt):
- if not self.is_initialized:
- self.setup()
-
- negative_prompt = "low quality, bad quality, deformed, low resolution"
- added_prompt = " , highest quality, highly realistic, very high resolution"
-
- return self.pipeline(prompt + added_prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
-
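-# Illustrative usage sketch (an assumption for illustration only; it presumes diffusers
-# and accelerate are installed and the default checkpoint above can be downloaded):
-#   tool = TextToImageTool()
-#   image = tool("a watercolor painting of a lighthouse at dusk")
-#   image.save("lighthouse.png")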
diff --git a/spaces/AkitoP/umamusume_bert_vits2/data_utils.py b/spaces/AkitoP/umamusume_bert_vits2/data_utils.py
deleted file mode 100644
index d8e6b9e30b90839644e8a2c33c5166288b720d02..0000000000000000000000000000000000000000
--- a/spaces/AkitoP/umamusume_bert_vits2/data_utils.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import os
-import random
-import torch
-import torch.utils.data
-from tqdm import tqdm
-from loguru import logger
-import commons
-from mel_processing import spectrogram_torch, mel_spectrogram_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import cleaned_text_to_sequence, get_bert
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, speaker_id, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_sid_text, hparams):
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
- self.spk_map = hparams.spk2id
- self.hparams = hparams
-
- self.use_mel_spec_posterior = getattr(
- hparams, "use_mel_posterior_encoder", False
- )
- if self.use_mel_spec_posterior:
- self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 300)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_sid_text)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
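- # e.g. a 16-bit mono wav of 1,280,000 bytes with hop_length=320 gives
- # 1,280,000 // (2 * 320) = 2,000 frames (the wav header is ignored by this estimate)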
-
- audiopaths_sid_text_new = []
- lengths = []
- skipped = 0
- logger.info("Init dataset...")
- for _id, spk, language, text, phones, tone, word2ph in tqdm(
- self.audiopaths_sid_text
- ):
- audiopath = f"{_id}"
- if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
- phones = phones.split(" ")
- tone = [int(i) for i in tone.split(" ")]
- word2ph = [int(i) for i in word2ph.split(" ")]
- audiopaths_sid_text_new.append(
- [audiopath, spk, language, text, phones, tone, word2ph]
- )
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- else:
- skipped += 1
- logger.info(
- "skipped: "
- + str(skipped)
- + ", total: "
- + str(len(self.audiopaths_sid_text))
- )
- self.audiopaths_sid_text = audiopaths_sid_text_new
- self.lengths = lengths
-
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
- # separate filename, speaker_id and text
- audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
-
- bert, ja_bert, phones, tone, language = self.get_text(
- text, word2ph, phones, tone, language, audiopath
- )
-
- spec, wav = self.get_audio(audiopath)
- sid = torch.LongTensor([int(self.spk_map[sid])])
- return (phones, spec, wav, sid, tone, language, bert, ja_bert)
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError(
- "{} {} SR doesn't match target {} SR".format(
- filename, sampling_rate, self.sampling_rate
- )
- )
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if self.use_mel_spec_posterior:
- spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
- try:
- spec = torch.load(spec_filename)
- except Exception:
- if self.use_mel_spec_posterior:
- spec = mel_spectrogram_torch(
- audio_norm,
- self.filter_length,
- self.n_mel_channels,
- self.sampling_rate,
- self.hop_length,
- self.win_length,
- self.hparams.mel_fmin,
- self.hparams.mel_fmax,
- center=False,
- )
- else:
- spec = spectrogram_torch(
- audio_norm,
- self.filter_length,
- self.sampling_rate,
- self.hop_length,
- self.win_length,
- center=False,
- )
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
- if self.add_blank:
- phone = commons.intersperse(phone, 0)
- tone = commons.intersperse(tone, 0)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
- bert_path = wav_path.replace(".wav", ".bert.pt")
- try:
- bert = torch.load(bert_path)
- assert bert.shape[-1] == len(phone)
- except Exception:
- bert = get_bert(text, word2ph, language_str)
- torch.save(bert, bert_path)
- assert bert.shape[-1] == len(phone), phone
-
- if language_str == "ZH":
- bert = bert
- ja_bert = torch.zeros(768, len(phone))
- elif language_str == "JP":
- ja_bert = bert
- bert = torch.zeros(1024, len(phone))
- else:
- bert = torch.zeros(1024, len(phone))
- ja_bert = torch.zeros(768, len(phone))
- assert bert.shape[-1] == len(phone), (
- bert.shape,
- len(phone),
- sum(word2ph),
- word2ph,
- text,
- )
- phone = torch.LongTensor(phone)
- tone = torch.LongTensor(tone)
- language = torch.LongTensor(language)
- return bert, ja_bert, phone, tone, language
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def __getitem__(self, index):
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
- def __len__(self):
- return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate:
- """Zero-pads model inputs and targets"""
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text, audio and speaker identities
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
- )
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
- sid = torch.LongTensor(len(batch))
-
- text_padded = torch.LongTensor(len(batch), max_text_len)
- tone_padded = torch.LongTensor(len(batch), max_text_len)
- language_padded = torch.LongTensor(len(batch), max_text_len)
- bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
- ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)
-
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- tone_padded.zero_()
- language_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- bert_padded.zero_()
- ja_bert_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, : text.size(0)] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, : spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, : wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- sid[i] = row[3]
-
- tone = row[4]
- tone_padded[i, : tone.size(0)] = tone
-
- language = row[5]
- language_padded[i, : language.size(0)] = language
-
- bert = row[6]
- bert_padded[i, :, : bert.size(1)] = bert
-
- ja_bert = row[7]
- ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert
-
- return (
- text_padded,
- text_lengths,
- spec_padded,
- spec_lengths,
- wav_padded,
- wav_lengths,
- sid,
- tone_padded,
- language_padded,
- bert_padded,
- ja_bert_padded,
- )
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
- """
- Maintain similar input lengths in a batch.
- Length groups are specified by boundaries.
- Ex) boundaries = [b1, b2, b3] -> any sample is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
- It removes samples which are not included in the boundaries.
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
- """
-
- def __init__(
- self,
- dataset,
- batch_size,
- boundaries,
- num_replicas=None,
- rank=None,
- shuffle=True,
- ):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- self.lengths = dataset.lengths
- self.batch_size = batch_size
- self.boundaries = boundaries
-
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
- self.total_size = sum(self.num_samples_per_bucket)
- self.num_samples = self.total_size // self.num_replicas
-
- def _create_buckets(self):
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
- for i in range(len(self.lengths)):
- length = self.lengths[i]
- idx_bucket = self._bisect(length)
- if idx_bucket != -1:
- buckets[idx_bucket].append(i)
-
- try:
- for i in range(len(buckets) - 1, 0, -1):
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i + 1)
- assert all(len(bucket) > 0 for bucket in buckets)
- # When one bucket is not traversed
- except Exception as e:
- print("Bucket warning ", e)
- for i in range(len(buckets) - 1, -1, -1):
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i + 1)
-
- num_samples_per_bucket = []
- for i in range(len(buckets)):
- len_bucket = len(buckets[i])
- total_batch_size = self.num_replicas * self.batch_size
- rem = (
- total_batch_size - (len_bucket % total_batch_size)
- ) % total_batch_size
- num_samples_per_bucket.append(len_bucket + rem)
- return buckets, num_samples_per_bucket
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
-
- indices = []
- if self.shuffle:
- for bucket in self.buckets:
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
- else:
- for bucket in self.buckets:
- indices.append(list(range(len(bucket))))
-
- batches = []
- for i in range(len(self.buckets)):
- bucket = self.buckets[i]
- len_bucket = len(bucket)
- if len_bucket == 0:
- continue
- ids_bucket = indices[i]
- num_samples_bucket = self.num_samples_per_bucket[i]
-
- # add extra samples to make it evenly divisible
- rem = num_samples_bucket - len_bucket
- ids_bucket = (
- ids_bucket
- + ids_bucket * (rem // len_bucket)
- + ids_bucket[: (rem % len_bucket)]
- )
-
- # subsample
- ids_bucket = ids_bucket[self.rank :: self.num_replicas]
-
- # batching
- for j in range(len(ids_bucket) // self.batch_size):
- batch = [
- bucket[idx]
- for idx in ids_bucket[
- j * self.batch_size : (j + 1) * self.batch_size
- ]
- ]
- batches.append(batch)
-
- if self.shuffle:
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
- batches = [batches[i] for i in batch_ids]
- self.batches = batches
-
- assert len(self.batches) * self.batch_size == self.num_samples
- return iter(self.batches)
-
- def _bisect(self, x, lo=0, hi=None):
- if hi is None:
- hi = len(self.boundaries) - 1
-
- if hi > lo:
- mid = (hi + lo) // 2
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
- return mid
- elif x <= self.boundaries[mid]:
- return self._bisect(x, lo, mid)
- else:
- return self._bisect(x, mid + 1, hi)
- else:
- return -1
-
- def __len__(self):
- return self.num_samples // self.batch_size
diff --git a/spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md b/spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md
deleted file mode 100644
index 716dc7f58d2a5a31d2da2879aa599ebf438b82cd..0000000000000000000000000000000000000000
--- a/spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Implicit And Explicit Aspects Extraction In Restaurant Reviews Domain
-emoji: 🌖
-colorFrom: gray
-colorTo: red
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" "b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py"
deleted file mode 100644
index 94ef256327f6740cdaddc6f5ecea5852a9210163..0000000000000000000000000000000000000000
--- "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py"
+++ /dev/null
@@ -1,106 +0,0 @@
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from toolbox import CatchException, report_execption, write_results_to_file
-from toolbox import update_ui
-
-def get_meta_information(url, chatbot, history):
- import requests
- import arxiv
- import difflib
- from bs4 import BeautifulSoup
- from toolbox import get_conf
- proxies, = get_conf('proxies')
- headers = {
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
- }
- # send the GET request
- response = requests.get(url, proxies=proxies, headers=headers)
-
- # parse the returned page
- soup = BeautifulSoup(response.text, "html.parser")
-
- def string_similar(s1, s2):
- return difflib.SequenceMatcher(None, s1, s2).quick_ratio()
-
- profile = []
- # collect the title and author of every article on the page
- for result in soup.select(".gs_ri"):
- title = result.a.text.replace('\n', ' ').replace(' ', ' ')
- author = result.select_one(".gs_a").text
- try:
- citation = result.select_one(".gs_fl > a[href*='cites']").text # 引用次数是链接中的文本,直接取出来
- except:
- citation = 'cited by 0'
- abstract = result.select_one(".gs_rs").text.strip() # 摘要在 .gs_rs 中的文本,需要清除首尾空格
- search = arxiv.Search(
- query = title,
- max_results = 1,
- sort_by = arxiv.SortCriterion.Relevance,
- )
- paper = next(search.results())
- if string_similar(title, paper.title) > 0.90: # same paper
- abstract = paper.summary.replace('\n', ' ')
- is_paper_in_arxiv = True
- else:  # different paper: keep the abstract scraped from Google Scholar
- is_paper_in_arxiv = False
- print(title)
- print(author)
- print(citation)
- profile.append({
- 'title':title,
- 'author':author,
- 'citation':citation,
- 'abstract':abstract,
- 'is_paper_in_arxiv':is_paper_in_arxiv,
- })
-
- chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract]
- yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
- return profile
-
-@CatchException
-def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # basic info: what the plugin does and who contributed it
- chatbot.append([
- "函数插件功能?",
- "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- # try to import the extra dependencies; if they are missing, suggest how to install them
- try:
- import arxiv
- from bs4 import BeautifulSoup
- except:
- report_execption(chatbot, history,
- a = f"解析项目: {txt}",
- b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-
- # clear the history so the accumulated input does not overflow the context window
- history = []
-
- meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
-
- if len(meta_paper_info_list[:10]) > 0:
- i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \
- "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \
- f"以下是信息源:{str(meta_paper_info_list[:10])}"
-
- inputs_show_user = f"请分析此页面中出现的所有文章:{txt}"
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=i_say, inputs_show_user=inputs_show_user,
- llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
- sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。"
- )
-
- history.extend([ "第一批", gpt_say ])
- meta_paper_info_list = meta_paper_info_list[10:]
-
- chatbot.append(["状态?", "已经全部完成"])
- msg = '正常'
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
- res = write_results_to_file(history)
- chatbot.append(("完成了吗?", res));
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
diff --git a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex b/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex
deleted file mode 100644
index 1baa8915f4cf7aec2520894a87470fc9436d954b..0000000000000000000000000000000000000000
--- a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex
+++ /dev/null
@@ -1,18 +0,0 @@
-Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
-
-Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
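-% Schematically, $h_t = f(h_{t-1}, x_t)$: step $t$ cannot begin before step $t-1$ has finished, which is exactly the sequential bottleneck described above.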
-%\marginpar{not sure if the memory constraints are understandable here}
-Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.
-
-%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
-
-Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
-
-%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
-%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
-
-In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
-%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
-
-% Just a standard paragraph with citations, rewrite.
-%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. A recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$ for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_{t-1}$. This dependence on the previous hidden state prevents recurrent models from processing multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously, and such models haven't been used on datasets that are of the scale of the web. What's the largest dataset we have? Talk about Nvidia's and possibly others' efforts to speed up things, and possibly other efforts that alleviate this, but are still limited by its computational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet, facenet (Also talk about quasi-RNN here). Now we talk about attention!! Along with cell architectures such as long short-term memory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self-attention! Then talk about what we do.
\ No newline at end of file
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py
deleted file mode 100644
index a8b7d55e6d190c193e427bd8d623c583b2dcdeda..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
-# This work is made available under the Nvidia Source Code License-NC.
-# To view a copy of this license, visit
-# https://nvlabs.github.io/stylegan2/license.html
-
-
-## This script generates images from pre-trained networks based on StyleGAN1 (TensorFlow) or StyleGAN2-ada (PyTorch). ##
-
-import os
-import click
-import dnnlib
-import numpy as np
-import PIL.Image
-import legacy
-from typing import List, Optional
-
-"""
-Generate images using pretrained network pickle.
-Examples:
-
-\b
-# Generate human full-body images without truncation
-python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 \\
- --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
-
-\b
-# Generate human full-body images with truncation
-python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-100\\
- --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
-
-# \b
-# Generate human full-body images using stylegan V1
-# python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 \\
-# --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1
-"""
-
-
-@click.command()
-@click.pass_context
-@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
-@click.option('--seeds', type=legacy.num_range, help='List of random seeds')
-@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
-@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
-@click.option('--outdir', help='Where to save the output images', default='outputs/generate/', type=str, required=True, metavar='DIR')
-@click.option('--version', help="stylegan version, 1, 2 or 3", type=int, default=2)
-def generate_images(
- ctx: click.Context,
- network_pkl: str,
- seeds: Optional[List[int]],
- truncation_psi: float,
- noise_mode: str,
- outdir: str,
- version: int
-):
-
- print('Loading networks from "%s"...' % network_pkl)
- if version == 1:
- import dnnlib.tflib as tflib
- tflib.init_tf()
- G, D, Gs = legacy.load_pkl(network_pkl)
-
- else:
- import torch
- device = torch.device('cuda')
- with dnnlib.util.open_url(network_pkl) as f:
- G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
- os.makedirs(outdir, exist_ok=True)
-
- if seeds is None:
- ctx.fail('--seeds option is required.')
-
- # Generate images.
- target_z = np.array([])
- target_w = np.array([])
- latent_out = outdir.replace('/images/', '')
- for seed_idx, seed in enumerate(seeds):
- if seed % 5000 == 0:
- print('Generating image for seed %d (%d/%d) ...' %
- (seed, seed_idx, len(seeds)))
-
- if version == 1: # stylegan v1
- z = np.random.RandomState(seed).randn(1, Gs.input_shape[1])
- # Generate image.
- fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
- if noise_mode == 'const':
- randomize_noise = False
- else:
- randomize_noise = True
- images = Gs.run(z, None, truncation_psi=truncation_psi,
- randomize_noise=randomize_noise, output_transform=fmt)
- PIL.Image.fromarray(images[0], 'RGB').save(
- f'{outdir}/seed{seed:04d}.png')
-
- else: # stylegan v2/v3
- label = torch.zeros([1, G.c_dim], device=device)
- z = torch.from_numpy(np.random.RandomState(
- seed).randn(1, G.z_dim)).to(device)
- if target_z.size == 0:
- target_z = z.cpu()
- else:
- target_z = np.append(target_z, z.cpu(), axis=0)
-
- w = G.mapping(z, label, truncation_psi=truncation_psi)
- img = G.synthesis(w, noise_mode=noise_mode, force_fp32=True)
- if target_w.size == 0:
- target_w = w.cpu()
- else:
- target_w = np.append(target_w, w.cpu(), axis=0)
-
- img = (img.permute(0, 2, 3, 1) * 127.5 +
- 128).clamp(0, 255).to(torch.uint8)
- PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(
- f'{outdir}/seed{seed:04d}.png')
- # print(target_z)
- # print(target_z.shape,target_w.shape)
-
-
-# ----------------------------------------------------------------------------
-
-if __name__ == "__main__":
- generate_images()
-
-# ----------------------------------------------------------------------------
diff --git a/spaces/Andres99/Tune-A-Video-Training-UI/uploader.py b/spaces/Andres99/Tune-A-Video-Training-UI/uploader.py
deleted file mode 100644
index d9e06ec02127db34016d3d7b550e88f820a737fe..0000000000000000000000000000000000000000
--- a/spaces/Andres99/Tune-A-Video-Training-UI/uploader.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import annotations
-
-from huggingface_hub import HfApi
-
-
-class Uploader:
- def __init__(self, hf_token: str | None):
- self.hf_token = hf_token
-
- def upload(self,
- folder_path: str,
- repo_name: str,
- organization: str = '',
- repo_type: str = 'model',
- private: bool = True,
- delete_existing_repo: bool = False,
- input_token: str | None = None) -> str:
-
- api = HfApi(token=self.hf_token if self.hf_token else input_token)
-
- if not folder_path:
- raise ValueError('folder_path must not be empty.')
- if not repo_name:
- raise ValueError('repo_name must not be empty.')
- if not organization:
- organization = api.whoami()['name']
-
- repo_id = f'{organization}/{repo_name}'
- if delete_existing_repo:
- try:
- api.delete_repo(repo_id, repo_type=repo_type)
- except Exception:
- pass
- try:
- api.create_repo(repo_id, repo_type=repo_type, private=private)
- api.upload_folder(repo_id=repo_id,
- folder_path=folder_path,
- path_in_repo='.',
- repo_type=repo_type)
- url = f'https://huggingface.co/{repo_id}'
- message = f'Your model was successfully uploaded to {url}.'
- except Exception as e:
- message = str(e)
- return message
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md
deleted file mode 100644
index 8486641da2c40ac9f68e1a4e50b5adbb0e96c4fa..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md
+++ /dev/null
@@ -1,427 +0,0 @@
-
-
-# Stable diffusion XL
-
-Stable Diffusion XL was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://arxiv.org/abs/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.
-
-The abstract of the paper is the following:
-
-*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: The increase of model parameters is mainly due to more attention blocks and a larger cross-attention context as SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model which is used to improve the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. We demonstrate that SDXL shows drastically improved performance compared to the previous versions of Stable Diffusion and achieves results competitive with those of black-box state-of-the-art image generators.*
-
-## Tips
-
-- Stable Diffusion XL works especially well with image sizes between 768 and 1024 pixels.
-- Stable Diffusion XL can pass a different prompt for each of the text encoders it was trained on as shown below. We can even pass different parts of the same prompt to the text encoders.
-- Stable Diffusion XL output image can be improved by making use of a refiner as shown below.
-
-### Available checkpoints:
-
-- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with [`StableDiffusionXLPipeline`]
-- *Image-to-Image / Refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) with [`StableDiffusionXLImg2ImgPipeline`]
-
-## Usage Example
-
-Before using SDXL, make sure to have `transformers`, `accelerate`, `safetensors` and `invisible_watermark` installed
-(the `invisible_watermark` installation is covered in the next section). You can install the other libraries as follows:
-
-```
-pip install transformers
-pip install accelerate
-pip install safetensors
-```
-
-### Watermarker
-
-We recommend adding an invisible watermark to images generated by Stable Diffusion XL, as this can help downstream applications identify whether an image is machine-synthesised. To do so, please install
-the [invisible-watermark library](https://pypi.org/project/invisible-watermark/) via:
-
-```
-pip install invisible-watermark>=0.2.0
-```
-
-If the `invisible-watermark` library is installed, the watermarker will be used **by default**.
-
-If you have other provisions for generating or deploying images safely, you can disable the watermarker as follows:
-
-```py
-pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False)
-```
-
-### Text-to-Image
-
-You can use SDXL as follows for *text-to-image*:
-
-```py
-from diffusers import StableDiffusionXLPipeline
-import torch
-
-pipe = StableDiffusionXLPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe.to("cuda")
-
-prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
-image = pipe(prompt=prompt).images[0]
-```
-
-### Image-to-image
-
-You can use SDXL as follows for *image-to-image*:
-
-```py
-import torch
-from diffusers import StableDiffusionXLImg2ImgPipeline
-from diffusers.utils import load_image
-
-pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe = pipe.to("cuda")
-url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
-
-init_image = load_image(url).convert("RGB")
-prompt = "a photo of an astronaut riding a horse on mars"
-image = pipe(prompt, image=init_image).images[0]
-```
-
-### Inpainting
-
-You can use SDXL as follows for *inpainting*
-
-```py
-import torch
-from diffusers import StableDiffusionXLInpaintPipeline
-from diffusers.utils import load_image
-
-pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe.to("cuda")
-
-img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
-mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
-
-init_image = load_image(img_url).convert("RGB")
-mask_image = load_image(mask_url).convert("RGB")
-
-prompt = "A majestic tiger sitting on a bench"
-image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0]
-```
-
-### Refining the image output
-
-In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0),
-Stable Diffusion XL also includes a [refiner checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0)
-that is specialized in denoising low-noise stage images to generate images of improved high-frequency quality.
-This refiner checkpoint can be used as a "second-step" pipeline after having run the base checkpoint to improve
-image quality.
-
-When using the refiner, one can easily
-- 1.) employ the base model and refiner as an *Ensemble of Expert Denoisers* as first proposed in [eDiff-I](https://research.nvidia.com/labs/dir/eDiff-I/) or
-- 2.) simply run the refiner in [SDEdit](https://arxiv.org/abs/2108.01073) fashion after the base model.
-
-**Note**: The idea of using the SD-XL base & refiner as an ensemble of experts was first brought forward by
-a couple of community contributors who also helped shape the following `diffusers` implementation, namely:
-- [SytanSD](https://github.com/SytanSD)
-- [bghira](https://github.com/bghira)
-- [Birch-san](https://github.com/Birch-san)
-- [AmericanPresidentJimmyCarter](https://github.com/AmericanPresidentJimmyCarter)
-
-#### 1.) Ensemble of Expert Denoisers
-
-When using the base and refiner models as an ensemble of expert denoisers, the base model serves as the
-expert for the high-noise diffusion stage and the refiner serves as the expert for the low-noise diffusion stage.
-
-The advantage of 1.) over 2.) is that it requires fewer overall denoising steps and therefore should be significantly
-faster. The drawback is that one cannot really inspect the output of the base model; it still contains a large amount of noise.
-
-To use the base model and refiner as an ensemble of expert denoisers, make sure to define the span
-of timesteps which should be run through the high-noise denoising stage (*i.e.* the base model) and the low-noise
-denoising stage (*i.e.* the refiner model) respectively. We can set the intervals using the [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end) of the base model
-and [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start) of the refiner model.
-
-For both `denoising_end` and `denoising_start` a float value between 0 and 1 should be passed.
-When passed, the end and start of denoising will be defined by proportions of discrete timesteps as
-defined by the model schedule.
-Note that this will override `strength` if it is also declared, since the number of denoising steps
-is determined by the discrete timesteps the model was trained on and the declared fractional cutoff.
-
-Let's look at an example.
-First, we import the two pipelines. Since the text encoders and variational autoencoder are the same
-you don't have to load those again for the refiner.
-
-```py
-from diffusers import DiffusionPipeline
-import torch
-
-base = DiffusionPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-base.to("cuda")
-
-refiner = DiffusionPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-refiner-1.0",
- text_encoder_2=base.text_encoder_2,
- vae=base.vae,
- torch_dtype=torch.float16,
- use_safetensors=True,
- variant="fp16",
-)
-refiner.to("cuda")
-```
-
-Now we define the total number of inference steps and the fraction of steps that should be run through the
-high-noise denoising stage (*i.e.* the base model).
-
-```py
-n_steps = 40
-high_noise_frac = 0.8
-```
-
-Stable Diffusion XL base is trained on timesteps 0-999 and Stable Diffusion XL refiner is finetuned
-from the base model on low noise timesteps 0-199 inclusive, so we use the base model for the first
-800 timesteps (high noise) and the refiner for the last 200 timesteps (low noise). Hence, `high_noise_frac`
-is set to 0.8, so that all steps 200-999 (the first 80% of denoising timesteps) are performed by the
-base model and steps 0-199 (the last 20% of denoising timesteps) are performed by the refiner model.
-
-Remember, the denoising process starts at **high value** (high noise) timesteps and ends at
-**low value** (low noise) timesteps.
-
-Let's run the two pipelines now. Make sure to set `denoising_end` and
-`denoising_start` to the same values and keep `num_inference_steps` constant. Also remember that
-the output of the base model should be in latent space:
-
-```py
-prompt = "A majestic lion jumping from a big stone at night"
-
-image = base(
- prompt=prompt,
- num_inference_steps=n_steps,
- denoising_end=high_noise_frac,
- output_type="latent",
-).images
-image = refiner(
- prompt=prompt,
- num_inference_steps=n_steps,
- denoising_start=high_noise_frac,
- image=image,
-).images[0]
-```
-
-Let's have a look at the images
-
-| Original Image | Ensemble of Expert Denoisers |
-|---|---|
-|  | 
-
-If we had just run the base model for the same 40 steps, the image would arguably have been less detailed (e.g. the lion's eyes and nose):
-
-
-
-The ensemble-of-experts method works well on all available schedulers!
-
-
-
-#### 2.) Refining the image output from fully denoised base image
-
-In standard [`StableDiffusionImg2ImgPipeline`] fashion, the fully-denoised image generated by the base model
-can be further improved using the [refiner checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0).
-
-For this, you simply run the refiner as a normal image-to-image pipeline after the "base" text-to-image
-pipeline. You can leave the outputs of the base model in latent space.
-
-```py
-from diffusers import DiffusionPipeline
-import torch
-
-pipe = DiffusionPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe.to("cuda")
-
-refiner = DiffusionPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-refiner-1.0",
- text_encoder_2=pipe.text_encoder_2,
- vae=pipe.vae,
- torch_dtype=torch.float16,
- use_safetensors=True,
- variant="fp16",
-)
-refiner.to("cuda")
-
-prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
-
-image = pipe(prompt=prompt, output_type="latent").images[0]  # keep the base output in latent space for the refiner
-image = refiner(prompt=prompt, image=image[None, :]).images[0]
-```
-
-| Original Image | Refined Image |
-|---|---|
-|  |  |
-
-
-
-The refiner can also be used very well in an inpainting setting. To do so, just make
- sure to use the [`StableDiffusionXLInpaintPipeline`] class as shown below.
-
-
-
-To use the refiner for inpainting in the Ensemble of Expert Denoisers setting you can do the following:
-
-```py
-from diffusers import StableDiffusionXLInpaintPipeline
-from diffusers.utils import load_image
-
-pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe.to("cuda")
-
-refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-refiner-1.0",
- text_encoder_2=pipe.text_encoder_2,
- vae=pipe.vae,
- torch_dtype=torch.float16,
- use_safetensors=True,
- variant="fp16",
-)
-refiner.to("cuda")
-
-img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
-mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
-
-init_image = load_image(img_url).convert("RGB")
-mask_image = load_image(mask_url).convert("RGB")
-
-prompt = "A majestic tiger sitting on a bench"
-num_inference_steps = 75
-high_noise_frac = 0.7
-
-image = pipe(
- prompt=prompt,
- image=init_image,
- mask_image=mask_image,
- num_inference_steps=num_inference_steps,
- denoising_end=high_noise_frac,
- output_type="latent",
-).images
-image = refiner(
- prompt=prompt,
- image=image,
- mask_image=mask_image,
- num_inference_steps=num_inference_steps,
- denoising_start=high_noise_frac,
-).images[0]
-```
-
-To use the refiner for inpainting in the standard SDEdit-style setting, simply remove `denoising_end` and `denoising_start` and choose a smaller
-number of inference steps for the refiner, as sketched below.
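-
-A minimal sketch of that variant could look as follows (it reuses the `pipe`, `refiner`, `init_image`, `mask_image` and `prompt` objects defined above; the step counts and the `strength` value are illustrative choices, not recommended defaults):
-
-```py
-# Run the base inpainting pipeline to completion (PIL output).
-image = pipe(
- prompt=prompt,
- image=init_image,
- mask_image=mask_image,
- num_inference_steps=75,
-).images[0]
-
-# Rework the fully generated image with the refiner; a small `strength`
-# means only the last part of the denoising schedule is re-run.
-image = refiner(
- prompt=prompt,
- image=image,
- mask_image=mask_image,
- num_inference_steps=30,
- strength=0.3,
-).images[0]
-```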
-
-### Loading single file checkpoints / original file format
-
-By making use of [`~diffusers.loaders.FromSingleFileMixin.from_single_file`] you can also load the
-original file format into `diffusers`:
-
-```py
-from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
-import torch
-
-pipe = StableDiffusionXLPipeline.from_single_file(
- "./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe.to("cuda")
-
-refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
- "./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
-)
-refiner.to("cuda")
-```
-
-### Memory optimization via model offloading
-
-If you are seeing out-of-memory errors, we recommend making use of [`StableDiffusionXLPipeline.enable_model_cpu_offload`].
-
-```diff
-- pipe.to("cuda")
-+ pipe.enable_model_cpu_offload()
-```
-
-and
-
-```diff
-- refiner.to("cuda")
-+ refiner.enable_model_cpu_offload()
-```
-
-### Speed-up inference with `torch.compile`
-
-You can speed up inference by making use of `torch.compile`. This should give you roughly a **20%** speed-up.
-
-```diff
-+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
-```
-
-### Running with `torch < 2.0`
-
-**Note** that if you want to run Stable Diffusion XL with `torch` < 2.0, please make sure to enable xformers
-attention:
-
-```
-pip install xformers
-```
-
-```diff
-+pipe.enable_xformers_memory_efficient_attention()
-+refiner.enable_xformers_memory_efficient_attention()
-```
-
-## StableDiffusionXLPipeline
-
-[[autodoc]] StableDiffusionXLPipeline
- - all
- - __call__
-
-## StableDiffusionXLImg2ImgPipeline
-
-[[autodoc]] StableDiffusionXLImg2ImgPipeline
- - all
- - __call__
-
-## StableDiffusionXLInpaintPipeline
-
-[[autodoc]] StableDiffusionXLInpaintPipeline
- - all
- - __call__
-
-### Passing different prompts to each text-encoder
-
-Stable Diffusion XL was trained on two text encoders. The default behavior is to pass the same prompt to each. But it is possible to pass a different prompt to each text encoder, which [some users](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201) have noted can boost quality.
-To do so, you can pass `prompt_2` and `negative_prompt_2` in addition to `prompt` and `negative_prompt`. By doing that, you will pass the original prompts and negative prompts (as in `prompt` and `negative_prompt`) to `text_encoder` (in official SDXL 0.9/1.0 that is [OpenAI CLIP-ViT/L-14](https://huggingface.co/openai/clip-vit-large-patch14)),
-and `prompt_2` and `negative_prompt_2` to `text_encoder_2` (in official SDXL 0.9/1.0 that is [OpenCLIP-ViT/bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
-
-```py
-from diffusers import StableDiffusionXLPipeline
-import torch
-
-pipe = StableDiffusionXLPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
-pipe.to("cuda")
-
-# prompt will be passed to OAI CLIP-ViT/L-14
-prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
-# prompt_2 will be passed to OpenCLIP-ViT/bigG-14
-prompt_2 = "monet painting"
-image = pipe(prompt=prompt, prompt_2=prompt_2).images[0]
-```
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
deleted file mode 100644
index 432619a79ddd32d288893e3021a14ab6893b370a..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import gc
-import unittest
-
-from diffusers import FlaxStableDiffusionInpaintPipeline
-from diffusers.utils import is_flax_available, load_image, slow
-from diffusers.utils.testing_utils import require_flax
-
-
-if is_flax_available():
- import jax
- import jax.numpy as jnp
- from flax.jax_utils import replicate
- from flax.training.common_utils import shard
-
-
-@slow
-@require_flax
-class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
- def tearDown(self):
- # clean up the VRAM after each test
- super().tearDown()
- gc.collect()
-
- def test_stable_diffusion_inpaint_pipeline(self):
- init_image = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- "/sd2-inpaint/init_image.png"
- )
- mask_image = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
- )
-
- model_id = "xvjiarui/stable-diffusion-2-inpainting"
- pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
-
- prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
-
- prng_seed = jax.random.PRNGKey(0)
- num_inference_steps = 50
-
- num_samples = jax.device_count()
- prompt = num_samples * [prompt]
- init_image = num_samples * [init_image]
- mask_image = num_samples * [mask_image]
- prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
-
- # shard inputs and rng
- params = replicate(params)
- prng_seed = jax.random.split(prng_seed, jax.device_count())
- prompt_ids = shard(prompt_ids)
- processed_masked_images = shard(processed_masked_images)
- processed_masks = shard(processed_masks)
-
- output = pipeline(
- prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
- )
-
- images = output.images.reshape(num_samples, 512, 512, 3)
-
- image_slice = images[0, 253:256, 253:256, -1]
-
- output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
- expected_slice = jnp.array(
- [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
- )
- print(f"output_slice: {output_slice}")
- assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py
deleted file mode 100644
index 5838ff3eefb03bc83928fa13848cea9ff8647827..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
- YOLOAnchorGenerator)
-from .builder import ANCHOR_GENERATORS, build_anchor_generator
-from .point_generator import PointGenerator
-from .utils import anchor_inside_flags, calc_region, images_to_levels
-
-__all__ = [
- 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
- 'PointGenerator', 'images_to_levels', 'calc_region',
- 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator'
-]
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py
deleted file mode 100644
index 00b2594ba8a1c9edc90cca7a6d7c3334fa209edc..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
- '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
-]
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py
deleted file mode 100644
index 796ba3fb142394c4d93a29ba57548dca59d8d02b..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './deeplabv3_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index 398d9759cafc1d01e78c138abd249808531a97b9..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(type='ResNet', depth=101))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py
deleted file mode 100644
index 1a3c43495bbf9d302216d7ddf62df75446907a36..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './psanet_r50-d8_512x512_20k_voc12aug.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/AnimalEquality/chatbot/_proc/styles.css b/spaces/AnimalEquality/chatbot/_proc/styles.css
deleted file mode 100644
index 66ccc49ee8f0e73901dac02dc4e9224b7d1b2c78..0000000000000000000000000000000000000000
--- a/spaces/AnimalEquality/chatbot/_proc/styles.css
+++ /dev/null
@@ -1,37 +0,0 @@
-.cell {
- margin-bottom: 1rem;
-}
-
-.cell > .sourceCode {
- margin-bottom: 0;
-}
-
-.cell-output > pre {
- margin-bottom: 0;
-}
-
-.cell-output > pre, .cell-output > .sourceCode > pre, .cell-output-stdout > pre {
- margin-left: 0.8rem;
- margin-top: 0;
- background: none;
- border-left: 2px solid lightsalmon;
- border-top-left-radius: 0;
- border-top-right-radius: 0;
-}
-
-.cell-output > .sourceCode {
- border: none;
-}
-
-.cell-output > .sourceCode {
- background: none;
- margin-top: 0;
-}
-
-div.description {
- padding-left: 2px;
- padding-top: 5px;
- font-style: italic;
- font-size: 135%;
- opacity: 70%;
-}
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py
deleted file mode 100644
index 0f650c1ab1a0f66bf79ce72d052db43b96801b6d..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from typing import Optional
-
-from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
-
-available_pipelines = ['llava-7b', 'llava-13b']
-
-
-def get_pipeline(name: str, params: dict) -> Optional[AbstractMultimodalPipeline]:
- if name == 'llava-7b':
- from .llava import LLaVA_v0_7B_Pipeline
- return LLaVA_v0_7B_Pipeline(params)
- if name == 'llava-13b':
- from .llava import LLaVA_v0_13B_Pipeline
- return LLaVA_v0_13B_Pipeline(params)
- return None
-
-
-def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[AbstractMultimodalPipeline]:
- if 'llava' not in model_name.lower():
- return None
- if '7b' in model_name.lower():
- from .llava import LLaVA_v0_7B_Pipeline
- return LLaVA_v0_7B_Pipeline(params)
- if '13b' in model_name.lower():
- from .llava import LLaVA_v0_13B_Pipeline
- return LLaVA_v0_13B_Pipeline(params)
- return None
diff --git a/spaces/Ank0X0/Image-Upscaling-Playground/app.py b/spaces/Ank0X0/Image-Upscaling-Playground/app.py
deleted file mode 100644
index 1f3736667bfd4e5ac6d9ee2ef9b95416cb80f9c0..0000000000000000000000000000000000000000
--- a/spaces/Ank0X0/Image-Upscaling-Playground/app.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import numpy as np
-import cv2
-import onnxruntime
-import gradio as gr
-
-
-def pre_process(img: np.array) -> np.array:
- # H, W, C -> C, H, W
- img = np.transpose(img[:, :, 0:3], (2, 0, 1))
- # C, H, W -> 1, C, H, W
- img = np.expand_dims(img, axis=0).astype(np.float32)
- return img
-
-
-def post_process(img: np.array) -> np.array:
- # 1, C, H, W -> C, H, W
- img = np.squeeze(img)
- # C, H, W -> H, W, C
- img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)
- return img
-
-
-def inference(model_path: str, img_array: np.array) -> np.array:
- options = onnxruntime.SessionOptions()
- options.intra_op_num_threads = 1
- options.inter_op_num_threads = 1
- ort_session = onnxruntime.InferenceSession(model_path, options)
- ort_inputs = {ort_session.get_inputs()[0].name: img_array}
- ort_outs = ort_session.run(None, ort_inputs)
-
- return ort_outs[0]
-
-
-def convert_pil_to_cv2(image):
- # pil_image = image.convert("RGB")
- open_cv_image = np.array(image)
- # RGB to BGR
- open_cv_image = open_cv_image[:, :, ::-1].copy()
- return open_cv_image
-
-
-def upscale(image, model):
- model_path = f"models/{model}.ort"
- img = convert_pil_to_cv2(image)
- if img.ndim == 2:
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-
- if img.shape[2] == 4:
- alpha = img[:, :, 3] # GRAY
- alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR) # BGR
- alpha_output = post_process(inference(model_path, pre_process(alpha))) # BGR
- alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY) # GRAY
-
- img = img[:, :, 0:3] # BGR
- image_output = post_process(inference(model_path, pre_process(img))) # BGR
- image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA) # BGRA
- image_output[:, :, 3] = alpha_output
-
- elif img.shape[2] == 3:
- image_output = post_process(inference(model_path, pre_process(img))) # BGR
-
- return image_output
-
-
-css = ".output-image, .input-image, .image-preview {height: 480px !important} "
-model_choices = ["modelx2", "modelx2 25 JXL", "modelx4", "minecraft_modelx4"]
-
-gr.Interface(
- fn=upscale,
- inputs=[
- gr.inputs.Image(type="pil", label="Input Image"),
- gr.inputs.Radio(
- model_choices,
- type="value",
- default=None,
- label="Choose Upscaler",
- optional=False,
- ),
- ],
- outputs="image",
- title="Image Upscaling 🦆",
- description="Model: [Anchor-based Plain Net for Mobile Image Super-Resolution](https://arxiv.org/abs/2105.09750). Repository: [SR Mobile PyTorch](https://github.com/w11wo/sr_mobile_pytorch)",
- allow_flagging="never",
- css=css,
-).launch()
diff --git a/spaces/Annelisseishere/Streamlit_GPT/README.md b/spaces/Annelisseishere/Streamlit_GPT/README.md
deleted file mode 100644
index d690b01b20e2640c17f802059f3306685323045c..0000000000000000000000000000000000000000
--- a/spaces/Annelisseishere/Streamlit_GPT/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Streamlit GPT
-emoji: 🏢
-colorFrom: indigo
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Asahi402/anime-remove-background/README.md b/spaces/Asahi402/anime-remove-background/README.md
deleted file mode 100644
index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000
--- a/spaces/Asahi402/anime-remove-background/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Anime Remove Background
-emoji: 🪄🖼️
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.1.4
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: skytnt/anime-remove-background
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py
deleted file mode 100644
index 3f10701f6b28c72b62c9904fec37b96bdd199dcc..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import logging
-from optparse import Values
-from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional
-
-from pip._vendor.packaging.utils import canonicalize_name
-
-from pip._internal.cli.base_command import Command
-from pip._internal.cli.status_codes import ERROR, SUCCESS
-from pip._internal.metadata import BaseDistribution, get_default_environment
-from pip._internal.utils.misc import write_output
-
-logger = logging.getLogger(__name__)
-
-
-class ShowCommand(Command):
- """
- Show information about one or more installed packages.
-
- The output is in RFC-compliant mail header format.
- """
-
- usage = """
- %prog [options] <package> ..."""
- ignore_require_venv = True
-
- def add_options(self) -> None:
- self.cmd_opts.add_option(
- "-f",
- "--files",
- dest="files",
- action="store_true",
- default=False,
- help="Show the full list of installed files for each package.",
- )
-
- self.parser.insert_option_group(0, self.cmd_opts)
-
- def run(self, options: Values, args: List[str]) -> int:
- if not args:
- logger.warning("ERROR: Please provide a package name or names.")
- return ERROR
- query = args
-
- results = search_packages_info(query)
- if not print_results(
- results, list_files=options.files, verbose=options.verbose
- ):
- return ERROR
- return SUCCESS
-
-
-class _PackageInfo(NamedTuple):
- name: str
- version: str
- location: str
- editable_project_location: Optional[str]
- requires: List[str]
- required_by: List[str]
- installer: str
- metadata_version: str
- classifiers: List[str]
- summary: str
- homepage: str
- project_urls: List[str]
- author: str
- author_email: str
- license: str
- entry_points: List[str]
- files: Optional[List[str]]
-
-
-def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:
- """
- Gather details from installed distributions. Print distribution name,
- version, location, and installed files. Installed files requires a
- pip-generated 'installed-files.txt' in the distribution's '.egg-info'
- directory.
- """
- env = get_default_environment()
-
- installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}
- query_names = [canonicalize_name(name) for name in query]
- missing = sorted(
- [name for name, pkg in zip(query, query_names) if pkg not in installed]
- )
- if missing:
- logger.warning("Package(s) not found: %s", ", ".join(missing))
-
- def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
- return (
- dist.metadata["Name"] or "UNKNOWN"
- for dist in installed.values()
- if current_dist.canonical_name
- in {canonicalize_name(d.name) for d in dist.iter_dependencies()}
- )
-
- for query_name in query_names:
- try:
- dist = installed[query_name]
- except KeyError:
- continue
-
- requires = sorted((req.name for req in dist.iter_dependencies()), key=str.lower)
- required_by = sorted(_get_requiring_packages(dist), key=str.lower)
-
- try:
- entry_points_text = dist.read_text("entry_points.txt")
- entry_points = entry_points_text.splitlines(keepends=False)
- except FileNotFoundError:
- entry_points = []
-
- files_iter = dist.iter_declared_entries()
- if files_iter is None:
- files: Optional[List[str]] = None
- else:
- files = sorted(files_iter)
-
- metadata = dist.metadata
-
- yield _PackageInfo(
- name=dist.raw_name,
- version=str(dist.version),
- location=dist.location or "",
- editable_project_location=dist.editable_project_location,
- requires=requires,
- required_by=required_by,
- installer=dist.installer,
- metadata_version=dist.metadata_version or "",
- classifiers=metadata.get_all("Classifier", []),
- summary=metadata.get("Summary", ""),
- homepage=metadata.get("Home-page", ""),
- project_urls=metadata.get_all("Project-URL", []),
- author=metadata.get("Author", ""),
- author_email=metadata.get("Author-email", ""),
- license=metadata.get("License", ""),
- entry_points=entry_points,
- files=files,
- )
-
-
-def print_results(
- distributions: Iterable[_PackageInfo],
- list_files: bool,
- verbose: bool,
-) -> bool:
- """
- Print the information from installed distributions found.
- """
- results_printed = False
- for i, dist in enumerate(distributions):
- results_printed = True
- if i > 0:
- write_output("---")
-
- write_output("Name: %s", dist.name)
- write_output("Version: %s", dist.version)
- write_output("Summary: %s", dist.summary)
- write_output("Home-page: %s", dist.homepage)
- write_output("Author: %s", dist.author)
- write_output("Author-email: %s", dist.author_email)
- write_output("License: %s", dist.license)
- write_output("Location: %s", dist.location)
- if dist.editable_project_location is not None:
- write_output(
- "Editable project location: %s", dist.editable_project_location
- )
- write_output("Requires: %s", ", ".join(dist.requires))
- write_output("Required-by: %s", ", ".join(dist.required_by))
-
- if verbose:
- write_output("Metadata-Version: %s", dist.metadata_version)
- write_output("Installer: %s", dist.installer)
- write_output("Classifiers:")
- for classifier in dist.classifiers:
- write_output(" %s", classifier)
- write_output("Entry-points:")
- for entry in dist.entry_points:
- write_output(" %s", entry.strip())
- write_output("Project-URLs:")
- for project_url in dist.project_urls:
- write_output(" %s", project_url)
- if list_files:
- write_output("Files:")
- if dist.files is None:
- write_output("Cannot locate RECORD or installed-files.txt")
- else:
- for line in dist.files:
- write_output(" %s", line.strip())
- return results_printed
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py
deleted file mode 100644
index 98fc2a7f487da55a23b962793158911848800211..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py
+++ /dev/null
@@ -1,600 +0,0 @@
-"""Automatic discovery of Python modules and packages (for inclusion in the
-distribution) and other config values.
-
-For the purposes of this module, the following nomenclature is used:
-
-- "src-layout": a directory representing a Python project that contains a "src"
- folder. Everything under the "src" folder is meant to be included in the
- distribution when packaging the project. Example::
-
- .
- ├── tox.ini
- ├── pyproject.toml
- └── src/
- └── mypkg/
- ├── __init__.py
- ├── mymodule.py
- └── my_data_file.txt
-
-- "flat-layout": a Python project that does not use "src-layout" but instead
- have a directory under the project root for each package::
-
- .
- ├── tox.ini
- ├── pyproject.toml
- └── mypkg/
- ├── __init__.py
- ├── mymodule.py
- └── my_data_file.txt
-
-- "single-module": a project that contains a single Python script direct under
- the project root (no directory used)::
-
- .
- ├── tox.ini
- ├── pyproject.toml
- └── mymodule.py
-
-"""
-
-import itertools
-import os
-from fnmatch import fnmatchcase
-from glob import glob
-from pathlib import Path
-from typing import (
- TYPE_CHECKING,
- Callable,
- Dict,
- Iterable,
- Iterator,
- List,
- Mapping,
- Optional,
- Tuple,
- Union
-)
-
-import _distutils_hack.override # noqa: F401
-
-from distutils import log
-from distutils.util import convert_path
-
-_Path = Union[str, os.PathLike]
-_Filter = Callable[[str], bool]
-StrIter = Iterator[str]
-
-chain_iter = itertools.chain.from_iterable
-
-if TYPE_CHECKING:
- from setuptools import Distribution # noqa
-
-
-def _valid_name(path: _Path) -> bool:
- # Ignore invalid names that cannot be imported directly
- return os.path.basename(path).isidentifier()
-
-
-class _Finder:
- """Base class that exposes functionality for module/package finders"""
-
- ALWAYS_EXCLUDE: Tuple[str, ...] = ()
- DEFAULT_EXCLUDE: Tuple[str, ...] = ()
-
- @classmethod
- def find(
- cls,
- where: _Path = '.',
- exclude: Iterable[str] = (),
- include: Iterable[str] = ('*',)
- ) -> List[str]:
- """Return a list of all Python items (packages or modules, depending on
- the finder implementation) found within directory 'where'.
-
- 'where' is the root directory which will be searched.
- It should be supplied as a "cross-platform" (i.e. URL-style) path;
- it will be converted to the appropriate local path syntax.
-
- 'exclude' is a sequence of names to exclude; '*' can be used
- as a wildcard in the names.
- When finding packages, 'foo.*' will exclude all subpackages of 'foo'
- (but not 'foo' itself).
-
- 'include' is a sequence of names to include.
- If it's specified, only the named items will be included.
- If it's not specified, all found items will be included.
- 'include' can contain shell style wildcard patterns just like
- 'exclude'.
- """
-
- exclude = exclude or cls.DEFAULT_EXCLUDE
- return list(
- cls._find_iter(
- convert_path(str(where)),
- cls._build_filter(*cls.ALWAYS_EXCLUDE, *exclude),
- cls._build_filter(*include),
- )
- )
-
- @classmethod
- def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
- raise NotImplementedError
-
- @staticmethod
- def _build_filter(*patterns: str) -> _Filter:
- """
- Given a list of patterns, return a callable that will be true only if
- the input matches at least one of the patterns.
- """
- return lambda name: any(fnmatchcase(name, pat) for pat in patterns)
-
-
-class PackageFinder(_Finder):
- """
- Generate a list of all Python packages found within a directory
- """
-
- ALWAYS_EXCLUDE = ("ez_setup", "*__pycache__")
-
- @classmethod
- def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
- """
- All the packages found in 'where' that pass the 'include' filter, but
- not the 'exclude' filter.
- """
- for root, dirs, files in os.walk(str(where), followlinks=True):
- # Copy dirs to iterate over it, then empty dirs.
- all_dirs = dirs[:]
- dirs[:] = []
-
- for dir in all_dirs:
- full_path = os.path.join(root, dir)
- rel_path = os.path.relpath(full_path, where)
- package = rel_path.replace(os.path.sep, '.')
-
- # Skip directory trees that are not valid packages
- if '.' in dir or not cls._looks_like_package(full_path, package):
- continue
-
- # Should this package be included?
- if include(package) and not exclude(package):
- yield package
-
- # Keep searching subdirectories, as there may be more packages
- # down there, even if the parent was excluded.
- dirs.append(dir)
-
- @staticmethod
- def _looks_like_package(path: _Path, _package_name: str) -> bool:
- """Does a directory look like a package?"""
- return os.path.isfile(os.path.join(path, '__init__.py'))
-
-
-class PEP420PackageFinder(PackageFinder):
- @staticmethod
- def _looks_like_package(_path: _Path, _package_name: str) -> bool:
- return True
-
-
-class ModuleFinder(_Finder):
- """Find isolated Python modules.
- This function will **not** recurse subdirectories.
- """
-
- @classmethod
- def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
- for file in glob(os.path.join(where, "*.py")):
- module, _ext = os.path.splitext(os.path.basename(file))
-
- if not cls._looks_like_module(module):
- continue
-
- if include(module) and not exclude(module):
- yield module
-
- _looks_like_module = staticmethod(_valid_name)
-
-
-# We have to be extra careful in the case of flat layout to not include files
-# and directories not meant for distribution (e.g. tool-related)
-
-
-class FlatLayoutPackageFinder(PEP420PackageFinder):
- _EXCLUDE = (
- "ci",
- "bin",
- "doc",
- "docs",
- "documentation",
- "manpages",
- "news",
- "changelog",
- "test",
- "tests",
- "unit_test",
- "unit_tests",
- "example",
- "examples",
- "scripts",
- "tools",
- "util",
- "utils",
- "python",
- "build",
- "dist",
- "venv",
- "env",
- "requirements",
- # ---- Task runners / Build tools ----
- "tasks", # invoke
- "fabfile", # fabric
- "site_scons", # SCons
- # ---- Other tools ----
- "benchmark",
- "benchmarks",
- "exercise",
- "exercises",
- # ---- Hidden directories/Private packages ----
- "[._]*",
- )
-
- DEFAULT_EXCLUDE = tuple(chain_iter((p, f"{p}.*") for p in _EXCLUDE))
- """Reserved package names"""
-
- @staticmethod
- def _looks_like_package(_path: _Path, package_name: str) -> bool:
- names = package_name.split('.')
- # Consider PEP 561
- root_pkg_is_valid = names[0].isidentifier() or names[0].endswith("-stubs")
- return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])
-
-
-class FlatLayoutModuleFinder(ModuleFinder):
- DEFAULT_EXCLUDE = (
- "setup",
- "conftest",
- "test",
- "tests",
- "example",
- "examples",
- "build",
- # ---- Task runners ----
- "toxfile",
- "noxfile",
- "pavement",
- "dodo",
- "tasks",
- "fabfile",
- # ---- Other tools ----
- "[Ss][Cc]onstruct", # SCons
- "conanfile", # Connan: C/C++ build tool
- "manage", # Django
- "benchmark",
- "benchmarks",
- "exercise",
- "exercises",
- # ---- Hidden files/Private modules ----
- "[._]*",
- )
- """Reserved top-level module names"""
-
-
-def _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:
- nested = PEP420PackageFinder.find(pkg_dir)
- return [root_pkg] + [".".join((root_pkg, n)) for n in nested]
-
-
-class ConfigDiscovery:
- """Fill-in metadata and options that can be automatically derived
- (from other metadata/options, the file system or conventions)
- """
-
- def __init__(self, distribution: "Distribution"):
- self.dist = distribution
- self._called = False
- self._disabled = False
- self._skip_ext_modules = False
-
- def _disable(self):
- """Internal API to disable automatic discovery"""
- self._disabled = True
-
- def _ignore_ext_modules(self):
- """Internal API to disregard ext_modules.
-
- Normally auto-discovery would not be triggered if ``ext_modules`` are set
- (this is done for backward compatibility with existing packages relying on
- ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function
- to ignore given ``ext_modules`` and proceed with the auto-discovery if
- ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml
- metadata).
- """
- self._skip_ext_modules = True
-
- @property
- def _root_dir(self) -> _Path:
- # The best is to wait until `src_root` is set in dist, before using _root_dir.
- return self.dist.src_root or os.curdir
-
- @property
- def _package_dir(self) -> Dict[str, str]:
- if self.dist.package_dir is None:
- return {}
- return self.dist.package_dir
-
- def __call__(self, force=False, name=True, ignore_ext_modules=False):
- """Automatically discover missing configuration fields
- and modifies the given ``distribution`` object in-place.
-
- Note that by default this will only have an effect the first time the
- ``ConfigDiscovery`` object is called.
-
- To repeatedly invoke automatic discovery (e.g. when the project
- directory changes), please use ``force=True`` (or create a new
- ``ConfigDiscovery`` instance).
- """
- if force is False and (self._called or self._disabled):
- # Avoid overhead of multiple calls
- return
-
- self._analyse_package_layout(ignore_ext_modules)
- if name:
- self.analyse_name() # depends on ``packages`` and ``py_modules``
-
- self._called = True
-
- def _explicitly_specified(self, ignore_ext_modules: bool) -> bool:
- """``True`` if the user has specified some form of package/module listing"""
- ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules
- ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules)
- return (
- self.dist.packages is not None
- or self.dist.py_modules is not None
- or ext_modules
- or hasattr(self.dist, "configuration") and self.dist.configuration
- # ^ Some projects use numpy.distutils.misc_util.Configuration
- )
-
- def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool:
- if self._explicitly_specified(ignore_ext_modules):
- # For backward compatibility, just try to find modules/packages
- # when nothing is given
- return True
-
- log.debug(
- "No `packages` or `py_modules` configuration, performing "
- "automatic discovery."
- )
-
- return (
- self._analyse_explicit_layout()
- or self._analyse_src_layout()
- # flat-layout is the trickiest for discovery so it should be last
- or self._analyse_flat_layout()
- )
-
- def _analyse_explicit_layout(self) -> bool:
- """The user can explicitly give a package layout via ``package_dir``"""
- package_dir = self._package_dir.copy() # don't modify directly
- package_dir.pop("", None) # This falls under the "src-layout" umbrella
- root_dir = self._root_dir
-
- if not package_dir:
- return False
-
- log.debug(f"`explicit-layout` detected -- analysing {package_dir}")
- pkgs = chain_iter(
- _find_packages_within(pkg, os.path.join(root_dir, parent_dir))
- for pkg, parent_dir in package_dir.items()
- )
- self.dist.packages = list(pkgs)
- log.debug(f"discovered packages -- {self.dist.packages}")
- return True
-
- def _analyse_src_layout(self) -> bool:
- """Try to find all packages or modules under the ``src`` directory
- (or anything pointed by ``package_dir[""]``).
-
- The "src-layout" is relatively safe for automatic discovery.
- We assume that everything within is meant to be included in the
- distribution.
-
- If ``package_dir[""]`` is not given, but the ``src`` directory exists,
- this function will set ``package_dir[""] = "src"``.
- """
- package_dir = self._package_dir
- src_dir = os.path.join(self._root_dir, package_dir.get("", "src"))
- if not os.path.isdir(src_dir):
- return False
-
- log.debug(f"`src-layout` detected -- analysing {src_dir}")
- package_dir.setdefault("", os.path.basename(src_dir))
- self.dist.package_dir = package_dir # persist eventual modifications
- self.dist.packages = PEP420PackageFinder.find(src_dir)
- self.dist.py_modules = ModuleFinder.find(src_dir)
- log.debug(f"discovered packages -- {self.dist.packages}")
- log.debug(f"discovered py_modules -- {self.dist.py_modules}")
- return True
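As a rough illustration of the src-layout branch above: for a hypothetical project that keeps its code under ``src/`` and declares no explicit ``packages`` or ``py_modules``, the public counterpart of the finder used here would report the packages as sketched below (the directory tree and package names are invented for the example, not taken from the original source).

# Hypothetical layout, not from the original project:
#   myproj/
#     pyproject.toml      <- no explicit `packages` or `py_modules`
#     src/
#       mypkg/
#         __init__.py
#         core.py
import setuptools

# find_namespace_packages wraps the same PEP 420 finder this method relies on;
# run from the project root it would yield the discovered package list.
print(setuptools.find_namespace_packages(where="src"))  # ['mypkg']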
-
- def _analyse_flat_layout(self) -> bool:
- """Try to find all packages and modules under the project root.
-
- Since the ``flat-layout`` is more dangerous in terms of accidentally including
- extra files/directories, this function is more conservative and will raise an
- error if multiple packages or modules are found.
-
- This assumes that multi-package dists are uncommon and refuses to support that
- use case in order to prevent unintended errors.
- """
- log.debug(f"`flat-layout` detected -- analysing {self._root_dir}")
- return self._analyse_flat_packages() or self._analyse_flat_modules()
-
- def _analyse_flat_packages(self) -> bool:
- self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir)
- top_level = remove_nested_packages(remove_stubs(self.dist.packages))
- log.debug(f"discovered packages -- {self.dist.packages}")
- self._ensure_no_accidental_inclusion(top_level, "packages")
- return bool(top_level)
-
- def _analyse_flat_modules(self) -> bool:
- self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir)
- log.debug(f"discovered py_modules -- {self.dist.py_modules}")
- self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules")
- return bool(self.dist.py_modules)
-
- def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str):
- if len(detected) > 1:
- from inspect import cleandoc
-
- from setuptools.errors import PackageDiscoveryError
-
- msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}.
-
- To avoid accidental inclusion of unwanted files or directories,
- setuptools will not proceed with this build.
-
- If you are trying to create a single distribution with multiple {kind}
- on purpose, you should not rely on automatic discovery.
- Instead, consider the following options:
-
- 1. set up custom discovery (`find` directive with `include` or `exclude`)
- 2. use a `src-layout`
- 3. explicitly set `py_modules` or `packages` with a list of names
-
- To find more information, look for "package discovery" on setuptools docs.
- """
- raise PackageDiscoveryError(cleandoc(msg))
-
- def analyse_name(self):
- """The packages/modules are the essential contribution of the author.
- Therefore the name of the distribution can be derived from them.
- """
- if self.dist.metadata.name or self.dist.name:
- # get_name() is not reliable (can return "UNKNOWN")
- return None
-
- log.debug("No `name` configuration, performing automatic discovery")
-
- name = (
- self._find_name_single_package_or_module()
- or self._find_name_from_packages()
- )
- if name:
- self.dist.metadata.name = name
-
- def _find_name_single_package_or_module(self) -> Optional[str]:
- """Exactly one module or package"""
- for field in ('packages', 'py_modules'):
- items = getattr(self.dist, field, None) or []
- if items and len(items) == 1:
- log.debug(f"Single module/package detected, name: {items[0]}")
- return items[0]
-
- return None
-
- def _find_name_from_packages(self) -> Optional[str]:
- """Try to find the root package that is not a PEP 420 namespace"""
- if not self.dist.packages:
- return None
-
- packages = remove_stubs(sorted(self.dist.packages, key=len))
- package_dir = self.dist.package_dir or {}
-
- parent_pkg = find_parent_package(packages, package_dir, self._root_dir)
- if parent_pkg:
- log.debug(f"Common parent package detected, name: {parent_pkg}")
- return parent_pkg
-
- log.warn("No parent package detected, impossible to derive `name`")
- return None
-
-
-def remove_nested_packages(packages: List[str]) -> List[str]:
- """Remove nested packages from a list of packages.
-
- >>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"])
- ['a']
- >>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"])
- ['a', 'b', 'c.d', 'g.h']
- """
- pkgs = sorted(packages, key=len)
- top_level = pkgs[:]
- size = len(pkgs)
- for i, name in enumerate(reversed(pkgs)):
- if any(name.startswith(f"{other}.") for other in top_level):
- top_level.pop(size - i - 1)
-
- return top_level
-
-
-def remove_stubs(packages: List[str]) -> List[str]:
- """Remove type stubs (:pep:`561`) from a list of packages.
-
- >>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"])
- ['a', 'a.b', 'b']
- """
- return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")]
-
-
-def find_parent_package(
- packages: List[str], package_dir: Mapping[str, str], root_dir: _Path
-) -> Optional[str]:
- """Find the parent package that is not a namespace."""
- packages = sorted(packages, key=len)
- common_ancestors = []
- for i, name in enumerate(packages):
- if not all(n.startswith(f"{name}.") for n in packages[i+1:]):
- # Since packages are sorted by length, this condition is able
- # to find a list of all common ancestors.
- # When there is divergence (e.g. multiple root packages)
- # the list will be empty
- break
- common_ancestors.append(name)
-
- for name in common_ancestors:
- pkg_path = find_package_path(name, package_dir, root_dir)
- init = os.path.join(pkg_path, "__init__.py")
- if os.path.isfile(init):
- return name
-
- return None
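A minimal sketch of how find_parent_package behaves; the ``setuptools.discovery`` import path and the package names are assumptions of this example, not taken from the surrounding code.

import os
import tempfile

from setuptools.discovery import find_parent_package  # import path assumed

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "mypkg", "sub"))
for rel in ("mypkg/__init__.py", "mypkg/sub/__init__.py"):
    open(os.path.join(root, *rel.split("/")), "w").close()

# "mypkg" is the common ancestor of both packages and is a regular
# (non-namespace) package, so it is returned as the candidate name.
print(find_parent_package(["mypkg", "mypkg.sub"], {}, root))  # mypkg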
-
-
-def find_package_path(
- name: str, package_dir: Mapping[str, str], root_dir: _Path
-) -> str:
- """Given a package name, return the path where it should be found on
- disk, considering the ``package_dir`` option.
-
- >>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".")
- >>> path.replace(os.sep, "/")
- './root/is/nested/my/pkg'
-
- >>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".")
- >>> path.replace(os.sep, "/")
- './root/is/nested/pkg'
-
- >>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".")
- >>> path.replace(os.sep, "/")
- './root/is/nested'
-
- >>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".")
- >>> path.replace(os.sep, "/")
- './other/pkg'
- """
- parts = name.split(".")
- for i in range(len(parts), 0, -1):
- # Look backwards, the most specific package_dir first
- partial_name = ".".join(parts[:i])
- if partial_name in package_dir:
- parent = package_dir[partial_name]
- return os.path.join(root_dir, parent, *parts[i:])
-
- parent = package_dir.get("") or ""
- return os.path.join(root_dir, *parent.split("/"), *parts)
-
-
-def construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]:
- parent_pkgs = remove_nested_packages(packages)
- prefix = Path(package_path).parts
- return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs}
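For completeness, a doctest-style sketch of what construct_package_dir produces; the package names and path are made up, and the nested package is dropped because only top-level parents receive an entry.

>>> construct_package_dir(["pkg_a", "pkg_a.util", "pkg_b"], "libs/python")
{'pkg_a': 'libs/python/pkg_a', 'pkg_b': 'libs/python/pkg_b'}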
diff --git a/spaces/Azurro/APT-1B-Base/README.md b/spaces/Azurro/APT-1B-Base/README.md
deleted file mode 100644
index 51c40e7115619a9696b587322663e0b11cc38d59..0000000000000000000000000000000000000000
--- a/spaces/Azurro/APT-1B-Base/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: APT-1B-Base
-emoji: 💻
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.34.0
-app_file: app.py
-pinned: false
-license: cc-by-nc-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/BIOML-SVM/SVM/proteinbind_new.py b/spaces/BIOML-SVM/SVM/proteinbind_new.py
deleted file mode 100644
index 37a62abb09e7089e62631f7fd1320048e4f32842..0000000000000000000000000000000000000000
--- a/spaces/BIOML-SVM/SVM/proteinbind_new.py
+++ /dev/null
@@ -1,283 +0,0 @@
-from types import SimpleNamespace
-
-import pandas as pd
-import torch
-import torch.nn as nn
-from torch.utils.data import Dataset
-
-
-ModalityType = SimpleNamespace(
- AA="aa",
- DNA="dna",
- PDB="pdb",
- GO="go",
- MSA="msa",
- TEXT="text",
-)
-
-
-class Normalize(nn.Module):
- def __init__(self, dim: int) -> None:
- super().__init__()
- self.dim = dim
-
- def forward(self, x):
- return torch.nn.functional.normalize(x, dim=self.dim, p=2)
-
-
-class EmbeddingDataset(Dataset):
- """
- The main class for turning any modality into a torch Dataset that can be
- passed to a torch DataLoader. Modalities that do not fit the default
- __getitem__ behaviour can subclass this and override __getitem__.
- """
- def __init__(self, sequence_file_path, embeddings_file_path, modality):
- self.sequence = pd.read_csv(sequence_file_path)
- self.embedding = torch.load(embeddings_file_path)
- self.modality = modality
-
- def __len__(self):
- return len(self.sequence)
-
- def __getitem__(self, idx):
- sequence = self.sequence.iloc[idx, 0]
- embedding = self.embedding[idx]
- return {"aa": sequence, self.modality: embedding}
-
-
-class DualEmbeddingDataset(Dataset):
- """
- Variant of EmbeddingDataset for the case where the amino-acid sequences are
- themselves provided as precomputed embeddings (loaded with torch.load)
- rather than as raw strings read from a CSV file.
- """
- def __init__(self, sequence_embeddings_file_path, embeddings_file_path, modality):
- self.sequence_embedding = torch.load(sequence_embeddings_file_path)
- self.embedding = torch.load(embeddings_file_path)
- self.modality = modality
-
- def __len__(self):
- return len(self.sequence_embedding)
-
- def __getitem__(self, idx):
- sequence_embedding = self.sequence_embedding[idx]
- embedding = self.embedding[idx]
- return {"aa": sequence_embedding, self.modality: embedding}
-
-
-class ProteinBindModel(nn.Module):
-
- def __init__(
- self,
- aa_embed_dim,
- dna_embed_dim,
- pdb_embed_dim,
- go_embed_dim,
- msa_embed_dim,
- text_embed_dim,
- in_embed_dim,
- out_embed_dim
- ):
- super().__init__()
- self.modality_trunks = self._create_modality_trunk(
- aa_embed_dim,
- dna_embed_dim,
- pdb_embed_dim,
- go_embed_dim,
- msa_embed_dim,
- text_embed_dim,
- out_embed_dim
- )
- self.modality_heads = self._create_modality_head(
- in_embed_dim,
- out_embed_dim,
- )
- self.modality_postprocessors = self._create_modality_postprocessors(
- out_embed_dim
- )
-
- def _create_modality_trunk(
- self,
- aa_embed_dim,
- dna_embed_dim,
- pdb_embed_dim,
- go_embed_dim,
- msa_embed_dim,
- text_embed_dim,
- in_embed_dim
- ):
- """
- The trunk architectures below are a simple proof of concept
- and are expected to change based on experiments and feedback.
- :param aa_embed_dim:
- :param dna_embed_dim:
- :param pdb_embed_dim:
- :param go_embed_dim:
- :param msa_embed_dim:
- :param text_embed_dim:
- :param in_embed_dim:
- :return:
- """
- modality_trunks = {}
-
- modality_trunks[ModalityType.AA] = nn.Sequential(
- nn.Linear(aa_embed_dim, 512),
- nn.ReLU(),
- nn.Linear(512, 512),
- nn.ReLU(),
- nn.Linear(512, in_embed_dim),
- )
-
- modality_trunks[ModalityType.DNA] = nn.Sequential(
- nn.Linear(dna_embed_dim, 512),
- nn.ReLU(),
- nn.Linear(512, 512),
- nn.ReLU(),
- nn.Linear(512, in_embed_dim),
- )
-
- modality_trunks[ModalityType.PDB] = nn.Sequential(
- nn.Linear(pdb_embed_dim, 512),
- nn.ReLU(),
- nn.Linear(512, 512),
- nn.ReLU(),
- nn.Linear(512, in_embed_dim),
- )
-
- modality_trunks[ModalityType.GO] = nn.Sequential(
- nn.Linear(go_embed_dim, 512),
- nn.ReLU(),
- nn.Linear(512, 512),
- nn.ReLU(),
- nn.Linear(512, in_embed_dim),
- )
-
- modality_trunks[ModalityType.MSA] = nn.Sequential(
- nn.Linear(msa_embed_dim, 512),
- nn.ReLU(),
- nn.Linear(512, 512),
- nn.ReLU(),
- nn.Linear(512, in_embed_dim),
- )
-
- modality_trunks[ModalityType.TEXT] = nn.Sequential(
- nn.Linear(text_embed_dim, 512),
- nn.ReLU(),
- nn.Linear(512, 512),
- nn.ReLU(),
- nn.Linear(512, in_embed_dim),
- )
-
- return nn.ModuleDict(modality_trunks)
-
- def _create_modality_head(
- self,
- in_embed_dim,
- out_embed_dim
- ):
- modality_heads = {}
-
- modality_heads[ModalityType.AA] = nn.Sequential(
- nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
- nn.Dropout(p=0.5),
- nn.Linear(in_embed_dim, out_embed_dim, bias=False),
- )
-
- modality_heads[ModalityType.DNA] = nn.Sequential(
- nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
- nn.Dropout(p=0.5),
- nn.Linear(in_embed_dim, out_embed_dim, bias=False),
- )
-
- modality_heads[ModalityType.PDB] = nn.Sequential(
- nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
- nn.Dropout(p=0.5),
- nn.Linear(in_embed_dim, out_embed_dim, bias=False),
- )
-
- modality_heads[ModalityType.GO] = nn.Sequential(
- nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
- nn.Dropout(p=0.5),
- nn.Linear(in_embed_dim, out_embed_dim, bias=False),
- )
-
- modality_heads[ModalityType.MSA] = nn.Sequential(
- nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
- nn.Dropout(p=0.5),
- nn.Linear(in_embed_dim, out_embed_dim, bias=False),
- )
-
- modality_heads[ModalityType.TEXT] = nn.Sequential(
- nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
- nn.Dropout(p=0.5),
- nn.Linear(in_embed_dim, out_embed_dim, bias=False),
- )
- return nn.ModuleDict(modality_heads)
-
- def _create_modality_postprocessors(self, out_embed_dim):
- modality_postprocessors = {}
- modality_postprocessors[ModalityType.AA] = Normalize(dim=-1)
- modality_postprocessors[ModalityType.DNA] = Normalize(dim=-1)
- modality_postprocessors[ModalityType.PDB] = Normalize(dim=-1)
- modality_postprocessors[ModalityType.TEXT] = Normalize(dim=-1)
- modality_postprocessors[ModalityType.GO] = Normalize(dim=-1)
- modality_postprocessors[ModalityType.MSA] = Normalize(dim=-1)
-
- return nn.ModuleDict(modality_postprocessors)
-
- def forward(self, inputs):
- """
- inputs = {modality_key_1: tensor, ..., modality_key_n: tensor}
- For each modality key:
-     run the tensor through that modality's trunk,
-     then through its projection head,
-     then through its post-processor (L2 normalisation),
-     and store the result under the same key.
- Returns {modality_key_1: embedding, ..., modality_key_n: embedding}.
- """
-
- outputs = {}
-
- for modality_key, modality_value in inputs.items():
-
- modality_value = self.modality_trunks[modality_key](
- modality_value
- )
-
- modality_value = self.modality_heads[modality_key](
- modality_value
- )
-
- modality_value = self.modality_postprocessors[modality_key](
- modality_value
- )
- outputs[modality_key] = modality_value
-
- return outputs
-
-
-def create_proteinbind(pretrained=False):
- """
- Build a ProteinBindModel with placeholder embedding dimensions.
- :param pretrained: if True, load weights from the local ``best_model.pth`` file
- :return: the (optionally pretrained) ProteinBindModel instance
- """
- model = ProteinBindModel(
- aa_embed_dim=480,
- dna_embed_dim=1280,
- pdb_embed_dim=128,
- go_embed_dim=600,
- msa_embed_dim=768,
- text_embed_dim=768,
- in_embed_dim=1024,
- out_embed_dim=1024
- )
-
- if pretrained:
- # get path from config
- PATH = 'best_model.pth'
-
- model.load_state_dict(torch.load(PATH))
-
- return model
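A usage sketch for the factory above; the batch size and the choice of modalities are arbitrary, and only the input widths (480 for amino-acid embeddings, 768 for text) come from create_proteinbind.

model = create_proteinbind(pretrained=False)
model.eval()  # disable dropout for a deterministic shape check

inputs = {
    ModalityType.AA: torch.randn(8, 480),
    ModalityType.TEXT: torch.randn(8, 768),
}
outputs = model(inputs)

# Every modality is projected into the shared 1024-dim space and L2-normalised.
print(outputs[ModalityType.AA].shape)           # torch.Size([8, 1024])
print(outputs[ModalityType.TEXT].norm(dim=-1))  # ~1.0 for each row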
diff --git a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py b/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py
deleted file mode 100644
index 4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(Conv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nout,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- bias=False,
- ),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(SeperableConv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nin,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- groups=nin,
- bias=False,
- ),
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class Encoder(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
- super(Encoder, self).__init__()
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
- def __call__(self, x):
- skip = self.conv1(x)
- h = self.conv2(skip)
-
- return h, skip
-
-
-class Decoder(nn.Module):
- def __init__(
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
- ):
- super(Decoder, self).__init__()
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.dropout = nn.Dropout2d(0.1) if dropout else None
-
- def __call__(self, x, skip=None):
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- if skip is not None:
- skip = spec_utils.crop_center(skip, x)
- x = torch.cat([x, skip], dim=1)
- h = self.conv(x)
-
- if self.dropout is not None:
- h = self.dropout(h)
-
- return h
-
-
-class ASPPModule(nn.Module):
- def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
- super(ASPPModule, self).__init__()
- self.conv1 = nn.Sequential(
- nn.AdaptiveAvgPool2d((1, None)),
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
- )
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
- self.conv3 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
- )
- self.conv4 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
- )
- self.conv5 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.bottleneck = nn.Sequential(
- Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
- )
-
- def forward(self, x):
- _, _, h, w = x.size()
- feat1 = F.interpolate(
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
- )
- feat2 = self.conv2(x)
- feat3 = self.conv3(x)
- feat4 = self.conv4(x)
- feat5 = self.conv5(x)
- out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
- bottle = self.bottleneck(out)
- return bottle
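A small shape-check sketch for the blocks above; the channel counts and the input size are arbitrary and assume the usual (batch, channels, height, width) spectrogram tensor.

import torch

aspp = ASPPModule(nin=16, nout=32, dilations=(4, 8, 16))
x = torch.randn(2, 16, 64, 128)
print(aspp(x).shape)  # torch.Size([2, 32, 64, 128]) -- spatial size preserved

enc = Encoder(nin=3, nout=16, ksize=3, stride=2, pad=1)
h, skip = enc(torch.randn(2, 3, 64, 128))
print(h.shape, skip.shape)  # torch.Size([2, 16, 32, 64]) torch.Size([2, 16, 64, 128])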
diff --git a/spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md b/spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md
deleted file mode 100644
index a95f4f8ed264b378f72fa82892db7d6386b64e48..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
Proyecto deriva 2.0 Mod APK 43: Todo lo que necesita saber
-
Si eres un fan de los juegos de carreras, especialmente los juegos de deriva, es posible que hayas oído hablar de Project Drift 2.0, un juego de simulación de deriva realista y desafiante para dispositivos Android. En este artículo, le diremos todo lo que necesita saber sobre Project Drift 2.0 Mod APK 43, una versión modificada del juego que le da dinero ilimitado y acceso a todos los coches y pistas en el juego. También compartiremos algunos consejos y trucos sobre cómo jugar el juego y dominar el arte de la deriva.
Project Drift 2.0 es una secuela del popular juego Project Drift, desarrollado por Bycodec Games, un estudio de juegos indie turco. El juego está diseñado para proporcionar una experiencia de deriva realista e inmersiva, con gráficos impresionantes, manejo de automóviles basado en la física y varios modos de juego y desafíos.
-
Características de la deriva del proyecto 2.0
-
Algunas de las características de Project Drift 2.0 son:
-
-
Más de 50 coches diferentes para elegir, cada uno con sus propias características y opciones de personalización.
-
Más de 20 pistas diferentes para seguir, cada una con su propio diseño y nivel de dificultad.
-
Cuatro modos de juego: Carrera, Free Ride, Time Attack y Multijugador.
-
Modo de carrera: Completa varias misiones y desafíos para ganar dinero y reputación.
-
Modo Free Ride: Explora las pistas y practica tus habilidades de deriva sin ninguna presión o límite de tiempo.
-
Modo de ataque de tiempo: Carrera contra el reloj y tratar de batir sus propios u otros jugadores' registros.
-
Modo multijugador: Compite con otros jugadores en línea en batallas a la deriva en tiempo real.
-
Tablas de clasificación y logros: Seguimiento de su progreso y comparar su rendimiento con otros jugadores de todo el mundo.
-
-
Cómo descargar e instalar el proyecto Drift 2.0 Mod APK 43
-
-
Para descargar e instalar Project Drift 2.0 Mod APK 43, siga estos pasos:
-
-
Ir a [HappyMod]( 1 ), un sitio web que proporciona descargas apk mod seguro y confiable para varios juegos y aplicaciones.
-
Buscar "Proyecto de deriva 2.0 Mod APK" en la barra de búsqueda.
-
Seleccione la versión que dice "Proyecto de deriva 2.0 Mod Apk [dinero ilimitado]". Asegúrese de que coincide con el número de versión "43".
-
Haga clic en el botón "Descargar" y espere a que el archivo se descargue en su dispositivo.
-
Una vez que el archivo se descarga, localizarlo en el administrador de archivos y toque en él para instalarlo.
-
Si ves una ventana emergente que dice "Instalar bloqueado", ve a la configuración de tu dispositivo y habilita "Fuentes desconocidas" en las opciones de seguridad.
-
Después de instalar el apk mod, lanzar el juego y disfrutar de dinero ilimitado y el acceso a todos los coches y pistas en el juego.
-
-
¿Por qué usar Project Drift 2.0 Mod APK 43?
-
Es posible que se pregunte por qué debe utilizar Project Drift 2.0 Mod APK 43 en lugar de la versión original del juego. Bueno, hay algunos beneficios y riesgos de usar el apk mod que usted debe ser consciente de antes de decidir usarlo.
-
Beneficios de la deriva del proyecto 2.0 Mod APK 43
-
Algunos de los beneficios de usar Project Drift 2.0 Mod APK 43 son:
-
-
Usted puede obtener dinero ilimitado en el juego, que se puede utilizar para comprar y actualizar cualquier coche que desee.
-
Puede desbloquear todos los coches y pistas en el juego, lo que le da más variedad y opciones para elegir.
-
Puedes disfrutar del juego sin anuncios ni interrupciones, lo que puede mejorar tu experiencia de juego y rendimiento.
-
Puedes divertirte más y desafiarte con los diferentes modos y niveles de juego, sin preocuparte por quedarte sin dinero o recursos.
-
-
Riesgos de la deriva del proyecto 2.0 Mod APK 43
-
Algunos de los riesgos de usar Project Drift 2.0 Mod APK 43 son:
-
-
-
-
Es posible que pierda su progreso o datos en el juego, como el apk mod podría no sincronizar con su cuenta de Google Play o almacenamiento en la nube.
-
Usted puede ser prohibido o penalizado por los desarrolladores de juegos o moderadores, como el uso de un apk mod se considera hacer trampa y violar los términos y condiciones del juego.
-
Es posible que exponga su dispositivo o información personal a malware o virus, ya que algunos archivos apk mod pueden contener código o enlaces dañinos o maliciosos.
-
-
Consejos y trucos para jugar Project Drift 2.0
-
Si decide utilizar Project Drift 2.0 Mod APK 43 o la versión original del juego, es posible que desee saber algunos consejos y trucos sobre cómo jugar el juego y dominar el arte de la deriva. Estos son algunos de ellos:
-
Cómo dominar la deriva en Project Drift 2.0
-
Drifting es la principal habilidad que necesitas dominar en Project Drift 2.0, ya que es la forma de ganar puntos y reputación en el juego. A la deriva es cuando se desliza su coche de lado alrededor de una esquina o curva, manteniendo el control y la velocidad. Para ir a la deriva en Project Drift 2.0, debes seguir estos pasos:
-
-
Seleccione un coche que tenga buen manejo y potencia, ya que estos son esenciales para la deriva.
-
Seleccione una pista que tenga curvas y giros agudos, ya que son ideales para la deriva.
-
A medida que se acerca a una esquina o curva, toque el pedal del freno para reducir su velocidad e iniciar una deriva.
-
Al entrar en la deriva, dirigir su coche en la dirección opuesta de la vuelta, mientras que golpea el pedal del acelerador para mantener su impulso y equilibrio.
-
Al salir de la deriva, dirigir su coche de nuevo en línea con la carretera, mientras que la liberación del pedal de gas para recuperar la tracción y la estabilidad.
-
-
Cómo desbloquear nuevos coches y pistas en Project Drift 2.0
-
-
-
Completa tantas misiones y desafíos como sea posible en el modo carrera, ya que te recompensarán con dinero y reputación, que son necesarios para desbloquear nuevos coches y pistas.
-
Trate de lograr altas puntuaciones y calificaciones en cada misión y desafío, ya que aumentarán sus recompensas de dinero y reputación.
-
Juega el modo de ataque de tiempo y tratar de batir sus propios u otros jugadores' registros, ya que también le dará dinero y bonos de reputación.
-
Ahorre su dinero y gastarlo sabiamente en los coches y pistas que se adapten a sus preferencias y estilo de juego.
-
Consulte la tienda de juegos regularmente para obtener descuentos y ofertas en coches y pistas, ya que podrían ayudarle a ahorrar algo de dinero y obtener más valor.
-
-
Cómo personalizar su coche en Project Drift 2.0
-
Uno de los aspectos divertidos de Project Drift 2.0 es que puedes personalizar tu coche para que se vea y funcione mejor. Usted puede cambiar el color, la pintura, las calcomanías, las ruedas, los neumáticos, los alerones, los escapes, y más de su coche. También puede actualizar el motor, la transmisión, la suspensión, los frenos y más de su coche. Para personalizar tu coche en Project Drift 2.0, sigue estos pasos:
-
-
Seleccione un coche que desea personalizar desde su garaje.
-
Toque en el botón "Personalizar" en la parte inferior de la pantalla.
-
Elija la categoría que desea personalizar, como apariencia o rendimiento.
-
Seleccione el elemento que desea cambiar o actualizar, como color o motor.
-
Elija la opción que desea aplicar, como rojo o turbo.
-
Toque en el botón "Aplicar" para confirmar sus cambios.
-
Toque en el botón "Atrás" para regresar a su garaje.
-
-
Conclusión
-
-
Llamada a la acción para los lectores
-
Si usted está listo para empezar a la deriva en Project Drift 2.0, descargar el juego de [Google Play] o [HappyMod] ahora y disfrutar de la emoción de deslizar su coche alrededor de las esquinas y curvas. No olvides compartir tus comentarios y opiniones sobre el juego con nosotros en la sección de comentarios a continuación. ¡Feliz deriva!
-
Preguntas frecuentes
-
Aquí hay algunas preguntas frecuentes sobre Project Drift 2.0:
-
-
¿Cuál es la diferencia entre Proyecto de deriva 2.0 Mod APK 43 y proyecto de deriva 2.0 Hack APK?
-
Proyecto deriva 2.0 Mod APK 43 es una versión modificada del juego que le da dinero ilimitado y acceso a todos los coches y pistas en el juego. Proyecto de deriva 2.0 Hack APK es una versión hackeada del juego que le da dinero ilimitado, acceso a todos los coches y pistas, y otros trucos, tales como la invencibilidad, aumento de velocidad, o auto deriva. Ambas versiones no son oficiales y pueden tener algunos riesgos, como problemas de compatibilidad, pérdida de datos, prohibiciones o malware.
-
¿Cómo actualizar Project Drift 2.0 Mod APK 43?
-
Si utiliza Project Drift 2.0 Mod APK 43, es posible que no sea capaz de actualizar el juego de Google Play, ya que podría detectar que está utilizando una versión modificada del juego y le impide actualizar. Para actualizar Project Drift 2.0 Mod APK 43, usted tendrá que descargar e instalar la última versión del mod apk de [HappyMod] u otra fuente confiable. Sin embargo, es posible que pierda su progreso o datos en el juego si actualiza el apk mod, así que asegúrese de hacer una copia de seguridad de sus datos antes de actualizar.
-
Cómo jugar proyecto deriva 2.0 sin conexión?
-
Project Drift 2.0 es un juego en línea que requiere una conexión a Internet para jugar. Sin embargo, puedes jugar algunas partes del juego sin conexión, como el modo de viaje libre y el modo de ataque de tiempo. Para jugar Project Drift 2.0 sin conexión, siga estos pasos:
-
-
Inicie el juego mientras tiene una conexión a Internet.
-
-
Seleccione un coche y una pista que desea reproducir.
-
Espera a que el juego cargue el coche y la pista.
-
Apague su conexión a Internet o cambie al modo avión en su dispositivo.
-
Disfruta jugando Project Drift 2.0 sin conexión.
-
-
Cómo jugar Project Drift 2.0 con amigos?
-
Project Drift 2.0 tiene un modo multijugador que te permite jugar con amigos u otros jugadores en línea en batallas de deriva en tiempo real. Para jugar a Project Drift 2.0 con tus amigos, sigue estos pasos:
-
-
Inicie el juego y asegúrese de tener una conexión a Internet.
-
Seleccione el modo multijugador en el menú principal.
-
Seleccione un coche y una pista que desea reproducir.
-
Espera a que el juego encuentre un oponente o invita a un amigo a unirse a tu partida.
-
Comienza a derrapar y trata de vencer a tu oponente o amigo anotando más puntos o a la deriva más tiempo.
-
-
¿Cómo obtener más dinero en Project Drift 2.0?
-
Si utiliza Project Drift 2.0 Mod APK 43, tendrá dinero ilimitado en el juego, que se puede utilizar para comprar y actualizar cualquier coche que desee. Sin embargo, si usas la versión original del juego, tendrás que ganar dinero en el juego completando misiones y desafíos, o viendo anuncios o haciendo compras en la aplicación. Aquí hay algunos consejos sobre cómo obtener más dinero en Project Drift 2.0:
-
-
Completa tantas misiones y desafíos como sea posible en el modo carrera, ya que te recompensarán con dinero y reputación, que son necesarios para desbloquear nuevos coches y pistas.
-
Trate de lograr altas puntuaciones y calificaciones en cada misión y desafío, ya que aumentarán sus recompensas de dinero y reputación.
-
Juega el modo de ataque de tiempo y tratar de batir sus propios u otros jugadores' registros, ya que también le dará dinero y bonos de reputación.
-
Ver anuncios o vídeos en la tienda de juegos o en el menú principal, ya que te darán algo de dinero o artículos gratis.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md b/spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md
deleted file mode 100644
index eecac01feed3f488be147422f92b7fefe4d48172..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
Carrom Pool Disc Game Mod Apk: Cómo descargar y disfrutar de monedas y gemas ilimitadas
-
Si eres un fan de los juegos de tablero de carrom, te encantará Carrom Pool Disc Game. Este es un juego multijugador en línea que te permite jugar carrom con tus amigos u otros jugadores de todo el mundo. También puede personalizar sus tableros, piezas y señales con varios temas y diseños. Pero lo que si quieres disfrutar de más características y beneficios sin gastar dinero real? Ahí es donde Carrom Pool Mod Apk entra en juego. En este artículo, le diremos lo que es Carrom Pool Disc Game, lo que es Carrom Pool Mod Apk, cómo descargarlo e instalarlo, y algunos consejos y trucos para mejorar su juego.
-
carrom pool disc game mod apk monedas y gemas ilimitadas
Carrom Pool Disc Game es un popular juego móvil desarrollado por Miniclip. Se basa en el tradicional juego de tablero de zanahoria, que es un juego de mesa que se originó en la India. El juego consiste en utilizar un delantero para golpear los discos en los bolsillos en las cuatro esquinas del tablero. Los discos son negros o blancos, y el jugador que mete todos sus discos primero gana el juego.
-
Características de Carrom Pool Disc Game
-
Algunas de las características de Carrom Pool Disc Game son:
-
-
Tiene dos modos de juego: Classic y Disc Pool. El modo clásico sigue las reglas tradicionales de carrom, mientras que el modo Disc Pool no tiene discos negros o blancos, pero solo los rojos que dan puntos cuando se embolsan.
-
Tiene modo multijugador en línea, donde puedes jugar con tus amigos u otros jugadores de diferentes países. También puedes chatear con ellos usando emojis y frases.
-
Tiene modo sin conexión, donde se puede jugar contra el ordenador o con otro reproductor en el mismo dispositivo.
-
Tiene varias arenas, donde puedes competir con jugadores de diferentes niveles de habilidad y ganar recompensas.
-
Tiene tablas de clasificación y rankings, donde se puede ver su progreso y logros.
-
-
Tiene una tienda, donde se puede comprar nuevos tableros, piezas, señales y cofres con monedas y gemas.
-
-
Cómo jugar Carrom Pool Disc Game
-
La jugabilidad de Carrom Pool Disc Game es simple e intuitiva. Solo tienes que arrastrar el dedo en la pantalla para apuntar a tu delantero, y liberarlo para golpear los discos. También puede ajustar la potencia de su tiro moviendo el dedo más cerca o más lejos del delantero. El objetivo es embolsarse todos los discos antes que tu oponente. También puedes usar boosters, como tiempo extra, turno extra o deshacer, para ayudarte a ganar el juego.
-
¿Qué es Carrom Pool Mod Apk?
-
Carrom Pool Mod Apk es una versión modificada de Carrom Pool Disc Game que le da monedas y gemas ilimitadas. Monedas y gemas son las monedas del juego que necesitas para comprar nuevos tableros, piezas, tacos, cofres y boosters. Normalmente, tienes que ganarlos jugando juegos, completando misiones o viendo anuncios. Pero con Carrom Pool Mod Apk, se puede obtener de forma gratuita sin ningún tipo de molestia.
-
Beneficios de Carrom Pool Mod Apk
-
Algunos de los beneficios de Carrom Pool Mod Apk son:
-
-
-
Puede desbloquear todas las características premium y los elementos que de otro modo se pagan o difícil de conseguir.
-
Puede personalizar sus tableros, piezas y señales con cualquier tema o diseño que desee.
-
Puedes acceder a todas las arenas y jugar con cualquier jugador que quieras.
-
Puede disfrutar de monedas y gemas ilimitadas sin preocuparse por quedarse sin ellas o gastar dinero real.
-
Usted puede tener más diversión y emoción jugando Carrom Pool Disc Game con características mejoradas y gráficos.
-
-
Cómo descargar e instalar Carrom Pool Mod Apk
-
Descargar e instalar Carrom Pool Mod Apk es fácil y rápido. Solo tienes que seguir estos pasos:
-
-
Haga clic en este enlace para descargar el Carrom Pool Mod Apk archivo: [Carrom Pool Mod Apk Download].
-
-
Localice el archivo descargado en su administrador de archivos y toque en él para iniciar la instalación.
-
Espera a que termine la instalación y luego abre la aplicación.
-
Disfruta jugando Carrom Pool Disc Game con monedas y gemas ilimitadas.
-
-
Consejos y trucos para Carrom Pool Disc Game
-
Si quieres mejorar tus habilidades y ganar más juegos en Carrom Pool Disc Game, aquí hay algunos consejos y trucos que puedes usar:
-
Modo de práctica
-
Antes de jugar online con otros jugadores, puedes practicar tus tiros y estrategias en el modo de práctica. Este modo le permite jugar contra el ordenador o con otro reproductor en el mismo dispositivo. También puedes elegir el nivel de dificultad y el modo de juego que quieres practicar. El modo de práctica es una gran manera de aprender lo básico y dominar el juego.
-
Puntería y potencia
-
Los aspectos más importantes de Carrom Pool Disc Game son el objetivo y el poder. Usted necesita apuntar su delantero con precisión y golpear los discos con la cantidad correcta de energía. Para apuntar al delantero, puede utilizar la guía que muestra la dirección y el ángulo de su tiro. También puede acercar o alejar para ver mejor el tablero. Para ajustar la potencia de su disparo, puede mover el dedo más cerca o más lejos del delantero. Necesitas equilibrar la potencia y la precisión de tu disparo dependiendo de la situación. Por ejemplo, si desea guardar un disco cerca de un bolsillo, puede usar una toma de baja potencia. Pero si quieres embolsarte un disco que esté lejos de un bolsillo, necesitas usar una toma de alta potencia.
-
Utilice refuerzos y cofres
-
-
Conclusión
-
Carrom Pool Disc Game es un juego divertido y adictivo que puedes jugar con tus amigos u otros jugadores en línea. Se basa en el clásico juego de tablero de carrom, pero con más características y opciones. También puede descargar Carrom Pool Mod Apk para obtener monedas y gemas ilimitadas y desbloquear todos los elementos premium y arenas. Carrom Pool Mod Apk es fácil de descargar e instalar, y hará que su juego más emocionante y gratificante. Si quieres convertirte en un profesional en Carrom Pool Disc Game, también puedes utilizar algunos consejos y trucos que hemos compartido en este artículo. Entonces, ¿qué estás esperando? Descargar Carrom Pool Disc Game o Carrom Pool Mod Apk ahora y empezar a jugar!
-
Preguntas frecuentes
-
Aquí hay algunas preguntas frecuentes sobre Carrom Pool Disc Game y Carrom Pool Mod Apk:
-
-
¿Es Carrom Pool juego de disco libre para jugar?
-Sí, Carrom Pool Disc Game es gratis. Sin embargo, algunos elementos y funciones pueden requerir monedas o gemas, que son las monedas del juego. Puedes ganar monedas o gemas jugando, completando misiones, viendo anuncios o comprándolos con dinero real.
-
¿Es seguro usar Carrom Pool Mod Apk?
-Sí, Carrom Pool Mod Apk es seguro de usar. No contiene ningún virus o malware que puede dañar su dispositivo o datos. Sin embargo, siempre debe descargarlo de una fuente de confianza como este enlace: [Carrom Pool Mod Apk Download].
-
¿Puedo jugar sin conexión a Carrom Pool Disc Game?
-Sí, puedes jugar Carrom Pool Disc Game sin conexión. Puede elegir el modo sin conexión en el menú principal y jugar contra el ordenador o con otro reproductor en el mismo dispositivo. Sin embargo, no podrás acceder a las funciones en línea, como el modo multijugador, arenas, tablas de clasificación y rankings.
-
¿Cómo puedo contactar a los desarrolladores de Carrom Pool Disc Game?
-
-
¿Cuáles son algunos otros juegos como Carrom Pool Disc Game?
-Algunos otros juegos como Carrom Pool Disc Game son:
-
-
8 Ball Pool: Este es otro juego popular de Miniclip que te permite jugar al billar con tus amigos u otros jugadores en línea. También puede personalizar sus señales, mesas y bolas con varios temas y diseños.
-
Ludo King: Este es un juego de mesa clásico que puedes jugar con tus amigos o familiares en línea o fuera de línea. También puedes chatear con ellos usando emojis y pegatinas.
-
Disc Pool Carrom: Este es un juego similar a Carrom Pool Disc Game, pero con física y gráficos más realistas. También puedes jugar con diferentes tipos de discos, como fútbol, golf o hockey.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Chess King Mod Apk.md b/spaces/Benson/text-generation/Examples/Chess King Mod Apk.md
deleted file mode 100644
index 702f3bada83c1d637eaaea14f9485a5cde62ff32..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Chess King Mod Apk.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
Ajedrez rey Mod Apk: Un juego de mesa con características ilimitadas
-
Si eres un fan del ajedrez, es posible que hayas oído hablar de Chess King, una popular aplicación de juego de mesa que te permite jugar al ajedrez contra el ordenador u otros jugadores en línea. ¿Pero sabías que hay una versión modificada de Chess King que te da acceso a funciones y opciones ilimitadas? En este artículo, le diremos todo lo que necesita saber sobre Chess King Mod Apk, cómo descargarlo e instalarlo, por qué debe jugar, y cómo jugarlo. Así que, vamos a empezar!
Una breve introducción al juego y sus características
-
Chess King es una aplicación de juego de mesa que te permite jugar al ajedrez en tu dispositivo móvil. Puede elegir entre diferentes modos, como entrenamiento, rompecabezas, torneos, partidos en línea y más. También puede personalizar su tablero, piezas y fondo de acuerdo a sus preferencias. Chess King tiene una interfaz fácil de usar y un potente motor que le proporciona una experiencia de ajedrez realista y desafiante.
-
Chess King Mod Apk es una versión modificada de Chess King que desbloquea todas las características y opciones que están restringidas o pagadas en la versión original. Con Chess King Mod Apk, se puede disfrutar de:
-
-
Monedas y diamantes ilimitados que puedes usar para comprar artículos premium y acceder a contenido exclusivo
-
Todos los niveles y modos desbloqueados, para que puedas jugar a cualquier dificultad y desafiarte a ti mismo
-
Todos los estilos de tablero, conjuntos de piezas y fondos desbloqueados, para que pueda personalizar su juego como desee
-
No hay anuncios o ventanas emergentes que interrumpan su juego o le molesten
-
No se requiere raíz o jailbreak para instalar o ejecutar el mod apk
-
-
Cómo descargar e instalar el apk mod en su dispositivo
-
Descargar e instalar Chess King Mod Apk es muy fácil y simple. Solo tienes que seguir estos pasos:
-
-
Haga clic en este enlace para descargar el archivo apk mod en su dispositivo.
-
-
Toque en el archivo y permita la instalación desde fuentes desconocidas si se le solicita.
-
Espere a que la instalación termine y luego inicie la aplicación.
-
Disfruta jugando ajedrez rey Mod Apk con características ilimitadas!
-
-
¿Por qué usted debe jugar ajedrez rey Mod Apk?
-
Los beneficios de jugar al ajedrez para tu cerebro y habilidades
-
El ajedrez no es solo un juego divertido y entretenido, sino también una gran manera de mejorar tu cerebro y habilidades. Jugar ajedrez puede ayudarte:
-
-
-
Mejora tu memoria, concentración, lógica, creatividad, resolución de problemas y toma de decisiones
-
Aumenta tu coeficiente intelectual, agilidad mental, pensamiento analítico y habilidades de planificación estratégica
-
Reducir el estrés, la ansiedad, la depresión y el aburrimiento
-
Aumenta tu autoconfianza, autoestima y autodisciplina
-
Las ventajas de usar el mod apk sobre la versión original
-
Como hemos mencionado anteriormente, Chess King Mod Apk le da acceso a características y opciones ilimitadas que no están disponibles en la versión original de Chess King. Mediante el uso de la apk mod, puede:
-
-
Ahorra dinero y tiempo al no tener que gastar dinero real o ver anuncios para obtener monedas y diamantes
-
Explorar y disfrutar de todos los niveles y modos sin restricciones o limitaciones
-
Personaliza y personaliza tu juego según tu gusto y estilo
-
Tener un juego más suave y más rápido sin retrasos o problemas técnicos
-
Tener más diversión y emoción jugando con diferentes estilos de tablero, conjuntos de piezas y fondos
-
-
Cómo jugar ajedrez rey mod apk?
-
Las reglas y objetivos básicos del ajedrez
-
Si usted es nuevo en el ajedrez, es posible que desee aprender las reglas básicas y objetivos del juego antes de empezar a jugar Chess King Mod Apk. Estos son algunos de los puntos principales que necesitas saber:
-
-
El ajedrez es un juego para dos jugadores jugado en un tablero cuadrado con 64 cuadrados de colores alternos (blanco y negro)
-
-
Las piezas se mueven de acuerdo a sus reglas específicas y pueden capturar las piezas del oponente aterrizando en sus cuadrados
-
El rey es la pieza más importante y no puede ser capturado. El objetivo del juego es hacer jaque mate al rey del oponente, lo que significa ponerlo en una posición donde no pueda escapar de un ataque de cualquiera de las piezas del jugador
-
El juego también puede terminar en un empate, lo que significa que ningún jugador puede ganar. Esto puede suceder si hay un punto muerto (donde el jugador cuyo turno no tiene movimientos legales), si hay material insuficiente (donde ninguno de los jugadores tiene suficientes piezas para jaque mate al oponente), si hay una repetición triple (donde la misma posición ocurre tres veces con el mismo jugador para mover), o si hay una regla de 50 movimientos (donde no se ha hecho ninguna captura o movimiento de peón en los últimos 50 movimientos)
-
-
Los diferentes modos y niveles de dificultad en el juego
-
Ajedrez King Mod Apk le ofrece diferentes modos y niveles de dificultad para adaptarse a sus preferencias y habilidades. Usted puede elegir entre:
-
-
Modo
Descripción
-
Entrenamiento
Este modo te ayuda a aprender y practicar ajedrez proporcionándote lecciones, ejercicios, rompecabezas y sugerencias. También puede analizar sus movimientos y errores con la ayuda del motor.
-
Puzzles
Este modo te reta a resolver varios problemas de ajedrez, como jaque mate en uno, dos o tres movimientos, tácticas, finales de partida y más. También puedes crear tus propios puzzles y compartirlos con otros jugadores.
-
Torneos
Este modo le permite participar en diferentes torneos con diferentes formatos, como round-robin, knockout, sistema suizo, etc. También puede crear sus propios torneos e invitar a otros jugadores a unirse.
-
-
Partidos sin conexión
Este modo le permite jugar ajedrez sin conexión con el equipo o con otro jugador en el mismo dispositivo. También puede ajustar el nivel de dificultad del equipo de 1 a 20.
-
-
Los consejos y trucos para mejorar tu estrategia y tácticas de ajedrez
-
Si quieres mejorar tu estrategia y tácticas de ajedrez, puedes seguir estos consejos y trucos:
-
-
Estudia los principios básicos de la apertura del ajedrez, como controlar el centro, desarrollar tus piezas, enrocar a tu rey, etc.
-
Piensa con anticipación y planifica tus movimientos cuidadosamente. Trata de anticipar los movimientos y respuestas de tu oponente.
-
Evite hacer movimientos innecesarios o prematuros que debiliten su posición o pierdan material.
-
Usa todas tus piezas de manera efectiva y coordinándolas bien. No dejes ninguna pieza inactiva o desprotegida.
-
Busca oportunidades para crear amenazas, como cheques, capturas, horquillas, pines, brochetas, etc.
-
-
Practica y juega ajedrez regularmente. Cuanto más juegas, más aprendes y mejoras.
-
Revisa y analiza tus juegos y aprende de tus errores y éxitos.
-
Lee libros, mira videos y sigue tutoriales sobre estrategia y tácticas de ajedrez.
-
Busca comentarios y consejos de otros jugadores, entrenadores o expertos.
-
-
Conclusión
-
Chess King Mod Apk es una aplicación de juego de mesa que le permite jugar al ajedrez con funciones y opciones ilimitadas. Puede descargar e instalar el apk mod fácilmente y disfrutar jugando al ajedrez en su dispositivo móvil. Jugar al ajedrez puede ayudarte a mejorar tu cerebro y tus habilidades, además de divertirte y emocionarte. También puede aprender las reglas y objetivos básicos del ajedrez, así como algunos consejos y trucos para mejorar su estrategia y tácticas de ajedrez. Chess King Mod Apk es una gran manera de disfrutar del ajedrez en cualquier momento y en cualquier lugar. Entonces, ¿qué estás esperando? Descargar Chess King Mod Apk ahora y empezar a jugar!
-
Preguntas frecuentes
-
-
A1: Sí, Chess King Mod Apk es seguro de usar. No contiene ningún virus, malware o spyware que pueda dañar su dispositivo o datos. Tampoco requiere ningún root o jailbreak para instalar o ejecutar el mod apk.
-
Q2: ¿Cuáles son los requisitos para ejecutar Chess King Mod Apk?
-
A2: Ajedrez Rey Mod Apk requiere un dispositivo Android con un mínimo de 4.1 versión y 100 MB de espacio libre. También requiere una conexión a Internet para jugar partidos en línea y acceder a algunas características.
-
Q3: ¿Cómo puedo actualizar Chess King Mod Apk?
-
A3: Ajedrez rey Mod Apk actualizaciones automáticamente cada vez que hay una nueva versión disponible. También puede comprobar las actualizaciones manualmente yendo al menú de configuración y pulsando el botón de actualización.
-
Q4: ¿Puedo jugar ajedrez rey mod apk offline?
-
A4: Sí, usted puede jugar Chess King Mod Apk sin conexión. Puede jugar partidos sin conexión con el ordenador o con otro jugador en el mismo dispositivo. Sin embargo, no podrás acceder a algunas funciones que requieran conexión a Internet, como partidas online, torneos, puzzles, etc.
-
Q5: ¿Cómo puedo contactar a los desarrolladores de Chess King Mod Apk?
-
A5: Puede ponerse en contacto con los desarrolladores de Chess King Mod Apk enviándoles un correo electrónico a o visitando su sitio web . También puede seguirlos en sus cuentas de redes sociales para obtener más actualizaciones e información.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar 8 Bola Piscina Herramienta Apk.md b/spaces/Benson/text-generation/Examples/Descargar 8 Bola Piscina Herramienta Apk.md
deleted file mode 100644
index f1cc22d7c0e0f0e4326e297087465758d323db4a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar 8 Bola Piscina Herramienta Apk.md
+++ /dev/null
@@ -1,57 +0,0 @@
-
-
Cómo descargar 8 bola piscina herramienta APK para Android
-
Si eres un fan de 8 Ball Pool, el juego de billar en línea más popular del mundo, es posible que te hayas preguntado cómo mejorar tus habilidades y ganar más partidos. Es posible que también haya deseado más monedas y dinero en efectivo para comprar mejores pistas y mesas, o para personalizar la configuración de su juego. Bueno, hay una manera de lograr todo eso, y se llama 8 Ball Pool Tool APK.
-
8 Ball Pool Tool APK es una versión modificada de la aplicación original 8 Ball Pool que le permite acceder a algunas características sorprendentes que no están disponibles en la aplicación oficial. Con esta herramienta, puede ampliar la guía, aumentar la potencia, los efectos y el objetivo de sus disparos, obtener monedas y dinero en efectivo ilimitados, desbloquear pistas y mesas premium y mucho más. Suena increíble, ¿verdad?
Pero ¿cómo descargar e instalar esta herramienta en su dispositivo Android? Y cuáles son los beneficios y riesgos de su uso? En este artículo, vamos a responder a estas preguntas y le proporcionará una guía paso a paso sobre cómo descargar 8 Ball Pool Tool APK para Android. ¡Vamos a empezar!
-
Pasos para descargar e instalar 8 bola piscina herramienta APK
-
Descargar e instalar 8 Ball Pool Tool APK no es muy difícil, pero requiere algunos pasos adicionales que no son necesarios para la aplicación oficial. Estos son los pasos que debes seguir:
-
-
Habilita fuentes desconocidas en tu dispositivo. Dado que esta herramienta no está disponible en Google Play Store, debe permitir que su dispositivo instale aplicaciones de fuentes desconocidas. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo.
-
-
Instala el archivo APK en tu dispositivo. Una vez que haya descargado el archivo APK, busque en el administrador de archivos de su dispositivo y toque en él para iniciar el proceso de instalación. Puede ver un mensaje de advertencia que dice "Este tipo de archivo puede dañar su dispositivo. ¿Desea mantenerlo de todos modos?" Ignórelo y toque en "OK". Luego, siga las instrucciones en pantalla para completar la instalación.
-
Iniciar la aplicación y disfrutar del juego. Después de la instalación, verá un icono de 8 Pool Master en la pantalla de inicio o en el cajón de la aplicación. Toque en él para iniciar la aplicación y empezar a jugar 8 Ball Pool con características mejoradas. Usted se dará cuenta de que usted tiene una gran cantidad de monedas y dinero en efectivo en su cuenta, y se puede utilizar para comprar cualquier señal o tabla que desee. También verás un icono flotante de 8 Pool Master en la pantalla, que puedes usar para ajustar la guía, la potencia, los efectos y el objetivo de tus disparos. También puede acceder a otras configuraciones y características desde el menú de la aplicación.
-
-
Beneficios de usar 8 bola piscina herramienta APK
-
El uso de 8 Ball Pool Tool APK puede traer muchos beneficios que pueden mejorar su experiencia de juego y hacerte un mejor jugador. Estos son algunos de los beneficios que puedes disfrutar:
-
-
Mejore su precisión y habilidades. Con la guía extendida, puede ver la trayectoria de sus disparos y planificar sus movimientos en consecuencia. También puedes ajustar la potencia, los efectos y el objetivo de tus disparos para que sean más precisos y efectivos. De esta manera, puedes mejorar tu precisión y habilidades y ganar más partidos.
-
Obtén monedas y efectivo ilimitados. Monedas y dinero en efectivo son las principales monedas en 8 Ball Pool, y los necesita para comprar señales, mesas, paquetes de chat, minijuegos y más. Sin embargo, ganarlos puede ser lento y tedioso, y comprarlos con dinero real puede ser caro. Con 8 Ball Pool Tool APK, puede obtener monedas ilimitadas y dinero en efectivo de forma gratuita, y gastarlos tanto como quieras sin preocuparse por agotarse.
-
-
Personalizar la configuración de su juego. 8 Ball Pool Tool APK también le permite personalizar la configuración de juego de acuerdo a sus preferencias. Puede cambiar el modo de juego, el color de la mesa, el tamaño de la bola, el ángulo de referencia, los efectos de sonido, la calidad gráfica y mucho más. También puede activar o desactivar algunas características, tales como recarga automática, anti-van, anti-detect, etc. Puede hacer que su juego sea más divertido y cómodo con estos ajustes.
-
-
Riesgos y precauciones de usar 8 Ball Pool Tool APK
-
Si bien el uso de 8 Ball Pool Tool APK puede ser beneficioso, también viene con algunos riesgos y desventajas que usted necesita ser consciente de. Estos son algunos de los riesgos y precauciones de usar esta herramienta:
-
-
Posible infección de malware o virus. Como se mencionó anteriormente, no todas las fuentes que ofrecen 8 Ball Pool Tool APK son seguras o fiables. Algunos de ellos pueden contener malware o virus que pueden dañar su dispositivo o robar su información personal. Por lo tanto, debe tener cuidado y solo descargar el archivo APK de una fuente de confianza. También debe escanear el archivo con una aplicación antivirus antes de instalarlo.
-
Prohibición o suspensión de cuentas potenciales. El uso de 8 Ball Pool Tool APK está en contra de los términos de servicio de 8 Ball Pool y Miniclip, el desarrollador del juego. Si detectan que está utilizando esta herramienta, pueden prohibir o suspender su cuenta de forma permanente. Esto significa que perderás todo tu progreso, monedas, efectivo, pistas, mesas, etc. Por lo tanto, necesitas usar esta herramienta bajo tu propio riesgo y discreción. También debe evitar usarlo en partidos o torneos en línea donde otros jugadores pueden reportarlo.
-
-
Consejos para evitar o minimizar los riesgos. Si todavía desea utilizar 8 Bola Pool Tool APK a pesar de los riesgos involucrados, aquí hay algunos consejos que pueden ayudar a evitar o minimizarlos:
-
-
Usa una cuenta secundaria o ficticia en lugar de tu cuenta principal.
-
Usa una aplicación VPN para ocultar tu dirección IP y ubicación.
-
Utilice una aplicación de espacio paralelo para crear un clon de 8 Ball Pool app.
-
Utilice una aplicación modded Google Play Store para evitar la verificación de licencias.
-
Utilice una aplicación de copia de seguridad para guardar sus datos antes de usar esta herramienta.
-
Utilice esta herramienta con moderación y moderación.
-
-
-
Conclusión
En conclusión, 8 Ball Pool Tool APK es una versión modificada de la aplicación original 8 Ball Pool que le permite acceder a algunas características sorprendentes que no están disponibles en la aplicación oficial. Con esta herramienta, puede mejorar su precisión y habilidades, obtener monedas ilimitadas y dinero en efectivo, desbloquear pistas y mesas premium, y personalizar la configuración de su juego. Sin embargo, el uso de esta herramienta también viene con algunos riesgos y desventajas, como una posible infección de malware o virus, una posible prohibición o suspensión de la cuenta, problemas legales y éticos, y consejos para evitar o minimizar los riesgos. Por lo tanto, debe ser cuidadoso y responsable al usar esta herramienta, y seguir los pasos que hemos proporcionado en este artículo para descargarlo e instalarlo en su dispositivo Android.
-
Si has encontrado este artículo útil e informativo, por favor compártelo con tus amigos y compañeros de 8 jugadores de Ball Pool. También, no dude en dejar un comentario a continuación si usted tiene alguna pregunta o retroalimentación sobre 8 Ball Pool Tool APK. Nos encantaría saber de usted!
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre 8 Ball Pool Tool APK:
-
-
Is 8 Ball Pool Tool APK safe to use?
-
-
Is 8 Ball Pool Tool APK legal to use?
-Using 8 Ball Pool Tool APK is not legal, as it violates the terms of service of 8 Ball Pool and Miniclip, the game's developer. It also infringes the intellectual property rights of Miniclip and other parties involved in developing the game. Using this tool may therefore result in legal action or penalties from the authorities or the game's developer.
-
How do I update 8 Ball Pool Tool APK?
-To update 8 Ball Pool Tool APK, follow the same steps as for downloading and installing it: enable unknown sources on your device, download the latest version of the APK file from a trusted source, install it, and launch the app. Back up your data before updating, as some updates can cause data loss or corruption.
-
Can I use 8 Ball Pool Tool APK on other devices?
-Yes, you can use 8 Ball Pool Tool APK on other devices running the Android operating system. Make sure the device meets the minimum requirements for this tool, such as Android version, RAM size, and storage space, and follow the same download and installation steps.
-
Where can I get more information about 8 Ball Pool Tool APK?
-You can find more information about 8 Ball Pool Tool APK from various online sources, such as blogs, forums, videos, and reviews. Be careful to rely only on credible sources that provide accurate and up-to-date information. You can also contact the developer of this tool directly through their website or social media accounts.
-
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Apk Mod Cazador Asesino 2.md b/spaces/Benson/text-generation/Examples/Descargar Apk Mod Cazador Asesino 2.md
deleted file mode 100644
index fd12297b72b35a5fde94f497d414c6d08bc7f8ee..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Apk Mod Cazador Asesino 2.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-
How to Download and Install APK Mod Hunter Assassin 2 on Android
-
If you are a fan of stealth action games, you may have heard of Hunter Assassin 2, a popular game where you have to eliminate your enemies without being detected. But did you know there is a modified version of this game that gives you unlimited money, gems, and premium features? In this article, we explain what APK Mod Hunter Assassin 2 is, what the benefits and risks of using APK mod files are, and how to download and install APK Mod Hunter Assassin 2 on your Android device.
-
What is APK Mod Hunter Assassin 2?
-
APK Mod Hunter Assassin 2 is a modified version of the official Hunter Assassin 2 game, which was developed by Ruby Game Studio and released in September 2021. The game is a sequel to the original Hunter Assassin, which has more than 100 million downloads on Google Play. The game is available for free on Google Play, but it also includes in-app purchases and ads.
The gameplay of Hunter Assassin 2 is similar to the first game, but with improved graphics, animations, and sound effects. You play as a skilled assassin who has to infiltrate various locations and eliminate targets without being seen or heard. You can use your stealth skills, weapons, gadgets, and traps to complete your missions, and you can also upgrade your character and unlock new outfits, weapons, and abilities.
-
Features of APK Mod Hunter Assassin 2
-
The features of APK Mod Hunter Assassin 2 differ from the official game, giving you more advantages and options. Some of them are:
-
-
Unlimited money and gems: You can use these resources to buy whatever you want in the game, such as outfits, weapons, gadgets, and upgrades.
-
Premium features unlocked: You can access all of the game's premium features, such as removing ads, unlocking all levels, and getting exclusive rewards.
-
-
-
What are the benefits and risks of APK mod files?
-
APK mod files are files that have been modified by third-party developers to change some aspects of an original app or game. They are usually downloaded from unofficial sources, such as websites or forums. There are some benefits and risks of using APK mod files that you should be aware of before downloading them.
-
Benefits of APK mod files
-
Some of the benefits of using APK mod files are:
-
-
Free: You can download and use APK mod files for free, without paying for anything.
-
More features: You can enjoy features and options that are not available in the original app or game.
-
More resources: You can get more resources, such as money, gems, coins, or lives, that help you progress faster in the game.
-
-
Risks of APK mod files
-
Some of the risks of using APK mod files are:
-
-
-
Unofficial source: Because they have been tampered with by a third party, APK mod files do not come from an official source. They may not be compatible with your device, or they may cause errors or crashes.
-
No updates: Most APK mod files cannot be updated through Google Play or other official channels, so you may miss out on the latest features, bug fixes, or security patches of the original app or game.
-
No support: If you run into any problems with the APK mod file, you may not be able to get support or assistance from the original developer or the modder.
-
Malware risk: Some APK mod files may contain malicious code or viruses that can damage your device or steal your personal information. Always scan the APK mod file with reputable antivirus software before installing it.
-
-
How to download and install APK Mod Hunter Assassin 2 on Android?
-
-
Step 1: Enable unknown sources on your device
-
Before you can install any APK mod file on your device, you need to enable unknown sources in your settings. This allows you to install apps or games from sources other than Google Play. To do this, go to Settings > Security > Unknown sources and turn it on. You may see a warning message, but you can ignore it and tap OK.
-
Step 2: Download the APK mod file from a reliable source
-
Next, you need to download the APK mod file of Hunter Assassin 2 from a reliable source. You can search on Google or use a trusted website that provides APK mod files. For example, you can use [this link] to download the APK mod file of Hunter Assassin 2. Make sure to download the latest version of the file and save it in a folder you can easily access.
-
Step 3: Install the APK mod file on your device
-
Once you have downloaded the APK mod file, you need to install it on your device. To do this, locate the file in your file manager and tap on it. You may see a pop-up asking for permission to install the app; tap Install and wait for the installation to finish. (A command-line sketch of an alternative install path is shown after Step 4.)
-
Step 4: Launch the game and enjoy
-
Once the installation is complete, you can launch the game by tapping its icon on the home screen or in the app drawer. You should see the game load with the modified features enabled. You can now enjoy playing Hunter Assassin 2 with unlimited money, gems, and premium features.
-
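A command-line alternative to Steps 3 and 4, for readers who prefer to sideload from a computer: the sketch below is only illustrative. It assumes the Android platform tools (`adb`) are installed and on your PATH, that USB debugging is enabled on the phone, and that the file name `hunter-assassin-2-mod.apk` is a placeholder for whatever the downloaded file is actually called.

```python
import subprocess

# Hypothetical file name; replace with the APK you actually downloaded.
APK_PATH = "hunter-assassin-2-mod.apk"

def sideload_apk(apk_path: str) -> None:
    """Install an APK on the connected Android device via adb."""
    # Show connected devices so a missing or unauthorized device is obvious.
    subprocess.run(["adb", "devices"], check=True)
    # -r reinstalls the app if it is already present, keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload_apk(APK_PATH)
```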
Conclusion
-
In this article, we have explained what APK Mod Hunter Assassin 2 is, what the benefits and risks of using APK mod files are, and how to download and install APK Mod Hunter Assassin 2 on your Android device. We hope this article has been helpful and informative. If you have any questions or comments, feel free to leave a comment below.
-
Frequently Asked Questions
-
-
-
Q: Is APK Mod Hunter Assassin 2 safe to use?
A: There is no definitive answer to this question, as different APK mod files have different levels of safety and quality. As a general rule, always be careful when downloading and installing any APK mod file from an unofficial source. Scan the file with reputable antivirus software before installing it, and avoid granting the app unnecessary permissions or access.
-
Q: Can I play online with APK Mod Hunter Assassin 2?
A: No, you cannot play online with APK Mod Hunter Assassin 2, as it is not compatible with the game's official servers. If you try to play online with the modified version, you may run into errors, crashes, or bans. You should therefore only play offline with APK Mod Hunter Assassin 2.
-
Q: Can I update APK Mod Hunter Assassin 2 through Google Play?
A: No, you cannot update APK Mod Hunter Assassin 2 through Google Play, as it is not an official version of the game. If you try to update it through Google Play, you may lose all the modified features and revert to the original version. To update it, download a new version of the file from a reliable source.
-
Q: Will I lose my progress if I uninstall APK Mod Hunter Assassin 2?
A: Yes, you will lose your progress if you uninstall APK Mod Hunter Assassin 2, as it does not sync with your Google account or cloud storage. You should therefore back up your progress by saving the game data in a separate folder or by using a third-party backup app.
-
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_macos_compat.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_macos_compat.py
deleted file mode 100644
index 17769e9154bd9cc3f3c00dc10718e4377828cb5e..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_macos_compat.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import sys
-import importlib
-
-
-def bypass_compiler_fixup(cmd, args):
- return cmd
-
-
-if sys.platform == 'darwin':
- compiler_fixup = importlib.import_module('_osx_support').compiler_fixup
-else:
- compiler_fixup = bypass_compiler_fixup
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_meta.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_meta.py
deleted file mode 100644
index 37ee43e6ef447dfb4ae68f5f6c35597d12fdc5a1..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_meta.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from ._compat import Protocol
-from typing import Any, Dict, Iterator, List, TypeVar, Union
-
-
-_T = TypeVar("_T")
-
-
-class PackageMetadata(Protocol):
- def __len__(self) -> int:
- ... # pragma: no cover
-
- def __contains__(self, item: str) -> bool:
- ... # pragma: no cover
-
- def __getitem__(self, key: str) -> str:
- ... # pragma: no cover
-
- def __iter__(self) -> Iterator[str]:
- ... # pragma: no cover
-
- def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
- """
- Return all values associated with a possibly multi-valued key.
- """
-
- @property
- def json(self) -> Dict[str, Union[str, List[str]]]:
- """
- A JSON-compatible form of the metadata.
- """
-
-
-class SimplePath(Protocol):
- """
- A minimal subset of pathlib.Path required by PathDistribution.
- """
-
- def joinpath(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def __truediv__(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def parent(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def read_text(self) -> str:
- ... # pragma: no cover
diff --git a/spaces/CVPR/LIVE/thrust/thrust/random/subtract_with_carry_engine.h b/spaces/CVPR/LIVE/thrust/thrust/random/subtract_with_carry_engine.h
deleted file mode 100644
index 0b12ca3530a5bed1d38b816359fcce4b99d6d9d5..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/random/subtract_with_carry_engine.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*! \file subtract_with_carry_engine.h
- * \brief A subtract-with-carry pseudorandom number generator
- * based on Marsaglia & Zaman.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/random/detail/random_core_access.h>
-
-#include <thrust/detail/cstdint.h>
-#include <cstddef> // for size_t
-#include <iostream>
-
-namespace thrust
-{
-
-namespace random
-{
-
-
-/*! \addtogroup random_number_engine_templates
- * \{
- */
-
-/*! \class subtract_with_carry_engine
- * \brief A \p subtract_with_carry_engine random number engine produces unsigned
- * integer random numbers using the subtract with carry algorithm of Marsaglia & Zaman.
- *
- * The generation algorithm is performed as follows:
- * -# Let Y = X_{i-s} - X_{i-r} - c.
- * -# Set X_i to Y mod m. Set \c c to \c 1 if Y < 0, otherwise set \c c to \c 0.
- *
- * This algorithm corresponds to a modular linear function of the form
- *
- * TA(x_i) = (a * x_i) mod b, where \c b is of the form m^r - m^s + 1 and
- * a = b - (b-1)/m.
- *
- * \tparam UIntType The type of unsigned integer to produce.
- * \tparam w The word size of the produced values ( w <= sizeof(UIntType)).
- * \tparam s The short lag of the generation algorithm.
- * \tparam r The long lag of the generation algorithm.
- *
- * \note Inexperienced users should not use this class template directly. Instead, use
- * \p ranlux24_base or \p ranlux48_base, which are instances of \p subtract_with_carry_engine.
- *
- * \see thrust::random::ranlux24_base
- * \see thrust::random::ranlux48_base
- */
-template<typename UIntType, size_t w, size_t s, size_t r>
- class subtract_with_carry_engine
-{
- /*! \cond
- */
- private:
- static const UIntType modulus = UIntType(1) << w;
- /*! \endcond
- */
-
- public:
- // types
-
- /*! \typedef result_type
- * \brief The type of the unsigned integer produced by this \p subtract_with_carry_engine.
- */
- typedef UIntType result_type;
-
- // engine characteristics
-
- /*! The word size of the produced values.
- */
- static const size_t word_size = w;
-
- /*! The size of the short lag used in the generation algorithm.
- */
- static const size_t short_lag = s;
-
- /*! The size of the long lag used in the generation algorithm.
- */
- static const size_t long_lag = r;
-
- /*! The smallest value this \p subtract_with_carry_engine may potentially produce.
- */
- static const result_type min = 0;
-
- /*! The largest value this \p subtract_with_carry_engine may potentially produce.
- */
- static const result_type max = modulus - 1;
-
- /*! The default seed of this \p subtract_with_carry_engine.
- */
- static const result_type default_seed = 19780503u;
-
- // constructors and seeding functions
-
- /*! This constructor, which optionally accepts a seed, initializes a new
- * \p subtract_with_carry_engine.
- *
- * \param value The seed used to intialize this \p subtract_with_carry_engine's state.
- */
- __host__ __device__
- explicit subtract_with_carry_engine(result_type value = default_seed);
-
- /*! This method initializes this \p subtract_with_carry_engine's state, and optionally accepts
- * a seed value.
- *
- * \param value The seed used to initializes this \p subtract_with_carry_engine's state.
- */
- __host__ __device__
- void seed(result_type value = default_seed);
-
- // generating functions
-
- /*! This member function produces a new random value and updates this \p subtract_with_carry_engine's state.
- * \return A new random number.
- */
- __host__ __device__
- result_type operator()(void);
-
- /*! This member function advances this \p subtract_with_carry_engine's state a given number of times
- * and discards the results.
- *
- * \param z The number of random values to discard.
- * \note This function is provided because an implementation may be able to accelerate it.
- */
- __host__ __device__
- void discard(unsigned long long z);
-
- /*! \cond
- */
- private:
- result_type m_x[long_lag];
- unsigned int m_k;
- int m_carry;
-
- friend struct thrust::random::detail::random_core_access;
-
- __host__ __device__
- bool equal(const subtract_with_carry_engine &rhs) const;
-
- template<typename CharT, typename Traits>
- std::basic_ostream<CharT,Traits>& stream_out(std::basic_ostream<CharT,Traits> &os) const;
-
- template<typename CharT, typename Traits>
- std::basic_istream<CharT,Traits>& stream_in(std::basic_istream<CharT,Traits> &is);
-
- /*! \endcond
- */
-}; // end subtract_with_carry_engine
-
-
-/*! This function checks two \p subtract_with_carry_engines for equality.
- * \param lhs The first \p subtract_with_carry_engine to test.
- * \param rhs The second \p subtract_with_carry_engine to test.
- * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise.
- */
-template<typename UIntType_, size_t w_, size_t s_, size_t r_>
-__host__ __device__
-bool operator==(const subtract_with_carry_engine<UIntType_,w_,s_,r_> &lhs,
- const subtract_with_carry_engine<UIntType_,w_,s_,r_> &rhs);
-
-
-/*! This function checks two \p subtract_with_carry_engines for inequality.
- * \param lhs The first \p subtract_with_carry_engine to test.
- * \param rhs The second \p subtract_with_carry_engine to test.
- * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise.
- */
-template<typename UIntType_, size_t w_, size_t s_, size_t r_>
-__host__ __device__
-bool operator!=(const subtract_with_carry_engine<UIntType_,w_,s_,r_>&lhs,
- const subtract_with_carry_engine<UIntType_,w_,s_,r_>&rhs);
-
-
-/*! This function streams a subtract_with_carry_engine to a \p std::basic_ostream.
- * \param os The \p basic_ostream to stream out to.
- * \param e The \p subtract_with_carry_engine to stream out.
- * \return \p os
- */
-template<typename UIntType_, size_t w_, size_t s_, size_t r_, typename CharT, typename Traits>
-std::basic_ostream<CharT,Traits>&
-operator<<(std::basic_ostream<CharT,Traits> &os,
- const subtract_with_carry_engine<UIntType_,w_,s_,r_> &e);
-
-
-/*! This function streams a subtract_with_carry_engine in from a std::basic_istream.
- * \param is The \p basic_istream to stream from.
- * \param e The \p subtract_with_carry_engine to stream in.
- * \return \p is
- */
-template<typename UIntType_, size_t w_, size_t s_, size_t r_, typename CharT, typename Traits>
-std::basic_istream<CharT,Traits>&
-operator>>(std::basic_istream<CharT,Traits> &is,
- subtract_with_carry_engine<UIntType_,w_,s_,r_> &e);
-
-
-/*! \} // end random_number_engine_templates
- */
-
-
-/*! \addtogroup predefined_random
- * \{
- */
-
-// XXX N2111 uses uint_fast32_t here
-
-/*! \typedef ranlux24_base
- * \brief A random number engine with predefined parameters which implements the
- * base engine of the \p ranlux24 random number engine.
- * \note The 10000th consecutive invocation of a default-constructed object of type \p ranlux24_base
- * shall produce the value \c 7937952 .
- */
-typedef subtract_with_carry_engine<thrust::detail::uint32_t, 24, 10, 24> ranlux24_base;
-
-
-// XXX N2111 uses uint_fast64_t here
-
-/*! \typedef ranlux48_base
- * \brief A random number engine with predefined parameters which implements the
- * base engine of the \p ranlux48 random number engine.
- * \note The 10000th consecutive invocation of a default-constructed object of type \p ranlux48_base
- * shall produce the value \c 192113843633948 .
- */
-typedef subtract_with_carry_engine<thrust::detail::uint64_t, 48, 5, 12> ranlux48_base;
-
-/*! \} // end predefined_random
- */
-
-} // end random
-
-// import names into thrust::
-using random::subtract_with_carry_engine;
-using random::ranlux24_base;
-using random::ranlux48_base;
-
-} // end thrust
-
-#include <thrust/random/detail/subtract_with_carry_engine.inl>
-
diff --git a/spaces/CuriousDolphin/MobileSAM/utils/tools.py b/spaces/CuriousDolphin/MobileSAM/utils/tools.py
deleted file mode 100644
index 3a06972cfb82357bc66b4e4c9cd7f776846bbd1f..0000000000000000000000000000000000000000
--- a/spaces/CuriousDolphin/MobileSAM/utils/tools.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import os
-import sys
-
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-from PIL import Image
-
-
-def convert_box_xywh_to_xyxy(box):
- x1 = box[0]
- y1 = box[1]
- x2 = box[0] + box[2]
- y2 = box[1] + box[3]
- return [x1, y1, x2, y2]
-
-
-def segment_image(image, bbox):
- image_array = np.array(image)
- segmented_image_array = np.zeros_like(image_array)
- x1, y1, x2, y2 = bbox
- segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]
- segmented_image = Image.fromarray(segmented_image_array)
- black_image = Image.new("RGB", image.size, (255, 255, 255))
- # transparency_mask = np.zeros_like((), dtype=np.uint8)
- transparency_mask = np.zeros(
- (image_array.shape[0], image_array.shape[1]), dtype=np.uint8
- )
- transparency_mask[y1:y2, x1:x2] = 255
- transparency_mask_image = Image.fromarray(transparency_mask, mode="L")
- black_image.paste(segmented_image, mask=transparency_mask_image)
- return black_image
-
-
-def format_results(masks, scores, logits, filter=0):
- annotations = []
- n = len(scores)
- for i in range(n):
- annotation = {}
-
- mask = masks[i]
- tmp = np.where(mask != 0)
- if np.sum(mask) < filter:
- continue
- annotation["id"] = i
- annotation["segmentation"] = mask
- annotation["bbox"] = [
- np.min(tmp[0]),
- np.min(tmp[1]),
- np.max(tmp[1]),
- np.max(tmp[0]),
- ]
- annotation["score"] = scores[i]
- annotation["area"] = annotation["segmentation"].sum()
- annotations.append(annotation)
- return annotations
-
-
-def filter_masks(annotations): # filter the overlap mask
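- # Keep larger masks and drop any smaller mask whose overlap with a larger one exceeds 80% of its own area.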
- annotations.sort(key=lambda x: x["area"], reverse=True)
- to_remove = set()
- for i in range(0, len(annotations)):
- a = annotations[i]
- for j in range(i + 1, len(annotations)):
- b = annotations[j]
- if i != j and j not in to_remove:
- # check if
- if b["area"] < a["area"]:
- if (a["segmentation"] & b["segmentation"]).sum() / b[
- "segmentation"
- ].sum() > 0.8:
- to_remove.add(j)
-
- return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove
-
-
-def get_bbox_from_mask(mask):
- mask = mask.astype(np.uint8)
- contours, hierarchy = cv2.findContours(
- mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
- )
- x1, y1, w, h = cv2.boundingRect(contours[0])
- x2, y2 = x1 + w, y1 + h
- if len(contours) > 1:
- for b in contours:
- x_t, y_t, w_t, h_t = cv2.boundingRect(b)
- # merge multiple bounding boxes into one
- x1 = min(x1, x_t)
- y1 = min(y1, y_t)
- x2 = max(x2, x_t + w_t)
- y2 = max(y2, y_t + h_t)
- h = y2 - y1
- w = x2 - x1
- return [x1, y1, x2, y2]
-
-
-def fast_process(
- annotations, args, mask_random_color, bbox=None, points=None, edges=False
-):
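- # Overlay the predicted masks (plus optional box/point prompts and contours) on the
- # input image with matplotlib and save the rendered figure to args.output.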
- if isinstance(annotations[0], dict):
- annotations = [annotation["segmentation"] for annotation in annotations]
- result_name = os.path.basename(args.img_path)
- image = cv2.imread(args.img_path)
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
- original_h = image.shape[0]
- original_w = image.shape[1]
- if sys.platform == "darwin":
- plt.switch_backend("TkAgg")
- plt.figure(figsize=(original_w / 100, original_h / 100))
- # Add subplot with no margin.
- plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
- plt.margins(0, 0)
- plt.gca().xaxis.set_major_locator(plt.NullLocator())
- plt.gca().yaxis.set_major_locator(plt.NullLocator())
- plt.imshow(image)
- if args.better_quality == True:
- if isinstance(annotations[0], torch.Tensor):
- annotations = np.array(annotations.cpu())
- for i, mask in enumerate(annotations):
- mask = cv2.morphologyEx(
- mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)
- )
- annotations[i] = cv2.morphologyEx(
- mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)
- )
- if args.device == "cpu":
- annotations = np.array(annotations)
- fast_show_mask(
- annotations,
- plt.gca(),
- random_color=mask_random_color,
- bbox=bbox,
- points=points,
- point_label=args.point_label,
- retinamask=args.retina,
- target_height=original_h,
- target_width=original_w,
- )
- else:
- if isinstance(annotations[0], np.ndarray):
- annotations = torch.from_numpy(annotations)
- fast_show_mask_gpu(
- annotations,
- plt.gca(),
- random_color=args.randomcolor,
- bbox=bbox,
- points=points,
- point_label=args.point_label,
- retinamask=args.retina,
- target_height=original_h,
- target_width=original_w,
- )
- if isinstance(annotations, torch.Tensor):
- annotations = annotations.cpu().numpy()
- if args.withContours == True:
- contour_all = []
- temp = np.zeros((original_h, original_w, 1))
- for i, mask in enumerate(annotations):
- if type(mask) == dict:
- mask = mask["segmentation"]
- annotation = mask.astype(np.uint8)
- if args.retina == False:
- annotation = cv2.resize(
- annotation,
- (original_w, original_h),
- interpolation=cv2.INTER_NEAREST,
- )
- contours, hierarchy = cv2.findContours(
- annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
- )
- for contour in contours:
- contour_all.append(contour)
- cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2)
- color = np.array([0 / 255, 0 / 255, 255 / 255, 0.8])
- contour_mask = temp / 255 * color.reshape(1, 1, -1)
- plt.imshow(contour_mask)
-
- save_path = args.output
- if not os.path.exists(save_path):
- os.makedirs(save_path)
- plt.axis("off")
- fig = plt.gcf()
- plt.draw()
-
- try:
- buf = fig.canvas.tostring_rgb()
- except AttributeError:
- fig.canvas.draw()
- buf = fig.canvas.tostring_rgb()
-
- cols, rows = fig.canvas.get_width_height()
- img_array = np.frombuffer(buf, dtype=np.uint8).reshape(rows, cols, 3)
- cv2.imwrite(
- os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
- )
-
-
-# CPU post process
-def fast_show_mask(
- annotation,
- ax,
- random_color=False,
- bbox=None,
- points=None,
- point_label=None,
- retinamask=True,
- target_height=960,
- target_width=960,
-):
- msak_sum = annotation.shape[0]
- height = annotation.shape[1]
- weight = annotation.shape[2]
- # sort annotations by area
- areas = np.sum(annotation, axis=(1, 2))
- sorted_indices = np.argsort(areas)
- annotation = annotation[sorted_indices]
-
- index = (annotation != 0).argmax(axis=0)
- if random_color == True:
- color = np.random.random((msak_sum, 1, 1, 3))
- else:
- color = np.ones((msak_sum, 1, 1, 3)) * np.array(
- [30 / 255, 144 / 255, 255 / 255]
- )
- transparency = np.ones((msak_sum, 1, 1, 1)) * 0.6
- visual = np.concatenate([color, transparency], axis=-1)
- mask_image = np.expand_dims(annotation, -1) * visual
-
- show = np.zeros((height, weight, 4))
- h_indices, w_indices = np.meshgrid(
- np.arange(height), np.arange(weight), indexing="ij"
- )
- indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
- # update the values of show with vectorized indexing
- show[h_indices, w_indices, :] = mask_image[indices]
- if bbox is not None:
- x1, y1, x2, y2 = bbox
- ax.add_patch(
- plt.Rectangle(
- (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
- )
- )
- # draw point
- if points is not None:
- plt.scatter(
- [point[0] for i, point in enumerate(points) if point_label[i] == 1],
- [point[1] for i, point in enumerate(points) if point_label[i] == 1],
- s=20,
- c="y",
- )
- plt.scatter(
- [point[0] for i, point in enumerate(points) if point_label[i] == 0],
- [point[1] for i, point in enumerate(points) if point_label[i] == 0],
- s=20,
- c="m",
- )
-
- if retinamask == False:
- show = cv2.resize(
- show, (target_width, target_height), interpolation=cv2.INTER_NEAREST
- )
- ax.imshow(show)
-
-
-def fast_show_mask_gpu(
- annotation,
- ax,
- random_color=False,
- bbox=None,
- points=None,
- point_label=None,
- retinamask=True,
- target_height=960,
- target_width=960,
-):
- msak_sum = annotation.shape[0]
- height = annotation.shape[1]
- weight = annotation.shape[2]
- areas = torch.sum(annotation, dim=(1, 2))
- sorted_indices = torch.argsort(areas, descending=False)
- annotation = annotation[sorted_indices]
- # find the index of the first non-zero mask at each pixel
- index = (annotation != 0).to(torch.long).argmax(dim=0)
- if random_color == True:
- color = torch.rand((msak_sum, 1, 1, 3)).to(annotation.device)
- else:
- color = torch.ones((msak_sum, 1, 1, 3)).to(annotation.device) * torch.tensor(
- [30 / 255, 144 / 255, 255 / 255]
- ).to(annotation.device)
- transparency = torch.ones((msak_sum, 1, 1, 1)).to(annotation.device) * 0.6
- visual = torch.cat([color, transparency], dim=-1)
- mask_image = torch.unsqueeze(annotation, -1) * visual
- # gather by index: for each pixel, index selects which mask in the batch to read from mask_image
- show = torch.zeros((height, weight, 4)).to(annotation.device)
- h_indices, w_indices = torch.meshgrid(
- torch.arange(height), torch.arange(weight), indexing="ij"
- )
- indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
- # update the values of show with vectorized indexing
- show[h_indices, w_indices, :] = mask_image[indices]
- show_cpu = show.cpu().numpy()
- if bbox is not None:
- x1, y1, x2, y2 = bbox
- ax.add_patch(
- plt.Rectangle(
- (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
- )
- )
- # draw point
- if points is not None:
- plt.scatter(
- [point[0] for i, point in enumerate(points) if point_label[i] == 1],
- [point[1] for i, point in enumerate(points) if point_label[i] == 1],
- s=20,
- c="y",
- )
- plt.scatter(
- [point[0] for i, point in enumerate(points) if point_label[i] == 0],
- [point[1] for i, point in enumerate(points) if point_label[i] == 0],
- s=20,
- c="m",
- )
- if retinamask == False:
- show_cpu = cv2.resize(
- show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
- )
- ax.imshow(show_cpu)
-
-
-def crop_image(annotations, image_like):
- if isinstance(image_like, str):
- image = Image.open(image_like)
- else:
- image = image_like
- ori_w, ori_h = image.size
- mask_h, mask_w = annotations[0]["segmentation"].shape
- if ori_w != mask_w or ori_h != mask_h:
- image = image.resize((mask_w, mask_h))
- cropped_boxes = []
- cropped_images = []
- not_crop = []
- filter_id = []
- # annotations, _ = filter_masks(annotations)
- # filter_id = list(_)
- for _, mask in enumerate(annotations):
- if np.sum(mask["segmentation"]) <= 100:
- filter_id.append(_)
- continue
- bbox = get_bbox_from_mask(mask["segmentation"]) # bbox of the mask
- cropped_boxes.append(segment_image(image, bbox)) # save the cropped image
- # cropped_boxes.append(segment_image(image,mask["segmentation"]))
- cropped_images.append(bbox) # save the bbox of the cropped image
-
- return cropped_boxes, cropped_images, not_crop, filter_id, annotations
-
-
-def box_prompt(masks, bbox, target_height, target_width):
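- # Rescale the prompt box from the original image resolution to the mask resolution,
- # then return the candidate mask with the highest IoU against that box.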
- h = masks.shape[1]
- w = masks.shape[2]
- if h != target_height or w != target_width:
- bbox = [
- int(bbox[0] * w / target_width),
- int(bbox[1] * h / target_height),
- int(bbox[2] * w / target_width),
- int(bbox[3] * h / target_height),
- ]
- bbox[0] = round(bbox[0]) if round(bbox[0]) > 0 else 0
- bbox[1] = round(bbox[1]) if round(bbox[1]) > 0 else 0
- bbox[2] = round(bbox[2]) if round(bbox[2]) < w else w
- bbox[3] = round(bbox[3]) if round(bbox[3]) < h else h
-
- # IoUs = torch.zeros(len(masks), dtype=torch.float32)
- bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
-
- masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
- orig_masks_area = torch.sum(masks, dim=(1, 2))
-
- union = bbox_area + orig_masks_area - masks_area
- IoUs = masks_area / union
- max_iou_index = torch.argmax(IoUs)
-
- return masks[max_iou_index].cpu().numpy(), max_iou_index
-
-
-def point_prompt(masks, points, point_label, target_height, target_width): # numpy processing
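- # Sum the masks that contain positive-label points and subtract those that contain
- # negative-label points, then threshold the result into a single boolean mask.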
- h = masks[0]["segmentation"].shape[0]
- w = masks[0]["segmentation"].shape[1]
- if h != target_height or w != target_width:
- points = [
- [int(point[0] * w / target_width), int(point[1] * h / target_height)]
- for point in points
- ]
- onemask = np.zeros((h, w))
- for i, annotation in enumerate(masks):
- if type(annotation) == dict:
- mask = annotation["segmentation"]
- else:
- mask = annotation
- for i, point in enumerate(points):
- if mask[point[1], point[0]] == 1 and point_label[i] == 1:
- onemask += mask
- if mask[point[1], point[0]] == 1 and point_label[i] == 0:
- onemask -= mask
- onemask = onemask >= 1
- return onemask, 0
diff --git a/spaces/Cyril666/ContourNet-ABI/demo.py b/spaces/Cyril666/ContourNet-ABI/demo.py
deleted file mode 100644
index 7dc9bb41a5164cff64686053a06c0435c09f9587..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/demo.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import argparse
-import logging
-import os
-import glob
-import tqdm
-import torch
-import PIL
-import cv2
-import numpy as np
-import torch.nn.functional as F
-from torchvision import transforms
-from utils import Config, Logger, CharsetMapper
-
-def get_model(config):
- import importlib
- names = config.model_name.split('.')
- module_name, class_name = '.'.join(names[:-1]), names[-1]
- cls = getattr(importlib.import_module(module_name), class_name)
- model = cls(config)
- logging.info(model)
- model = model.eval()
- return model
-
-def preprocess(img, width, height):
- img = cv2.resize(np.array(img), (width, height))
- img = transforms.ToTensor()(img).unsqueeze(0)
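- # Normalize with the standard ImageNet mean/std.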
- mean = torch.tensor([0.485, 0.456, 0.406])
- std = torch.tensor([0.229, 0.224, 0.225])
- return (img-mean[...,None,None]) / std[...,None,None]
-
-def postprocess(output, charset, model_eval):
- def _get_output(last_output, model_eval):
- if isinstance(last_output, (tuple, list)):
- for res in last_output:
- if res['name'] == model_eval: output = res
- else: output = last_output
- return output
-
- def _decode(logit):
- """ Greed decode """
- out = F.softmax(logit, dim=2)
- pt_text, pt_scores, pt_lengths = [], [], []
- for o in out:
- text = charset.get_text(o.argmax(dim=1), padding=False, trim=False)
- text = text.split(charset.null_char)[0] # end at end-token
- pt_text.append(text)
- pt_scores.append(o.max(dim=1)[0])
- pt_lengths.append(min(len(text) + 1, charset.max_length)) # one for end-token
- return pt_text, pt_scores, pt_lengths
-
- output = _get_output(output, model_eval)
- logits, pt_lengths = output['logits'], output['pt_lengths']
- pt_text, pt_scores, pt_lengths_ = _decode(logits)
-
- return pt_text, pt_scores, pt_lengths_
-
-def load(model, file, device=None, strict=True):
- if device is None: device = 'cpu'
- elif isinstance(device, int): device = torch.device('cuda', device)
- assert os.path.isfile(file)
- state = torch.load(file, map_location=device)
- if set(state.keys()) == {'model', 'opt'}:
- state = state['model']
- model.load_state_dict(state, strict=strict)
- return model
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--config', type=str, default='configs/train_abinet.yaml',
- help='path to config file')
- parser.add_argument('--input', type=str, default='figs/test')
- parser.add_argument('--cuda', type=int, default=-1)
- parser.add_argument('--checkpoint', type=str, default='workdir/train-abinet/best-train-abinet.pth')
- parser.add_argument('--model_eval', type=str, default='alignment',
- choices=['alignment', 'vision', 'language'])
- args = parser.parse_args()
- config = Config(args.config)
- if args.checkpoint is not None: config.model_checkpoint = args.checkpoint
- if args.model_eval is not None: config.model_eval = args.model_eval
- config.global_phase = 'test'
- config.model_vision_checkpoint, config.model_language_checkpoint = None, None
- device = 'cpu' if args.cuda < 0 else f'cuda:{args.cuda}'
-
- Logger.init(config.global_workdir, config.global_name, config.global_phase)
- Logger.enable_file()
- logging.info(config)
-
- logging.info('Construct model.')
- model = get_model(config).to(device)
- model = load(model, config.model_checkpoint, device=device)
- charset = CharsetMapper(filename=config.dataset_charset_path,
- max_length=config.dataset_max_length + 1)
-
- if os.path.isdir(args.input):
- paths = [os.path.join(args.input, fname) for fname in os.listdir(args.input)]
- else:
- paths = glob.glob(os.path.expanduser(args.input))
- assert paths, "The input path(s) was not found"
- paths = sorted(paths)
- for path in tqdm.tqdm(paths):
- img = PIL.Image.open(path).convert('RGB')
- img = preprocess(img, config.dataset_image_width, config.dataset_image_height)
- img = img.to(device)
- res = model(img)
- pt_text, _, __ = postprocess(res, charset, config.model_eval)
- logging.info(f'{path}: {pt_text[0]}')
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Model3D-1511e3cc.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Model3D-1511e3cc.js
deleted file mode 100644
index 6c76feed87bcfc4fad0250e2835d459b134fa044..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Model3D-1511e3cc.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as o,e as d,s as u,N as _,P as g,K as r,U as i,p as v,M as y,R as m,n as c,A as b}from"./index-1d65707a.js";function M(a){let e,s;return{c(){e=_("div"),s=g(a[0]),r(e,"class","svelte-1ayixqk"),i(e,"table",a[1]==="table"),i(e,"gallery",a[1]==="gallery"),i(e,"selected",a[2])},m(t,l){v(t,e,l),y(e,s)},p(t,[l]){l&1&&m(s,t[0]),l&2&&i(e,"table",t[1]==="table"),l&2&&i(e,"gallery",t[1]==="gallery"),l&4&&i(e,"selected",t[2])},i:c,o:c,d(t){t&&b(e)}}}function D(a,e,s){let{value:t}=e,{type:l}=e,{selected:f=!1}=e;return a.$$set=n=>{"value"in n&&s(0,t=n.value),"type"in n&&s(1,l=n.type),"selected"in n&&s(2,f=n.selected)},[t,l,f]}class h extends o{constructor(e){super(),d(this,e,D,M,u,{value:0,type:1,selected:2})}}const E=h;export{E};
-//# sourceMappingURL=Model3D-1511e3cc.js.map
diff --git a/spaces/DaleChen/AutoGPT/tests/test_json_parser.py b/spaces/DaleChen/AutoGPT/tests/test_json_parser.py
deleted file mode 100644
index 41c90a6f66c0b0468f1443de80033cc4f268eca0..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/tests/test_json_parser.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import unittest
-
-import tests.context
-from autogpt.json_utils.json_fix_llm import fix_and_parse_json
-
-
-class TestParseJson(unittest.TestCase):
- def test_valid_json(self):
- # Test that a valid JSON string is parsed correctly
- json_str = '{"name": "John", "age": 30, "city": "New York"}'
- obj = fix_and_parse_json(json_str)
- self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})
-
- def test_invalid_json_minor(self):
- # Test that a slightly invalid JSON string raises an exception when try_to_fix_with_gpt is False
- json_str = '{"name": "John", "age": 30, "city": "New York",}'
- with self.assertRaises(Exception):
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
- def test_invalid_json_major_with_gpt(self):
- # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
- json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
- with self.assertRaises(Exception):
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
- def test_invalid_json_major_without_gpt(self):
- # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
- json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
- # Assert that this raises an exception:
- with self.assertRaises(Exception):
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
- def test_invalid_json_leading_sentence_with_gpt(self):
- # Test that a JSON object preceded by a leading sentence is still parsed correctly when try_to_fix_with_gpt is False
- json_str = """I suggest we start by browsing the repository to find any issues that we can fix.
-
-{
- "command": {
- "name": "browse_website",
- "args":{
- "url": "https://github.com/Torantulino/Auto-GPT"
- }
- },
- "thoughts":
- {
- "text": "I suggest we start browsing the repository to find any issues that we can fix.",
- "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
- "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
- "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
- "speak": "I will start browsing the repository to find any issues we can fix."
- }
-}"""
- good_obj = {
- "command": {
- "name": "browse_website",
- "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
- },
- "thoughts": {
- "text": "I suggest we start browsing the repository to find any issues that we can fix.",
- "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
- "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
- "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
- "speak": "I will start browsing the repository to find any issues we can fix.",
- },
- }
- # Assert that the embedded JSON object is recovered despite the leading sentence:
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
- )
-
- def test_invalid_json_leading_sentence_with_gpt_2(self):
- # Test that another JSON object preceded by a leading sentence is still parsed correctly when try_to_fix_with_gpt is False
- json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
-
-{
- "command": {
- "name": "browse_website",
- "args":{
- "url": "https://github.com/Torantulino/Auto-GPT"
- }
- },
- "thoughts":
- {
- "text": "Browsing the repository to identify potential bugs",
- "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
- "plan": "- Analyze the repository for potential bugs and areas of improvement",
- "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
- "speak": "I am browsing the repository to identify potential bugs."
- }
-}"""
- good_obj = {
- "command": {
- "name": "browse_website",
- "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
- },
- "thoughts": {
- "text": "Browsing the repository to identify potential bugs",
- "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
- "plan": "- Analyze the repository for potential bugs and areas of improvement",
- "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
- "speak": "I am browsing the repository to identify potential bugs.",
- },
- }
- # Assert that the embedded JSON object is recovered despite the leading sentence:
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
- )
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/DavidWeiZhang/sd-dreambooth-library-avator-generator/app.py b/spaces/DavidWeiZhang/sd-dreambooth-library-avator-generator/app.py
deleted file mode 100644
index e65cbf7b13715b43bd00afc33c66e8dffbc803b4..0000000000000000000000000000000000000000
--- a/spaces/DavidWeiZhang/sd-dreambooth-library-avator-generator/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-access_token="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"
-gr.Interface.load("models/sd-dreambooth-library/avator-generator",api_key=access_token).launch()
\ No newline at end of file
diff --git a/spaces/Detomo/ai-comic-generation/CONTRIBUTORS.md b/spaces/Detomo/ai-comic-generation/CONTRIBUTORS.md
deleted file mode 100644
index 9efc216b74d3e8afb8bd0386897a649565a8508d..0000000000000000000000000000000000000000
--- a/spaces/Detomo/ai-comic-generation/CONTRIBUTORS.md
+++ /dev/null
@@ -1,9 +0,0 @@
-This project was developed by Julian Bilcke (@jbilcke-hf), as part of his work at Hugging Face.
-
-------------------------------------------
-
-A huge thanks to external developers for their contributions!
-
-艾逗笔 (@idoubi):
-- Added support for OpenAI: https://github.com/jbilcke-hf/ai-comic-factory/pull/6
-
diff --git a/spaces/Detomo/ai-comic-generation/src/lib/getImageDimension.ts b/spaces/Detomo/ai-comic-generation/src/lib/getImageDimension.ts
deleted file mode 100644
index 50a94ae1eee733b23b1d4916780e597c759c608e..0000000000000000000000000000000000000000
--- a/spaces/Detomo/ai-comic-generation/src/lib/getImageDimension.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-export interface ImageDimension {
- width: number
- height: number
-}
-
-export async function getImageDimension(src: string): Promise<ImageDimension> {
- if (!src) {
- return { width: 0, height: 0 }
- }
- const img = new Image()
- img.src = src
- await img.decode()
- const width = img.width
- const height = img.height
- return { width, height }
-}
\ No newline at end of file
diff --git a/spaces/Detomo/naomi-app-api/main.py b/spaces/Detomo/naomi-app-api/main.py
deleted file mode 100644
index d18dcf62217080d8347af6ee409e54495986ca82..0000000000000000000000000000000000000000
--- a/spaces/Detomo/naomi-app-api/main.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from fastapi import FastAPI, File, Form
-import datetime
-import time
-import torch
-from typing import Optional
-
-import os
-import numpy as np
-from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM, AutoConfig
-from huggingface_hub import hf_hub_download
-from fuzzywuzzy import fuzz
-from utils import ffmpeg_read, query_dummy, query_raw, find_different
-
-## config
-API_TOKEN = os.environ["API_TOKEN"]
-MODEL_PATH = os.environ["MODEL_PATH"]
-PITCH_PATH = os.environ["PITCH_PATH"]
-
-QUANTIZED_MODEL_PATH = hf_hub_download(repo_id=MODEL_PATH, filename='quantized_model.pt', token=API_TOKEN)
-QUANTIZED_PITCH_MODEL_PATH = hf_hub_download(repo_id=PITCH_PATH, filename='quantized_model.pt', token=API_TOKEN)
-
-
-## word preprocessor
-processor_with_lm = Wav2Vec2ProcessorWithLM.from_pretrained(MODEL_PATH, use_auth_token=API_TOKEN)
-processor = Wav2Vec2Processor.from_pretrained(MODEL_PATH, use_auth_token=API_TOKEN)
-
-### quantized model
-config = AutoConfig.from_pretrained(MODEL_PATH, use_auth_token=API_TOKEN)
-dummy_model = Wav2Vec2ForCTC(config)
-quantized_model = torch.quantization.quantize_dynamic(dummy_model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
-quantized_model.load_state_dict(torch.load(QUANTIZED_MODEL_PATH))
-
-## pitch preprocessor
-processor_pitch = Wav2Vec2Processor.from_pretrained(PITCH_PATH, use_auth_token=API_TOKEN)
-
-### quantized pitch mode
-config = AutoConfig.from_pretrained(PITCH_PATH, use_auth_token=API_TOKEN)
-dummy_pitch_model = Wav2Vec2ForCTC(config)
-quantized_pitch_model = torch.quantization.quantize_dynamic(dummy_pitch_model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
-quantized_pitch_model.load_state_dict(torch.load(QUANTIZED_PITCH_MODEL_PATH))
-
-app = FastAPI()
-
-
-@app.get("/")
-def read_root():
- return {"Message": "Application startup complete"}
-
-
-@app.post("/naomi_api_score/")
-async def predict(
- file: bytes = File(...),
- word: str = Form(...),
- pitch: Optional[str] = Form(None),
- temperature: int = Form(...),
-):
- """ Transform input audio, get text and pitch from Huggingface api and calculate score by Levenshtein Distance Score
- Parameters:
- ----------
- file : bytes
- input audio file
- word : strings
- true hiragana word to calculate word score
- pitch : strings
- true pitch to calculate pitch score
- temperature: integer
- the difficulty of AI model
- Returns:
- -------
- timestamp: strings
- current time Year-Month-Day-Hours:Minutes:Second
- running_time : strings
- running time second
- error message : strings
- error message from api
- audio duration: integer
- durations of source audio
- target : integer
- durations of target audio
- method : string
- method applied to transform source audio
- word predict : strings
- text from api
- pitch predict : strings
- pitch from api
- wrong word index: strings (ex: 100)
- wrong word compare to target word
- wrong pitch index: strings (ex: 100)
- wrong word compare to target word
- score: integer
- Levenshtein Distance Score from pitch and word
- """
- upload_audio = ffmpeg_read(file, sampling_rate=16000)
- audio_duration = len(upload_audio) / 16000
- current_time = datetime.datetime.now().strftime("%Y-%h-%d-%H:%M:%S")
- start_time = time.time()
- error_message, score, word_preds, pitch_preds = None, None, None, None
-
- word_preds = query_raw(upload_audio, word, processor, processor_with_lm, quantized_model, temperature=temperature)
- if pitch is not None:
- if len(word) != len(pitch):
- error_message = "Length of word and pitch input is not equal"
- pitch_preds = query_dummy(upload_audio, processor_pitch, quantized_pitch_model)
-
- # find best word
- word_score_list = []
- for word_predict in word_preds:
- word_score_list.append(fuzz.ratio(word, word_predict[0]))
- word_score = max(word_score_list)
- best_word_predict = word_preds[word_score_list.index(word_score)][0]
- wrong_word = find_different(word, best_word_predict) # get wrong word
-
- # find best pitch
- if pitch_preds is not None:
- best_pitch_predict = pitch_preds.replace(" ", "")
- if len(best_pitch_predict) < len(best_word_predict):
- best_pitch_predict = best_pitch_predict + "1" * (len(best_word_predict) - len(best_pitch_predict))
- else:
- best_pitch_predict = best_pitch_predict[:len(best_word_predict)] # truncate to max len
- pitch_score = fuzz.ratio(pitch, best_pitch_predict)
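- # Combined score weights the word match twice as heavily as the pitch match.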
- score = int((word_score * 2 + pitch_score) / 3)
- wrong_pitch = find_different(pitch, best_pitch_predict) # get wrong pitch
- else:
- score = int(word_score)
- best_pitch_predict = None
- wrong_pitch = None
-
- return {"timestamp": current_time,
- "running_time": f"{round(time.time() - start_time, 4)} s",
- "error_message": error_message,
- "audio_duration": audio_duration,
- "word_predict": best_word_predict,
- "pitch_predict": best_pitch_predict,
- "wrong_word_index": wrong_word,
- "wrong_pitch_index": wrong_pitch,
- "score": score,
- }
\ No newline at end of file
diff --git a/spaces/Dimalker/Faceswapper/roop/capturer.py b/spaces/Dimalker/Faceswapper/roop/capturer.py
deleted file mode 100644
index fd49d468dd4cd45832ab9612205968207a6f45cf..0000000000000000000000000000000000000000
--- a/spaces/Dimalker/Faceswapper/roop/capturer.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from typing import Any
-import cv2
-
-
-def get_video_frame(video_path: str, frame_number: int = 0) -> Any:
- capture = cv2.VideoCapture(video_path)
- frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
- capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
- has_frame, frame = capture.read()
- capture.release()
- if has_frame:
- return frame
- return None
-
-
-def get_video_frame_total(video_path: str) -> int:
- capture = cv2.VideoCapture(video_path)
- video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
- capture.release()
- return video_frame_total
diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/util.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/util.py
deleted file mode 100644
index 133ef764c0707d9384a33f0350ba71b1e624072f..0000000000000000000000000000000000000000
--- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/util.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-#
-# This work is licensed under the Creative Commons Attribution-NonCommercial
-# 4.0 International License. To view a copy of this license, visit
-# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
-# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
-
-"""Miscellaneous utility classes and functions."""
-
-import ctypes
-import fnmatch
-import importlib
-import inspect
-import numpy as np
-import os
-import shutil
-import sys
-import types
-import io
-import pickle
-import re
-import requests
-import html
-import hashlib
-import glob
-import uuid
-
-from distutils.util import strtobool
-from typing import Any, List, Tuple, Union
-
-
-# Util classes
-# ------------------------------------------------------------------------------------------
-
-
-class EasyDict(dict):
- """Convenience class that behaves like a dict but allows access with the attribute syntax."""
-
- def __getattr__(self, name: str) -> Any:
- try:
- return self[name]
- except KeyError:
- raise AttributeError(name)
-
- def __setattr__(self, name: str, value: Any) -> None:
- self[name] = value
-
- def __delattr__(self, name: str) -> None:
- del self[name]
-
-
-class Logger(object):
- """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
-
- def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
- self.file = None
-
- if file_name is not None:
- self.file = open(file_name, file_mode)
-
- self.should_flush = should_flush
- self.stdout = sys.stdout
- self.stderr = sys.stderr
-
- sys.stdout = self
- sys.stderr = self
-
- def __enter__(self) -> "Logger":
- return self
-
- def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
- self.close()
-
- def write(self, text: str) -> None:
- """Write text to stdout (and a file) and optionally flush."""
- if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
- return
-
- if self.file is not None:
- self.file.write(text)
-
- self.stdout.write(text)
-
- if self.should_flush:
- self.flush()
-
- def flush(self) -> None:
- """Flush written text to both stdout and a file, if open."""
- if self.file is not None:
- self.file.flush()
-
- self.stdout.flush()
-
- def close(self) -> None:
- """Flush, close possible files, and remove stdout/stderr mirroring."""
- self.flush()
-
- # if using multiple loggers, prevent closing in wrong order
- if sys.stdout is self:
- sys.stdout = self.stdout
- if sys.stderr is self:
- sys.stderr = self.stderr
-
- if self.file is not None:
- self.file.close()
-
-
-# Small util functions
-# ------------------------------------------------------------------------------------------
-
-
-def format_time(seconds: Union[int, float]) -> str:
- """Convert the seconds to human readable string with days, hours, minutes and seconds."""
- s = int(np.rint(seconds))
-
- if s < 60:
- return "{0}s".format(s)
- elif s < 60 * 60:
- return "{0}m {1:02}s".format(s // 60, s % 60)
- elif s < 24 * 60 * 60:
- return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
- else:
- return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
-
-
-def ask_yes_no(question: str) -> bool:
- """Ask the user the question until the user inputs a valid answer."""
- while True:
- try:
- print("{0} [y/n]".format(question))
- return strtobool(input().lower())
- except ValueError:
- pass
-
-
-def tuple_product(t: Tuple) -> Any:
- """Calculate the product of the tuple elements."""
- result = 1
-
- for v in t:
- result *= v
-
- return result
-
-
-_str_to_ctype = {
- "uint8": ctypes.c_ubyte,
- "uint16": ctypes.c_uint16,
- "uint32": ctypes.c_uint32,
- "uint64": ctypes.c_uint64,
- "int8": ctypes.c_byte,
- "int16": ctypes.c_int16,
- "int32": ctypes.c_int32,
- "int64": ctypes.c_int64,
- "float32": ctypes.c_float,
- "float64": ctypes.c_double
-}
-
-
-def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
- """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
- type_str = None
-
- if isinstance(type_obj, str):
- type_str = type_obj
- elif hasattr(type_obj, "__name__"):
- type_str = type_obj.__name__
- elif hasattr(type_obj, "name"):
- type_str = type_obj.name
- else:
- raise RuntimeError("Cannot infer type name from input")
-
- assert type_str in _str_to_ctype.keys()
-
- my_dtype = np.dtype(type_str)
- my_ctype = _str_to_ctype[type_str]
-
- assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
-
- return my_dtype, my_ctype
-
-
-def is_pickleable(obj: Any) -> bool:
- try:
- with io.BytesIO() as stream:
- pickle.dump(obj, stream)
- return True
- except:
- return False
-
-
-# Functionality to import modules/objects by name, and call functions by name
-# ------------------------------------------------------------------------------------------
-
-def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
- """Searches for the underlying module behind the name to some python object.
- Returns the module and the object name (original name with module part removed)."""
-
- # allow convenience shorthands, substitute them by full names
- obj_name = re.sub("^np.", "numpy.", obj_name)
- obj_name = re.sub("^tf.", "tensorflow.", obj_name)
-
- # list alternatives for (module_name, local_obj_name)
- parts = obj_name.split(".")
- name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
-
- # try each alternative in turn
- for module_name, local_obj_name in name_pairs:
- try:
- module = importlib.import_module(module_name) # may raise ImportError
- get_obj_from_module(module, local_obj_name) # may raise AttributeError
- return module, local_obj_name
- except:
- pass
-
- # maybe some of the modules themselves contain errors?
- for module_name, _local_obj_name in name_pairs:
- try:
- importlib.import_module(module_name) # may raise ImportError
- except ImportError:
- if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
- raise
-
- # maybe the requested attribute is missing?
- for module_name, local_obj_name in name_pairs:
- try:
- module = importlib.import_module(module_name) # may raise ImportError
- get_obj_from_module(module, local_obj_name) # may raise AttributeError
- except ImportError:
- pass
-
- # we are out of luck, but we have no idea why
- raise ImportError(obj_name)
-
-
-def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
- """Traverses the object name and returns the last (rightmost) python object."""
- if obj_name == '':
- return module
- obj = module
- for part in obj_name.split("."):
- obj = getattr(obj, part)
- return obj
-
-
-def get_obj_by_name(name: str) -> Any:
- """Finds the python object with the given name."""
- module, obj_name = get_module_from_obj_name(name)
- return get_obj_from_module(module, obj_name)
-
-
-def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
- """Finds the python object with the given name and calls it as a function."""
- assert func_name is not None
- func_obj = get_obj_by_name(func_name)
- assert callable(func_obj)
- return func_obj(*args, **kwargs)
-
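A short usage sketch of the name-resolution helpers, assuming the module is importable as `dnnlib.util`:

```python
# Hypothetical usage; assumes this module is importable as dnnlib.util.
from dnnlib import util

mean_fn = util.get_obj_by_name("np.mean")                         # "np." expands to "numpy."
print(mean_fn([1, 2, 3]))                                         # 2.0
print(util.call_func_by_name([4, 5, 6], func_name="numpy.sum"))   # 15
```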
-
-def get_module_dir_by_obj_name(obj_name: str) -> str:
- """Get the directory path of the module containing the given object name."""
- module, _ = get_module_from_obj_name(obj_name)
- return os.path.dirname(inspect.getfile(module))
-
-
-def is_top_level_function(obj: Any) -> bool:
- """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
- return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
-
-
-def get_top_level_function_name(obj: Any) -> str:
- """Return the fully-qualified name of a top-level function."""
- assert is_top_level_function(obj)
- return obj.__module__ + "." + obj.__name__
-
-
-# File system helpers
-# ------------------------------------------------------------------------------------------
-
-def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
- """List all files recursively in a given directory while ignoring given file and directory names.
- Returns list of tuples containing both absolute and relative paths."""
- assert os.path.isdir(dir_path)
- base_name = os.path.basename(os.path.normpath(dir_path))
-
- if ignores is None:
- ignores = []
-
- result = []
-
- for root, dirs, files in os.walk(dir_path, topdown=True):
- for ignore_ in ignores:
- dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
-
- # dirs need to be edited in-place
- for d in dirs_to_remove:
- dirs.remove(d)
-
- files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
-
- absolute_paths = [os.path.join(root, f) for f in files]
- relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
-
- if add_base_to_relative:
- relative_paths = [os.path.join(base_name, p) for p in relative_paths]
-
- assert len(absolute_paths) == len(relative_paths)
- result += zip(absolute_paths, relative_paths)
-
- return result
-
-
-def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
- """Takes in a list of tuples of (src, dst) paths and copies files.
- Will create all necessary directories."""
- for file in files:
- target_dir_name = os.path.dirname(file[1])
-
- # will create all intermediate-level directories
- if not os.path.exists(target_dir_name):
- os.makedirs(target_dir_name)
-
- shutil.copyfile(file[0], file[1])
-
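A usage sketch of the two file-system helpers together, assuming the module is importable as `dnnlib.util`; the `training` source directory and `/tmp/run0/src` destination are placeholders:

```python
# Hypothetical usage; the source directory and destination path are placeholders.
import os
from dnnlib import util

files = util.list_dir_recursively_with_ignore(
    "training", ignores=["__pycache__", "*.pyc"], add_base_to_relative=True)
pairs = [(src, os.path.join("/tmp/run0/src", rel)) for src, rel in files]
util.copy_files_and_create_dirs(pairs)  # copies the tree, creating directories as needed
```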
-
-# URL helpers
-# ------------------------------------------------------------------------------------------
-
-def is_url(obj: Any) -> bool:
- """Determine whether the given object is a valid URL string."""
- if not isinstance(obj, str) or not "://" in obj:
- return False
- try:
- res = requests.compat.urlparse(obj)
- if not res.scheme or not res.netloc or not "." in res.netloc:
- return False
- res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
- if not res.scheme or not res.netloc or not "." in res.netloc:
- return False
- except:
- return False
- return True
-
-
-def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any:
- """Download the given URL and return a binary-mode file object to access the data."""
- assert is_url(url)
- assert num_attempts >= 1
-
- # Lookup from cache.
- url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
- if cache_dir is not None:
- cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
- if len(cache_files) == 1:
- return open(cache_files[0], "rb")
-
- # Download.
- url_name = None
- url_data = None
- with requests.Session() as session:
- if verbose:
- print("Downloading %s ..." % url, end="", flush=True)
- for attempts_left in reversed(range(num_attempts)):
- try:
- with session.get(url) as res:
- res.raise_for_status()
- if len(res.content) == 0:
- raise IOError("No data received")
-
- if len(res.content) < 8192:
- content_str = res.content.decode("utf-8")
- if "download_warning" in res.headers.get("Set-Cookie", ""):
- links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
- if len(links) == 1:
- url = requests.compat.urljoin(url, links[0])
- raise IOError("Google Drive virus checker nag")
- if "Google Drive - Quota exceeded" in content_str:
- raise IOError("Google Drive quota exceeded")
-
- match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
- url_name = match[1] if match else url
- url_data = res.content
- if verbose:
- print(" done")
- break
- except:
- if not attempts_left:
- if verbose:
- print(" failed")
- raise
- if verbose:
- print(".", end="", flush=True)
-
- # Save to cache.
- if cache_dir is not None:
- safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
- cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
- temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
- os.makedirs(cache_dir, exist_ok=True)
- with open(temp_file, "wb") as f:
- f.write(url_data)
- os.replace(temp_file, cache_file) # atomic
-
- # Return data as file object.
- return io.BytesIO(url_data)
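A usage sketch of `open_url`, assuming the module is importable as `dnnlib.util`; the URL is a placeholder:

```python
# Hypothetical usage; the URL is a placeholder for a real pickle download.
import pickle
from dnnlib import util

with util.open_url("https://example.com/network.pkl", cache_dir=".cache", num_attempts=3) as f:
    data = pickle.load(f)
# A second call with the same cache_dir is served from the ".cache" directory instead of the network.
```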
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/upfirdn_2d.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/upfirdn_2d.py
deleted file mode 100644
index 55a31af7e146da7afeb964db018f14aca3134920..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/upfirdn_2d.py
+++ /dev/null
@@ -1,418 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom TensorFlow ops for efficient resampling of 2D images."""
-
-import os
-import numpy as np
-import tensorflow as tf
-from .. import custom_ops
-
-def _get_plugin():
- return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
-
-#----------------------------------------------------------------------------
-
-def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0, pady1=0, impl='cuda'):
- r"""Pad, upsample, FIR filter, and downsample a batch of 2D images.
-
- Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`
- and performs the following operations for each image, batched across
- `majorDim` and `minorDim`:
-
- 1. Upsample the image by inserting the zeros after each pixel (`upx`, `upy`).
-
- 2. Pad the image with zeros by the specified number of pixels on each side
- (`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value
- corresponds to cropping the image.
-
- 3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the
- image so that the footprint of all output pixels lies within the input image.
-
- 4. Downsample the image by throwing away pixels (`downx`, `downy`).
-
- This sequence of operations bears close resemblance to scipy.signal.upfirdn().
- The fused op is considerably more efficient than performing the same calculation
- using standard TensorFlow ops. It supports gradients of arbitrary order.
-
- Args:
- x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`.
- k: 2D FIR filter of the shape `[firH, firW]`.
- upx: Integer upsampling factor along the X-axis (default: 1).
- upy: Integer upsampling factor along the Y-axis (default: 1).
- downx: Integer downsampling factor along the X-axis (default: 1).
- downy: Integer downsampling factor along the Y-axis (default: 1).
- padx0: Number of pixels to pad on the left side (default: 0).
- padx1: Number of pixels to pad on the right side (default: 0).
- pady0: Number of pixels to pad on the top side (default: 0).
- pady1: Number of pixels to pad on the bottom side (default: 0).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.
- """
-
- impl_dict = {
- 'ref': _upfirdn_2d_ref,
- 'cuda': _upfirdn_2d_cuda,
- }
- return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)
-
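To make the four steps concrete, here is a standalone NumPy/SciPy sketch of the same pad, upsample, FIR filter, and downsample sequence for a single-channel image. It is an illustration of the algorithm only, not the fused op; the kernel and padding values are just one example, chosen to match factor-2 upsampling with a 4-tap filter:

```python
# Reference sketch of upfirdn for one 2D channel (illustration only).
import numpy as np
from scipy import signal

def upfirdn_2d_sketch(img, k, up=2, down=1, pad0=1, pad1=1):
    h, w = img.shape
    x = np.zeros((h * up, w * up), dtype=np.float32)
    x[::up, ::up] = img                                  # 1. upsample: insert zeros after each pixel
    x = np.pad(x, ((pad0, pad1), (pad0, pad1)))          # 2. zero-pad (negative values would crop)
    x = signal.convolve2d(x, k, mode="valid")            # 3. FIR filter, footprint kept inside the input
    return x[::down, ::down]                             # 4. downsample: throw away pixels

img = np.random.rand(8, 8).astype(np.float32)
k = np.outer([1, 3, 3, 1], [1, 3, 3, 1]) / 64.0          # normalized binomial filter
print(upfirdn_2d_sketch(img, k, up=2, pad0=2, pad1=1).shape)  # (16, 16) -- factor-2 upsampling
```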
-#----------------------------------------------------------------------------
-
-def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
- """Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops."""
-
- x = tf.convert_to_tensor(x)
- k = np.asarray(k, dtype=np.float32)
- assert x.shape.rank == 4
- inH = x.shape[1].value
- inW = x.shape[2].value
- minorDim = _shape(x, 3)
- kernelH, kernelW = k.shape
- assert inW >= 1 and inH >= 1
- assert kernelW >= 1 and kernelH >= 1
- assert isinstance(upx, int) and isinstance(upy, int)
- assert isinstance(downx, int) and isinstance(downy, int)
- assert isinstance(padx0, int) and isinstance(padx1, int)
- assert isinstance(pady0, int) and isinstance(pady1, int)
-
- # Upsample (insert zeros).
- x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim])
- x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
- x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim])
-
- # Pad (crop if negative).
- x = tf.pad(x, [[0, 0], [max(pady0, 0), max(pady1, 0)], [max(padx0, 0), max(padx1, 0)], [0, 0]])
- x = x[:, max(-pady0, 0) : x.shape[1].value - max(-pady1, 0), max(-padx0, 0) : x.shape[2].value - max(-padx1, 0), :]
-
- # Convolve with filter.
- x = tf.transpose(x, [0, 3, 1, 2])
- x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
- w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)
- x = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW')
- x = tf.reshape(x, [-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW * upx + padx0 + padx1 - kernelW + 1])
- x = tf.transpose(x, [0, 2, 3, 1])
-
- # Downsample (throw away pixels).
- return x[:, ::downy, ::downx, :]
-
-#----------------------------------------------------------------------------
-
-def _upfirdn_2d_cuda(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
- """Fast CUDA implementation of `upfirdn_2d()` using custom ops."""
-
- x = tf.convert_to_tensor(x)
- k = np.asarray(k, dtype=np.float32)
- majorDim, inH, inW, minorDim = x.shape.as_list()
- kernelH, kernelW = k.shape
- assert inW >= 1 and inH >= 1
- assert kernelW >= 1 and kernelH >= 1
- assert isinstance(upx, int) and isinstance(upy, int)
- assert isinstance(downx, int) and isinstance(downy, int)
- assert isinstance(padx0, int) and isinstance(padx1, int)
- assert isinstance(pady0, int) and isinstance(pady1, int)
-
- outW = (inW * upx + padx0 + padx1 - kernelW) // downx + 1
- outH = (inH * upy + pady0 + pady1 - kernelH) // downy + 1
- assert outW >= 1 and outH >= 1
-
- cuda_op = _get_plugin().up_fir_dn2d
- kc = tf.constant(k, dtype=x.dtype)
- gkc = tf.constant(k[::-1, ::-1], dtype=x.dtype)
- gpadx0 = kernelW - padx0 - 1
- gpady0 = kernelH - pady0 - 1
- gpadx1 = inW * upx - outW * downx + padx0 - upx + 1
- gpady1 = inH * upy - outH * downy + pady0 - upy + 1
-
- @tf.custom_gradient
- def func(x):
- y = cuda_op(x=x, k=kc, upx=int(upx), upy=int(upy), downx=int(downx), downy=int(downy), padx0=int(padx0), padx1=int(padx1), pady0=int(pady0), pady1=int(pady1))
- y.set_shape([majorDim, outH, outW, minorDim])
- @tf.custom_gradient
- def grad(dy):
- dx = cuda_op(x=dy, k=gkc, upx=int(downx), upy=int(downy), downx=int(upx), downy=int(upy), padx0=int(gpadx0), padx1=int(gpadx1), pady0=int(gpady0), pady1=int(gpady1))
- dx.set_shape([majorDim, inH, inW, minorDim])
- return dx, func
- return y, grad
- return func(x)
-
-#----------------------------------------------------------------------------
-
-def filter_2d(x, k, gain=1, padding=0, data_format='NCHW', impl='cuda'):
- r"""Filter a batch of 2D images with the given FIR filter.
-
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
- and filters each image with the given filter. The filter is normalized so that
- if the input pixels are constant, they will be scaled by the specified `gain`.
- Pixels outside the image are assumed to be zero.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
- gain: Scaling factor for signal magnitude (default: 1.0).
- padding: Number of pixels to pad or crop the output on each side (default: 0).
- data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the same shape and datatype as `x`.
- """
-
- assert isinstance(padding, int)
- k = _FilterKernel(k=k, gain=gain)
- assert k.w == k.h
- pad0 = k.w // 2 + padding
- pad1 = (k.w - 1) // 2 + padding
- return _simple_upfirdn_2d(x, k, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def upsample_2d(x, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'):
- r"""Upsample a batch of 2D images with the given filter.
-
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
- and upsamples each image with the given filter. The filter is normalized so that
- if the input pixels are constant, they will be scaled by the specified `gain`.
- Pixels outside the image are assumed to be zero, and the filter is padded with
- zeros so that its shape is a multiple of the upsampling factor.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
- The default is `[1] * factor`, which corresponds to nearest-neighbor
- upsampling.
- factor: Integer upsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
- padding: Number of pixels to pad or crop the output on each side (default: 0).
- data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the shape `[N, C, H * factor, W * factor]` or
- `[N, H * factor, W * factor, C]`, and same datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
- assert isinstance(padding, int)
- k = _FilterKernel(k if k is not None else [1] * factor, gain * (factor ** 2))
- assert k.w == k.h
- pad0 = (k.w + factor - 1) // 2 + padding
- pad1 = (k.w - factor) // 2 + padding
- return _simple_upfirdn_2d(x, k, up=factor, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl)
-
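A hypothetical usage sketch of the wrappers under TF 1.x graph mode; it assumes the original `dnnlib` package layout is importable and a GPU-enabled TensorFlow build (the `'ref'` path still performs an NCHW convolution internally, which CPU-only builds typically reject):

```python
# Hypothetical TF 1.x usage; impl='ref' avoids the compiled CUDA plugin but still expects NCHW support.
import numpy as np
import tensorflow as tf
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d

x = tf.placeholder(tf.float32, [None, 3, 64, 64])    # NCHW batch of images
k = [1, 3, 3, 1]                                     # 4-tap binomial FIR filter
y_up = upsample_2d(x, k=k, factor=2, impl='ref')     # -> [None, 3, 128, 128]
y_dn = downsample_2d(x, k=k, factor=2, impl='ref')   # -> [None, 3, 32, 32]

with tf.Session() as sess:
    img = np.random.rand(1, 3, 64, 64).astype(np.float32)
    print(sess.run(y_up, {x: img}).shape)            # (1, 3, 128, 128)
    print(sess.run(y_dn, {x: img}).shape)            # (1, 3, 32, 32)
```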
-#----------------------------------------------------------------------------
-
-def downsample_2d(x, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'):
- r"""Downsample a batch of 2D images with the given filter.
-
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
- and downsamples each image with the given filter. The filter is normalized so that
- if the input pixels are constant, they will be scaled by the specified `gain`.
- Pixels outside the image are assumed to be zero, and the filter is padded with
- zeros so that its shape is a multiple of the downsampling factor.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
- The default is `[1] * factor`, which corresponds to average pooling.
- factor: Integer downsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
- padding: Number of pixels to pad or crop the output on each side (default: 0).
- data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the shape `[N, C, H // factor, W // factor]` or
- `[N, H // factor, W // factor, C]`, and same datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
- assert isinstance(padding, int)
- k = _FilterKernel(k if k is not None else [1] * factor, gain)
- assert k.w == k.h
- pad0 = (k.w - factor + 1) // 2 + padding * factor
- pad1 = (k.w - factor) // 2 + padding * factor
- return _simple_upfirdn_2d(x, k, down=factor, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def upsample_conv_2d(x, w, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'):
- r"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
-
- Padding is performed only once at the beginning, not between the operations.
- The fused op is considerably more efficient than performing the same calculation
- using standard TensorFlow ops. It supports gradients of arbitrary order.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
- Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
- k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
- The default is `[1] * factor`, which corresponds to nearest-neighbor
- upsampling.
- factor: Integer upsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
- padding: Number of pixels to pad or crop the output on each side (default: 0).
- data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the shape `[N, C, H * factor, W * factor]` or
- `[N, H * factor, W * factor, C]`, and same datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
- assert isinstance(padding, int)
-
- # Check weight shape.
- w = tf.convert_to_tensor(w)
- ch, cw, _inC, _outC = w.shape.as_list()
- inC = _shape(w, 2)
- outC = _shape(w, 3)
- assert cw == ch
-
- # Fast path for 1x1 convolution.
- if cw == 1 and ch == 1:
- x = tf.nn.conv2d(x, w, data_format=data_format, strides=[1,1,1,1], padding='VALID')
- x = upsample_2d(x, k, factor=factor, gain=gain, padding=padding, data_format=data_format, impl=impl)
- return x
-
- # Setup filter kernel.
- k = _FilterKernel(k if k is not None else [1] * factor, gain * (factor ** 2))
- assert k.w == k.h
-
- # Determine data dimensions.
- if data_format == 'NCHW':
- stride = [1, 1, factor, factor]
- output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + ch, (_shape(x, 3) - 1) * factor + cw]
- num_groups = _shape(x, 1) // inC
- else:
- stride = [1, factor, factor, 1]
- output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + ch, (_shape(x, 2) - 1) * factor + cw, outC]
- num_groups = _shape(x, 3) // inC
-
- # Transpose weights.
- w = tf.reshape(w, [ch, cw, inC, num_groups, -1])
- w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])
- w = tf.reshape(w, [ch, cw, -1, num_groups * inC])
-
- # Execute.
- x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)
- pad0 = (k.w + factor - cw) // 2 + padding
- pad1 = (k.w - factor - cw + 3) // 2 + padding
- return _simple_upfirdn_2d(x, k, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def conv_downsample_2d(x, w, k=None, factor=2, gain=1, padding=0, data_format='NCHW', impl='cuda'):
- r"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
-
- Padding is performed only once at the beginning, not between the operations.
- The fused op is considerably more efficient than performing the same calculation
- using standard TensorFlow ops. It supports gradients of arbitrary order.
-
- Args:
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
- Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
- k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
- The default is `[1] * factor`, which corresponds to average pooling.
- factor: Integer downsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
- padding: Number of pixels to pad or crop the output on each side (default: 0).
- data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the shape `[N, C, H // factor, W // factor]` or
- `[N, H // factor, W // factor, C]`, and same datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
- assert isinstance(padding, int)
-
- # Check weight shape.
- w = tf.convert_to_tensor(w)
- ch, cw, _inC, _outC = w.shape.as_list()
- assert cw == ch
-
- # Fast path for 1x1 convolution.
- if cw == 1 and ch == 1:
- x = downsample_2d(x, k, factor=factor, gain=gain, padding=padding, data_format=data_format, impl=impl)
- x = tf.nn.conv2d(x, w, data_format=data_format, strides=[1,1,1,1], padding='VALID')
- return x
-
- # Setup filter kernel.
- k = _FilterKernel(k if k is not None else [1] * factor, gain)
- assert k.w == k.h
-
- # Determine stride.
- if data_format == 'NCHW':
- s = [1, 1, factor, factor]
- else:
- s = [1, factor, factor, 1]
-
- # Execute.
- pad0 = (k.w - factor + cw) // 2 + padding * factor
- pad1 = (k.w - factor + cw - 1) // 2 + padding * factor
- x = _simple_upfirdn_2d(x, k, pad0=pad0, pad1=pad1, data_format=data_format, impl=impl)
- return tf.nn.conv2d(x, w, strides=s, padding='VALID', data_format=data_format)
-
-#----------------------------------------------------------------------------
-# Internal helpers.
-
-class _FilterKernel:
- def __init__(self, k, gain=1):
- k = np.asarray(k, dtype=np.float32)
- k /= np.sum(k)
-
- # Separable.
- if k.ndim == 1 and k.size >= 8:
- self.w = k.size
- self.h = k.size
- self.kx = k[np.newaxis, :]
- self.ky = k[:, np.newaxis] * gain
- self.kxy = None
-
- # Non-separable.
- else:
- if k.ndim == 1:
- k = np.outer(k, k)
- assert k.ndim == 2
- self.w = k.shape[1]
- self.h = k.shape[0]
- self.kx = None
- self.ky = None
- self.kxy = k * gain
-
-def _simple_upfirdn_2d(x, k, up=1, down=1, pad0=0, pad1=0, data_format='NCHW', impl='cuda'):
- assert isinstance(k, _FilterKernel)
- assert data_format in ['NCHW', 'NHWC']
- assert x.shape.rank == 4
- y = x
- if data_format == 'NCHW':
- y = tf.reshape(y, [-1, _shape(y, 2), _shape(y, 3), 1])
- if k.kx is not None:
- y = upfirdn_2d(y, k.kx, upx=up, downx=down, padx0=pad0, padx1=pad1, impl=impl)
- if k.ky is not None:
- y = upfirdn_2d(y, k.ky, upy=up, downy=down, pady0=pad0, pady1=pad1, impl=impl)
- if k.kxy is not None:
- y = upfirdn_2d(y, k.kxy, upx=up, upy=up, downx=down, downy=down, padx0=pad0, padx1=pad1, pady0=pad0, pady1=pad1, impl=impl)
- if data_format == 'NCHW':
- y = tf.reshape(y, [-1, _shape(x, 1), _shape(y, 1), _shape(y, 2)])
- return y
-
-def _shape(tf_expr, dim_idx):
- if tf_expr.shape.rank is not None:
- dim = tf_expr.shape[dim_idx].value
- if dim is not None:
- return dim
- return tf.shape(tf_expr)[dim_idx]
-
-#----------------------------------------------------------------------------
diff --git a/spaces/Dragonnext/charybdis/README.md b/spaces/Dragonnext/charybdis/README.md
deleted file mode 100644
index 0e8457ce896b12d6cf78050c3c15f298c8685cd8..0000000000000000000000000000000000000000
--- a/spaces/Dragonnext/charybdis/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Charybdis
-emoji: 😻
-colorFrom: purple
-colorTo: yellow
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Duskfallcrew/duskfall-tarot-card/app.py b/spaces/Duskfallcrew/duskfall-tarot-card/app.py
deleted file mode 100644
index 7c6d41e786a0cc0caa264e7db20bc8e804c64d32..0000000000000000000000000000000000000000
--- a/spaces/Duskfallcrew/duskfall-tarot-card/app.py
+++ /dev/null
@@ -1,147 +0,0 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
-import gradio as gr
-import torch
-from PIL import Image
-
-model_id = 'Duskfallcrew/duskfall-tarot-card'
-prefix = 'dsktaro1'
-
-scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-if torch.cuda.is_available():
- pipe = pipe.to("cuda")
- pipe_i2i = pipe_i2i.to("cuda")
-
-def error_str(error, title="Error"):
- return f"""#### {title}
- {error}""" if error else ""
-
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
-
- generator = torch.Generator('cuda' if torch.cuda.is_available() else 'cpu').manual_seed(seed) if seed != 0 else None
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
- try:
- if img is not None:
- return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
- else:
- return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
- except Exception as e:
- return None, error_str(e)
-
-def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
- result = pipe(
- prompt,
- negative_prompt = neg_prompt,
- num_inference_steps = int(steps),
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
- ratio = min(height / img.height, width / img.width)
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
- result = pipe_i2i(
- prompt,
- negative_prompt = neg_prompt,
- init_image = img,
- num_inference_steps = int(steps),
- strength = strength,
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
- gr.HTML(
- f"""
-
-
-
- Duskfall Tarot Card
-
-
- Demo for Duskfall Tarot Card Stable Diffusion model. All samples and info are here:
-
-https://civitai.com/user/duskfallcrew
-
-If you want to donate towards costs and don't want to subscribe:
-
-https://ko-fi.com/DUSKFALLcrew
-
-If you want to monthly support the EARTH & DUSK media projects and not just AI:
-
-https://www.patreon.com/earthndusk - "dsktaro1" is your √ word!
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
-
- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space
- """)
-
-demo.queue(concurrency_count=1)
-demo.launch()
diff --git a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/README.md b/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/README.md
deleted file mode 100644
index 38137039f2056b43a77206092ac9c4cd282a2853..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/README.md
+++ /dev/null
@@ -1,103 +0,0 @@
-# ByteTrack-CPP-ncnn
-
-## Installation
-
-Clone [ncnn](https://github.com/Tencent/ncnn) first, then follow the [build tutorial of ncnn](https://github.com/Tencent/ncnn/wiki/how-to-build) to build it on your own device.
-
-Install eigen-3.3.9 [[google]](https://drive.google.com/file/d/1rqO74CYCNrmRAg8Rra0JP3yZtJ-rfket/view?usp=sharing), [[baidu(code:ueq4)]](https://pan.baidu.com/s/15kEfCxpy-T7tz60msxxExg).
-
-```shell
-unzip eigen-3.3.9.zip
-cd eigen-3.3.9
-mkdir build
-cd build
-cmake ..
-sudo make install
-```
-
-## Generate onnx file
-Use the provided tools to generate the onnx file.
-For example, to generate the onnx file for bytetrack_s_mot17.pth, run the following command:
-```shell
-cd <ByteTrack_HOME>
-python3 tools/export_onnx.py -f exps/example/mot/yolox_s_mix_det.py -c pretrained/bytetrack_s_mot17.pth.tar
-```
-Then, a bytetrack_s.onnx file is generated under <ByteTrack_HOME>.
-
-## Generate ncnn param and bin file
-Put bytetrack_s.onnx under ncnn/build/tools/onnx and then run:
-
-```shell
-cd ncnn/build/tools/onnx
-./onnx2ncnn bytetrack_s.onnx bytetrack_s.param bytetrack_s.bin
-```
-
-Since the Focus module is not supported in ncnn, warnings like:
-```shell
-Unsupported slice step !
-```
-will be printed. Don't worry, though: a C++ version of the Focus layer is already implemented in src/bytetrack.cpp.
-
-## Modify param file
-Open **bytetrack_s.param** and modify it as follows.
-Before (just an example):
-```
-235 268
-Input images 0 1 images
-Split splitncnn_input0 1 4 images images_splitncnn_0 images_splitncnn_1 images_splitncnn_2 images_splitncnn_3
-Crop Slice_4 1 1 images_splitncnn_3 467 -23309=1,0 -23310=1,2147483647 -23311=1,1
-Crop Slice_9 1 1 467 472 -23309=1,0 -23310=1,2147483647 -23311=1,2
-Crop Slice_14 1 1 images_splitncnn_2 477 -23309=1,0 -23310=1,2147483647 -23311=1,1
-Crop Slice_19 1 1 477 482 -23309=1,1 -23310=1,2147483647 -23311=1,2
-Crop Slice_24 1 1 images_splitncnn_1 487 -23309=1,1 -23310=1,2147483647 -23311=1,1
-Crop Slice_29 1 1 487 492 -23309=1,0 -23310=1,2147483647 -23311=1,2
-Crop Slice_34 1 1 images_splitncnn_0 497 -23309=1,1 -23310=1,2147483647 -23311=1,1
-Crop Slice_39 1 1 497 502 -23309=1,1 -23310=1,2147483647 -23311=1,2
-Concat Concat_40 4 1 472 492 482 502 503 0=0
-...
-```
-* Change the first number from 235 to 235 - 9 = 226 (we remove 10 layers and add 1, so the total layer count decreases by 9).
-* Then remove the 10 lines from Split to Concat, but note the second-to-last field on the Concat line: 503.
-* Add a YoloV5Focus layer after Input (using the number 503 noted above):
-```
-YoloV5Focus focus 1 1 images 503
-```
-After (just an example; a scripted version of these edits is sketched below):
-```
-226 328
-Input images 0 1 images
-YoloV5Focus focus 1 1 images 503
-...
-```
-
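If you prefer to script the three edits above, a rough Python sketch is shown below. It assumes the stock layout from the example, where the Focus pattern (one Split, eight Crop, one Concat) is the first such block after Input; inspect the resulting param file by hand before running ncnnoptimize:

```python
# Rough sketch of the manual edit described above; verify the output by hand.
lines = open("bytetrack_s.param").read().splitlines()

# Locate the Split + 8x Crop + Concat block and the Concat output blob (e.g. "503").
block = [i for i, l in enumerate(lines)
         if l.split() and l.split()[0] in ("Split", "Crop", "Concat")][:10]
focus_out = lines[block[-1]].split()[-2]

# Decrement the layer count (first number on the "layer_count blob_count" line) by 9.
for i, l in enumerate(lines):
    parts = l.split()
    if len(parts) == 2 and all(p.isdigit() for p in parts):
        lines[i] = f"{int(parts[0]) - 9} {parts[1]}"
        break

# Drop the block and insert a single YoloV5Focus layer right after Input.
lines = [l for i, l in enumerate(lines) if i not in set(block)]
input_idx = next(i for i, l in enumerate(lines) if l.split() and l.split()[0] == "Input")
lines.insert(input_idx + 1, f"YoloV5Focus      focus      1 1 images {focus_out}")

open("bytetrack_s.param", "w").write("\n".join(lines) + "\n")
```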
-## Use ncnn_optimize to generate new param and bin
-```shell
-# suppose you are still under ncnn/build/tools/onnx dir.
-../ncnnoptimize bytetrack_s.param bytetrack_s.bin bytetrack_s_op.param bytetrack_s_op.bin 65536
-```
-
-## Copy files and build ByteTrack
-Copy or move the 'src' and 'include' folders and the 'CMakeLists.txt' file into ncnn/examples. Copy bytetrack_s_op.param, bytetrack_s_op.bin and <ByteTrack_HOME>/videos/palace.mp4 into ncnn/build/examples. Then build ByteTrack:
-
-```shell
-cd ncnn/build/examples
-cmake ..
-make
-```
-
-## Run the demo
-You can run the ncnn demo at about **5 FPS** on a 96-core Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz:
-```shell
-./bytetrack palace.mp4
-```
-
-You can modify 'num_threads' in [bytetrack.cpp](https://github.com/ifzhang/ByteTrack/blob/2e9a67895da6b47b948015f6861bba0bacd4e72f/deploy/ncnn/cpp/src/bytetrack.cpp#L309) according to the number of your CPU cores to optimize the running speed:
-
-```
-yolox.opt.num_threads = 20;
-```
-
-
-## Acknowledgement
-
-* [ncnn](https://github.com/Tencent/ncnn)
diff --git a/spaces/Eddycrack864/Applio-Inference/utils/clonerepo_experimental.py b/spaces/Eddycrack864/Applio-Inference/utils/clonerepo_experimental.py
deleted file mode 100644
index b0ae02648c1307562cf48033908edcf2996db5e2..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/utils/clonerepo_experimental.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import os
-import subprocess
-import shutil
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from tqdm.notebook import tqdm
-from pathlib import Path
-import requests
-
-def run_script():
- def run_cmd(cmd):
- process = subprocess.run(cmd, shell=True, check=True, text=True)
- return process.stdout
-
- # Change the current directory to /content/
- os.chdir('/content/')
- print("Changing dir to /content/")
-
- # Your function to edit the file
- def edit_file(file_path):
- temp_file_path = "/tmp/temp_file.py"
- changes_made = False
- with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
- previous_line = ""
- second_previous_line = ""
- for line in file:
- new_line = line.replace("value=160", "value=128")
- if new_line != line:
- print("Replaced 'value=160' with 'value=128'")
- changes_made = True
- line = new_line
-
- new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
- if new_line != line:
- print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
- changes_made = True
- line = new_line
-
- new_line = line.replace("value=0.88", "value=0.75")
- if new_line != line:
- print("Replaced 'value=0.88' with 'value=0.75'")
- changes_made = True
- line = new_line
-
- if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
- new_line = line.replace("value=1,", "value=0.25,")
- if new_line != line:
- print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
- changes_made = True
- line = new_line
-
- if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
- new_line = line.replace("value=20,", "value=500,")
- if new_line != line:
- print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
- changes_made = True
- line = new_line
-
- if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
- if 'value="pm",' in line:
- new_line = line.replace('value="pm",', 'value="mangio-crepe",')
- if new_line != line:
- print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
- changes_made = True
- line = new_line
-
- new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
- if new_line != line:
- print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
- changes_made = True
- line = new_line
-
- if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
- if 'value=i18n("否"),' in line:
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
- if new_line != line:
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
- changes_made = True
- line = new_line
-
- if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
- if 'value=i18n("否"),' in line:
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
- if new_line != line:
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
- changes_made = True
- line = new_line
-
- temp_file.write(line)
- second_previous_line = previous_line
- previous_line = line
-
- # After finished, we replace the original file with the temp one
- import shutil
- shutil.move(temp_file_path, file_path)
-
- if changes_made:
- print("Changes made and file saved successfully.")
- else:
- print("No changes were needed.")
-
- # Define the repo path
- repo_path = '/content/Applio-RVC-Fork'
-
- def copy_all_files_in_directory(src_dir, dest_dir):
- # Iterate over all files in source directory
- for item in Path(src_dir).glob('*'):
- if item.is_file():
- # Copy each file to destination directory
- shutil.copy(item, dest_dir)
- else:
- # If it's a directory, make a new directory in the destination and copy the files recursively
- new_dest = Path(dest_dir) / item.name
- new_dest.mkdir(exist_ok=True)
- copy_all_files_in_directory(str(item), str(new_dest))
-
- def clone_and_copy_repo(repo_path):
- # New repository link
- new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
- # Temporary path to clone the repository
- temp_repo_path = "/content/temp_Applio-RVC-Fork"
- # New folder name
- new_folder_name = "Applio-RVC-Fork"
-
- # Clone the latest code from the new repository to a temporary location
- run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
- os.chdir(temp_repo_path)
-
- run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
- run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
- run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
- run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
- run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
- run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
- run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
- run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
- run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
- run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
- run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
- run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
- run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
- run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
- run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
- run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
- run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
- run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
- run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
- run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
- run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
- run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
- run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
- run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
- run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
- run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
-
- # Edit the file here, before copying
- #edit_file(f"{temp_repo_path}/infer-web.py")
-
- # Copy all files from the cloned repository to the existing path
- copy_all_files_in_directory(temp_repo_path, repo_path)
- print(f"Copying all {new_folder_name} files from GitHub.")
-
- # Change working directory back to /content/
- os.chdir('/content/')
- print("Changed path back to /content/")
-
- # Remove the temporary cloned repository
- shutil.rmtree(temp_repo_path)
-
- # Call the function
- clone_and_copy_repo(repo_path)
-
- # Download the credentials file for RVC archive sheet
- os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
- run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
-
- # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
- shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
- shutil.rmtree('/content/torchcrepe', ignore_errors=True)
-
- # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
- run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
- shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
- shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder
-
- # Change the current directory to /content/Applio-RVC-Fork
- os.chdir('/content/Applio-RVC-Fork')
- os.makedirs('pretrained', exist_ok=True)
- os.makedirs('uvr5_weights', exist_ok=True)
-
-def download_file(url, filepath):
- response = requests.get(url, stream=True)
- response.raise_for_status()
-
- with open(filepath, "wb") as file:
- for chunk in response.iter_content(chunk_size=8192):
- if chunk:
- file.write(chunk)
-
-def download_pretrained_models():
- pretrained_models = {
- "pretrained": [
- "D40k.pth",
- "G40k.pth",
- "f0D40k.pth",
- "f0G40k.pth"
- ],
- "pretrained_v2": [
- "D40k.pth",
- "G40k.pth",
- "f0D40k.pth",
- "f0G40k.pth",
- "f0G48k.pth",
- "f0D48k.pth"
- ],
- "uvr5_weights": [
- "HP2-人声vocals+非人声instrumentals.pth",
- "HP5-主旋律人声vocals+其他instrumentals.pth",
- "VR-DeEchoNormal.pth",
- "VR-DeEchoDeReverb.pth",
- "VR-DeEchoAggressive.pth",
- "HP5_only_main_vocal.pth",
- "HP3_all_vocals.pth",
- "HP2_all_vocals.pth"
- ]
- }
- part2 = "I"
- base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
- base_path = "/content/Applio-RVC-Fork/"
- base_pathm = base_path
-
- # Calculate total number of files to download
- total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt
-
- with tqdm(total=total_files, desc="Downloading files") as pbar:
- for folder, models in pretrained_models.items():
- folder_path = os.path.join(base_path, folder)
- os.makedirs(folder_path, exist_ok=True)
- for model in models:
- url = base_url + folder + "/" + model
- filepath = os.path.join(folder_path, model)
- download_file(url, filepath)
- pbar.update()
-
- # Download hubert_base.pt to the base path
- hubert_url = base_url + "hubert_base.pt"
- hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
- download_file(hubert_url, hubert_filepath)
- pbar.update()
-def clone_repository(run_download):
- with ThreadPoolExecutor(max_workers=2) as executor:
- executor.submit(run_script)
- if run_download:
- executor.submit(download_pretrained_models)
diff --git a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/train.py b/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/train.py
deleted file mode 100644
index 8a9cec9ed80d9f362984779548dcec921a636a04..0000000000000000000000000000000000000000
--- a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/train.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-import os.path as osp
-from basicsr.train import train_pipeline
-
-import realesrgan.archs
-import realesrgan.data
-import realesrgan.models
-
-if __name__ == '__main__':
- root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
- train_pipeline(root_path)
diff --git a/spaces/EronSamez/RVC_HFmeu/infer/modules/uvr5/mdxnet.py b/spaces/EronSamez/RVC_HFmeu/infer/modules/uvr5/mdxnet.py
deleted file mode 100644
index 86a066893ad99cfed77788027a9deb8ed486a7f2..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/infer/modules/uvr5/mdxnet.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import os
-import logging
-
-logger = logging.getLogger(__name__)
-
-import librosa
-import numpy as np
-import soundfile as sf
-import torch
-from tqdm import tqdm
-
-cpu = torch.device("cpu")
-
-
-class ConvTDFNetTrim:
- def __init__(
- self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
- ):
- super(ConvTDFNetTrim, self).__init__()
-
- self.dim_f = dim_f
- self.dim_t = 2**dim_t
- self.n_fft = n_fft
- self.hop = hop
- self.n_bins = self.n_fft // 2 + 1
- self.chunk_size = hop * (self.dim_t - 1)
- self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
- device
- )
- self.target_name = target_name
- self.blender = "blender" in model_name
-
- self.dim_c = 4
- out_c = self.dim_c * 4 if target_name == "*" else self.dim_c
- self.freq_pad = torch.zeros(
- [1, out_c, self.n_bins - self.dim_f, self.dim_t]
- ).to(device)
-
- self.n = L // 2
-
- def stft(self, x):
- x = x.reshape([-1, self.chunk_size])
- x = torch.stft(
- x,
- n_fft=self.n_fft,
- hop_length=self.hop,
- window=self.window,
- center=True,
- return_complex=True,
- )
- x = torch.view_as_real(x)
- x = x.permute([0, 3, 1, 2])
- x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
- [-1, self.dim_c, self.n_bins, self.dim_t]
- )
- return x[:, :, : self.dim_f]
-
- def istft(self, x, freq_pad=None):
- freq_pad = (
- self.freq_pad.repeat([x.shape[0], 1, 1, 1])
- if freq_pad is None
- else freq_pad
- )
- x = torch.cat([x, freq_pad], -2)
- c = 4 * 2 if self.target_name == "*" else 2
- x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
- [-1, 2, self.n_bins, self.dim_t]
- )
- x = x.permute([0, 2, 3, 1])
- x = x.contiguous()
- x = torch.view_as_complex(x)
- x = torch.istft(
- x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
- )
- return x.reshape([-1, c, self.chunk_size])
-
-
-def get_models(device, dim_f, dim_t, n_fft):
- return ConvTDFNetTrim(
- device=device,
- model_name="Conv-TDF",
- target_name="vocals",
- L=11,
- dim_f=dim_f,
- dim_t=dim_t,
- n_fft=n_fft,
- )
-
-
-class Predictor:
- def __init__(self, args):
- import onnxruntime as ort
-
- logger.info(ort.get_available_providers())
- self.args = args
- self.model_ = get_models(
- device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
- )
- self.model = ort.InferenceSession(
- os.path.join(args.onnx, self.model_.target_name + ".onnx"),
- providers=[
- "CUDAExecutionProvider",
- "DmlExecutionProvider",
- "CPUExecutionProvider",
- ],
- )
- logger.info("ONNX load done")
-
- def demix(self, mix):
- samples = mix.shape[-1]
- margin = self.args.margin
- chunk_size = self.args.chunks * 44100
- assert not margin == 0, "margin cannot be zero!"
- if margin > chunk_size:
- margin = chunk_size
-
- segmented_mix = {}
-
- if self.args.chunks == 0 or samples < chunk_size:
- chunk_size = samples
-
- counter = -1
- for skip in range(0, samples, chunk_size):
- counter += 1
-
- s_margin = 0 if counter == 0 else margin
- end = min(skip + chunk_size + margin, samples)
-
- start = skip - s_margin
-
- segmented_mix[skip] = mix[:, start:end].copy()
- if end == samples:
- break
-
- sources = self.demix_base(segmented_mix, margin_size=margin)
- """
- mix:(2,big_sample)
- segmented_mix:offset->(2,small_sample)
- sources:(1,2,big_sample)
- """
- return sources
-
- def demix_base(self, mixes, margin_size):
- chunked_sources = []
- progress_bar = tqdm(total=len(mixes))
- progress_bar.set_description("Processing")
- for mix in mixes:
- cmix = mixes[mix]
- sources = []
- n_sample = cmix.shape[1]
- model = self.model_
- trim = model.n_fft // 2
- gen_size = model.chunk_size - 2 * trim
- pad = gen_size - n_sample % gen_size
- mix_p = np.concatenate(
- (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
- )
- mix_waves = []
- i = 0
- while i < n_sample + pad:
- waves = np.array(mix_p[:, i : i + model.chunk_size])
- mix_waves.append(waves)
- i += gen_size
- mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
- with torch.no_grad():
- _ort = self.model
- spek = model.stft(mix_waves)
- if self.args.denoise:
- spec_pred = (
- -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
- + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
- )
- tar_waves = model.istft(torch.tensor(spec_pred))
- else:
- tar_waves = model.istft(
- torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
- )
- tar_signal = (
- tar_waves[:, :, trim:-trim]
- .transpose(0, 1)
- .reshape(2, -1)
- .numpy()[:, :-pad]
- )
-
- start = 0 if mix == 0 else margin_size
- end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
- if margin_size == 0:
- end = None
- sources.append(tar_signal[:, start:end])
-
- progress_bar.update(1)
-
- chunked_sources.append(sources)
- _sources = np.concatenate(chunked_sources, axis=-1)
- # del self.model
- progress_bar.close()
- return _sources
-
- def prediction(self, m, vocal_root, others_root, format):
- os.makedirs(vocal_root, exist_ok=True)
- os.makedirs(others_root, exist_ok=True)
- basename = os.path.basename(m)
- mix, rate = librosa.load(m, mono=False, sr=44100)
- if mix.ndim == 1:
- mix = np.asfortranarray([mix, mix])
- mix = mix.T
- sources = self.demix(mix.T)
- opt = sources[0].T
- if format in ["wav", "flac"]:
- sf.write(
- "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
- )
- sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
- else:
- path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
- path_other = "%s/%s_others.wav" % (others_root, basename)
- sf.write(path_vocal, mix - opt, rate)
- sf.write(path_other, opt, rate)
- if os.path.exists(path_vocal):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path_vocal, path_vocal[:-4] + ".%s" % format)
- )
- if os.path.exists(path_other):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path_other, path_other[:-4] + ".%s" % format)
- )
-
-
-class MDXNetDereverb:
- def __init__(self, chunks, device):
- self.onnx = "assets/uvr5_weights/onnx_dereverb_By_FoxJoy"
- self.shifts = 10 # 'Predict with randomised equivariant stabilisation'
- self.mixing = "min_mag" # ['default','min_mag','max_mag']
- self.chunks = chunks
- self.margin = 44100
- self.dim_t = 9
- self.dim_f = 3072
- self.n_fft = 6144
- self.denoise = True
- self.pred = Predictor(self)
- self.device = device
-
- def path_audio(self, input, vocal_root, others_root, format):
- self.pred.prediction(input, vocal_root, others_root, format)
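A hypothetical usage sketch of the dereverb wrapper; the import path assumes the repository root is on `sys.path`, the file paths are placeholders, and the ONNX weights must already exist under `assets/uvr5_weights/onnx_dereverb_By_FoxJoy`:

```python
# Hypothetical usage; paths are placeholders and onnxruntime plus the ONNX weights are required.
from infer.modules.uvr5.mdxnet import MDXNetDereverb

dereverb = MDXNetDereverb(chunks=15, device="cpu")
dereverb.path_audio("song.wav", "out/vocals", "out/others", "wav")
```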
diff --git a/spaces/FYP-23-S1-21/Refineverse_Plugin/createDatabase.py b/spaces/FYP-23-S1-21/Refineverse_Plugin/createDatabase.py
deleted file mode 100644
index 72ce93baedaa3597d6b7b6bad2301da3684bb45a..0000000000000000000000000000000000000000
--- a/spaces/FYP-23-S1-21/Refineverse_Plugin/createDatabase.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is used to create an SQLite database.
-# If adding more tables, you should delete the existing database, or alter the code here to drop the pre-existing tables first.
-import sqlite3
-
-# Establishes a connection to the specified DB. If the DB does not exist, it creates a new one.
-connection = sqlite3.connect("Refineverse.db")
-cursor = connection.cursor() # A cursor object that is used to handle data.
-
-# Creating the Breakdown table
-cursor.execute('''
- CREATE TABLE IF NOT EXISTS Breakdown (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- user_story TEXT,
- assignedLabel TEXT
- )
-''')
-
-# Creating the TextSummarization table
-cursor.execute('''
- CREATE TABLE IF NOT EXISTS TextSummarization (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- Entered_story TEXT,
- summary TEXT
- )
-''')
-
-# Creating the Translation table
-cursor.execute('''
- CREATE TABLE IF NOT EXISTS Translation (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- input_text TEXT,
- translated_text TEXT
- )
-''')
-
-# Creating the TextGeneration table
-cursor.execute('''
- CREATE TABLE IF NOT EXISTS TextGeneration (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- userStory TEXT,
- generatedStory TEXT
- )
-''')
-
-connection.close() # Closes the connection
\ No newline at end of file
diff --git a/spaces/Felix123456/bingo/src/pages/api/image.ts b/spaces/Felix123456/bingo/src/pages/api/image.ts
deleted file mode 100644
index 4b894bea86050c0f3888cc56f60c0cb7f8b57cfc..0000000000000000000000000000000000000000
--- a/spaces/Felix123456/bingo/src/pages/api/image.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-'use server'
-
-import { NextApiRequest, NextApiResponse } from 'next'
-import { debug } from '@/lib/isomorphic'
-import { createHeaders } from '@/lib/utils'
-import { createImage } from '@/lib/bots/bing/utils'
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- const { prompt, id } = req.query
- if (!prompt) {
- return res.json({
- result: {
- value: 'Image',
- message: 'No Prompt'
- }
- })
- }
- try {
- const headers = createHeaders(req.cookies, {
- IMAGE_BING_COOKIE: process.env.IMAGE_BING_COOKIE
- })
-
- debug('headers', headers)
- const response = await createImage(String(prompt), String(id), {
- ...headers,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- })
- res.writeHead(200, {
- 'Content-Type': 'text/plain; charset=UTF-8',
- })
- return res.end(response)
- } catch (e) {
- return res.json({
- result: {
- value: 'Error',
- message: `${e}`
- }
- })
- }
-}
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/align_trans.py b/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/align_trans.py
deleted file mode 100644
index 07f1eb365462c2ec5bbac6d1854c786b6fd6be90..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/align_trans.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import cv2
-import numpy as np
-
-from .matlab_cp2tform import get_similarity_transform_for_cv2
-
-# reference facial points, a list of coordinates (x,y)
-REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278],
- [33.54930115, 92.3655014], [62.72990036, 92.20410156]]
-
-DEFAULT_CROP_SIZE = (96, 112)
-
-
-class FaceWarpException(Exception):
-
- def __str__(self):
- return 'In File {}:{}'.format(__file__, super().__str__())
-
-
-def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):
- """
- Function:
- ----------
- get reference 5 key points according to crop settings:
- 0. Set default crop_size:
- if default_square:
- crop_size = (112, 112)
- else:
- crop_size = (96, 112)
- 1. Pad the crop_size by inner_padding_factor in each side;
- 2. Resize crop_size into (output_size - outer_padding*2),
- pad into output_size with outer_padding;
- 3. Output reference_5point;
- Parameters:
- ----------
- @output_size: (w, h) or None
- size of aligned face image
- @inner_padding_factor: (w_factor, h_factor)
- padding factor for inner (w, h)
- @outer_padding: (w_pad, h_pad)
- each row is a pair of coordinates (x, y)
- @default_square: True or False
- if True:
- default crop_size = (112, 112)
- else:
- default crop_size = (96, 112);
- !!! make sure, if output_size is not None:
- (output_size - outer_padding)
- = some_scale * (default crop_size * (1.0 +
- inner_padding_factor))
- Returns:
- ----------
- @reference_5point: 5x2 np.array
- each row is a pair of transformed coordinates (x, y)
- """
-
- tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
- tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
-
- # 0) make the inner region a square
- if default_square:
- size_diff = max(tmp_crop_size) - tmp_crop_size
- tmp_5pts += size_diff / 2
- tmp_crop_size += size_diff
-
- if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):
-
- return tmp_5pts
-
- if (inner_padding_factor == 0 and outer_padding == (0, 0)):
- if output_size is None:
- return tmp_5pts
- else:
- raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
-
- # check output size
- if not (0 <= inner_padding_factor <= 1.0):
- raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
-
- if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):
- output_size = (tmp_crop_size *
- (1 + inner_padding_factor * 2)).astype(np.int32)
- output_size += np.array(outer_padding)
- if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):
- raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')
-
- # 1) pad the inner region according inner_padding_factor
- if inner_padding_factor > 0:
- size_diff = tmp_crop_size * inner_padding_factor * 2
- tmp_5pts += size_diff / 2
- tmp_crop_size += np.round(size_diff).astype(np.int32)
-
- # 2) resize the padded inner region
- size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
-
- if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
- raise FaceWarpException('Must have (output_size - outer_padding)'
- ' = some_scale * (crop_size * (1.0 + inner_padding_factor))')
-
- scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
- tmp_5pts = tmp_5pts * scale_factor
- # size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
- # tmp_5pts = tmp_5pts + size_diff / 2
- tmp_crop_size = size_bf_outer_pad
-
- # 3) add outer_padding to make output_size
- reference_5point = tmp_5pts + np.array(outer_padding)
- tmp_crop_size = output_size
-
- return reference_5point
-
-
-def get_affine_transform_matrix(src_pts, dst_pts):
- """
- Function:
- ----------
- get affine transform matrix 'tfm' from src_pts to dst_pts
- Parameters:
- ----------
- @src_pts: Kx2 np.array
- source points matrix, each row is a pair of coordinates (x, y)
- @dst_pts: Kx2 np.array
- destination points matrix, each row is a pair of coordinates (x, y)
- Returns:
- ----------
- @tfm: 2x3 np.array
- transform matrix from src_pts to dst_pts
- """
-
- tfm = np.float32([[1, 0, 0], [0, 1, 0]])
- n_pts = src_pts.shape[0]
- ones = np.ones((n_pts, 1), src_pts.dtype)
- src_pts_ = np.hstack([src_pts, ones])
- dst_pts_ = np.hstack([dst_pts, ones])
-
- A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)
-
- if rank == 3:
- tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]])
- elif rank == 2:
- tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]])
-
- return tfm
-
-
-def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='similarity'):
- """
- Function:
- ----------
- apply affine transform 'trans' to uv
- Parameters:
- ----------
- @src_img: HxWx3 np.array
- input image
- @facial_pts: could be
- 1)a list of K coordinates (x,y)
- or
- 2) Kx2 or 2xK np.array
- each row or col is a pair of coordinates (x, y)
- @reference_pts: could be
- 1) a list of K coordinates (x,y)
- or
- 2) Kx2 or 2xK np.array
- each row or col is a pair of coordinates (x, y)
- or
- 3) None
- if None, use default reference facial points
- @crop_size: (w, h)
- output face image size
- @align_type: transform type, could be one of
- 1) 'similarity': use similarity transform
- 2) 'cv2_affine': use the first 3 points to do affine transform,
- by calling cv2.getAffineTransform()
- 3) 'affine': use all points to do affine transform
- Returns:
- ----------
- @face_img: output face image with size (w, h) = @crop_size
- """
-
- if reference_pts is None:
- if crop_size[0] == 96 and crop_size[1] == 112:
- reference_pts = REFERENCE_FACIAL_POINTS
- else:
- default_square = False
- inner_padding_factor = 0
- outer_padding = (0, 0)
- output_size = crop_size
-
- reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,
- default_square)
-
- ref_pts = np.float32(reference_pts)
- ref_pts_shp = ref_pts.shape
- if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
- raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')
-
- if ref_pts_shp[0] == 2:
- ref_pts = ref_pts.T
-
- src_pts = np.float32(facial_pts)
- src_pts_shp = src_pts.shape
- if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
- raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')
-
- if src_pts_shp[0] == 2:
- src_pts = src_pts.T
-
- if src_pts.shape != ref_pts.shape:
- raise FaceWarpException('facial_pts and reference_pts must have the same shape')
-
- if align_type == 'cv2_affine':
- tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
- elif align_type == 'affine':
- tfm = get_affine_transform_matrix(src_pts, ref_pts)
- else:
- tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)
-
- face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))
-
- return face_img
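A minimal usage sketch for the alignment helpers in the file above, assuming the module is importable from the CodeFormer source tree; the image path and the five landmark coordinates are illustrative placeholders, not real detector output.

```python
import cv2
import numpy as np
from facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face

# Hypothetical inputs: a BGR image and five landmarks (eyes, nose tip, mouth corners) in (x, y) order.
src_img = cv2.imread('face.jpg')
facial_pts = np.array([[210.0, 245.0], [305.0, 240.0], [260.0, 300.0],
                       [225.0, 355.0], [300.0, 350.0]], dtype=np.float32)

# Reference points for a square 112x112 crop, then a similarity-transform warp.
ref_pts = get_reference_facial_points(output_size=(112, 112), inner_padding_factor=0.0,
                                      outer_padding=(0, 0), default_square=True)
aligned = warp_and_crop_face(src_img, facial_pts, reference_pts=ref_pts,
                             crop_size=(112, 112), align_type='similarity')
cv2.imwrite('face_aligned.jpg', aligned)  # 112x112 aligned face crop
```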
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/parsing/resnet.py b/spaces/FelixLuoX/codeformer/CodeFormer/facelib/parsing/resnet.py
deleted file mode 100644
index fec8e82cf64469fb51be21ad5130217052addbda..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/parsing/resnet.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-def conv3x3(in_planes, out_planes, stride=1):
- """3x3 convolution with padding"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
-
- def __init__(self, in_chan, out_chan, stride=1):
- super(BasicBlock, self).__init__()
- self.conv1 = conv3x3(in_chan, out_chan, stride)
- self.bn1 = nn.BatchNorm2d(out_chan)
- self.conv2 = conv3x3(out_chan, out_chan)
- self.bn2 = nn.BatchNorm2d(out_chan)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = None
- if in_chan != out_chan or stride != 1:
- self.downsample = nn.Sequential(
- nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
- nn.BatchNorm2d(out_chan),
- )
-
- def forward(self, x):
- residual = self.conv1(x)
- residual = F.relu(self.bn1(residual))
- residual = self.conv2(residual)
- residual = self.bn2(residual)
-
- shortcut = x
- if self.downsample is not None:
- shortcut = self.downsample(x)
-
- out = shortcut + residual
- out = self.relu(out)
- return out
-
-
-def create_layer_basic(in_chan, out_chan, bnum, stride=1):
- layers = [BasicBlock(in_chan, out_chan, stride=stride)]
- for i in range(bnum - 1):
- layers.append(BasicBlock(out_chan, out_chan, stride=1))
- return nn.Sequential(*layers)
-
-
-class ResNet18(nn.Module):
-
- def __init__(self):
- super(ResNet18, self).__init__()
- self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
- self.bn1 = nn.BatchNorm2d(64)
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
- self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
- self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
- self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
- self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
-
- def forward(self, x):
- x = self.conv1(x)
- x = F.relu(self.bn1(x))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- feat8 = self.layer2(x) # 1/8
- feat16 = self.layer3(feat8) # 1/16
- feat32 = self.layer4(feat16) # 1/32
- return feat8, feat16, feat32
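A quick shape check for the backbone above — a sketch assuming the class is importable from this repo; it only confirms the 1/8, 1/16 and 1/32 strides noted in the forward-pass comments.

```python
import torch
from facelib.parsing.resnet import ResNet18  # assumed import path from this repo

net = ResNet18().eval()
x = torch.randn(1, 3, 512, 512)  # dummy RGB batch
with torch.no_grad():
    feat8, feat16, feat32 = net(x)
print(feat8.shape)   # torch.Size([1, 128, 64, 64]) -> stride 8
print(feat16.shape)  # torch.Size([1, 256, 32, 32]) -> stride 16
print(feat32.shape)  # torch.Size([1, 512, 16, 16]) -> stride 32
```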
diff --git a/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/latex/attention/introduction.tex b/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/latex/attention/introduction.tex
deleted file mode 100644
index 1baa8915f4cf7aec2520894a87470fc9436d954b..0000000000000000000000000000000000000000
--- a/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/latex/attention/introduction.tex
+++ /dev/null
@@ -1,18 +0,0 @@
-Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
-
-Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
-%\marginpar{not sure if the memory constraints are understandable here}
-Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.
-
-%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
-
-Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
-
-%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
-%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
-
-In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
-%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
-
-% Just a standard paragraph with citations, rewrite.
-%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
\ No newline at end of file
diff --git a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/japanese.py b/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/japanese.py
deleted file mode 100644
index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/japanese.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import re
-from unidecode import unidecode
-import pyopenjtalk
-
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (symbol, Japanese) pairs for marks:
-_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('%', 'パーセント')
-]]
-
-# List of (romaji, ipa) pairs for marks:
-_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ts', 'ʦ'),
- ('u', 'ɯ'),
- ('j', 'ʥ'),
- ('y', 'j'),
- ('ni', 'n^i'),
- ('nj', 'n^'),
- ('hi', 'çi'),
- ('hj', 'ç'),
- ('f', 'ɸ'),
- ('I', 'i*'),
- ('U', 'ɯ*'),
- ('r', 'ɾ')
-]]
-
-# List of (romaji, ipa2) pairs for marks:
-_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('u', 'ɯ'),
- ('ʧ', 'tʃ'),
- ('j', 'dʑ'),
- ('y', 'j'),
- ('ni', 'n^i'),
- ('nj', 'n^'),
- ('hi', 'çi'),
- ('hj', 'ç'),
- ('f', 'ɸ'),
- ('I', 'i*'),
- ('U', 'ɯ*'),
- ('r', 'ɾ')
-]]
-
-# List of (consonant, sokuon) pairs:
-_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'Q([↑↓]*[kg])', r'k#\1'),
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
- (r'Q([↑↓]*[sʃ])', r's\1'),
- (r'Q([↑↓]*[pb])', r'p#\1')
-]]
-
-# List of (consonant, hatsuon) pairs:
-_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'N([↑↓]*[pbm])', r'm\1'),
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
- (r'N([↑↓]*[tdn])', r'n\1'),
- (r'N([↑↓]*[kg])', r'ŋ\1')
-]]
-
-
-def symbols_to_japanese(text):
- for regex, replacement in _symbols_to_japanese:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def japanese_to_romaji_with_accent(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- text = symbols_to_japanese(text)
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = ''
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- if text != '':
- text += ' '
- labels = pyopenjtalk.extract_fullcontext(sentence)
- for n, label in enumerate(labels):
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
- if phoneme not in ['sil', 'pau']:
- text += phoneme.replace('ch', 'ʧ').replace('sh',
- 'ʃ').replace('cl', 'Q')
- else:
- continue
- # n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
- a2_next = -1
- else:
- a2_next = int(
- re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
- # Accent phrase boundary
- if a3 == 1 and a2_next == 1:
- text += ' '
- # Falling
- elif a1 == 0 and a2_next == a2 + 1:
- text += '↓'
- # Rising
- elif a2 == 1 and a2_next == 2:
- text += '↑'
- if i < len(marks):
- text += unidecode(marks[i]).replace(' ', '')
- return text
-
-
-def get_real_sokuon(text):
- for regex, replacement in _real_sokuon:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def get_real_hatsuon(text):
- for regex, replacement in _real_hatsuon:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def japanese_to_ipa(text):
- text = japanese_to_romaji_with_accent(text).replace('...', '…')
- text = re.sub(
- r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
- text = get_real_sokuon(text)
- text = get_real_hatsuon(text)
- for regex, replacement in _romaji_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def japanese_to_ipa2(text):
- text = japanese_to_romaji_with_accent(text).replace('...', '…')
- text = get_real_sokuon(text)
- text = get_real_hatsuon(text)
- for regex, replacement in _romaji_to_ipa2:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def japanese_to_ipa3(text):
- text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace(
- 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a')
- text = re.sub(
- r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
- text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text)
- return text
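A short usage sketch for the converters above; it assumes pyopenjtalk and unidecode are installed and that the module is importable as text.japanese (the package layout used by this repo). The input string is just an example.

```python
from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa2

text = 'こんにちは、世界。'
print(japanese_to_romaji_with_accent(text))  # romaji with ↑/↓ pitch-accent marks
print(japanese_to_ipa2(text))                # IPA-style phoneme string
```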
diff --git a/spaces/Frorozcol/financIA/src/__init__.py b/spaces/Frorozcol/financIA/src/__init__.py
deleted file mode 100644
index e579680581adba3c0dc8424d9537b06771c91c45..0000000000000000000000000000000000000000
--- a/spaces/Frorozcol/financIA/src/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .predict import *
\ No newline at end of file
diff --git a/spaces/GAIR/Factool/app.py b/spaces/GAIR/Factool/app.py
deleted file mode 100644
index 702edb3dac9e543d31442c82cc3c4df4ee804cb9..0000000000000000000000000000000000000000
--- a/spaces/GAIR/Factool/app.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import gradio as gr
-import openai
-import json
-from factool import Factool
-import os
-
-
-def chat_with_gpt(api_key, model, message):
- openai.api_key = api_key
- response = openai.ChatCompletion.create(
- model=model,
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": message},
- ]
- )
- openai.api_key = ''
- return response.choices[0].message['content']
-
-def fact_check(openai_api_key, serper_api_key, scraper_api_key, model, message, response, category):
- os.environ['SCRAPER_API_KEY'] = ''
- os.environ['SERPER_API_KEY'] = ''
- os.environ['OPENAI_API_KEY'] = ''
- os.environ['SCRAPER_API_KEY'] = scraper_api_key
- os.environ['SERPER_API_KEY'] = serper_api_key
- os.environ['OPENAI_API_KEY'] = openai_api_key
- factool_instance = Factool(model)
- inputs = [
- {
- "prompt": message,
- "response": response,
- "category": category,
- "search_type": "online",
- },
- ]
- response_list = factool_instance.run(inputs)
- os.environ['SCRAPER_API_KEY'] = ''
- os.environ['SERPER_API_KEY'] = ''
- os.environ['OPENAI_API_KEY'] = ''
- openai.api_key = ''
- return response_list
-
-with gr.Blocks() as demo:
- openai_api_key = gr.Textbox(label="OpenAI API Key")
- serper_api_key = gr.Textbox(label="Serper API Key")
- scraper_api_key = gr.Textbox(label="Scraper API Key")
- chat_model = gr.Radio(choices=["gpt-3.5-turbo", "gpt-4"], label="Chat Model")
- prompt = gr.Textbox(label="Prompt")
- chat_btn = gr.Button("Get Response!")
- response = gr.Textbox(label="Response")
- category = gr.Radio(choices=["kbqa", "code", "math", "scientific"], label="Category")
- fact_check_model = gr.Radio(choices=["gpt-3.5-turbo", "gpt-4"], label="Fact Check Model")
- fact_check_result = gr.Textbox(label="Fact Check Result")
- # chat_btn = gr.Button("Get Response!")
- fact_check_btn = gr.Button("Run FactTool!")
- chat_btn.click(chat_with_gpt, inputs=[openai_api_key,chat_model,prompt], outputs=response)
- fact_check_btn.click(fact_check, inputs=[openai_api_key,serper_api_key,scraper_api_key,fact_check_model,prompt,response,category], outputs=fact_check_result)
-
-demo.launch()
-
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/tasks/assembling_kits_seq.py b/spaces/Gen-Sim/Gen-Sim/cliport/tasks/assembling_kits_seq.py
deleted file mode 100644
index 390f4c253e039913aff69c22f6c32eab375a84ae..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/tasks/assembling_kits_seq.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import os
-
-import numpy as np
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-
-class AssemblingKitsSeq(Task):
- """ Precisely place each specified shape in the specified hole following the order prescribed in the
-language instruction at each timestep."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 7
- self.homogeneous = False
-
- self.lang_template = "put the {color} {obj} in the {loc}{obj} hole"
- self.task_completed_desc = "done assembling kit."
- self.additional_reset()
-
- def reset(self, env):
- super().reset(env)
-
- # Add kit.
- kit_size = (0.28, 0.2, 0.005)
- kit_urdf = 'kitting/kit.urdf'
- kit_pose = self.get_random_pose(env, kit_size)
- env.add_object(kit_urdf, kit_pose, 'fixed')
-
- # Shape Names:
- shapes = utils.assembling_kit_shapes
- n_objects = 5
- obj_shapes = self.get_kitting_shapes(n_objects)
- colors, color_names = utils.get_colors(mode=self.mode)
-
- # Build kit.
- targets = []
- targets_spatial_desc = []
- targ_pos = [[-0.09, 0.045, 0.0014], [0, 0.045, 0.0014],
- [0.09, 0.045, 0.0014], [-0.045, -0.045, 0.0014],
- [0.045, -0.045, 0.0014]]
- template = 'kitting/object-template.urdf'
-
- for i in range(n_objects):
- shape = os.path.join(self.assets_root, 'kitting',
- f'{obj_shapes[i]:02d}.obj')
- scale = [0.003, 0.003, 0.0001] # .0005
- pos = utils.apply(kit_pose, targ_pos[i])
- theta = np.random.rand() * 2 * np.pi
- rot = utils.eulerXYZ_to_quatXYZW((0, 0, theta))
- replace = {'FNAME': (shape,), 'SCALE': scale, 'COLOR': [0.2, 0.2, 0.2]}
-
- # IMPORTANT: REPLACE THE TEMPLATE URDF
- urdf = self.fill_template(template, replace)
- env.add_object(urdf, (pos, rot), 'fixed')
- targets.append((pos, rot))
-
- # Decide spatial description based on the location of the hole (top-down view).
- shape_type = obj_shapes[i]
- if list(obj_shapes).count(obj_shapes[i]) > 1:
- duplicate_shapes = [j for j, o in enumerate(obj_shapes) if i != j and o == shape_type]
- other_poses = [utils.apply(kit_pose, targ_pos[d]) for d in duplicate_shapes]
-
- if all(pos[0] < op[0] and abs(pos[0]-op[0]) > abs(pos[1]-op[1]) for op in other_poses):
- spatial_desc = "top "
- elif all(pos[0] > op[0] and abs(pos[0]-op[0]) > abs(pos[1]-op[1]) for op in other_poses):
- spatial_desc = "bottom "
- elif all(pos[1] < op[1] for op in other_poses):
- spatial_desc = "left "
- elif all(pos[1] > op[1] for op in other_poses):
- spatial_desc = "right "
- else:
- spatial_desc = "middle "
-
- targets_spatial_desc.append(spatial_desc)
- else:
- targets_spatial_desc.append("")
-
- # Add objects.
- objects, matches = self.make_kitting_objects(env, targets=targets, obj_shapes=obj_shapes, n_objects=n_objects, colors=colors)
- target_idxs = list(range(n_objects))
- np.random.shuffle(target_idxs)
- for i in target_idxs:
- language_goal = (self.lang_template.format(color=color_names[i],
- obj=shapes[obj_shapes[i]],
- loc=targets_spatial_desc[i]))
- self.add_goal(objs=[objects[i]], matches=np.int32([[1]]), targ_poses=[targets[i]], replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1 / n_objects, language_goal=language_goal)
-
- self.max_steps = n_objects
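The duplicate-shape branch above decides a spatial word by comparing a hole's position against the other holes of the same shape. A standalone sketch of that rule with made-up hole positions, assuming the same convention as the code (index 0 is the top/bottom axis, index 1 is the left/right axis):

```python
def spatial_desc(pos, other_poses):
    # Mirrors the decision rule in AssemblingKitsSeq.reset for duplicate shapes.
    if all(pos[0] < op[0] and abs(pos[0] - op[0]) > abs(pos[1] - op[1]) for op in other_poses):
        return "top "
    if all(pos[0] > op[0] and abs(pos[0] - op[0]) > abs(pos[1] - op[1]) for op in other_poses):
        return "bottom "
    if all(pos[1] < op[1] for op in other_poses):
        return "left "
    if all(pos[1] > op[1] for op in other_poses):
        return "right "
    return "middle "

print(spatial_desc((-0.09, 0.045), [(0.09, 0.045)]))  # "top "
print(spatial_desc((0.0, -0.045), [(0.0, 0.045)]))    # "left "
```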
diff --git a/spaces/GeorgeOrville/bingo/src/lib/bots/bing/sr.ts b/spaces/GeorgeOrville/bingo/src/lib/bots/bing/sr.ts
deleted file mode 100644
index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/lib/bots/bing/sr.ts
+++ /dev/null
@@ -1,106 +0,0 @@
-// @ts-ignore
-const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? (
- // @ts-ignore
- window.SpeechRecognition ||
- window.webkitSpeechRecognition ||
- // @ts-ignore
- window.mozSpeechRecognition ||
- // @ts-ignore
- window.msSpeechRecognition ||
- // @ts-ignore
- window.oSpeechRecognition
-) as typeof webkitSpeechRecognition : undefined
-
-type subscriber = (msg: string, command?: string) => void
-
-export class SR {
- recognition?: SpeechRecognition
- onchange?: subscriber
- transcript: boolean = false
- listening: boolean = false
- private commandsRe?: RegExp
- constructor(commands: string[]) {
- this.recognition = SpeechRecognitionPolyfill ? new SpeechRecognitionPolyfill() : undefined
- if (!this.recognition) {
- return
- }
- this.configuration('zh-CN')
- if (commands.length) {
- this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`)
- }
- this.recognition.onresult = this.speechRecognition
- this.recognition.onerror = (err) => {
- console.log('err', err.error)
- this.stop()
- }
- this.recognition.onend = () => {
- if (this.recognition && this.listening) {
- this.recognition.start()
- }
- }
- }
-
- speechRecognition = (event: SpeechRecognitionEvent) => {
- if (!this.listening) return
- for (var i = event.resultIndex; i < event.results.length; i++) {
- let result = event.results[i]
- if (result.isFinal) {
- var alt = result[0]
- const text = alt.transcript.trim()
- if (this.commandsRe && this.commandsRe.test(text)) {
- return this.onchange?.('', RegExp.$1)
- }
- if (!this.transcript) return
- this.onchange?.(text)
- }
- }
- }
-
- private configuration = async (lang: string = 'zh-CN') => {
- return new Promise((resolve) => {
- if (this.recognition) {
- this.recognition.continuous = true
- this.recognition.lang = lang
- this.recognition.onstart = resolve
- }
- })
- }
-
- start = async () => {
- if (this.recognition && !this.listening) {
- await this.recognition.start()
- this.transcript = true
- this.listening = true
- }
- }
-
- stop = () => {
- if (this.recognition) {
- this.recognition.stop()
- this.transcript = false
- this.listening = false
- }
- }
-
-
- pause = () => {
- if (this.recognition) {
- this.transcript = false
- }
- }
-
- resume = () => {
- if (this.recognition) {
- this.transcript = true
- }
- }
-
- abort = () => {
- if (this.recognition && this.transcript) {
- this.recognition.abort()
- this.transcript = false
- this.listening = false
- }
- }
-}
-
diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/data/realesrgan_dataset.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/data/realesrgan_dataset.py
deleted file mode 100644
index 5d2a2fbd7b19d1eb7e320a170531fbf676ce7cec..0000000000000000000000000000000000000000
--- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/data/realesrgan_dataset.py
+++ /dev/null
@@ -1,216 +0,0 @@
-import cv2
-import math
-import numpy as np
-import os
-import os.path as osp
-import random
-import time
-import torch
-from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
-from basicsr.data.transforms import augment
-from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
-from basicsr.utils.registry import DATASET_REGISTRY
-from torch.utils import data as data
-
-
-@DATASET_REGISTRY.register()
-class RealESRGANDataset(data.Dataset):
- """Dataset used for Real-ESRGAN model:
- Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
-
- It loads gt (Ground-Truth) images, and augments them.
- It also generates blur kernels and sinc kernels for generating low-quality images.
- Note that the low-quality images are processed in tensors on GPUs for faster processing.
-
- Args:
- opt (dict): Config for train datasets. It contains the following keys:
- dataroot_gt (str): Data root path for gt.
- meta_info (str): Path for meta information file.
- io_backend (dict): IO backend type and other kwarg.
- use_hflip (bool): Use horizontal flips.
- use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
- Please see more options in the codes.
- """
-
- def __init__(self, opt):
- super(RealESRGANDataset, self).__init__()
- self.opt = opt
- self.file_client = None
- self.io_backend_opt = opt["io_backend"]
- self.gt_folder = opt["dataroot_gt"]
-
- # file client (lmdb io backend)
- if self.io_backend_opt["type"] == "lmdb":
- self.io_backend_opt["db_paths"] = [self.gt_folder]
- self.io_backend_opt["client_keys"] = ["gt"]
- if not self.gt_folder.endswith(".lmdb"):
- raise ValueError(
- f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}"
- )
- with open(osp.join(self.gt_folder, "meta_info.txt")) as fin:
- self.paths = [line.split(".")[0] for line in fin]
- else:
- # disk backend with meta_info
- # Each line in the meta_info describes the relative path to an image
- with open(self.opt["meta_info"]) as fin:
- paths = [line.strip().split(" ")[0] for line in fin]
- self.paths = [os.path.join(self.gt_folder, v) for v in paths]
-
- # blur settings for the first degradation
- self.blur_kernel_size = opt["blur_kernel_size"]
- self.kernel_list = opt["kernel_list"]
- self.kernel_prob = opt["kernel_prob"] # a list for each kernel probability
- self.blur_sigma = opt["blur_sigma"]
- self.betag_range = opt[
- "betag_range"
- ] # betag used in generalized Gaussian blur kernels
- self.betap_range = opt["betap_range"] # betap used in plateau blur kernels
- self.sinc_prob = opt["sinc_prob"] # the probability for sinc filters
-
- # blur settings for the second degradation
- self.blur_kernel_size2 = opt["blur_kernel_size2"]
- self.kernel_list2 = opt["kernel_list2"]
- self.kernel_prob2 = opt["kernel_prob2"]
- self.blur_sigma2 = opt["blur_sigma2"]
- self.betag_range2 = opt["betag_range2"]
- self.betap_range2 = opt["betap_range2"]
- self.sinc_prob2 = opt["sinc_prob2"]
-
- # a final sinc filter
- self.final_sinc_prob = opt["final_sinc_prob"]
-
- self.kernel_range = [
- 2 * v + 1 for v in range(3, 11)
- ] # kernel size ranges from 7 to 21
- # TODO: kernel range is now hard-coded, should be in the configure file
- self.pulse_tensor = torch.zeros(
- 21, 21
- ).float() # convolving with pulse tensor brings no blurry effect
- self.pulse_tensor[10, 10] = 1
-
- def __getitem__(self, index):
- if self.file_client is None:
- self.file_client = FileClient(
- self.io_backend_opt.pop("type"), **self.io_backend_opt
- )
-
- # -------------------------------- Load gt images -------------------------------- #
- # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
- gt_path = self.paths[index]
- # avoid errors caused by high latency in reading files
- retry = 3
- while retry > 0:
- try:
- img_bytes = self.file_client.get(gt_path, "gt")
- except (IOError, OSError) as e:
- logger = get_root_logger()
- logger.warning(
- f"File client error: {e}, remaining retry times: {retry - 1}"
- )
- # change another file to read
- index = random.randint(0, self.__len__() - 1)
- gt_path = self.paths[index]
- time.sleep(1) # sleep 1s for occasional server congestion
- else:
- break
- finally:
- retry -= 1
- img_gt = imfrombytes(img_bytes, float32=True)
-
- # -------------------- Do augmentation for training: flip, rotation -------------------- #
- img_gt = augment(img_gt, self.opt["use_hflip"], self.opt["use_rot"])
-
- # crop or pad to 400
- # TODO: 400 is hard-coded. You may change it accordingly
- h, w = img_gt.shape[0:2]
- crop_pad_size = 400
- # pad
- if h < crop_pad_size or w < crop_pad_size:
- pad_h = max(0, crop_pad_size - h)
- pad_w = max(0, crop_pad_size - w)
- img_gt = cv2.copyMakeBorder(
- img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101
- )
- # crop
- if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
- h, w = img_gt.shape[0:2]
- # randomly choose top and left coordinates
- top = random.randint(0, h - crop_pad_size)
- left = random.randint(0, w - crop_pad_size)
- img_gt = img_gt[top : top + crop_pad_size, left : left + crop_pad_size, ...]
-
- # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
- kernel_size = random.choice(self.kernel_range)
- if np.random.uniform() < self.opt["sinc_prob"]:
- # this sinc filter setting is for kernels ranging from [7, 21]
- if kernel_size < 13:
- omega_c = np.random.uniform(np.pi / 3, np.pi)
- else:
- omega_c = np.random.uniform(np.pi / 5, np.pi)
- kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
- else:
- kernel = random_mixed_kernels(
- self.kernel_list,
- self.kernel_prob,
- kernel_size,
- self.blur_sigma,
- self.blur_sigma,
- [-math.pi, math.pi],
- self.betag_range,
- self.betap_range,
- noise_range=None,
- )
- # pad kernel
- pad_size = (21 - kernel_size) // 2
- kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
-
- # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
- kernel_size = random.choice(self.kernel_range)
- if np.random.uniform() < self.opt["sinc_prob2"]:
- if kernel_size < 13:
- omega_c = np.random.uniform(np.pi / 3, np.pi)
- else:
- omega_c = np.random.uniform(np.pi / 5, np.pi)
- kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
- else:
- kernel2 = random_mixed_kernels(
- self.kernel_list2,
- self.kernel_prob2,
- kernel_size,
- self.blur_sigma2,
- self.blur_sigma2,
- [-math.pi, math.pi],
- self.betag_range2,
- self.betap_range2,
- noise_range=None,
- )
-
- # pad kernel
- pad_size = (21 - kernel_size) // 2
- kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
-
- # ------------------------------------- the final sinc kernel ------------------------------------- #
- if np.random.uniform() < self.opt["final_sinc_prob"]:
- kernel_size = random.choice(self.kernel_range)
- omega_c = np.random.uniform(np.pi / 3, np.pi)
- sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
- sinc_kernel = torch.FloatTensor(sinc_kernel)
- else:
- sinc_kernel = self.pulse_tensor
-
- # BGR to RGB, HWC to CHW, numpy to tensor
- img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
- kernel = torch.FloatTensor(kernel)
- kernel2 = torch.FloatTensor(kernel2)
-
- return_d = {
- "gt": img_gt,
- "kernel1": kernel,
- "kernel2": kernel2,
- "sinc_kernel": sinc_kernel,
- "gt_path": gt_path,
- }
- return return_d
-
- def __len__(self):
- return len(self.paths)
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/bbsnet_model.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/bbsnet_model.py
deleted file mode 100644
index 66ba239dbdde53ab10d476dc55bb9f3b2c5c73cb..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/bbsnet_model.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import os
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchvision.transforms as transforms
-from PIL import Image
-from torch import Tensor
-
-from app_utils import normalize
-from base_model import BaseRGBDModel
-from BBSNet.BBSNet_model import BBSNet
-from device import cpu_device, device
-
-
-class BBSNetModel(BaseRGBDModel):
- def __init__(self):
- """Wrapper of BBSNet"""
- super(BBSNetModel, self).__init__()
- print('BBSNet constructor')
- self.model = BBSNet()
-
- self.model.load_state_dict(
- torch.load(
- os.path.join('pretrained_models', 'BBSNet', 'BBSNet.pth'),
- map_location=cpu_device
- )
- )
- self.model.to(device)
- self.model.eval()
-
- self.testsize = 352
- self.images_transform = transforms.Compose([
- transforms.Resize((self.testsize, self.testsize)),
- transforms.Normalize(
- [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
- ])
- self.depths_transform = transforms.Compose([
- transforms.Resize((self.testsize, self.testsize)),
- ])
-
- def inference(
- self, image: Tensor, depth: Tensor,
- ) -> np.ndarray:
- origin_shape = image.shape
-
- # 1. Preprocessing
- image: Tensor = self.images_transform(image)
- depth: Tensor = self.depths_transform(depth)
- images = image.unsqueeze(0)
- depths = depth.unsqueeze(0)
-
- # 2. Inference
- images, depths = images.to(device), depths.to(device)
- pred_no_sigmoid = self.model(images, depths)[1]
-
- # 3. Return saliency maps
- res: Tensor = F.interpolate(
- pred_no_sigmoid, size=(origin_shape[1], origin_shape[2]),
- mode='bilinear', align_corners=False
- )
- res = res.sigmoid().data.cpu().numpy().squeeze()
- res = normalize(res)
-
- return res
\ No newline at end of file
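A hedged inference sketch for the wrapper above; it assumes the pretrained weights exist at pretrained_models/BBSNet/BBSNet.pth and that the depth tensor layout matches what BBSNet expects (assumed single-channel here — verify against the BBSNet data loader).

```python
import torch
from bbsnet_model import BBSNetModel  # the wrapper defined above

model = BBSNetModel()
image = torch.rand(3, 480, 640)  # RGB tensor in [0, 1], C x H x W
depth = torch.rand(1, 480, 640)  # depth map, C x H x W (channel count is an assumption)
saliency = model.inference(image, depth)
print(saliency.shape)  # (480, 640) saliency map, normalized to [0, 1]
```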
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py
deleted file mode 100644
index 655a9b0d19d11e35511392a016f9d6b7d7aa2925..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq.models import (
- FairseqEncoder,
- FairseqEncoderModel,
- register_model,
- register_model_architecture,
-)
-from fairseq.modules.fairseq_dropout import FairseqDropout
-
-
-default_conv_enc_config = """[
- (400, 13, 170, 0.2),
- (440, 14, 0, 0.214),
- (484, 15, 0, 0.22898),
- (532, 16, 0, 0.2450086),
- (584, 17, 0, 0.262159202),
- (642, 18, 0, 0.28051034614),
- (706, 19, 0, 0.30014607037),
- (776, 20, 0, 0.321156295296),
- (852, 21, 0, 0.343637235966),
- (936, 22, 0, 0.367691842484),
- (1028, 23, 0, 0.393430271458),
- (1130, 24, 0, 0.42097039046),
- (1242, 25, 0, 0.450438317792),
- (1366, 26, 0, 0.481969000038),
- (1502, 27, 0, 0.51570683004),
- (1652, 28, 0, 0.551806308143),
- (1816, 29, 0, 0.590432749713),
-]"""
-
-
-@register_model("asr_w2l_conv_glu_encoder")
-class W2lConvGluEncoderModel(FairseqEncoderModel):
- def __init__(self, encoder):
- super().__init__(encoder)
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- parser.add_argument(
- "--input-feat-per-channel",
- type=int,
- metavar="N",
- help="encoder input dimension per input channel",
- )
- parser.add_argument(
- "--in-channels",
- type=int,
- metavar="N",
- help="number of encoder input channels",
- )
- parser.add_argument(
- "--conv-enc-config",
- type=str,
- metavar="EXPR",
- help="""
- an array of tuples each containing the configuration of one conv layer
- [(out_channels, kernel_size, padding, dropout), ...]
- """,
- )
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
- conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
- encoder = W2lConvGluEncoder(
- vocab_size=len(task.target_dictionary),
- input_feat_per_channel=args.input_feat_per_channel,
- in_channels=args.in_channels,
- conv_enc_config=eval(conv_enc_config),
- )
- return cls(encoder)
-
- def get_normalized_probs(self, net_output, log_probs, sample=None):
- lprobs = super().get_normalized_probs(net_output, log_probs, sample)
- lprobs.batch_first = False
- return lprobs
-
-
-class W2lConvGluEncoder(FairseqEncoder):
- def __init__(
- self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
- ):
- super().__init__(None)
-
- self.input_dim = input_feat_per_channel
- if in_channels != 1:
- raise ValueError("only 1 input channel is currently supported")
-
- self.conv_layers = nn.ModuleList()
- self.linear_layers = nn.ModuleList()
- self.dropouts = []
- cur_channels = input_feat_per_channel
-
- for out_channels, kernel_size, padding, dropout in conv_enc_config:
- layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
- layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
- self.conv_layers.append(nn.utils.weight_norm(layer))
- self.dropouts.append(
- FairseqDropout(dropout, module_name=self.__class__.__name__)
- )
- if out_channels % 2 != 0:
- raise ValueError("odd # of out_channels is incompatible with GLU")
- cur_channels = out_channels // 2 # halved by GLU
-
- for out_channels in [2 * cur_channels, vocab_size]:
- layer = nn.Linear(cur_channels, out_channels)
- layer.weight.data.mul_(math.sqrt(3))
- self.linear_layers.append(nn.utils.weight_norm(layer))
- cur_channels = out_channels // 2
-
- def forward(self, src_tokens, src_lengths, **kwargs):
-
- """
- src_tokens: padded tensor (B, T, C * feat)
- src_lengths: tensor of original lengths of input utterances (B,)
- """
- B, T, _ = src_tokens.size()
- x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
-
- for layer_idx in range(len(self.conv_layers)):
- x = self.conv_layers[layer_idx](x)
- x = F.glu(x, dim=1)
- x = self.dropouts[layer_idx](x)
-
- x = x.transpose(1, 2).contiguous() # (B, T, 908)
- x = self.linear_layers[0](x)
- x = F.glu(x, dim=2)
- x = self.dropouts[-1](x)
- x = self.linear_layers[1](x)
-
- assert x.size(0) == B
- assert x.size(1) == T
-
- encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
-
- # need to debug this -- find a simpler/elegant way in pytorch APIs
- encoder_padding_mask = (
- torch.arange(T).view(1, T).expand(B, -1).to(x.device)
- >= src_lengths.view(B, 1).expand(-1, T)
- ).t() # (B x T) -> (T x B)
-
- return {
- "encoder_out": encoder_out, # (T, B, vocab_size)
- "encoder_padding_mask": encoder_padding_mask, # (T, B)
- }
-
- def reorder_encoder_out(self, encoder_out, new_order):
- encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
- 1, new_order
- )
- encoder_out["encoder_padding_mask"] = encoder_out[
- "encoder_padding_mask"
- ].index_select(1, new_order)
- return encoder_out
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- return (1e6, 1e6) # an arbitrary large number
-
-
-@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
-def w2l_conv_glu_enc(args):
- args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
- args.in_channels = getattr(args, "in_channels", 1)
- args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
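The padding-mask expression in forward() (flagged with a "need to debug this" comment) marks position t of sequence b as True when t >= src_lengths[b]. A tiny standalone sketch of what it produces:

```python
import torch

B, T = 2, 5
src_lengths = torch.tensor([5, 3])  # second utterance has 2 padded frames
mask = (
    torch.arange(T).view(1, T).expand(B, -1)
    >= src_lengths.view(B, 1).expand(-1, T)
).t()  # (T, B), True at padded positions
print(mask)
# tensor([[False, False],
#         [False, False],
#         [False, False],
#         [False,  True],
#         [False,  True]])
```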
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/fairseq_optimizer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/fairseq_optimizer.py
deleted file mode 100644
index 7e5411753a2ba94f3a7a68316131530b8b17d22a..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/fairseq_optimizer.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-from fairseq import utils
-from fairseq.dataclass.utils import gen_parser_from_dataclass
-
-
-class FairseqOptimizer(object):
- def __init__(self, cfg):
- super().__init__()
- self.cfg = cfg
-
- @classmethod
- def add_args(cls, parser):
- """Add optimizer-specific arguments to the parser."""
- dc = getattr(cls, "__dataclass", None)
- if dc is not None:
- gen_parser_from_dataclass(parser, dc())
-
- @property
- def optimizer(self):
- """Return a torch.optim.optimizer.Optimizer instance."""
- if not hasattr(self, "_optimizer"):
- raise NotImplementedError
- if not isinstance(self._optimizer, torch.optim.Optimizer):
- raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
- return self._optimizer
-
- @optimizer.setter
- def optimizer(self, optimizer):
- """Reset optimizer instance."""
- if not hasattr(self, "_optimizer"):
- raise NotImplementedError
- if not isinstance(self._optimizer, torch.optim.Optimizer):
- raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
- self._optimizer = optimizer
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- """
- raise NotImplementedError
-
- @property
- def params(self):
- """Return an iterable of the parameters held by the optimizer."""
- for param_group in self.param_groups:
- for p in param_group["params"]:
- yield p
-
- @property
- def param_groups(self):
- return self.optimizer.param_groups
-
- def __getstate__(self):
- return self._optimizer.__getstate__()
-
- def get_lr(self):
- """Return the current learning rate."""
- return self.param_groups[0]["lr"]
-
- def set_lr(self, lr):
- """Set the learning rate."""
- for param_group in self.param_groups:
- param_group["lr"] = lr
-
- def state_dict(self):
- """Return the optimizer's state dict."""
- return self.optimizer.state_dict()
-
- def load_state_dict(self, state_dict, optimizer_overrides=None):
- """Load an optimizer state dict.
-
- In general we should prefer the configuration of the existing optimizer
- instance (e.g., learning rate) over that found in the state_dict. This
- allows us to resume training from a checkpoint using a new set of
- optimizer args.
- """
- self.optimizer.load_state_dict(state_dict)
-
- if optimizer_overrides is not None and len(optimizer_overrides) > 0:
- # override learning rate, momentum, etc. with latest values
- for group in self.param_groups:
- group.update(optimizer_overrides)
-
- def backward(self, loss):
- """Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
- loss.backward()
-
- def all_reduce_grads(self, module):
- """Manually all-reduce gradients (if required)."""
- if hasattr(module, "all_reduce_grads"):
- module.all_reduce_grads()
-
- def multiply_grads(self, c):
- """Multiplies grads by a constant *c*."""
- for p in self.params:
- if p.grad is not None:
- if torch.is_tensor(c):
- c = c.to(p.grad.device)
- p.grad.data.mul_(c)
-
- def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
- """Clips gradient norm."""
- return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
-
- def step(self, closure=None, scale=1.0, groups=None):
- """Performs a single optimization step."""
- if self.supports_step_with_scale:
- if self.supports_groups:
- self.optimizer.step(closure, scale=scale, groups=groups)
- else:
- self.optimizer.step(closure, scale=scale)
- else:
- if scale != 1.0:
- self.multiply_grads(1.0 / scale)
- if self.supports_groups:
- self.optimizer.step(closure, groups=groups)
- else:
- self.optimizer.step(closure)
-
- def zero_grad(self):
- """Clears the gradients of all optimized parameters."""
- for p in self.params:
- p.grad = None
- self.optimizer.zero_grad()
-
- @property
- def supports_memory_efficient_fp16(self):
- if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
- return self.optimizer.supports_memory_efficient_fp16
- return False
-
- @property
- def supports_step_with_scale(self):
- if hasattr(self.optimizer, "supports_step_with_scale"):
- return self.optimizer.supports_step_with_scale
- return False
-
- @property
- def supports_groups(self):
- if hasattr(self.optimizer, "supports_groups"):
- return self.optimizer.supports_groups
- return False
-
- @property
- def supports_flat_params(self):
- """
- Whether the optimizer supports collapsing of the model
- parameters/gradients into a single contiguous Tensor.
- """
- if hasattr(self.optimizer, "supports_flat_params"):
- return self.optimizer.supports_flat_params
- return False
-
- def average_params(self):
- pass
-
- def broadcast_global_state_dict(self, state_dict):
- """
- Broadcasts a global state dict to all ranks.
- Useful for optimizers that shard state between ranks.
- """
- if hasattr(self.optimizer, "broadcast_global_state_dict"):
- return self.optimizer.broadcast_global_state_dict(state_dict)
- else:
- return state_dict
-
-
-class LegacyFairseqOptimizer(FairseqOptimizer):
- def __init__(self, args):
- self.args = args
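A minimal sketch of how a concrete optimizer plugs into the base class above. It mirrors the pattern fairseq's built-in optimizers follow (wrap a torch optimizer in `_optimizer` and expose its kwargs via `optimizer_config`), but the class below is illustrative, not part of fairseq.

```python
import torch
from fairseq.optim.fairseq_optimizer import FairseqOptimizer  # the base class above

class SketchSGD(FairseqOptimizer):
    """Illustrative wrapper around torch.optim.SGD."""

    def __init__(self, cfg, params):
        super().__init__(cfg)
        # The base class exposes this through its `optimizer` property.
        self._optimizer = torch.optim.SGD(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        # kwargs used to rebuild the torch optimizer, e.g. when resuming from a checkpoint
        return {'lr': self.cfg.lr[0], 'momentum': getattr(self.cfg, 'momentum', 0.0)}
```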
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
deleted file mode 100644
index 4d5547c39b14f62acbd4f4b9ab3abfb3009c0e6d..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-from typing import Optional, List, Tuple
-from omegaconf import II
-
-from fairseq.dataclass import FairseqDataclass
-from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
-
-
-@dataclass
-class TriStageLRScheduleConfig(FairseqDataclass):
- warmup_steps: int = field(
- default=0,
- metadata={"help": "warmup the learning rate linearly for the first N updates"},
- )
- hold_steps: int = field(
- default=0,
- metadata={"help": "steps in hold stage"},
- )
- decay_steps: int = field(
- default=0,
- metadata={"help": "steps in decay stages"},
- )
- phase_ratio: Optional[Tuple[float, float, float]] = field(
- default=None,
- metadata={
- "help": (
- "if set, automatically sets warmup/hold/decay steps to the ratio "
- "specified here from max_updates. the ratios must add up to 1.0"
- )
- },
- )
- init_lr_scale: float = field(
- default=0.01,
- metadata={"help": "initial learning rate scale during warmup phase"},
- )
- final_lr_scale: float = field(
- default=0.01,
- metadata={"help": "final learning rate scale"},
- )
- max_update: float = II("optimization.max_update")
- lr: List[float] = II("optimization.lr")
-
-
-@register_lr_scheduler("tri_stage", dataclass=TriStageLRScheduleConfig)
-class TriStageLRSchedule(FairseqLRScheduler):
- """Tri-stage learning rate scheduler
-
- Implement the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf
-
- Similar to the inverse_square_root scheduler, but tri_stage learning rate employs
- three-stage LR scheduling:
-
- - warmup stage, starting from `lr` * `init_lr_scale`, linearly
- increased to `lr` in `warmup_steps` iterations
-
- - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps`
- iterations
-
- - decay stage, after hold stage, decay LR exponentially to
- `lr` * `final_lr_scale` in `decay_steps`;
- after that LR is kept as `final_lr_scale` * `lr`
-
- During warmup::
-
- init_lr = cfg.init_lr_scale * cfg.lr
- lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps)
- lr = lrs[update_num]
-
- During hold::
-
- lr = cfg.lr
-
- During decay::
-
- decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps
- lr = cfg.lr * exp(- (update_num - warmup_steps - decay_steps) * decay_factor)
-
- After that::
-
- lr = cfg.lr * cfg.final_lr_scale
- """
-
- def __init__(self, cfg: TriStageLRScheduleConfig, optimizer):
- super().__init__(cfg, optimizer)
- if len(cfg.lr) > 1:
- raise ValueError(
- "Cannot use a fixed learning rate schedule with tri-stage lr."
- " Consider --lr-scheduler=fixed instead."
- )
-
- # calculate LR at each point
- self.peak_lr = cfg.lr[0]
- self.init_lr = cfg.init_lr_scale * cfg.lr[0]
- self.final_lr = cfg.final_lr_scale * cfg.lr[0]
-
- if cfg.phase_ratio is not None:
- assert cfg.max_update > 0
- assert sum(cfg.phase_ratio) == 1, "phase ratios must add up to 1"
- self.warmup_steps = int(cfg.max_update * cfg.phase_ratio[0])
- self.hold_steps = int(cfg.max_update * cfg.phase_ratio[1])
- self.decay_steps = int(cfg.max_update * cfg.phase_ratio[2])
- else:
- self.warmup_steps = cfg.warmup_steps
- self.hold_steps = cfg.hold_steps
- self.decay_steps = cfg.decay_steps
-
- assert (
- self.warmup_steps + self.hold_steps + self.decay_steps > 0
- ), "please specify steps or phase_ratio"
-
- self.warmup_rate = (
- (self.peak_lr - self.init_lr) / self.warmup_steps
- if self.warmup_steps != 0
- else 0
- )
- self.decay_factor = -math.log(cfg.final_lr_scale) / self.decay_steps
-
- # initial learning rate
- self.lr = self.init_lr
- self.optimizer.set_lr(self.lr)
-
- def _decide_stage(self, update_step):
- """
- return stage, and the corresponding steps within the current stage
- """
- if update_step < self.warmup_steps:
- # warmup state
- return 0, update_step
-
- offset = self.warmup_steps
-
- if update_step < offset + self.hold_steps:
- # hold stage
- return 1, update_step - offset
-
- offset += self.hold_steps
-
- if update_step <= offset + self.decay_steps:
- # decay stage
- return 2, update_step - offset
-
- offset += self.decay_steps
-
- # still here ? constant lr stage
- return 3, update_step - offset
-
- def step(self, epoch, val_loss=None):
- """Update the learning rate at the end of the given epoch."""
- super().step(epoch, val_loss)
- # we don't change the learning rate at epoch boundaries
- return self.optimizer.get_lr()
-
- def step_update(self, num_updates):
- """Update the learning rate after each update."""
- stage, steps_in_stage = self._decide_stage(num_updates)
- if stage == 0:
- self.lr = self.init_lr + self.warmup_rate * steps_in_stage
- elif stage == 1:
- self.lr = self.peak_lr
- elif stage == 2:
- self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
- elif stage == 3:
- self.lr = self.final_lr
- else:
- raise ValueError("Undefined stage")
-
- self.optimizer.set_lr(self.lr)
-
- return self.lr
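A standalone sketch of the tri-stage schedule described in the docstring, using illustrative numbers (peak lr 5e-4, 1k warmup / 4k hold / 5k decay updates); it reproduces the warmup, hold, exponential-decay and constant-tail formulas without any fairseq dependencies.

```python
import math

peak_lr, init_scale, final_scale = 5e-4, 0.01, 0.05
warmup, hold, decay = 1000, 4000, 5000
init_lr, final_lr = init_scale * peak_lr, final_scale * peak_lr
decay_factor = -math.log(final_scale) / decay

def tri_stage_lr(step):
    if step < warmup:                    # linear warmup from init_lr to peak_lr
        return init_lr + (peak_lr - init_lr) * step / warmup
    if step < warmup + hold:             # hold at peak_lr
        return peak_lr
    if step <= warmup + hold + decay:    # exponential decay towards final_lr
        return peak_lr * math.exp(-decay_factor * (step - warmup - hold))
    return final_lr                      # constant afterwards

for s in (0, 500, 1000, 3000, 7500, 12000):
    print(s, tri_stage_lr(s))
```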
diff --git a/spaces/Hazem/roop/app.py b/spaces/Hazem/roop/app.py
deleted file mode 100644
index ac81150d2a4acd6d7124fd9c15115ab12892b61a..0000000000000000000000000000000000000000
--- a/spaces/Hazem/roop/app.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: UTF-8 -*-
-# !/usr/bin/env python
-import numpy as np
-import gradio as gr
-import roop.globals
-from roop.core import (
- start,
- decode_execution_providers,
- suggest_max_memory,
- suggest_execution_threads,
-)
-from roop.processors.frame.core import get_frame_processors_modules
-from roop.utilities import normalize_output_path
-import os
-from PIL import Image
-
-
-def swap_face(source_file, target_file):
-
- source_path = "input.jpg"
- target_path = "target.jpg"
-
- source_image = Image.fromarray(source_file)
- source_image.save(source_path)
- target_image = Image.fromarray(target_file)
- target_image.save(target_path)
-
- print("source_path: ", source_path)
- print("target_path: ", target_path)
-
- roop.globals.source_path = source_path
- roop.globals.target_path = target_path
- output_path = "output.jpg"
- roop.globals.output_path = normalize_output_path(
- roop.globals.source_path, roop.globals.target_path, output_path
- )
- roop.globals.frame_processors = ["face_swapper"]
- roop.globals.headless = True
- roop.globals.keep_fps = True
- roop.globals.keep_audio = True
- roop.globals.keep_frames = False
- roop.globals.many_faces = False
- roop.globals.video_encoder = "libx264"
- roop.globals.video_quality = 18
- roop.globals.max_memory = suggest_max_memory()
- roop.globals.execution_providers = decode_execution_providers(["cpu"])
- roop.globals.execution_threads = suggest_execution_threads()
-
- print(
- "start process",
- roop.globals.source_path,
- roop.globals.target_path,
- roop.globals.output_path,
- )
-
- for frame_processor in get_frame_processors_modules(
- roop.globals.frame_processors
- ):
- if not frame_processor.pre_check():
- return
-
- start()
- return output_path
-
-
-app = gr.Interface(
- fn=swap_face, inputs=[gr.Image(), gr.Image()], outputs="image"
-)
-app.launch()
diff --git a/spaces/Hoolbo/bing/Dockerfile b/spaces/Hoolbo/bing/Dockerfile
deleted file mode 100644
index 9c3904bf350eb82f649e0420d1754ec507385379..0000000000000000000000000000000000000000
--- a/spaces/Hoolbo/bing/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM zklcdc/go-proxy-bingai:v1.12.0
-EXPOSE 8080
-CMD ["/app/go-proxy-bingai"]
\ No newline at end of file
diff --git a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/unit2speech/multiproc.py b/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/unit2speech/multiproc.py
deleted file mode 100644
index 2a287a4e97c66acbd36897b25f2ece5494005f03..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/unit2speech/multiproc.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-import time
-import torch
-import sys
-import subprocess
-
-argslist = list(sys.argv)[1:]
-log_dir = argslist[-1]
-num_gpus = torch.cuda.device_count()
-argslist.append('--n_gpus={}'.format(num_gpus))
-workers = []
-job_id = time.strftime("%Y_%m_%d-%H%M%S")
-argslist.append("--group_name=group_{}".format(job_id))
-
-print("GPU log directory is {}".format(log_dir))
-os.makedirs(log_dir, exist_ok=True)
-for i in range(num_gpus):
- argslist.append('--rank={}'.format(i))
- stdout = None if i == 0 else open("{}/{}_GPU_{}.log".format(log_dir, job_id, i),
- "w")
- print(argslist)
- p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
- workers.append(p)
- argslist = argslist[:-1]
-
-for p in workers:
- p.wait()
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/ema/ema.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/ema/ema.py
deleted file mode 100644
index 010b60ba2fd766340d2c5b8ba96f9e57c6fe25b5..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/models/ema/ema.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-This module has the EMA class used to store a copy of the exponentially decayed
-model params.
-
-Typical usage of EMA class involves initializing an object using an existing
-model (random or from a seed model) and setting the config like ema_decay,
-ema_start_update which determine how the EMA model is updated. After every
-update of the model i.e. at the end of the train_step, the EMA should be updated
-by passing the new model to the EMA.step function. The EMA model state dict
-can be stored in the extra state under the key of "ema" and dumped
-into a checkpoint and loaded. The EMA object can be passed to tasks
-by setting task.uses_ema property.
-EMA is a smoothed/ensemble model which might have better performance
-when used for inference or further fine-tuning. EMA class has a
-reverse function to load the EMA params into a model and use it
-like a regular model.
-"""
-
-import copy
-import logging
-
-import torch
-from fairseq import checkpoint_utils
-
-
-class EMA(object):
- """Exponential Moving Average of Fairseq Models
- EMA keeps a copy of the exponentially decayed model params.
- The set of params should include both gradient-descent and
- non-gradient descent params, such as batch mean/var and buffers.
- This is a modified implementation of
- the open source code in https://github.com/zhawe01/fairseq-gec.git,
- and internal source code in
- fbcode/mobile-vision/projects/classification_pytorch/lib/utils/model_ema.py.
-
- Similar to TF EMA.
- https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.
- EMA provides an averaged and smoothed set of model weights, and has been shown to
- improve vision models. EMA class does all necessary functions to update, reload,
- or init EMA methods.
-
- EMA object is initialized from an arbitrary model. By default, it is stored in
- the same device (unless device specified at initialization) and with the
- same precision as the model (unless ema_fp32 is True). ema_fp32 is recommended.
- This stores the EMA parameters in fp32 only for the EMA update step, and
- is used at the default precision otherwise.
- EMA is usually enabled using EMAConfig with store_ema=True. Some important
- parameters to configure EMA are
- 1) ema_decay - The decay of EMA
- 2) ema_update_freq - EMA is updated every this many model updates.
- 3) ema_start_update - Start EMA update after this many model updates [default 0]
-
- Key methods:
- 1) step - One update of EMA using new model
- 2) restore - Update EMA from a state dict
- 3) reverse - Load EMA into a model
- 4) get_decay, _set_decay - Used to get or set the decay. Note _set_decay is
- called from step.
- 5) build_fp32_params - Used to initialize or update the fp32 copy of EMA params.
- Note this is enabled only when ema_fp32=True
- """
-
- def __init__(self, model, config, device=None):
- """
- @param model model to initialize the EMA with
- @param config EMAConfig object with configuration like
- ema_decay, ema_update_freq, ema_fp32
- @param device If provided, copy EMA to this device (e.g. gpu).
- Otherwise EMA is in the same device as the model.
- """
-
- self.decay = config.ema_decay
- self.model = copy.deepcopy(model)
- self.model.requires_grad_(False)
- self.config = config
- self.fp32_params = {}
-
- if self.config.ema_seed_model is not None:
- state = checkpoint_utils.load_ema_from_checkpoint(self.config.ema_seed_model)
- self.model.load_state_dict(state["model"], strict=True)
-
- if device is not None:
- logging.info(f"Copying EMA model to device {device}")
- self.model = self.model.to(device=device)
-
- if self.config.ema_fp32:
- self.build_fp32_params()
-
- self.update_freq_counter = 0
-
- def get_model(self):
- return self.model
-
- def build_fp32_params(self, state_dict=None):
- """
- Store a copy of the EMA params in fp32.
- If state dict is passed, the EMA params is copied from
- the provided state dict. Otherwise, it is copied from the
- current EMA model parameters.
- """
- if not self.config.ema_fp32:
- raise RuntimeError(
- "build_fp32_params should not be called if ema_fp32=False. "
- "Use ema_fp32=True if this is really intended."
- )
-
- if state_dict is None:
- state_dict = self.model.state_dict()
-
- def _to_float(t):
- return t.float() if torch.is_floating_point(t) else t
-
- # for non-float params (like registered symbols), they are copied into this dict and covered in each update
- for param_key in state_dict:
- if param_key in self.fp32_params:
- self.fp32_params[param_key].copy_(state_dict[param_key])
- else:
- self.fp32_params[param_key] = _to_float(state_dict[param_key])
-
- def restore(self, state_dict, build_fp32_params=False):
- """ Load data from a model spec into EMA model """
- self.model.load_state_dict(state_dict, strict=False)
- if build_fp32_params:
- self.build_fp32_params(state_dict)
-
- def _set_decay(self, decay):
- self.decay = decay
-
- def get_decay(self):
- return self.decay
-
- def _step_internal(self, new_model, updates=None):
- """ One update of the EMA model based on new model weights """
- decay = self.decay
-
- ema_state_dict = {}
- ema_params = self.fp32_params if self.config.ema_fp32 else self.model.state_dict()
- for key, param in new_model.state_dict().items():
- try:
- ema_param = ema_params[key]
- except KeyError:
- ema_param = param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
-
- if param.shape != ema_param.shape:
- raise ValueError(
- "incompatible tensor shapes between model param and ema param"
- + "{} vs. {}".format(param.shape, ema_param.shape)
- )
- if "version" in key:
- # Do not decay a model.version pytorch param
- continue
-
- # for non-float params (like registered symbols), they are covered in each update
- if not torch.is_floating_point(ema_param):
- if ema_param.dtype != param.dtype:
- raise ValueError(
- "incompatible tensor dtypes between model param and ema param"
- + "{} vs. {}".format(param.dtype, ema_param.dtype)
- )
- ema_param.copy_(param)
- else:
- ema_param.mul_(decay)
- ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1-decay)
- ema_state_dict[key] = ema_param
- self.restore(ema_state_dict, build_fp32_params=False)
-
- def step(self, new_model, updates=None):
- """
- One update of EMA which is done every self.config.ema_update_freq
- updates of the model.
-
- @param updates The current number of model updates done.
- Decay is set to 0 if model updates < ema_start_update, which means
- the model will be simply copied over to the EMA.
- When model updates >= ema_start_update, then EMA is updated with
- a decay of self.config.ema_decay.
- """
- self._set_decay(
- 0
- if updates is not None
- and updates < self.config.ema_start_update
- else self.config.ema_decay
- )
- if updates is not None and self.config.ema_update_freq > 1:
- self.update_freq_counter += 1
- if self.update_freq_counter >= self.config.ema_update_freq:
- self._step_internal(new_model, updates)
- self.update_freq_counter = 0
- else:
- self._step_internal(new_model, updates)
-
- def reverse(self, model):
- """
- Load the model parameters from EMA model.
- Useful for inference or fine-tuning from the EMA model.
- """
- model.load_state_dict(self.model.state_dict(), strict=False)
- return model
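For floating-point parameters, `_step_internal` reduces to the usual exponential moving average, ema = decay * ema + (1 - decay) * param. A minimal sketch of that update on bare tensors, independent of the fairseq config plumbing above:

```python
import torch

decay = 0.999  # illustrative stand-in for cfg.ema_decay

ema_param = torch.zeros(4)
for step in range(1, 4):
    model_param = torch.full((4,), float(step))  # pretend the weight keeps changing
    # Same arithmetic as EMA._step_internal uses for float tensors.
    ema_param.mul_(decay).add_(model_param, alpha=1 - decay)
    print(step, ema_param)
```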
diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/hubert/hubert_model.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/hubert/hubert_model.py
deleted file mode 100644
index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000
--- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/hubert/hubert_model.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
- def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- x, mask = self.encode(x)
- x = self.proj(x)
- logits = self.logits(x)
- return logits, mask
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- @torch.inference_mode()
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
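A minimal usage sketch for the module above, assuming a soft-HuBERT checkpoint is available locally (the path is a placeholder) and 16 kHz mono input shaped (batch, 1, samples):

```python
import torch

model = hubert_soft("checkpoints/hubert-soft.pt")  # hypothetical checkpoint path

wav = torch.randn(1, 1, 16000)   # one second of fake 16 kHz audio
units = model.units(wav)         # units() already runs in inference mode
print(units.shape)               # roughly (1, 50, 256): frames x soft-unit dimension
```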
diff --git a/spaces/Illumotion/Koboldcpp/otherarch/gpt2_v1.cpp b/spaces/Illumotion/Koboldcpp/otherarch/gpt2_v1.cpp
deleted file mode 100644
index 98377ffcb04400d195aa89c643441c40e9a654d1..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/otherarch/gpt2_v1.cpp
+++ /dev/null
@@ -1,626 +0,0 @@
-#include "ggml_v1.h"
-#include "otherarch.h"
-
-#include "utils.h"
-
-#include
-#include
-#include
-#include
-#include
-#include
-
Conclusion
-
Rocket League Sideswipe is a mobile version of the popular car soccer game Rocket League. It has been redesigned for mobile devices, with simplified controls, shorter matches, and a 2D perspective. However, it still retains the core gameplay and features of Rocket League, such as car soccer, customization, online multiplayer, and seasons. You can download and install the game from the official app stores, or from a third-party website that offers the modded APK file with everything unlocked. However, the latter method is not recommended, as it may have some drawbacks and risks. You can play and enjoy the game by learning the controls, practicing your moves, customizing your car, unlocking items with the Rocket Pass, and playing online with friends or other players around the world. Rocket League Sideswipe is a fun and addictive game that will keep you entertained for hours.
-
FAQs
-
Here are some frequently asked questions about Rocket League Sideswipe APK todo desbloqueado:
-
-
What is the difference between Rocket League and Rocket League Sideswipe?
-
Rocket League is the original game that is available for PC and consoles, while Rocket League Sideswipe is the mobile version that is available for Android and iOS devices. Rocket League Sideswipe has been adapted for mobile devices, with simplified controls, shorter matches, and a 2D perspective. However, it still has the same gameplay and features as Rocket League.
-
Is Rocket League Sideswipe free to play?
-
Yes, Rocket League Sideswipe is free to play and download from the official app stores. However, it has some optional in-app purchases, such as credits that can be used to buy premium items or tiers in the Rocket Pass.
-
How can I play Rocket League Sideswipe with my friends?
-
You can play Rocket League Sideswipe with your friends in private matches or online multiplayer mode. To play in private matches, you need to create or join a room code that you can share with your friends. To play in online multiplayer mode, you need to invite your friends to your party or join their party.
-
How can I update Rocket League Sideswipe to the latest version?
-
If you downloaded the game from the official app stores, you can update it automatically or manually from the app store. If you downloaded the modded APK file from a third-party website, you may not be able to update it to the latest version. You may need to download and install a new modded APK file from the same or another website.
-
How can I contact Psyonix Studios for support or feedback?
-
You can contact Psyonix Studios for support or feedback by visiting their official website or social media accounts. You can also use the in-game feedback option or email them at support@psyonix.com.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/base_callbacks.py b/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/base_callbacks.py
deleted file mode 100644
index ef62dd591f1516aa41e2ba347cc3aaa558854f8d..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/base_callbacks.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import logging
-import os
-from typing import NoReturn
-
-import pytorch_lightning as pl
-import torch
-import torch.nn as nn
-from pytorch_lightning.utilities import rank_zero_only
-
-
-class SaveCheckpointsCallback(pl.Callback):
- def __init__(
- self,
- model: nn.Module,
- checkpoints_dir: str,
- save_step_frequency: int,
- ):
- r"""Callback to save checkpoints every #save_step_frequency steps.
-
- Args:
- model: nn.Module
- checkpoints_dir: str, directory to save checkpoints
- save_step_frequency: int
- """
- self.model = model
- self.checkpoints_dir = checkpoints_dir
- self.save_step_frequency = save_step_frequency
- os.makedirs(self.checkpoints_dir, exist_ok=True)
-
- @rank_zero_only
- def on_batch_end(self, trainer: pl.Trainer, _) -> NoReturn:
- r"""Save checkpoint."""
- global_step = trainer.global_step
-
- if global_step % self.save_step_frequency == 0:
-
- checkpoint_path = os.path.join(
- self.checkpoints_dir, "step={}.pth".format(global_step)
- )
-
- checkpoint = {'step': global_step, 'model': self.model.state_dict()}
-
- torch.save(checkpoint, checkpoint_path)
- logging.info("Save checkpoint to {}".format(checkpoint_path))
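A sketch of how this callback might be wired into a Lightning trainer; the linear layer stands in for the real separation model, and the directory and frequency values are illustrative:

```python
import torch.nn as nn
import pytorch_lightning as pl

net = nn.Linear(4, 2)  # stand-in for the model whose weights should be snapshotted
callback = SaveCheckpointsCallback(
    model=net,
    checkpoints_dir="./workspaces/bytesep/checkpoints",
    save_step_frequency=10_000,
)
# During trainer.fit(...) the callback writes step=<N>.pth files every 10k steps.
trainer = pl.Trainer(callbacks=[callback], max_steps=100_000)
```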
diff --git a/spaces/fffiloni/Music_Source_Separation/scripts/2_create_indexes/vctk-musdb18/create_indexes.sh b/spaces/fffiloni/Music_Source_Separation/scripts/2_create_indexes/vctk-musdb18/create_indexes.sh
deleted file mode 100644
index e2a85230b2745cedb2c98a34ed303082bb1ec48a..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Music_Source_Separation/scripts/2_create_indexes/vctk-musdb18/create_indexes.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-WORKSPACE=${1:-"./workspaces/bytesep"} # Default workspace directory
-
-echo "WORKSPACE=${WORKSPACE}"
-
-# Users can modify the following config file.
-INDEXES_CONFIG_YAML="scripts/2_create_indexes/vctk-musdb18/configs/speech-accompaniment,sr=44100,chn=2.yaml"
-
-# Create indexes for training.
-python3 bytesep/dataset_creation/create_indexes/create_indexes.py \
- --workspace=$WORKSPACE \
- --config_yaml=$INDEXES_CONFIG_YAML
diff --git a/spaces/fffiloni/SplitTrack2MusicGen/tests/data/test_audio_utils.py b/spaces/fffiloni/SplitTrack2MusicGen/tests/data/test_audio_utils.py
deleted file mode 100644
index 0480671bb17281d61ce02bce6373a5ccec89fece..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/SplitTrack2MusicGen/tests/data/test_audio_utils.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import julius
-import torch
-import pytest
-
-from audiocraft.data.audio_utils import (
- _clip_wav,
- convert_audio_channels,
- convert_audio,
- normalize_audio
-)
-from ..common_utils import get_batch_white_noise
-
-
-class TestConvertAudioChannels:
-
- def test_convert_audio_channels_downmix(self):
- b, c, t = 2, 3, 100
- audio = get_batch_white_noise(b, c, t)
- mixed = convert_audio_channels(audio, channels=2)
- assert list(mixed.shape) == [b, 2, t]
-
- def test_convert_audio_channels_nochange(self):
- b, c, t = 2, 3, 100
- audio = get_batch_white_noise(b, c, t)
- mixed = convert_audio_channels(audio, channels=c)
- assert list(mixed.shape) == list(audio.shape)
-
- def test_convert_audio_channels_upmix(self):
- b, c, t = 2, 1, 100
- audio = get_batch_white_noise(b, c, t)
- mixed = convert_audio_channels(audio, channels=3)
- assert list(mixed.shape) == [b, 3, t]
-
- def test_convert_audio_channels_upmix_error(self):
- b, c, t = 2, 2, 100
- audio = get_batch_white_noise(b, c, t)
- with pytest.raises(ValueError):
- convert_audio_channels(audio, channels=3)
-
-
-class TestConvertAudio:
-
- def test_convert_audio_channels_downmix(self):
- b, c, dur = 2, 3, 4.
- sr = 128
- audio = get_batch_white_noise(b, c, int(sr * dur))
- out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2)
- assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]]
-
- def test_convert_audio_channels_upmix(self):
- b, c, dur = 2, 1, 4.
- sr = 128
- audio = get_batch_white_noise(b, c, int(sr * dur))
- out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3)
- assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]]
-
- def test_convert_audio_upsample(self):
- b, c, dur = 2, 1, 4.
- sr = 2
- new_sr = 3
- audio = get_batch_white_noise(b, c, int(sr * dur))
- out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
- out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
- assert torch.allclose(out, out_j)
-
- def test_convert_audio_resample(self):
- b, c, dur = 2, 1, 4.
- sr = 3
- new_sr = 2
- audio = get_batch_white_noise(b, c, int(sr * dur))
- out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
- out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
- assert torch.allclose(out, out_j)
-
-
-class TestNormalizeAudio:
-
- def test_clip_wav(self):
- b, c, dur = 2, 1, 4.
- sr = 3
- audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
- _clip_wav(audio)
- assert audio.abs().max() <= 1
-
- def test_normalize_audio_clip(self):
- b, c, dur = 2, 1, 4.
- sr = 3
- audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
- norm_audio = normalize_audio(audio, strategy='clip')
- assert norm_audio.abs().max() <= 1
-
- def test_normalize_audio_rms(self):
- b, c, dur = 2, 1, 4.
- sr = 3
- audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
- norm_audio = normalize_audio(audio, strategy='rms')
- assert norm_audio.abs().max() <= 1
-
- def test_normalize_audio_peak(self):
- b, c, dur = 2, 1, 4.
- sr = 3
- audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
- norm_audio = normalize_audio(audio, strategy='peak')
- assert norm_audio.abs().max() <= 1
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/bigint.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/bigint.js
deleted file mode 100644
index 4ecc31df8ab3fd311570d866b418514d159abfde..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/bigint.js
+++ /dev/null
@@ -1,58 +0,0 @@
-'use strict';
-
-var inspect = require('../');
-var test = require('tape');
-var hasToStringTag = require('has-tostringtag/shams')();
-
-test('bigint', { skip: typeof BigInt === 'undefined' }, function (t) {
- t.test('primitives', function (st) {
- st.plan(3);
-
- st.equal(inspect(BigInt(-256)), '-256n');
- st.equal(inspect(BigInt(0)), '0n');
- st.equal(inspect(BigInt(256)), '256n');
- });
-
- t.test('objects', function (st) {
- st.plan(3);
-
- st.equal(inspect(Object(BigInt(-256))), 'Object(-256n)');
- st.equal(inspect(Object(BigInt(0))), 'Object(0n)');
- st.equal(inspect(Object(BigInt(256))), 'Object(256n)');
- });
-
- t.test('syntactic primitives', function (st) {
- st.plan(3);
-
- /* eslint-disable no-new-func */
- st.equal(inspect(Function('return -256n')()), '-256n');
- st.equal(inspect(Function('return 0n')()), '0n');
- st.equal(inspect(Function('return 256n')()), '256n');
- });
-
- t.test('toStringTag', { skip: !hasToStringTag }, function (st) {
- st.plan(1);
-
- var faker = {};
- faker[Symbol.toStringTag] = 'BigInt';
- st.equal(
- inspect(faker),
- '{ [Symbol(Symbol.toStringTag)]: \'BigInt\' }',
- 'object lying about being a BigInt inspects as an object'
- );
- });
-
- t.test('numericSeparator', function (st) {
- st.equal(inspect(BigInt(0), { numericSeparator: false }), '0n', '0n, numericSeparator false');
- st.equal(inspect(BigInt(0), { numericSeparator: true }), '0n', '0n, numericSeparator true');
-
- st.equal(inspect(BigInt(1234), { numericSeparator: false }), '1234n', '1234n, numericSeparator false');
- st.equal(inspect(BigInt(1234), { numericSeparator: true }), '1_234n', '1234n, numericSeparator true');
- st.equal(inspect(BigInt(-1234), { numericSeparator: false }), '-1234n', '1234n, numericSeparator false');
- st.equal(inspect(BigInt(-1234), { numericSeparator: true }), '-1_234n', '1234n, numericSeparator true');
-
- st.end();
- });
-
- t.end();
-});
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/cjs/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/cjs/index.js
deleted file mode 100644
index fc99bbfae2f7f645e0db479003ad7d1afab82849..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/cjs/index.js
+++ /dev/null
@@ -1,303 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.Decoder = exports.Encoder = exports.PacketType = exports.protocol = void 0;
-const component_emitter_1 = require("@socket.io/component-emitter");
-const binary_js_1 = require("./binary.js");
-const is_binary_js_1 = require("./is-binary.js");
-const debug_1 = require("debug"); // debug()
-const debug = (0, debug_1.default)("socket.io-parser"); // debug()
-/**
- * Protocol version.
- *
- * @public
- */
-exports.protocol = 5;
-var PacketType;
-(function (PacketType) {
- PacketType[PacketType["CONNECT"] = 0] = "CONNECT";
- PacketType[PacketType["DISCONNECT"] = 1] = "DISCONNECT";
- PacketType[PacketType["EVENT"] = 2] = "EVENT";
- PacketType[PacketType["ACK"] = 3] = "ACK";
- PacketType[PacketType["CONNECT_ERROR"] = 4] = "CONNECT_ERROR";
- PacketType[PacketType["BINARY_EVENT"] = 5] = "BINARY_EVENT";
- PacketType[PacketType["BINARY_ACK"] = 6] = "BINARY_ACK";
-})(PacketType = exports.PacketType || (exports.PacketType = {}));
-/**
- * A socket.io Encoder instance
- */
-class Encoder {
- /**
- * Encoder constructor
- *
- * @param {function} replacer - custom replacer to pass down to JSON.stringify
- */
- constructor(replacer) {
- this.replacer = replacer;
- }
- /**
- * Encode a packet as a single string if non-binary, or as a
- * buffer sequence, depending on packet type.
- *
- * @param {Object} obj - packet object
- */
- encode(obj) {
- debug("encoding packet %j", obj);
- if (obj.type === PacketType.EVENT || obj.type === PacketType.ACK) {
- if ((0, is_binary_js_1.hasBinary)(obj)) {
- return this.encodeAsBinary({
- type: obj.type === PacketType.EVENT
- ? PacketType.BINARY_EVENT
- : PacketType.BINARY_ACK,
- nsp: obj.nsp,
- data: obj.data,
- id: obj.id,
- });
- }
- }
- return [this.encodeAsString(obj)];
- }
- /**
- * Encode packet as string.
- */
- encodeAsString(obj) {
- // first is type
- let str = "" + obj.type;
- // attachments if we have them
- if (obj.type === PacketType.BINARY_EVENT ||
- obj.type === PacketType.BINARY_ACK) {
- str += obj.attachments + "-";
- }
- // if we have a namespace other than `/`
- // we append it followed by a comma `,`
- if (obj.nsp && "/" !== obj.nsp) {
- str += obj.nsp + ",";
- }
- // immediately followed by the id
- if (null != obj.id) {
- str += obj.id;
- }
- // json data
- if (null != obj.data) {
- str += JSON.stringify(obj.data, this.replacer);
- }
- debug("encoded %j as %s", obj, str);
- return str;
- }
- /**
- * Encode packet as 'buffer sequence' by removing blobs, and
- * deconstructing packet into object with placeholders and
- * a list of buffers.
- */
- encodeAsBinary(obj) {
- const deconstruction = (0, binary_js_1.deconstructPacket)(obj);
- const pack = this.encodeAsString(deconstruction.packet);
- const buffers = deconstruction.buffers;
- buffers.unshift(pack); // add packet info to beginning of data list
- return buffers; // write all the buffers
- }
-}
-exports.Encoder = Encoder;
-/**
- * A socket.io Decoder instance
- *
- * @return {Object} decoder
- */
-class Decoder extends component_emitter_1.Emitter {
- /**
- * Decoder constructor
- *
- * @param {function} reviver - custom reviver to pass down to JSON.parse
- */
- constructor(reviver) {
- super();
- this.reviver = reviver;
- }
- /**
- * Decodes an encoded packet string into packet JSON.
- *
- * @param {String} obj - encoded packet
- */
- add(obj) {
- let packet;
- if (typeof obj === "string") {
- if (this.reconstructor) {
- throw new Error("got plaintext data when reconstructing a packet");
- }
- packet = this.decodeString(obj);
- const isBinaryEvent = packet.type === PacketType.BINARY_EVENT;
- if (isBinaryEvent || packet.type === PacketType.BINARY_ACK) {
- packet.type = isBinaryEvent ? PacketType.EVENT : PacketType.ACK;
- // binary packet's json
- this.reconstructor = new BinaryReconstructor(packet);
- // no attachments, labeled binary but no binary data to follow
- if (packet.attachments === 0) {
- super.emitReserved("decoded", packet);
- }
- }
- else {
- // non-binary full packet
- super.emitReserved("decoded", packet);
- }
- }
- else if ((0, is_binary_js_1.isBinary)(obj) || obj.base64) {
- // raw binary data
- if (!this.reconstructor) {
- throw new Error("got binary data when not reconstructing a packet");
- }
- else {
- packet = this.reconstructor.takeBinaryData(obj);
- if (packet) {
- // received final buffer
- this.reconstructor = null;
- super.emitReserved("decoded", packet);
- }
- }
- }
- else {
- throw new Error("Unknown type: " + obj);
- }
- }
- /**
- * Decode a packet String (JSON data)
- *
- * @param {String} str
- * @return {Object} packet
- */
- decodeString(str) {
- let i = 0;
- // look up type
- const p = {
- type: Number(str.charAt(0)),
- };
- if (PacketType[p.type] === undefined) {
- throw new Error("unknown packet type " + p.type);
- }
- // look up attachments if type binary
- if (p.type === PacketType.BINARY_EVENT ||
- p.type === PacketType.BINARY_ACK) {
- const start = i + 1;
- while (str.charAt(++i) !== "-" && i != str.length) { }
- const buf = str.substring(start, i);
- if (buf != Number(buf) || str.charAt(i) !== "-") {
- throw new Error("Illegal attachments");
- }
- p.attachments = Number(buf);
- }
- // look up namespace (if any)
- if ("/" === str.charAt(i + 1)) {
- const start = i + 1;
- while (++i) {
- const c = str.charAt(i);
- if ("," === c)
- break;
- if (i === str.length)
- break;
- }
- p.nsp = str.substring(start, i);
- }
- else {
- p.nsp = "/";
- }
- // look up id
- const next = str.charAt(i + 1);
- if ("" !== next && Number(next) == next) {
- const start = i + 1;
- while (++i) {
- const c = str.charAt(i);
- if (null == c || Number(c) != c) {
- --i;
- break;
- }
- if (i === str.length)
- break;
- }
- p.id = Number(str.substring(start, i + 1));
- }
- // look up json data
- if (str.charAt(++i)) {
- const payload = this.tryParse(str.substr(i));
- if (Decoder.isPayloadValid(p.type, payload)) {
- p.data = payload;
- }
- else {
- throw new Error("invalid payload");
- }
- }
- debug("decoded %s as %j", str, p);
- return p;
- }
- tryParse(str) {
- try {
- return JSON.parse(str, this.reviver);
- }
- catch (e) {
- return false;
- }
- }
- static isPayloadValid(type, payload) {
- switch (type) {
- case PacketType.CONNECT:
- return typeof payload === "object";
- case PacketType.DISCONNECT:
- return payload === undefined;
- case PacketType.CONNECT_ERROR:
- return typeof payload === "string" || typeof payload === "object";
- case PacketType.EVENT:
- case PacketType.BINARY_EVENT:
- return Array.isArray(payload) && payload.length > 0;
- case PacketType.ACK:
- case PacketType.BINARY_ACK:
- return Array.isArray(payload);
- }
- }
- /**
- * Deallocates a parser's resources
- */
- destroy() {
- if (this.reconstructor) {
- this.reconstructor.finishedReconstruction();
- this.reconstructor = null;
- }
- }
-}
-exports.Decoder = Decoder;
-/**
- * A manager of a binary event's 'buffer sequence'. Should
- * be constructed whenever a packet of type BINARY_EVENT is
- * decoded.
- *
- * @param {Object} packet
- * @return {BinaryReconstructor} initialized reconstructor
- */
-class BinaryReconstructor {
- constructor(packet) {
- this.packet = packet;
- this.buffers = [];
- this.reconPack = packet;
- }
- /**
- * Method to be called when binary data received from connection
- * after a BINARY_EVENT packet.
- *
- * @param {Buffer | ArrayBuffer} binData - the raw binary data received
- * @return {null | Object} returns null if more binary data is expected or
- * a reconstructed packet object if all buffers have been received.
- */
- takeBinaryData(binData) {
- this.buffers.push(binData);
- if (this.buffers.length === this.reconPack.attachments) {
- // done with buffer list
- const packet = (0, binary_js_1.reconstructPacket)(this.reconPack, this.buffers);
- this.finishedReconstruction();
- return packet;
- }
- return null;
- }
- /**
- * Cleans up binary packet reconstruction variables.
- */
- finishedReconstruction() {
- this.reconPack = null;
- this.buffers = [];
- }
-}
diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_63.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_63.py
deleted file mode 100644
index 8516dee445a1c65c255c1aa334507c28a84ed7d2..0000000000000000000000000000000000000000
--- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_63.py
+++ /dev/null
@@ -1,35 +0,0 @@
-
-import re
-
-def is_spam(message: str) -> bool:
- # Checking for spam URL patterns
- spam_url_patterns = [
- r"(?i)https?:\/\/(?:me2\.kr|buly\.kr|opcn\-kakao.com|han.gl|abit\.ly)/\S*",
- r"(?i)ⓢlⓩ102\.com",
- r"(?i)orl\.kr\/\S*",
- r"(?i)https?://openkakao.io/\S*"
- ]
-
- for pattern in spam_url_patterns:
- if re.search(pattern, message):
- return True
-
- # Checking for other spam patterns
- spam_patterns = [
- r"(?i)(vip|vvip)투자반",
- r"(?i)차별화 된",
- r"(?i)시작하루만에",
- r"(?i)추천주 현황",
- r"(?i)slot🎰zone",
- r"(?i)지니틱스",
- r"(?i)카카오톡제재",
- r"(?i)[5일평균].*[8,930.000원]",
- r"(?i)문의▼",
- ]
-
- for pattern in spam_patterns:
- if re.search(pattern, message):
- return True
-
- # If none of the spam patterns are present
- return False
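A quick usage sketch for the filter above; both messages are made up, the second deliberately matching the shortened-URL and 투자반 patterns:

```python
print(is_spam("내일 회의 2시에 시작합니다"))            # False: no pattern matches
print(is_spam("VIP투자반 입장 >> https://han.gl/abc"))  # True: URL + keyword hit
```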
diff --git a/spaces/fiz123321/nah/greeting.md b/spaces/fiz123321/nah/greeting.md
deleted file mode 100644
index 6c2c45107629147b25bbb2a17d9f297702132769..0000000000000000000000000000000000000000
--- a/spaces/fiz123321/nah/greeting.md
+++ /dev/null
@@ -1,3 +0,0 @@
-<3
-https://rentry.org/miniproxy
-
\ No newline at end of file
diff --git a/spaces/flax-community/roberta-base-mr/apps/mlm.py b/spaces/flax-community/roberta-base-mr/apps/mlm.py
deleted file mode 100644
index 7f5389e0f63c463c45d111cc52807436b14e7589..0000000000000000000000000000000000000000
--- a/spaces/flax-community/roberta-base-mr/apps/mlm.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import json
-
-import streamlit as st
-from transformers import AutoTokenizer, RobertaForMaskedLM, pipeline
-
-with open("config.json", encoding="utf8") as f:
- cfg = json.loads(f.read())
-
-
-@st.cache(allow_output_mutation=True, show_spinner=False)
-def load_model(input_text, model_name_or_path):
- tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
- model = RobertaForMaskedLM.from_pretrained(model_name_or_path)
-
- nlp = pipeline("fill-mask", model=model, tokenizer=tokenizer)
- result = nlp(input_text)
- sentence, mask = result[0]["sequence"], result[0]["token_str"]
- return sentence, mask, result
-
-
-def app():
- st.title("RoBERTa Marathi - मराठी भाषा")
-
- st.markdown(
- "This demo uses [RoBERTa for Marathi](https://huggingface.co/flax-community/roberta-base-mr) model "
- "trained on [mC4](https://huggingface.co/datasets/mc4)."
- )
-
- st.markdown(
- "❓Can't figure out where to get a sample text other than the predefined ones?❓\n\n"
- "Use any custom sentence with a masked word or copy any headline from this [link](https://maharashtratimes.com/entertainment/articlelist/19359255.cms), and mask a word.\n"
- "> 📒 NOTE: Supports only a single `<mask>` word"
- )
-
- masked_texts = [
- "मोठी बातमी! उद्या दुपारी वाजता जाहीर होणार दहावीचा निकाल",
- "जॉनी लीवर यांनी नम्रता संभेरावला दिलं गिफ्ट, अभिनेत्रीने व्यक्त केल्या भावना"
- # "अध्यक्ष पवार आणि उपमुख्यमंत्री अजित पवार यांची भेट घेतली.",
- ]
-
- input_text = st.sidebar.selectbox("Select a Text", options=masked_texts)
- masked_text = st.text_input("Please type a masked sentence to fill", input_text)
-
- fill_button = st.button("Fill the Mask!")
-
- if fill_button:
- with st.spinner("Filling the Mask..."):
- filled_sentence, mask, raw_json = load_model(masked_text, cfg["models"]["RoBERTa"])
-
- st.markdown(f"**Filled sentence: **{filled_sentence}")
- st.markdown(f"**Predicted masked token: **{mask}")
-
- st.write(raw_json)
diff --git a/spaces/freshield/ChatGPT-gradio/OpenaiBot.py b/spaces/freshield/ChatGPT-gradio/OpenaiBot.py
deleted file mode 100644
index 81734a5986aff1be9ea3490fd786262daf8de07d..0000000000000000000000000000000000000000
--- a/spaces/freshield/ChatGPT-gradio/OpenaiBot.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# coding=utf-8
-"""
-@Author: Freshield
-@Contact: yangyufresh@163.com
-@File: OpenaiBot.py
-@Time: 2023-03-03 17:47
-@Last_update: 2023-03-03 17:47
-@Desc: None
-@==============================================@
-@ _____ _ _ _ _ @
-@ | __|___ ___ ___| |_|_|___| |_| | @
-@ | __| _| -_|_ -| | | -_| | . | @
-@ |__| |_| |___|___|_|_|_|___|_|___| @
-@ Freshield @
-@==============================================@
-"""
-import os
-import openai
-
-
-class OpenaiBot(object):
- """A chat bot that wraps the OpenAI API."""
- def __init__(self, temperature=0.5):
- openai.api_key = os.environ.get('OPENAI_API_KEY')
- self.model_engine = "gpt-3.5-turbo"
- self.temperature = temperature
-
- def set_api_key(self, api_key):
- """Set the OpenAI API key."""
- openai.api_key = api_key
-
- def construct_message(self, role, new_msg, history_list, keep_history=3):
- """
- Build the message list; history_list is a list whose elements are (user, assistant) tuples.
- """
- msg_list = [{"role": "system", "content": role}]
- history_list = history_list[-keep_history:]
- for user, assistant in history_list:
- msg_list.append({"role": "user", "content": user})
- msg_list.append({"role": "assistant", "content": assistant})
- msg_list.append({"role": "user", "content": new_msg})
-
- return msg_list
-
- def get_response(self, role, new_msg, history_list, keep_history=3):
- """
- Get a reply from the OpenAI chat completion API.
- """
- msg_list = self.construct_message(role, new_msg, history_list, keep_history)
- response = openai.ChatCompletion.create(
- model=self.model_engine, messages=msg_list,
- temperature=self.temperature
- )
- content = response['choices'][0]['message']['content']
-
- return content
-
-
-if __name__ == '__main__':
- openai_bot = OpenaiBot()
diff --git a/spaces/fuckyoudeki/AutoGPT/autogpt/memory/milvus.py b/spaces/fuckyoudeki/AutoGPT/autogpt/memory/milvus.py
deleted file mode 100644
index 44aa72b956224fa4c2a16d5f40b0eaeb35e98581..0000000000000000000000000000000000000000
--- a/spaces/fuckyoudeki/AutoGPT/autogpt/memory/milvus.py
+++ /dev/null
@@ -1,115 +0,0 @@
-""" Milvus memory storage provider."""
-from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
-
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
-
-
-class MilvusMemory(MemoryProviderSingleton):
- """Milvus memory storage provider."""
-
- def __init__(self, cfg) -> None:
- """Construct a milvus memory storage connection.
-
- Args:
- cfg (Config): Auto-GPT global config.
- """
- # connect to milvus server.
- connections.connect(address=cfg.milvus_addr)
- fields = [
- FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
- FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
- FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
- ]
-
- # create collection if not exist and load it.
- self.milvus_collection = cfg.milvus_collection
- self.schema = CollectionSchema(fields, "auto-gpt memory storage")
- self.collection = Collection(self.milvus_collection, self.schema)
- # create index if not exist.
- if not self.collection.has_index():
- self.collection.release()
- self.collection.create_index(
- "embeddings",
- {
- "metric_type": "IP",
- "index_type": "HNSW",
- "params": {"M": 8, "efConstruction": 64},
- },
- index_name="embeddings",
- )
- self.collection.load()
-
- def add(self, data) -> str:
- """Add an embedding of data into memory.
-
- Args:
- data (str): The raw text to construct embedding index.
-
- Returns:
- str: log.
- """
- embedding = get_ada_embedding(data)
- result = self.collection.insert([[embedding], [data]])
- _text = (
- "Inserting data into memory at primary key: "
- f"{result.primary_keys[0]}:\n data: {data}"
- )
- return _text
-
- def get(self, data):
- """Return the most relevant data in memory.
- Args:
- data: The data to compare to.
- """
- return self.get_relevant(data, 1)
-
- def clear(self) -> str:
- """Drop the index in memory.
-
- Returns:
- str: log.
- """
- self.collection.drop()
- self.collection = Collection(self.milvus_collection, self.schema)
- self.collection.create_index(
- "embeddings",
- {
- "metric_type": "IP",
- "index_type": "HNSW",
- "params": {"M": 8, "efConstruction": 64},
- },
- index_name="embeddings",
- )
- self.collection.load()
- return "Obliviated"
-
- def get_relevant(self, data: str, num_relevant: int = 5):
- """Return the top-k relevant data in memory.
- Args:
- data: The data to compare to.
- num_relevant (int, optional): The max number of relevant data.
- Defaults to 5.
-
- Returns:
- list: The top-k relevant data.
- """
- # search the embedding and return the most relevant text.
- embedding = get_ada_embedding(data)
- search_params = {
- "metrics_type": "IP",
- "params": {"nprobe": 8},
- }
- result = self.collection.search(
- [embedding],
- "embeddings",
- search_params,
- num_relevant,
- output_fields=["raw_text"],
- )
- return [item.entity.value_of_field("raw_text") for item in result[0]]
-
- def get_stats(self) -> str:
- """
- Returns: The stats of the milvus cache.
- """
- return f"Entities num: {self.collection.num_entities}"
diff --git a/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_pascal_ctx_59_sem_seg.py b/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_pascal_ctx_59_sem_seg.py
deleted file mode 100644
index a4019b3006e8347bdbd936297852c1b2cbd22ff6..0000000000000000000000000000000000000000
--- a/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_pascal_ctx_59_sem_seg.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import os
-
-import numpy as np
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets import load_sem_seg
-
-from . import openseg_classes
-
-PASCAL_CTX_59_CATEGORIES=openseg_classes.get_pascal_ctx_59_categories_with_prompt_eng()
-
-PASCAL_CTX_59_COLORS = [k["color"] for k in PASCAL_CTX_59_CATEGORIES]
-
-MetadataCatalog.get("openvocab_pascal_ctx59_sem_seg_train").set(
- stuff_colors=PASCAL_CTX_59_COLORS[:],
-)
-
-MetadataCatalog.get("openvocab_pascal_ctx59_sem_seg_val").set(
- stuff_colors=PASCAL_CTX_59_COLORS[:],
-)
-
-def _get_ctx59_meta():
- # Id 0 is reserved for ignore_label; we change ignore_label from 0
- # to 255 in our pre-processing, so all ids are shifted by 1.
- stuff_ids = [k["id"] for k in PASCAL_CTX_59_CATEGORIES]
- assert len(stuff_ids) == 59, len(stuff_ids)
-
- # For semantic segmentation, this mapping maps from contiguous stuff id
- # (in [0, 58], used in models) to ids in the dataset (used for processing results)
- stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
- stuff_classes = [k["name"] for k in PASCAL_CTX_59_CATEGORIES]
-
- ret = {
- "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
- "stuff_classes": stuff_classes,
- }
- return ret
-
-
-def register_all_ctx59(root):
- root = os.path.join(root, "pascal_ctx_d2")
- meta = _get_ctx59_meta()
- for name, dirname in [("train", "training"), ("val", "validation")]:
- image_dir = os.path.join(root, "images", dirname)
- gt_dir = os.path.join(root, "annotations_ctx59", dirname)
- name = f"openvocab_pascal_ctx59_sem_seg_{name}"
- DatasetCatalog.register(
- name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
- )
- MetadataCatalog.get(name).set(
- stuff_classes=meta["stuff_classes"][:],
- thing_dataset_id_to_contiguous_id={}, # to make Mask2Former happy
- stuff_dataset_id_to_contiguous_id=meta["stuff_dataset_id_to_contiguous_id"],
- image_root=image_dir,
- sem_seg_root=gt_dir,
- evaluator_type="sem_seg",
- ignore_label=255,
- gt_ext="png",
- )
-
-_root = os.getenv("DETECTRON2_DATASETS", "datasets")
-register_all_ctx59(_root)
\ No newline at end of file
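Once this module has been imported (and DETECTRON2_DATASETS points at a tree containing pascal_ctx_d2/ in the layout above), the registered names can be consumed as usual; a small sketch:

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

meta = MetadataCatalog.get("openvocab_pascal_ctx59_sem_seg_val")
print(len(meta.stuff_classes), "classes, ignore_label =", meta.ignore_label)

# Materialising the dicts scans images/validation and annotations_ctx59/validation.
dicts = DatasetCatalog.get("openvocab_pascal_ctx59_sem_seg_val")
print(len(dicts), "validation samples")
```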
diff --git a/spaces/golem4300/RVC-TTS/vc_infer_pipeline.py b/spaces/golem4300/RVC-TTS/vc_infer_pipeline.py
deleted file mode 100644
index 2e5d17bdc522e3d2757f2accd17258994b40e613..0000000000000000000000000000000000000000
--- a/spaces/golem4300/RVC-TTS/vc_infer_pipeline.py
+++ /dev/null
@@ -1,230 +0,0 @@
-from scipy import signal
-from functools import lru_cache
-import torch.nn.functional as F
-import numpy as np, parselmouth, torch
-import pyworld, os, traceback, faiss, librosa
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-@lru_cache
-def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period, input_audio_path2wav):
- audio = input_audio_path2wav[input_audio_path]
- f0, t = pyworld.harvest(
- audio, fs=fs, f0_ceil=f0max, f0_floor=f0min, frame_period=frame_period
- )
- f0 = pyworld.stonemask(audio, f0, t, fs)
- return f0
-
-def change_rms(data1, sr1, data2, sr2, rate):
- rms1 = librosa.feature.rms(y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2)
- rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
- rms1 = torch.from_numpy(rms1).unsqueeze(0)
- rms2 = torch.from_numpy(rms2).unsqueeze(0)
- rms1 = F.interpolate(rms1, size=data2.shape[0], mode="linear").squeeze()
- rms2 = F.interpolate(rms2, size=data2.shape[0], mode="linear").squeeze()
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
- data2 *= (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
-
- return data2
-
-class VC:
- def __init__(self, tgt_sr, config):
- self.x_pad = config.x_pad
- self.x_query = config.x_query
- self.x_center = config.x_center
- self.x_max = config.x_max
- self.is_half = config.is_half
- self.sr = 16000
- self.window = 160
- self.t_pad = self.sr * self.x_pad
- self.t_pad_tgt = tgt_sr * self.x_pad
- self.t_pad2 = self.t_pad * 2
- self.t_query = self.sr * self.x_query
- self.t_center = self.sr * self.x_center
- self.t_max = self.sr * self.x_max
- self.device = config.device
-
- def get_f0(self, input_audio_path, x, p_len, f0_up_key, f0_method, filter_radius, inp_f0=None):
- global input_audio_path2wav
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
- if f0_method == "pm":
- f0 = (parselmouth.Sound(x, self.sr)
- .to_pitch_ac(time_step=time_step / 1000, voicing_threshold=0.6, pitch_floor=f0_min,
- pitch_ceiling=f0_max,)
- .selected_array["frequency"])
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0 *= pow(2, f0_up_key / 12)
- tf0 = self.sr // self.window
-
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1])
- shape = f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)].shape[0]
- f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
-
- f0bak= f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- f0_coarse = np.rint(f0_mel).astype(int)  # plain int: np.int was removed from NumPy
- return f0_coarse, f0bak
-
- def vc(self, model, net_g, sid, audio0, pitch, pitchf, times, index, big_npy, index_rate, version, protect):
- feats = torch.from_numpy(audio0)
- feats = feats.half() if self.is_half else feats.float()
-
- if feats.dim() == 2:
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = { "source": feats.to(self.device), "padding_mask": padding_mask, "output_layer": 9 if version == "v1" else 12}
-
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
-
- if protect < 0.5 and pitch is not None and pitchf is not None:
- feats0 = feats.clone()
-
- if index is not None and big_npy is not None and index_rate != 0:
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")  # faiss expects float32 queries
-
- score, ix = index.search(npy, k=8)
- weight = np.square(1 / score)
- weight /= weight.sum(axis=1, keepdims=True)
- npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
-
- if self.is_half:
- npy = npy.astype("float16")
- feats = ( torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + (1 - index_rate) * feats)
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- if protect < 0.5 and pitch is not None and pitchf is not None:
- feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
- if pitch is not None and pitchf is not None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
-
- if protect < 0.5 and pitch is not None and pitchf is not None:
- pitchff = pitchf.clone()
- pitchff[pitchf > 0] = 1
- pitchff[pitchf < 1] = protect
- pitchff = pitchff.unsqueeze(-1)
- feats = feats * pitchff + feats0 * (1 - pitchff)
- feats = feats.to(feats0.dtype)
- p_len = torch.tensor([p_len], device=self.device).long()
-
- with torch.no_grad():
- if pitch is not None and pitchf is not None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
- .data.cpu()
- .float()
- .numpy()
- )
- else:
- audio1 = ((net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy())
-
- del feats, p_len, padding_mask
- return audio1
-
- def pipeline(self,model, net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, file_index, index_rate, if_f0, filter_radius, tgt_sr, resample_sr, rms_mix_rate, version, protect, f0_file=None,):
- if (
- file_index != ""
- and os.path.exists(file_index) == True
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- big_npy = index.reconstruct_n(0, index.ntotal)
- except:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
- if hasattr(f0_file, "name") == True:
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float64")
- except:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(
- input_audio_path,audio_pad,p_len,f0_up_key,f0_method,filter_radius,inp_f0,
- )
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(model, net_g, sid,audio_pad[s : t + self.t_pad2 + self.window], pitch[:, s // self.window : (t + self.t_pad2) // self.window],pitchf[:, s // self.window : (t + self.t_pad2) // self.window],times,index,big_npy,index_rate,version,protect,)[self.t_pad_tgt : -self.t_pad_tgt])
- else:
- audio_opt.append(self.vc(model,net_g,sid,audio_pad[s : t + self.t_pad2 + self.window],None,None,times,index,big_npy,index_rate,version,protect)[self.t_pad_tgt : -self.t_pad_tgt])
- s = t
- if if_f0 == 1:
- audio_opt.append(self.vc(model,net_g,sid,audio_pad[t:],pitch[:, t // self.window :] if t is not None else pitch,pitchf[:, t // self.window :] if t is not None else pitchf,times,index,big_npy,index_rate,version,protect,)[self.t_pad_tgt : -self.t_pad_tgt])
- else:
- audio_opt.append(self.vc(model, net_g, sid, audio_pad[t:], None, None, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
- audio_opt = np.concatenate(audio_opt)
- if rms_mix_rate != 1:
- audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
- if resample_sr >= 16000 and tgt_sr != resample_sr:
- audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr)
- audio_max = np.abs(audio_opt).max() / 0.99
- max_int16 = 32768
- if audio_max > 1:
- max_int16 /= audio_max
- audio_opt = (audio_opt * max_int16).astype(np.int16)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
\ No newline at end of file
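The pitch handling in `get_f0` maps Hz values onto 1-255 mel-spaced codes before they are fed to the network. A standalone re-derivation of that quantisation, using a few illustrative frequencies:

```python
import numpy as np

f0_min, f0_max = 50.0, 1100.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.array([0.0, 110.0, 220.0, 440.0])   # 0 Hz marks unvoiced frames
f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > 255] = 255
print(np.rint(f0_mel).astype(int))  # unvoiced stays at 1, voiced frames land in 2..255
```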
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Keygen Extra Quality AutoCAD Architecture 2016.md b/spaces/gotiQspiryo/whisper-ui/examples/Keygen Extra Quality AutoCAD Architecture 2016.md
deleted file mode 100644
index ae4eb440f041f854ea7b1919afc425b36e7f1376..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Keygen Extra Quality AutoCAD Architecture 2016.md
+++ /dev/null
@@ -1,115 +0,0 @@
-
-
Keygen AutoCAD Architecture 2016: A Complete Guide
-
-
If you are looking for a way to activate any product of Autodesk 2016, you may have heard of Keygen AutoCAD Architecture 2016. This is a tool that can generate serial numbers and product keys that match certain templates for Autodesk products. In this article, we will explain what Keygen AutoCAD Architecture 2016 is, how to use it, and what are the advantages and disadvantages of using it.
Keygen AutoCAD Architecture 2016 is a software that can create unique codes that can unlock the full features of Autodesk products, such as AutoCAD Architecture 2016. AutoCAD Architecture 2016 is a software that helps architects and designers create drawings, documentation, and schedules for building projects. It has many tools and features that can improve productivity and accuracy.
-
-
However, to use AutoCAD Architecture 2016, you need to have a valid license that can be purchased from Autodesk or authorized resellers. A license can be either perpetual or subscription-based, and it can be either single-user or multi-user. The price of a license depends on the type, duration, and number of users.
-
-
If you do not have a license, you can still use AutoCAD Architecture 2016 in trial mode for 30 days. After that, you will need to activate the software with a serial number and a product key. This is where Keygen AutoCAD Architecture 2016 comes in handy. It can generate serial numbers and product keys that can bypass the activation process and make the software think that it is licensed.
-
-
-
How to use Keygen AutoCAD Architecture 2016?
-
-
To use Keygen AutoCAD Architecture 2016, you need to follow these steps:
-
-
-
Download Keygen AutoCAD Architecture 2016 from various sources on the internet. Be careful to choose a reliable and safe source, as some keygens may contain viruses or malware.
-
Install Autodesk AutoCAD Architecture 2016 with a serial number and a product key that match certain templates. You can find these templates on the internet or use the ones provided by the keygen.
-
Run the keygen as administrator and copy the request code from the activation screen of AutoCAD Architecture 2016.
-
Paste the request code into the keygen and press generate. The keygen will produce an activation code.
-
Copy the activation code and paste it into the activation screen of AutoCAD Architecture 2016. Click next and you should see a message that says "Activation successful".
-
Block the outgoing traffic or disconnect from the internet before activating. This will prevent Autodesk from detecting that you are using a fake license.
-
-
-
Congratulations! You have successfully activated AutoCAD Architecture 2016 with Keygen AutoCAD Architecture 2016.
-
-
What are the advantages and disadvantages of using Keygen AutoCAD Architecture 2016?
-
-
Using Keygen AutoCAD Architecture 2016 has some pros and cons that you should be aware of before deciding to use it.
-
-
The main advantage of using Keygen AutoCAD Architecture 2016 is that it can save you money. You do not have to pay for a license or a subscription to use AutoCAD Architecture 2016. You can enjoy all the features and functions of the software without any limitations or restrictions.
-
-
The main disadvantage of using Keygen AutoCAD Architecture 2016 is that it is illegal and unethical. You are violating the terms and conditions of Autodesk by using a pirated software. You are also depriving Autodesk of their rightful revenue and support. You may face legal consequences if you are caught using Keygen AutoCAD Architecture 2016.
-
-
Another disadvantage of using Keygen AutoCAD Architecture 2016 is that it may not work properly or at all. Some keygens may not generate valid codes or may generate codes that have already been used by someone else. Some keygens may also cause errors or crashes in the software or your system. Some keygens may also contain viruses or malware that can harm your computer or steal your personal information.
-
-
Conclusion
-
-
Keygen AutoCAD Architecture 2016 is a tool that can activate any product of Autodesk 2016, including AutoCAD Architecture 2016. It can generate serial numbers and product keys that can unlock the full features of the software. However, using Keygen AutoCAD Architecture 2016 is illegal and unethical, and it may also pose some risks to your system and security. Therefore, we do not recommend using Keygen AutoCAD Architecture 2016 or any other similar tools. Instead, we suggest that you purchase a legitimate license from Autodesk or authorized resellers.
-
What are the benefits of using AutoCAD Architecture 2016?
-
-
AutoCAD Architecture 2016 is a powerful software that can help you design and document architectural projects. It has many benefits that can improve your workflow and efficiency, such as:
-
-
-
It has a user-friendly interface that allows you to access tools and commands easily.
-
It has a comprehensive library of architectural objects and styles that you can use to create realistic and accurate drawings.
-
It has a dynamic model that updates automatically as you make changes to your design.
-
It has a smart dimensioning system that helps you create accurate and consistent annotations.
-
It has a collaboration feature that enables you to work with other professionals and share your data across platforms.
-
-
-
With AutoCAD Architecture 2016, you can create stunning and professional architectural drawings that meet your standards and specifications.
-
-
What are the risks of using Keygen AutoCAD Architecture 2016?
-
-
While Keygen AutoCAD Architecture 2016 may seem like a convenient and cost-effective way to use AutoCAD Architecture 2016, it also comes with some risks that you should be aware of before using it. Some of these risks are:
-
-
-
You may violate the intellectual property rights of Autodesk and face legal actions or penalties.
-
You may compromise the quality and performance of your software and your system.
-
You may expose your computer and your data to viruses or malware that can damage or steal them.
-
You may lose access to updates, support, and services from Autodesk.
-
You may miss out on new features and improvements that Autodesk releases for their products.
-
-
-
Using Keygen AutoCAD Architecture 2016 may seem tempting, but it is not worth the risk. You may end up losing more than you gain by using a pirated software.
-
-
How to get a legitimate license for AutoCAD Architecture 2016?
-
-
If you want to use AutoCAD Architecture 2016 legally and safely, you need to get a legitimate license from Autodesk or authorized resellers. You can choose from different types of licenses depending on your needs and preferences, such as:
-
-
-
A perpetual license that allows you to use the software indefinitely without paying any additional fees.
-
A subscription license that allows you to use the software for a specified period of time and pay a recurring fee.
-
A single-user license that allows you to use the software on one device only.
-
A multi-user license that allows you to use the software on multiple devices or share it with other users.
-
-
-
To get a license for AutoCAD Architecture 2016, you need to visit the official website of Autodesk or authorized resellers and follow the instructions. You will need to provide some information, such as your name, email address, country, and payment method. You will also need to agree to the terms and conditions of Autodesk. After completing the purchase, you will receive a confirmation email with your serial number and product key. You can then use these codes to activate your software and enjoy its full features and functions.
-
What are the alternatives to using Keygen AutoCAD Architecture 2016?
-
-
If you do not want to use Keygen AutoCAD Architecture 2016 or any other similar tools, you have some alternatives that can help you use AutoCAD Architecture 2016 legally and safely. Some of these alternatives are:
-
-
-
You can use the trial version of AutoCAD Architecture 2016 for 30 days. This will allow you to test the software and see if it meets your needs and expectations. You can download the trial version from the official website of Autodesk.
-
You can use the student version of AutoCAD Architecture 2016 for 3 years. This will allow you to use the software for educational purposes only. You can get the student version from the Autodesk Education Community.
-
You can use the free web and mobile apps of AutoCAD Architecture 2016. These will allow you to view, edit, and share your drawings online or on your mobile devices. You can access the web app from any browser or download the mobile app from the App Store or Google Play.
-
You can use other software that are similar to AutoCAD Architecture 2016. These will allow you to create and document architectural projects with different features and functions. You can find some of these software on the internet or ask for recommendations from other professionals.
-
-
-
These alternatives may not have all the features and functions of AutoCAD Architecture 2016, but they may still help you achieve your goals and objectives.
-
-
How to learn more about AutoCAD Architecture 2016?
-
-
If you want to learn more about AutoCAD Architecture 2016, you have some resources that can help you improve your skills and knowledge. Some of these resources are:
-
-
-
You can read the user guide and the help files of AutoCAD Architecture 2016. These will provide you with detailed information and instructions on how to use the software and its tools and commands.
-
You can watch the tutorials and videos of AutoCAD Architecture 2016. These will show you how to perform various tasks and operations with the software and its features.
-
You can take online courses and training programs on AutoCAD Architecture 2016. These will teach you how to use the software effectively and efficiently for different types of projects and scenarios.
-
You can join online forums and communities of AutoCAD Architecture 2016 users. These will allow you to interact with other professionals and experts who can answer your questions and share their tips and tricks.
-
-
-
These resources may help you learn more about AutoCAD Architecture 2016 and enhance your performance and productivity.
-
Conclusion
-
-
AutoCAD Architecture 2016 is a software that can help you design and document architectural projects. It has many features and functions that can improve your workflow and accuracy. However, to use AutoCAD Architecture 2016, you need to have a valid license that can be purchased from Autodesk or authorized resellers. If you do not have a license, you may be tempted to use Keygen AutoCAD Architecture 2016 or any other similar tools that can activate the software without paying. However, using Keygen AutoCAD Architecture 2016 is illegal and unethical, and it may also pose some risks to your system and security. Therefore, we do not recommend using Keygen AutoCAD Architecture 2016 or any other similar tools. Instead, we suggest that you use some alternatives that can help you use AutoCAD Architecture 2016 legally and safely. We also suggest that you use some resources that can help you learn more about AutoCAD Architecture 2016 and improve your skills and knowledge.
-
-
We hope that this article has provided you with useful information and guidance on Keygen AutoCAD Architecture 2016 and AutoCAD Architecture 2016. If you have any questions or comments, please feel free to contact us or leave a comment below.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X Updatedl.md b/spaces/gotiQspiryo/whisper-ui/examples/Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X Updatedl.md
deleted file mode 100644
index ae872211bbc9015b675be9c6a4f37fcc4efec43e..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X Updatedl.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X | Updatedl
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/innat/HybridModel-GradCAM/models/__init__.py b/spaces/innat/HybridModel-GradCAM/models/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/innat/HybridModel-GradCAM/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/innnky/soft-vits-vc/preprocess.py b/spaces/innnky/soft-vits-vc/preprocess.py
deleted file mode 100644
index aaedbf076c30114b3ac6c27dfb42fd54ac81a71c..0000000000000000000000000000000000000000
--- a/spaces/innnky/soft-vits-vc/preprocess.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import argparse
-import text
-from utils import load_filepaths_and_text
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument("--out_extension", default="cleaned")
- parser.add_argument("--text_index", default=1, type=int)
- parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
- parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])
-
- args = parser.parse_args()
-
-
- for filelist in args.filelists:
- print("START:", filelist)
- filepaths_and_text = load_filepaths_and_text(filelist)
- for i in range(len(filepaths_and_text)):
- original_text = filepaths_and_text[i][args.text_index]
- cleaned_text = text._clean_text(original_text, args.text_cleaners)
- filepaths_and_text[i][args.text_index] = cleaned_text
-
- new_filelist = filelist + "." + args.out_extension
- with open(new_filelist, "w", encoding="utf-8") as f:
- f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Burp Suite Professional 1.7.37 Free Download UPD.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Burp Suite Professional 1.7.37 Free Download UPD.md
deleted file mode 100644
index de588c817cece08262e590b52f563d62aca0d5e2..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Burp Suite Professional 1.7.37 Free Download UPD.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
Crawler: the crawler is very similar to the intruder in that it is able to navigate web pages, download files, and submit forms. Burp also includes a list of over 50,000 common web pages, so your crawl can be targeted. It can also be used to detect attack vectors.
Intruder: the intruder is Burp's most advanced spider. This tool is capable of simulating any browser, including the memory of its HTTP requests. The intruder is able to navigate web pages, submit forms, download files, etc. This tool is great for hunting.
-
Click on the To Use tab to see Burp in action, as you'll see in the screenshot below. This can be used to test any web application where you want to see whether there are any security issues with the software. In the screenshot below, I have a few issues in my code. You can click on a number to access the code in that location. There's also a small box to the right of the message that gives a code next to it. This code allows you to view the code for the error. Once you're done viewing the code, you can click on the undo button. This will allow you to undo the changes you made. This is a great application to have to test your website's security.
-
The first tab you're going to want to check is the Options tab. Under the "when starting Burp" section, you're going to want to enter the URL that you want to test. If you want to scan a web server that's on the same network as your computer, you would type it in and then click on Start. Once it's done, click on the Options button and set the port to 8080. Click on Save to save the changes. Then click on the Start Scan button. This will start the scan and, if there are any errors, there's a link to the code for the error. You can click on the link to view the code. The code can also be viewed from the Scan tab.
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACKOBSStudio2202FullInstallerx64.md b/spaces/inplisQlawa/anything-midjourney-v4-1/CRACKOBSStudio2202FullInstallerx64.md
deleted file mode 100644
index fd9ff9ea69af428e78b202024528df77ada6d094..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACKOBSStudio2202FullInstallerx64.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-... ca8d075f12 7d1ee9a32a8ebde8805c26d64564d65eba564072 66.33 MiB (69551216 Bytes) ... 15c24738db. CRACK OBS Studio 2202 Full Installer X64. 4d29de3e1b
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen.md
deleted file mode 100644
index 774a1b46396ce079905b338080fc22beb970dfb9..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen.md
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen - The Ultimate Color Tool for Designers
-
-
If you are a web designer, graphic designer, or any other creative professional who works with colors, you know how important it is to have a reliable and easy-to-use color tool. You need a tool that can help you create stunning color palettes, harmonize colors, preview colors on different backgrounds, and generate color codes for various formats. You also need a tool that can help you find and download color schemes from other sources, such as websites, images, or popular color libraries.
-
Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen
That's why you need Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen - the ultimate color tool for designers. Color Schemer Studio V2.0 is a powerful and versatile color software that can help you with all your color needs. Whether you are working on a website, a logo, a flyer, a poster, or any other design project, Color Schemer Studio V2.0 can help you create amazing color schemes in minutes.
-
-
What is Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen?
-
-
Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen is a full-featured color software that allows you to create and edit color schemes, preview colors on different backgrounds, generate color codes for various formats, and much more. It is compatible with Windows XP, Vista, 7, 8, and 10.
-
-
Color Schemer Studio V2.0 has a user-friendly interface that lets you easily access all its features and functions. You can use the built-in color wheel to create harmonious color schemes based on different color models, such as RGB, CMYK, HSB, or Lab. You can also use the eyedropper tool to pick colors from any source on your screen, such as websites, images, or other applications.
-
-
Color Schemer Studio V2.0 also allows you to preview colors on different backgrounds, such as solid colors, gradients, patterns, or images. You can adjust the brightness, contrast, saturation, and hue of any color to fine-tune your color scheme. You can also use the color mixer to blend two or more colors together and create new shades.
-
-
Color Schemer Studio V2.0 can generate color codes for various formats, such as HTML hex codes, RGB values, CMYK values, HSB values, or Lab values. You can copy and paste these codes into your favorite design software or text editor. You can also export your color schemes as Adobe Photoshop palettes (.aco), Adobe Illustrator palettes (.ai), GIMP palettes (.gpl), or Paint Shop Pro palettes (.pal).
-
-
Color Schemer Studio V2.0 also allows you to find and download color schemes from other sources, such as websites, images, or popular color libraries. You can use the web browser feature to browse any website and extract its color scheme. You can also use the image analyzer feature to analyze any image and extract its dominant colors. You can also use the online scheme library feature to access thousands of ready-made color schemes from various categories, such as web design, fashion design, interior design, or nature.
-
-
-
How to Use Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen?
-
-
To use Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen, you need to download and install it on your computer. You can download it from this link: https://bltlly.com/2jvt19. After downloading it, you need to extract it using WinRAR or any other file compression software. Then, you need to run the setup file and follow the instructions to install it. After installing it, you need to activate it using the serial key keygen that is included in the download. To activate it, you need to run the keygen file and generate a serial key. Then, you need to enter the serial key into the registration window of Color Schemer Studio V2.0 and click OK. After activating it, you can start using it for your design projects.
-
-
To create a color scheme using Color Schemer Studio V2.0, you need to follow these steps:
-
-
-
Open Color Schemer Studio V2.0 and click on the New Scheme button.
-
Select a base color from the color wheel or use the eyedropper tool to pick a color from any source on your screen.
-
Select a color model from the drop-down menu, such as RGB, CMYK, HSB, or Lab.
-
Select a harmony rule from the drop-down menu, such as complementary, analogous, triadic, tetradic, split complementary, or custom.
-
Adjust the number of colors in your scheme using the slider or the arrows.
-
Adjust the brightness, contrast, saturation, and hue of any color using the sliders or the arrows.
-
Preview your color scheme on different backgrounds using the tabs at the bottom of the window.
-
Generate color codes for your scheme using the tabs at the top of the window.
-
Save, export, or print your scheme using the buttons at the top right corner of the window.
-
-
-
To find and download a color scheme using Color Schemer Studio V2.0, you need to follow these steps:
-
-
-
Open Color Schemer Studio V2.0 and click on the Find Scheme button.
-
Select a source from which you want to find a scheme, such as website, image, or online scheme library.
-
If you select website, enter the URL of the website and click Go. If you select image, browse your computer and select an image file. If you select online scheme library, browse the categories and select a scheme that you like.
-
Preview the scheme on different backgrounds using the tabs at the bottom of the window.
-
Generate color codes for the scheme using the tabs at the top of the window.
-
Save, export, or print the scheme using the buttons at the top right corner of the window.
-
-
-
Why You Need Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen?
-
-
Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen is a must-have tool for any designer who works with colors. It can help you create beautiful and harmonious color schemes in minutes. It can also help you find inspiration from other sources, such as websites, images, or online libraries. It can also help you generate color codes for various formats that you can use in your design software or text editor. It can also help you preview your colors on different backgrounds to see how they look in different contexts. It can also help you save time and effort by providing you with all these features in one software.
-
-
If you want to take your design skills to the next level, you need Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen. It is a powerful and versatile color software that can help you with all your color needs. Whether you are working on a website, a logo, a flyer, a poster, or any other design project, Color Schemer Studio V2.0 can help you create amazing color schemes in minutes. Download it now and see for yourself how it can improve your design workflow.
-
Where to Download Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen?
-
-
If you are interested in downloading Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen, you can find it on various websites that offer software downloads. However, you need to be careful when downloading software from unknown sources, as they may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you need to make sure that you download Color Schemer Studio V2.0 from a trusted and reputable website that provides safe and secure downloads.
-
-
One of the best websites that you can download Color Schemer Studio V2.0 from is Ko-fi. Ko-fi is a platform where creators can get support from fans through donations, memberships, shop sales, and more. It is also a place where you can find and download various software, such as Color Schemer Studio V2.0. Ko-fi provides high-quality and virus-free downloads that are verified by the creators themselves. You can also support the creators by buying them a coffee or becoming a member of their page.
-
-
To download Color Schemer Studio V2.0 from Ko-fi, you need to follow these steps:
-
-
-
Go to this link: https://ko-fi.com/post/Color-Schemer-Studio-V2-0-h33t-cepuxxx-Serial-D1D5CNOUN
-
Click on the Download button and choose a payment method. You can pay with PayPal, Stripe, or Ko-fi Balance. You can also choose to pay what you want or nothing at all.
-
After completing the payment, you will receive an email with a download link. Click on the link and save the file to your computer.
-
Extract the file using WinRAR or any other file compression software. Then, run the setup file and follow the instructions to install Color Schemer Studio V2.0.
-
Activate Color Schemer Studio V2.0 using the serial key keygen that is included in the download. To activate it, run the keygen file and generate a serial key. Then, enter the serial key into the registration window of Color Schemer Studio V2.0 and click OK.
-
Enjoy using Color Schemer Studio V2.0 for your design projects.
-
-
-
Conclusion
-
-
Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen is a great color tool for designers who want to create and edit color schemes, preview colors on different backgrounds, generate color codes for various formats, and find and download color schemes from other sources. It is a powerful and versatile color software that can help you with all your color needs. Whether you are working on a website, a logo, a flyer, a poster, or any other design project, Color Schemer Studio V2.0 can help you create amazing color schemes in minutes. You can download it from Ko-fi and support the creator by buying them a coffee or becoming a member of their page. Download it now and see for yourself how it can improve your design workflow.
-
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
deleted file mode 100644
index b2b88e8c46f19b6db0933163e57ccdb51180f517..0000000000000000000000000000000000000000
--- a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*!
-**************************************************************************************************
-* Deformable DETR
-* Copyright (c) 2020 SenseTime. All Rights Reserved.
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-**************************************************************************************************
-* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
-**************************************************************************************************
-*/
-
-#pragma once
-#include <torch/extension.h>
-
-namespace groundingdino {
-
-at::Tensor
-ms_deform_attn_cpu_forward(
- const at::Tensor &value,
- const at::Tensor &spatial_shapes,
- const at::Tensor &level_start_index,
- const at::Tensor &sampling_loc,
- const at::Tensor &attn_weight,
- const int im2col_step);
-
-std::vector<at::Tensor>
-ms_deform_attn_cpu_backward(
- const at::Tensor &value,
- const at::Tensor &spatial_shapes,
- const at::Tensor &level_start_index,
- const at::Tensor &sampling_loc,
- const at::Tensor &attn_weight,
- const at::Tensor &grad_output,
- const int im2col_step);
-
-} // namespace groundingdino
diff --git a/spaces/jackyliang42/code-as-policies/README.md b/spaces/jackyliang42/code-as-policies/README.md
deleted file mode 100644
index 0b2df1b05ed8c4e6c3a37924b5800d8d670972d0..0000000000000000000000000000000000000000
--- a/spaces/jackyliang42/code-as-policies/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Code As Policies
-emoji: 📈
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-# Code as Policies Tabletop Manipulation Interactive Demo
-
-This demo is from the paper:
-
-[Code as Policies: Language Model Programs for Embodied Control](https://code-as-policies.github.io/)
-
-Below is an interactive demo for the simulated tabletop manipulation domain, seen in the paper section IV.D
-
-## Preparations
-1. Obtain an [OpenAI API Key](https://openai.com/blog/openai-api/)
-
-## Usage
-1. Fill in the API Key and how many blocks and bowls to be spawned in the environment.
-2. Click Setup/Reset Simulation
-3. Based on the new randomly sampled object names, input an instruction and click Run Instruction. If successful, this will render a video and update the simulation environment visualization.
-
-You can run instructions in sequence and refer back to previous instructions (e.g. do the same with other blocks, move the same block to the other bowl, etc). To reset, click Setup/Reset Env, and this will clear the current instruction history.
-
-## Supported Instructions
-* Spatial reasoning (e.g. to the left of the red block, the closest corner, the farthest bowl, the second block from the right)
-* Sequential actions (e.g. put blocks in matching bowls, stack blocks on the bottom right corner)
-* Contextual instructions (e.g. do the same with the blue block, undo that)
-* Language-based reasoning (e.g. put the forest-colored block on the ocean-colored bowl).
-* Simple Q&A (e.g. how many blocks are to the left of the blue bowl?)
-
-## Example Instructions
-Note: object names may need to be changed depending on the sampled object names.
-* put the sun-colored block on the bowl closest to it
-* stack the blocks on the bottom most bowl
-* arrange the blocks as a square in the middle
-* move the square 5cm to the right
-* how many blocks are to the right of the orange bowl?
-* pick up the block closest to the top left corner and place it on the bottom right corner
-
-## Known Limitations
-* In simulation we're using ground truth object poses instead of using vision models. This means that instructions that require knowledge of visual appearances (e.g. darkest bowl, largest object, empty bowls) are not supported.
-* Currently, the low-level pick place primitive does not do collision checking, so if there are many objects on the table, placing actions may incur collisions.
-* The pick place primitive is also unable to pick up bowls.
-* Prompt saturation - if too many instructions (10+) are executed in a row, then the LLM may start to ignore examples in the early parts of the prompt.
-* Ambiguous instructions - if a given instruction doesn't lead to the desired actions, try rephrasing it to remove ambiguities (e.g. place the block on the closest bowl -> place the block on its closest bowl)
-* Maximum token length - you may hit the maximum token length if running multiple commands in sequence. Please reset the simulation when this happens.
diff --git a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.cpp b/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.cpp
deleted file mode 100644
index ff4149b8b46b54d2f400ae10e44d19f20503ba1f..0000000000000000000000000000000000000000
--- a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.cpp
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <torch/extension.h>
-#include <ATen/cuda/CUDAContext.h>
-#include <c10/cuda/CUDAGuard.h>
-#include "filtered_lrelu.h"
-
-//------------------------------------------------------------------------
-
-static std::tuple<torch::Tensor, torch::Tensor, int> filtered_lrelu(
- torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si,
- int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns)
-{
- // Set CUDA device.
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
- const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
-
- // Validate arguments.
- TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device");
- TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32");
- TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype");
- TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32");
- TORCH_CHECK(x.dim() == 4, "x must be rank 4");
- TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large");
- TORCH_CHECK(x.numel() > 0, "x is empty");
- TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2");
- TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large");
- TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large");
- TORCH_CHECK(fu.numel() > 0, "fu is empty");
- TORCH_CHECK(fd.numel() > 0, "fd is empty");
- TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x");
- TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1");
-
- // Figure out how much shared memory is available on the device.
- int maxSharedBytes = 0;
- AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index()));
- int sharedKB = maxSharedBytes >> 10;
-
- // Populate enough launch parameters to check if a CUDA kernel exists.
- filtered_lrelu_kernel_params p;
- p.up = up;
- p.down = down;
- p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter.
- p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0);
- filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel<float, int32_t, false, false>(p, sharedKB);
- if (!test_spec.exec)
- {
- // No kernel found - return empty tensors and indicate missing kernel with return code of -1.
- return std::make_tuple(torch::Tensor(), torch::Tensor(), -1);
- }
-
- // Input/output element size.
- int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4;
-
- // Input sizes.
- int64_t xw = (int)x.size(3);
- int64_t xh = (int)x.size(2);
- int64_t fut_w = (int)fu.size(-1) - 1;
- int64_t fut_h = (int)fu.size(0) - 1;
- int64_t fdt_w = (int)fd.size(-1) - 1;
- int64_t fdt_h = (int)fd.size(0) - 1;
-
- // Logical size of upsampled buffer.
- int64_t cw = xw * up + (px0 + px1) - fut_w;
- int64_t ch = xh * up + (py0 + py1) - fut_h;
- TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter");
- TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large");
-
- // Compute output size and allocate.
- int64_t yw = (cw - fdt_w + (down - 1)) / down;
- int64_t yh = (ch - fdt_h + (down - 1)) / down;
- TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1");
- TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large");
- torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format());
-
- // Allocate sign tensor.
- torch::Tensor so;
- torch::Tensor s = si;
- bool readSigns = !!s.numel();
- int64_t sw_active = 0; // Active width of sign tensor.
- if (writeSigns)
- {
- sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements.
- int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height.
- int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16.
- TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large");
- s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous);
- }
- else if (readSigns)
- sw_active = s.size(3) << 2;
-
- // Validate sign tensor if in use.
- if (readSigns || writeSigns)
- {
- TORCH_CHECK(s.is_contiguous(), "signs must be contiguous");
- TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8");
- TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x");
- TORCH_CHECK(s.dim() == 4, "signs must be rank 4");
- TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x");
- TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large");
- }
-
- // Populate rest of CUDA kernel parameters.
- p.x = x.data_ptr();
- p.y = y.data_ptr();
- p.b = b.data_ptr();
- p.s = (readSigns || writeSigns) ? s.data_ptr() : 0;
- p.fu = fu.data_ptr();
- p.fd = fd.data_ptr();
- p.pad0 = make_int2(px0, py0);
- p.gain = gain;
- p.slope = slope;
- p.clamp = clamp;
- p.flip = (flip_filters) ? 1 : 0;
- p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
- p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
- p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous.
- p.sOfs = make_int2(sx, sy);
- p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes.
-
- // x, y, b strides are in bytes.
- p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0));
- p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0));
- p.bStride = sz * b.stride(0);
-
- // fu, fd strides are in elements.
- p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0);
- p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0);
-
- // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those.
- bool index64b = false;
- if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true;
- if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true;
- if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true;
- if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true;
- if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true;
- if (s.numel() > INT_MAX) index64b = true;
-
- // Choose CUDA kernel.
- filtered_lrelu_kernel_spec spec = { 0 };
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&]
- {
- if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation.
- {
- // Choose kernel based on index type, datatype and sign read/write modes.
- if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel<scalar_t, int32_t, true, false>(p, sharedKB);
- else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel<scalar_t, int32_t, false, true>(p, sharedKB);
- else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel<scalar_t, int32_t, false, false>(p, sharedKB);
- else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel<scalar_t, int64_t, true, false>(p, sharedKB);
- else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel<scalar_t, int64_t, false, true>(p, sharedKB);
- else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel<scalar_t, int64_t, false, false>(p, sharedKB);
- }
- });
- TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists.
-
- // Launch CUDA kernel.
- void* args[] = {&p};
- int bx = spec.numWarps * 32;
- int gx = (p.yShape.x - 1) / spec.tileOut.x + 1;
- int gy = (p.yShape.y - 1) / spec.tileOut.y + 1;
- int gz = p.yShape.z * p.yShape.w;
-
- // Repeat multiple horizontal tiles in a CTA?
- if (spec.xrep)
- {
- p.tilesXrep = spec.xrep;
- p.tilesXdim = gx;
-
- gx = (gx + p.tilesXrep - 1) / p.tilesXrep;
- std::swap(gx, gy);
- }
- else
- {
- p.tilesXrep = 0;
- p.tilesXdim = 0;
- }
-
- // Launch filter setup kernel.
- AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream()));
-
- // Copy kernels to constant memory.
- if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters<true, false>(at::cuda::getCurrentCUDAStream())));
- else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters<false, true>(at::cuda::getCurrentCUDAStream())));
- else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters<false, false>(at::cuda::getCurrentCUDAStream())));
-
- // Set cache and shared memory configurations for main kernel.
- AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared));
- if (spec.dynamicSharedKB) // Need dynamically allocated shared memory?
- AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10));
- AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte));
-
- // Launch main kernel.
- const int maxSubGz = 65535; // CUDA maximum for block z dimension.
- for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big.
- {
- p.blockZofs = zofs;
- int subGz = std::min(maxSubGz, gz - zofs);
- AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream()));
- }
-
- // Done.
- return std::make_tuple(y, so, 0);
-}
-
-//------------------------------------------------------------------------
-
-static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns)
-{
- // Set CUDA device.
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
- const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
-
- // Validate arguments.
- TORCH_CHECK(x.dim() == 4, "x must be rank 4");
- TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large");
- TORCH_CHECK(x.numel() > 0, "x is empty");
- TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64");
-
- // Output signs if we don't have sign input.
- torch::Tensor so;
- torch::Tensor s = si;
- bool readSigns = !!s.numel();
- if (writeSigns)
- {
- int64_t sw = x.size(3);
- sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing.
- s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous);
- }
-
- // Validate sign tensor if in use.
- if (readSigns || writeSigns)
- {
- TORCH_CHECK(s.is_contiguous(), "signs must be contiguous");
- TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8");
- TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x");
- TORCH_CHECK(s.dim() == 4, "signs must be rank 4");
- TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x");
- TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large");
- }
-
- // Initialize CUDA kernel parameters.
- filtered_lrelu_act_kernel_params p;
- p.x = x.data_ptr();
- p.s = (readSigns || writeSigns) ? s.data_ptr() : 0;
- p.gain = gain;
- p.slope = slope;
- p.clamp = clamp;
- p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
- p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0));
- p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous.
- p.sOfs = make_int2(sx, sy);
-
- // Choose CUDA kernel.
- void* func = 0;
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&]
- {
- if (writeSigns)
- func = choose_filtered_lrelu_act_kernel<scalar_t, true, false>();
- else if (readSigns)
- func = choose_filtered_lrelu_act_kernel<scalar_t, false, true>();
- else
- func = choose_filtered_lrelu_act_kernel<scalar_t, false, false>();
- });
- TORCH_CHECK(func, "internal error - CUDA kernel not found");
-
- // Launch CUDA kernel.
- void* args[] = {&p};
- int bx = 128; // 4 warps per block.
-
- // Logical size of launch = writeSigns ? p.s : p.x
- uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x;
- uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y;
- uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use.
- gx = (gx - 1) / bx + 1;
-
- // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest.
- const uint32_t gmax = 65535;
- gy = std::min(gy, gmax);
- gz = std::min(gz, gmax);
-
- // Launch.
- AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream()));
- return so;
-}
-
-//------------------------------------------------------------------------
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
-{
- m.def("filtered_lrelu", &filtered_lrelu); // The whole thing.
- m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place.
-}
-
-//------------------------------------------------------------------------
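For orientation, a rough sketch of how a C++/CUDA op of this shape is typically JIT-compiled and called from Python; the source list and module name below are assumptions for illustration, not taken from the deleted repo (StyleGAN3-style code normally loads this through its own custom_ops helper and ships matching .cu files with the kernels the .cpp dispatches to):

import torch
from torch.utils.cpp_extension import load

# Sketch only: build the extension in place, assuming the sources are on disk.
plugin = load(
    name="filtered_lrelu_plugin",
    sources=["filtered_lrelu.cpp", "filtered_lrelu.cu"],
    verbose=True,
)

# The PYBIND11_MODULE block at the bottom of the file exposes two entry points:
#   plugin.filtered_lrelu(...)      -> (y, sign_tensor, return_code)
#   plugin.filtered_lrelu_act_(...) -> sign_tensor (activation only; modifies x in place)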
diff --git a/spaces/jatin-tech/SkinZen/Dockerfile b/spaces/jatin-tech/SkinZen/Dockerfile
deleted file mode 100644
index 8a4d3bc8fa468aae2aec01c391b2410658c97d6a..0000000000000000000000000000000000000000
--- a/spaces/jatin-tech/SkinZen/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN apt-get update && apt-get upgrade -y
-
-RUN apt-get install -y software-properties-common
-RUN apt-get install -y build-essential cmake pkg-config \
- && apt-get install -y libx11-dev libatlas-base-dev \
- && apt-get install -y libgtk-3-dev libboost-python-dev
-
-RUN pip install --no-cache-dir -r /code/requirements.txt
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -m -u 1000 user
-
-# Switch to the "user" user
-USER user
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-CMD ["python", "main.py"]
\ No newline at end of file
diff --git a/spaces/jbilcke-hf/VideoQuest/src/app/interface/renderer/full-screen-button.tsx b/spaces/jbilcke-hf/VideoQuest/src/app/interface/renderer/full-screen-button.tsx
deleted file mode 100644
index 2daa33beae917eb150252a10d90f11f9ec5be9c3..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/VideoQuest/src/app/interface/renderer/full-screen-button.tsx
+++ /dev/null
@@ -1,18 +0,0 @@
-import { cn } from "@/lib/utils"
-import { FullScreenIcon } from "../../../components/icons/full-screen"
-
-export function FullScreenButton() {
- return (
-
-
-
-
-
- )
-}
\ No newline at end of file
diff --git a/spaces/jeonchangbin49/De-limiter/test_ddp.py b/spaces/jeonchangbin49/De-limiter/test_ddp.py
deleted file mode 100644
index 9e4db2c6799879bfba93e8f62c61d3150f6b8498..0000000000000000000000000000000000000000
--- a/spaces/jeonchangbin49/De-limiter/test_ddp.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# To be honest... this is not ddp.
-import os
-import json
-import argparse
-import glob
-
-import torch
-import tqdm
-import musdb
-import librosa
-import soundfile as sf
-import pyloudnorm as pyln
-from dotmap import DotMap
-
-from models import load_model_with_args
-from separate_func import (
- conv_tasnet_separate,
-)
-from utils import str2bool, db2linear
-
-
-tqdm.monitor_interval = 0
-
-
-def separate_track_with_model(
- args, model, device, track_audio, track_name, meter, augmented_gain
-):
- with torch.no_grad():
- if (
- args.model_loss_params.architecture == "conv_tasnet_mask_on_output"
- or args.model_loss_params.architecture == "conv_tasnet"
- ):
- estimates = conv_tasnet_separate(
- args,
- model,
- device,
- track_audio,
- track_name,
- meter=meter,
- augmented_gain=augmented_gain,
- )
-
- return estimates
-
-
-def main():
- parser = argparse.ArgumentParser(description="model test.py")
-
- parser.add_argument("--target", type=str, default="all")
- parser.add_argument("--data_root", type=str, default="/path/to/musdb_XL")
- parser.add_argument(
- "--use_musdb",
- type=str2bool,
- default=True,
- help="Use musdb test data or just want to inference other samples?",
- )
- parser.add_argument("--exp_name", type=str, default="delimit_6_s')
- parser.add_argument("--manual_output_name", type=str, default=None)
- parser.add_argument(
- "--output_directory", type=str, default="/path/to/results"
- )
- parser.add_argument("--use_gpu", type=str2bool, default=True)
- parser.add_arugment("--save_name_as_target", type=str2bool, default=True)
- parser.add_argument(
- "--loudnorm_input_lufs",
- type=float,
- default=None,
- help="If you want to use loudnorm, input target lufs",
- )
- parser.add_argument(
- "--use_singletrackset",
- type=str2bool,
- default=False,
- help="Use SingleTrackSet for X-UMX",
- )
- parser.add_argument(
- "--best_model",
- type=str2bool,
- default=True,
- help="Use best model or lastly saved model",
- )
- parser.add_argument(
- "--save_output_loudnorm",
- type=float,
- default=None,
- help="Save loudness normalized outputs or not. If you want to save, input target loudness",
- )
- parser.add_argument(
- "--save_mixed_output",
- type=float,
- default=None,
- help="Save original+delimited-estimation mixed output with a ratio of default 0.5 (orginal) and 1 - 0.5 (estimation)",
- )
- parser.add_argument(
- "--save_16k_mono",
- type=str2bool,
- default=False,
- help="Save 16k mono wav files for FAD evaluation.",
- )
- parser.add_argument(
- "--save_histogram",
- type=str2bool,
- default=False,
- help="Save histogram of the output. Only valid when the task is 'delimit'",
- )
-
- args, _ = parser.parse_known_args()
-
- args.output_dir = f"{args.output_directory}/checkpoint/{args.exp_name}"
- with open(f"{args.output_dir}/{args.target}.json", "r") as f:
- args_dict = json.load(f)
- args_dict = DotMap(args_dict)
-
- for key, value in args_dict["args"].items():
- if key in list(vars(args).keys()):
- pass
- else:
- setattr(args, key, value)
-
- args.test_output_dir = f"{args.output_directory}/test/{args.exp_name}"
-
- if args.manual_output_name != None:
- args.test_output_dir = f"{args.output_directory}/test/{args.manual_output_name}"
- os.makedirs(args.test_output_dir, exist_ok=True)
-
- device = torch.device(
- "cuda" if torch.cuda.is_available() and args.use_gpu else "cpu"
- )
-
- ###################### Define Models ######################
- our_model = load_model_with_args(args)
- our_model = our_model.to(device)
- print(our_model)
- pytorch_total_params = sum(
- p.numel() for p in our_model.parameters() if p.requires_grad
- )
- print("Total number of parameters", pytorch_total_params)
- # Future work => Torchinfo would be better for this purpose.
-
- if args.best_model:
- target_model_path = f"{args.output_dir}/{args.target}.pth"
- checkpoint = torch.load(target_model_path, map_location=device)
- our_model.load_state_dict(checkpoint)
- else: # when using lastly saved model
- target_model_path = f"{args.output_dir}/{args.target}.chkpnt"
- checkpoint = torch.load(target_model_path, map_location=device)
- our_model.load_state_dict(checkpoint["state_dict"])
-
- our_model.eval()
-
- meter = pyln.Meter(44100)
-
- if args.use_musdb:
- test_tracks = musdb.DB(root=args.data_root, subsets="test", is_wav=True)
-
- for track in tqdm.tqdm(test_tracks):
- track_name = track.name
- track_audio = track.audio
-
- orig_audio = track_audio.copy()
-
- augmented_gain = None
- print("Now De-limiting : ", track_name)
-
- if args.loudnorm_input_lufs: # If you want to use loud-normalized input
- track_lufs = meter.integrated_loudness(track_audio)
- augmented_gain = args.loudnorm_input_lufs - track_lufs
- track_audio = track_audio * db2linear(augmented_gain, eps=0.0)
-
- track_audio = (
- torch.as_tensor(track_audio.T, dtype=torch.float32)
- .unsqueeze(0)
- .to(device)
- )
-
- estimates = separate_track_with_model(
- args, our_model, device, track_audio, track_name, meter, augmented_gain
- )
-
- if args.save_mixed_output:
- orig_audio = orig_audio.T
- track_lufs = meter.integrated_loudness(orig_audio.T)
- augmented_gain = args.save_output_loudnorm - track_lufs
- orig_audio = orig_audio * db2linear(augmented_gain, eps=0.0)
-
- mixed_output = orig_audio * args.save_mixed_output + estimates * (
- 1 - args.save_mixed_output
- )
-
- sf.write(
- f"{args.test_output_dir}/{track_name}/{str(args.save_mixed_output)}_mixed.wav",
- mixed_output.T,
- args.data_params.sample_rate,
- )
- else:
- test_tracks = glob.glob(f"{args.data_root}/*.wav") + glob.glob(
- f"{args.data_root}/*.mp3"
- )
-
- for track in tqdm.tqdm(test_tracks):
- track_name = os.path.basename(track).replace(".wav", "").replace(".mp3", "")
- track_audio, sr = librosa.load(
- track, sr=None, mono=False
- ) # sr should be 44100
-
- orig_audio = track_audio.copy()
-
- if sr != 44100:
- raise ValueError("Sample rate should be 44100")
- augmented_gain = None
- print("Now De-limiting : ", track_name)
-
- if args.loudnorm_input_lufs: # If you want to use loud-normalized input
- track_lufs = meter.integrated_loudness(track_audio.T)
- augmented_gain = args.loudnorm_input_lufs - track_lufs
- track_audio = track_audio * db2linear(augmented_gain, eps=0.0)
-
- track_audio = (
- torch.as_tensor(track_audio, dtype=torch.float32)
- .unsqueeze(0)
- .to(device)
- )
-
- estimates = separate_track_with_model(
- args, our_model, device, track_audio, track_name, meter, augmented_gain
- )
-
- if args.save_mixed_output:
- track_lufs = meter.integrated_loudness(orig_audio.T)
- augmented_gain = args.save_output_loudnorm - track_lufs
- orig_audio = orig_audio * db2linear(augmented_gain, eps=0.0)
-
- mixed_output = orig_audio * args.save_mixed_output + estimates * (
- 1 - args.save_mixed_output
- )
-
- sf.write(
- f"{args.test_output_dir}/{track_name}/{track_name}_mixed.wav",
- mixed_output.T,
- args.data_params.sample_rate,
- )
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py b/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py
deleted file mode 100644
index 45bb3c8cfd36d8f668e6fde756b17587eab72082..0000000000000000000000000000000000000000
--- a/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : test_sync_batchnorm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-
-import unittest
-
-import torch
-import torch.nn as nn
-from torch.autograd import Variable
-
-from sync_batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, DataParallelWithCallback
-from sync_batchnorm.unittest import TorchTestCase
-
-
-def handy_var(a, unbias=True):
- n = a.size(0)
- asum = a.sum(dim=0)
- as_sum = (a ** 2).sum(dim=0) # a square sum
- sumvar = as_sum - asum * asum / n
- if unbias:
- return sumvar / (n - 1)
- else:
- return sumvar / n
-
-
-def _find_bn(module):
- for m in module.modules():
- if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)):
- return m
-
-
-class SyncTestCase(TorchTestCase):
- def _syncParameters(self, bn1, bn2):
- bn1.reset_parameters()
- bn2.reset_parameters()
- if bn1.affine and bn2.affine:
- bn2.weight.data.copy_(bn1.weight.data)
- bn2.bias.data.copy_(bn1.bias.data)
-
- def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False):
- """Check the forward and backward for the customized batch normalization."""
- bn1.train(mode=is_train)
- bn2.train(mode=is_train)
-
- if cuda:
- input = input.cuda()
-
- self._syncParameters(_find_bn(bn1), _find_bn(bn2))
-
- input1 = Variable(input, requires_grad=True)
- output1 = bn1(input1)
- output1.sum().backward()
- input2 = Variable(input, requires_grad=True)
- output2 = bn2(input2)
- output2.sum().backward()
-
- self.assertTensorClose(input1.data, input2.data)
- self.assertTensorClose(output1.data, output2.data)
- self.assertTensorClose(input1.grad, input2.grad)
- self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean)
- self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var)
-
- def testSyncBatchNormNormalTrain(self):
- bn = nn.BatchNorm1d(10)
- sync_bn = SynchronizedBatchNorm1d(10)
-
- self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True)
-
- def testSyncBatchNormNormalEval(self):
- bn = nn.BatchNorm1d(10)
- sync_bn = SynchronizedBatchNorm1d(10)
-
- self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False)
-
- def testSyncBatchNormSyncTrain(self):
- bn = nn.BatchNorm1d(10, eps=1e-5, affine=False)
- sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-
- bn.cuda()
- sync_bn.cuda()
-
- self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True)
-
- def testSyncBatchNormSyncEval(self):
- bn = nn.BatchNorm1d(10, eps=1e-5, affine=False)
- sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-
- bn.cuda()
- sync_bn.cuda()
-
- self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True)
-
- def testSyncBatchNorm2DSyncTrain(self):
- bn = nn.BatchNorm2d(10)
- sync_bn = SynchronizedBatchNorm2d(10)
- sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-
- bn.cuda()
- sync_bn.cuda()
-
- self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True)
-
-
-if __name__ == '__main__':
- unittest.main()
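
`handy_var` in the test above recomputes the per-feature variance from a sum and a square-sum, which is the same identity the synchronized layers use when merging statistics across devices. A standalone sanity check of that identity against PyTorch's built-in estimator (the helper is duplicated here so the snippet runs on its own):

```python
import torch

def handy_var(a, unbias=True):
    n = a.size(0)
    asum = a.sum(dim=0)
    as_sum = (a ** 2).sum(dim=0)          # sum of squares
    sumvar = as_sum - asum * asum / n     # sum(x^2) - (sum(x))^2 / n
    return sumvar / (n - 1) if unbias else sumvar / n

x = torch.rand(16, 10)
assert torch.allclose(handy_var(x), x.var(dim=0, unbiased=True), atol=1e-6)
assert torch.allclose(handy_var(x, unbias=False), x.var(dim=0, unbiased=False), atol=1e-6)
```
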
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/pagerange.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/pagerange.py
deleted file mode 100644
index f009adc195a860336c783c1a58a184b10c48fa6b..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/pagerange.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""
-Representation and utils for ranges of PDF file pages.
-
-Copyright (c) 2014, Steve Witham.
-All rights reserved. This software is available under a BSD license;
-see https://github.com/py-pdf/PyPDF2/blob/main/LICENSE
-"""
-
-import re
-from typing import Any, List, Tuple, Union
-
-from .errors import ParseError
-
-_INT_RE = r"(0|-?[1-9]\d*)" # A decimal int, don't allow "-0".
-PAGE_RANGE_RE = "^({int}|({int}?(:{int}?(:{int}?)?)))$".format(int=_INT_RE)
-# groups: 12 34 5 6 7 8
-
-
-class PageRange:
- """
- A slice-like representation of a range of page indices.
-
- For example, page numbers, only starting at zero.
-
- The syntax is like what you would put between brackets [ ].
- The slice is one of the few Python types that can't be subclassed,
- but this class converts to and from slices, and allows similar use.
-
- - PageRange(str) parses a string representing a page range.
- - PageRange(slice) directly "imports" a slice.
- - to_slice() gives the equivalent slice.
- - str() and repr() allow printing.
- - indices(n) is like slice.indices(n).
-
- """
-
- def __init__(self, arg: Union[slice, "PageRange", str]) -> None:
- """
- Initialize with either a slice -- giving the equivalent page range,
- or a PageRange object -- making a copy,
- or a string like
- "int", "[int]:[int]" or "[int]:[int]:[int]",
- where the brackets indicate optional ints.
- Remember, page indices start with zero.
- Page range expression examples:
- : all pages. -1 last page.
- 22 just the 23rd page. :-1 all but the last page.
- 0:3 the first three pages. -2 second-to-last page.
- :3 the first three pages. -2: last two pages.
- 5: from the sixth page onward. -3:-1 third & second to last.
- The third, "stride" or "step" number is also recognized.
- ::2 0 2 4 ... to the end. 3:0:-1 3 2 1 but not 0.
- 1:10:2 1 3 5 7 9 2::-1 2 1 0.
- ::-1 all pages in reverse order.
- Note the difference between this notation and arguments to slice():
- slice(3) means the first three pages;
- PageRange("3") means the range of only the fourth page.
- However PageRange(slice(3)) means the first three pages.
- """
- if isinstance(arg, slice):
- self._slice = arg
- return
-
- if isinstance(arg, PageRange):
- self._slice = arg.to_slice()
- return
-
- m = isinstance(arg, str) and re.match(PAGE_RANGE_RE, arg)
- if not m:
- raise ParseError(arg)
- elif m.group(2):
- # Special case: just an int means a range of one page.
- start = int(m.group(2))
- stop = start + 1 if start != -1 else None
- self._slice = slice(start, stop)
- else:
- self._slice = slice(*[int(g) if g else None for g in m.group(4, 6, 8)])
-
- @staticmethod
- def valid(input: Any) -> bool:
- """True if input is a valid initializer for a PageRange."""
- return isinstance(input, (slice, PageRange)) or (
- isinstance(input, str) and bool(re.match(PAGE_RANGE_RE, input))
- )
-
- def to_slice(self) -> slice:
- """Return the slice equivalent of this page range."""
- return self._slice
-
- def __str__(self) -> str:
- """A string like "1:2:3"."""
- s = self._slice
- indices: Union[Tuple[int, int], Tuple[int, int, int]]
- if s.step is None:
- if s.start is not None and s.stop == s.start + 1:
- return str(s.start)
-
- indices = s.start, s.stop
- else:
- indices = s.start, s.stop, s.step
- return ":".join("" if i is None else str(i) for i in indices)
-
- def __repr__(self) -> str:
- """A string like "PageRange('1:2:3')"."""
- return "PageRange(" + repr(str(self)) + ")"
-
- def indices(self, n: int) -> Tuple[int, int, int]:
- """
- n is the length of the list of pages to choose from.
-
- Returns arguments for range(). See help(slice.indices).
- """
- return self._slice.indices(n)
-
- def __eq__(self, other: Any) -> bool:
- if not isinstance(other, PageRange):
- return False
- return self._slice == other._slice
-
- def __add__(self, other: "PageRange") -> "PageRange":
- if not isinstance(other, PageRange):
- raise TypeError(f"Can't add PageRange and {type(other)}")
- if self._slice.step is not None or other._slice.step is not None:
- raise ValueError("Can't add PageRange with stride")
- a = self._slice.start, self._slice.stop
- b = other._slice.start, other._slice.stop
-
- if a[0] > b[0]:
- a, b = b, a
-
- # Now a[0] is the smallest
- if b[0] > a[1]:
- # There is a gap between a and b.
- raise ValueError("Can't add PageRanges with gap")
- return PageRange(slice(a[0], max(a[1], b[1])))
-
-
-PAGE_RANGE_ALL = PageRange(":") # The range of all pages.
-
-
-def parse_filename_page_ranges(
- args: List[Union[str, PageRange, None]]
-) -> List[Tuple[str, PageRange]]:
- """
- Given a list of filenames and page ranges, return a list of (filename, page_range) pairs.
-
-    First arg must be a filename; other args are filenames, page-range
- expressions, slice objects, or PageRange objects.
- A filename not followed by a page range indicates all pages of the file.
- """
- pairs: List[Tuple[str, PageRange]] = []
- pdf_filename = None
- did_page_range = False
- for arg in args + [None]:
- if PageRange.valid(arg):
- if not pdf_filename:
- raise ValueError(
- "The first argument must be a filename, not a page range."
- )
-
- pairs.append((pdf_filename, PageRange(arg)))
- did_page_range = True
- else:
- # New filename or end of list--do all of the previous file?
- if pdf_filename and not did_page_range:
- pairs.append((pdf_filename, PAGE_RANGE_ALL))
-
- pdf_filename = arg
- did_page_range = False
- return pairs
-
-
-PageRangeSpec = Union[str, PageRange, Tuple[int, int], Tuple[int, int, int], List[int]]
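
Since the PageRange docstring above describes the syntax at length, a short usage sketch may help; it assumes PyPDF2 is installed so the module removed above is importable:

```python
from PyPDF2.pagerange import PageRange, parse_filename_page_ranges

pr = PageRange("1:10:2")      # pages 1, 3, 5, 7, 9 (zero-based)
print(pr.to_slice())          # slice(1, 10, 2)
print(pr.indices(6))          # clamped to a 6-page document: (1, 6, 2)
print(PageRange("3"))         # a single page round-trips back to "3"

# A filename not followed by a range gets PAGE_RANGE_ALL (":").
pairs = parse_filename_page_ranges(["a.pdf", "0:3", "b.pdf"])
print(pairs)                  # [('a.pdf', PageRange('0:3')), ('b.pdf', PageRange(':'))]
```
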
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/data_structs/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/data_structs/__init__.py
deleted file mode 100644
index 32181901655fb562616784f8dca83a482b9e76ac..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/data_structs/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""Init file."""
-
-from gpt_index.data_structs.data_structs import (
- IndexDict,
- IndexGraph,
- IndexList,
- KeywordTable,
- Node,
-)
-from gpt_index.data_structs.table import StructDatapoint
-
-__all__ = [
- "Node",
- "IndexGraph",
- "KeywordTable",
- "IndexList",
- "IndexDict",
- "StructDatapoint",
-]
diff --git a/spaces/jone/Music_Source_Separation/bytesep/models/subband_tools/__init__.py b/spaces/jone/Music_Source_Separation/bytesep/models/subband_tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/jonjhiggins/MiDaS/app.py b/spaces/jonjhiggins/MiDaS/app.py
deleted file mode 100644
index 489938862fb3180daf9c4fe59ef129a8e45a14b0..0000000000000000000000000000000000000000
--- a/spaces/jonjhiggins/MiDaS/app.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import cv2
-import torch
-import gradio as gr
-import numpy as np
-from PIL import Image
-
-torch.hub.download_url_to_file('https://images.unsplash.com/photo-1437622368342-7a3d73a34c8f', 'turtle.jpg')
-torch.hub.download_url_to_file('https://images.unsplash.com/photo-1519066629447-267fffa62d4b', 'lions.jpg')
-
-midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
-
-use_large_model = True
-
-if use_large_model:
- midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
-else:
- midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small")
-
-device = "cpu"
-midas.to(device)
-
-midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
-
-if use_large_model:
- transform = midas_transforms.default_transform
-else:
- transform = midas_transforms.small_transform
-
-
-def depth(img):
- cv_image = np.array(img)
- img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
-
- input_batch = transform(img).to(device)
- with torch.no_grad():
- prediction = midas(input_batch)
-
- prediction = torch.nn.functional.interpolate(
- prediction.unsqueeze(1),
- size=img.shape[:2],
- mode="bicubic",
- align_corners=False,
- ).squeeze()
-
- output = prediction.cpu().numpy()
- formatted = (output * 255 / np.max(output)).astype('uint8')
- img = Image.fromarray(formatted)
- return img
-
-
-inputs = gr.inputs.Image(type='pil', label="Original Image")
-outputs = gr.outputs.Image(type="pil",label="Output Image")
-
-title = "MiDaS"
-description = "Gradio demo for MiDaS v2.1 which takes in a single image for computing relative depth. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
-article = ""
-
-examples = [
- ["turtle.jpg"],
- ["lions.jpg"]
-]
-
-gr.Interface(depth, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()
\ No newline at end of file
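
The only non-obvious part of the Gradio demo above is the final conversion: MiDaS returns relative, unbounded depth values, which are rescaled to 0-255 and written out as an 8-bit grayscale image. In isolation, with a dummy array standing in for the model output:

```python
import numpy as np
from PIL import Image

prediction = np.random.rand(384, 512).astype(np.float32) * 10.0   # stand-in for the MiDaS output
formatted = (prediction * 255 / np.max(prediction)).astype("uint8")
Image.fromarray(formatted).save("depth_preview.png")               # single-channel ("L") depth map
```
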
diff --git a/spaces/jordonpeter01/ai-comic-factory/src/app/engine/render.ts b/spaces/jordonpeter01/ai-comic-factory/src/app/engine/render.ts
deleted file mode 100644
index 9510e789979f0e45cc28b98bee18aa73c812e7f5..0000000000000000000000000000000000000000
--- a/spaces/jordonpeter01/ai-comic-factory/src/app/engine/render.ts
+++ /dev/null
@@ -1,294 +0,0 @@
-"use server"
-
-import Replicate, { Prediction } from "replicate"
-
-import { RenderRequest, RenderedScene, RenderingEngine } from "@/types"
-import { generateSeed } from "@/lib/generateSeed"
-import { sleep } from "@/lib/sleep"
-
-const renderingEngine = `${process.env.RENDERING_ENGINE || ""}` as RenderingEngine
-
-const replicateToken = `${process.env.REPLICATE_API_TOKEN || ""}`
-const replicateModel = `${process.env.REPLICATE_API_MODEL || ""}`
-const replicateModelVersion = `${process.env.REPLICATE_API_MODEL_VERSION || ""}`
-
-// note: there is no / at the end in the variable
-// so we have to add it ourselves if needed
-const apiUrl = process.env.VIDEOCHAIN_API_URL
-
-export async function newRender({
- prompt,
- // negativePrompt,
- width,
- height
-}: {
- prompt: string
- // negativePrompt: string[]
- width: number
- height: number
-}) {
- // console.log(`newRender(${prompt})`)
- if (!prompt) {
- console.error(`cannot call the rendering API without a prompt, aborting..`)
- throw new Error(`cannot call the rendering API without a prompt, aborting..`)
- }
-
- let defaulResult: RenderedScene = {
- renderId: "",
- status: "error",
- assetUrl: "",
- alt: prompt || "",
- maskUrl: "",
- error: "failed to fetch the data",
- segments: []
- }
-
-
- try {
- if (renderingEngine === "REPLICATE") {
- if (!replicateToken) {
- throw new Error(`you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
- }
- if (!replicateModel) {
- throw new Error(`you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
- }
- if (!replicateModelVersion) {
- throw new Error(`you need to configure your REPLICATE_API_MODEL_VERSION in order to use the REPLICATE rendering engine`)
- }
- const replicate = new Replicate({ auth: replicateToken })
-
- // console.log("Calling replicate..")
- const seed = generateSeed()
- const prediction = await replicate.predictions.create({
- version: replicateModelVersion,
- input: { prompt, seed }
- })
-
- // console.log("prediction:", prediction)
-
- // no need to reply straight away: good things take time
- // also our friends at Replicate won't like it if we spam them with requests
- await sleep(4000)
-
- return {
- renderId: prediction.id,
- status: "pending",
- assetUrl: "",
- alt: prompt,
- error: prediction.error,
- maskUrl: "",
- segments: []
- } as RenderedScene
- } else {
- // console.log(`calling POST ${apiUrl}/render with prompt: ${prompt}`)
- const res = await fetch(`${apiUrl}/render`, {
- method: "POST",
- headers: {
- Accept: "application/json",
- "Content-Type": "application/json",
- Authorization: `Bearer ${process.env.VIDEOCHAIN_API_TOKEN}`,
- },
- body: JSON.stringify({
- prompt,
- // negativePrompt, unused for now
- nbFrames: 1,
- nbSteps: 25, // 20 = fast, 30 = better, 50 = best
- actionnables: [], // ["text block"],
- segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it automatic
- width,
- height,
-
- // no need to upscale right now as we generate tiny panels
- // maybe later we can provide an "export" button to PDF
- // unfortunately there are too many requests for upscaling,
- // the server is always down
- upscalingFactor: 1, // 2,
-
- // analyzing doesn't work yet, it seems..
- analyze: false, // analyze: true,
-
- cache: "ignore"
-        } as Partial<RenderRequest>),
- cache: 'no-store',
- // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
- // next: { revalidate: 1 }
- })
-
-
- // console.log("res:", res)
- // The return value is *not* serialized
- // You can return Date, Map, Set, etc.
-
- // Recommendation: handle errors
- if (res.status !== 200) {
- // This will activate the closest `error.js` Error Boundary
- throw new Error('Failed to fetch data')
- }
-
- const response = (await res.json()) as RenderedScene
- return response
- }
- } catch (err) {
- console.error(err)
- return defaulResult
- }
-}
-
-export async function getRender(renderId: string) {
- if (!renderId) {
- console.error(`cannot call the rendering API without a renderId, aborting..`)
- throw new Error(`cannot call the rendering API without a renderId, aborting..`)
- }
-
- let defaulResult: RenderedScene = {
- renderId: "",
- status: "pending",
- assetUrl: "",
- alt: "",
- maskUrl: "",
- error: "failed to fetch the data",
- segments: []
- }
-
- try {
- if (renderingEngine === "REPLICATE") {
- if (!replicateToken) {
- throw new Error(`you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
- }
- if (!replicateModel) {
- throw new Error(`you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
- }
-
- // const replicate = new Replicate({ auth: replicateToken })
-
- // console.log("Calling replicate..")
- // const prediction = await replicate.predictions.get(renderId)
- // console.log("Prediction:", prediction)
-
- // console.log(`calling GET https://api.replicate.com/v1/predictions/${renderId}`)
- const res = await fetch(`https://api.replicate.com/v1/predictions/${renderId}`, {
- method: "GET",
- headers: {
- // Accept: "application/json",
- // "Content-Type": "application/json",
- Authorization: `Token ${replicateToken}`,
- },
- cache: 'no-store',
- // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
- // next: { revalidate: 1 }
- })
-
- // console.log("res:", res)
- // The return value is *not* serialized
- // You can return Date, Map, Set, etc.
-
- // Recommendation: handle errors
- if (res.status !== 200) {
- // This will activate the closest `error.js` Error Boundary
- throw new Error('Failed to fetch data')
- }
-
- const response = (await res.json()) as any
- // console.log("response:", response)
-
- return {
- renderId,
- status: response?.error ? "error" : response?.status === "succeeded" ? "completed" : "pending",
- assetUrl: `${response?.output || ""}`,
- alt: `${response?.input?.prompt || ""}`,
- error: `${response?.error || ""}`,
- maskUrl: "",
- segments: []
- } as RenderedScene
- } else {
- // console.log(`calling GET ${apiUrl}/render with renderId: ${renderId}`)
- const res = await fetch(`${apiUrl}/render/${renderId}`, {
- method: "GET",
- headers: {
- Accept: "application/json",
- "Content-Type": "application/json",
- Authorization: `Bearer ${process.env.VIDEOCHAIN_API_TOKEN}`,
- },
- cache: 'no-store',
- // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
- // next: { revalidate: 1 }
- })
-
- // console.log("res:", res)
- // The return value is *not* serialized
- // You can return Date, Map, Set, etc.
-
- // Recommendation: handle errors
- if (res.status !== 200) {
- // This will activate the closest `error.js` Error Boundary
- throw new Error('Failed to fetch data')
- }
-
- const response = (await res.json()) as RenderedScene
- // console.log("response:", response)
- return response
- }
- } catch (err) {
- console.error(err)
- defaulResult.status = "error"
- defaulResult.error = `${err}`
- // Gorgon.clear(cacheKey)
- return defaulResult
- }
-
- // }, cacheDurationInSec * 1000)
-}
-
-export async function upscaleImage(image: string): Promise<{
- assetUrl: string
- error: string
-}> {
- if (!image) {
- console.error(`cannot call the rendering API without an image, aborting..`)
- throw new Error(`cannot call the rendering API without an image, aborting..`)
- }
-
- let defaulResult = {
- assetUrl: "",
- error: "failed to fetch the data",
- }
-
- try {
- // console.log(`calling GET ${apiUrl}/render with renderId: ${renderId}`)
- const res = await fetch(`${apiUrl}/upscale`, {
- method: "POST",
- headers: {
- Accept: "application/json",
- "Content-Type": "application/json",
- Authorization: `Bearer ${process.env.VIDEOCHAIN_API_TOKEN}`,
- },
- cache: 'no-store',
- body: JSON.stringify({ image, factor: 3 })
- // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
- // next: { revalidate: 1 }
- })
-
- // console.log("res:", res)
- // The return value is *not* serialized
- // You can return Date, Map, Set, etc.
-
- // Recommendation: handle errors
- if (res.status !== 200) {
- // This will activate the closest `error.js` Error Boundary
- throw new Error('Failed to fetch data')
- }
-
- const response = (await res.json()) as {
- assetUrl: string
- error: string
- }
- // console.log("response:", response)
- return response
- } catch (err) {
- console.error(err)
- // Gorgon.clear(cacheKey)
- return defaulResult
- }
-
- // }, cacheDurationInSec * 1000)
-}
diff --git a/spaces/justest/gpt4free/g4f/Provider/Providers/Theb.py b/spaces/justest/gpt4free/g4f/Provider/Providers/Theb.py
deleted file mode 100644
index aa43ebc55d74ffaa722fe008424fce97c622a323..0000000000000000000000000000000000000000
--- a/spaces/justest/gpt4free/g4f/Provider/Providers/Theb.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://theb.ai'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- path = os.path.dirname(os.path.realpath(__file__))
- config = json.dumps({
- 'messages': messages,
- 'model': model}, separators=(',', ':'))
-
- cmd = ['python3', f'{path}/helpers/theb.py', config]
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- for line in iter(p.stdout.readline, b''):
- yield line.decode('utf-8')
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/kazuk/youtube-whisper-06/app.py b/spaces/kazuk/youtube-whisper-06/app.py
deleted file mode 100644
index 4a61dc561a016c53ad93a3c556b0ef7bafa964eb..0000000000000000000000000000000000000000
--- a/spaces/kazuk/youtube-whisper-06/app.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import gradio as gr
-import whisper
-from pytube import YouTube
-
-def get_audio(url):
- yt = YouTube(url)
- return yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4")
-
-def get_transcript(url, model_size, lang, format):
-
- model = whisper.load_model(model_size)
-
- if lang == "None":
- lang = None
-
- result = model.transcribe(get_audio(url), fp16=False, language=lang)
-
- if format == "None":
- return result["text"]
- elif format == ".srt":
- return format_to_srt(result["segments"])
-
-def format_to_srt(segments):
- output = ""
- for i, segment in enumerate(segments):
- output += f"{i + 1}\n"
- output += f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
- output += f"{segment['text']}\n\n"
- return output
-
-def format_timestamp(t):
- hh = t//3600
- mm = (t - hh*3600)//60
- ss = t - hh*3600 - mm*60
- mi = (t - int(t))*1000
- return f"{int(hh):02d}:{int(mm):02d}:{int(ss):02d},{int(mi):03d}"
-
-
-langs = ["None"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))
-model_size = list(whisper._MODELS.keys())
-
-with gr.Blocks() as demo:
-
- with gr.Row():
-
- with gr.Column():
-
- with gr.Row():
- url = gr.Textbox(placeholder='Youtube video URL', label='URL')
-
- with gr.Row():
-
- model_size = gr.Dropdown(choices=model_size, value='tiny', label="Model")
- lang = gr.Dropdown(choices=langs, value="None", label="Language (Optional)")
- format = gr.Dropdown(choices=["None", ".srt"], value="None", label="Timestamps? (Optional)")
-
- with gr.Row():
- gr.Markdown("Larger models are more accurate, but slower. For 1min video, it'll take ~30s (tiny), ~1min (base), ~3min (small), ~5min (medium), etc.")
- transcribe_btn = gr.Button('Transcribe')
-
- with gr.Column():
- outputs = gr.Textbox(placeholder='Transcription of the video', label='Transcription')
-
- transcribe_btn.click(get_transcript, inputs=[url, model_size, lang, format], outputs=outputs)
-
-demo.launch(debug=True)
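
`format_timestamp` above does plain integer arithmetic on the second count, e.g. 3661.5 s -> 1 h, 1 min, 1 s and 500 ms, rendered in the HH:MM:SS,mmm form SRT expects. A standalone check (the helper is duplicated here so the snippet runs by itself):

```python
def format_timestamp(t):
    hh = t // 3600
    mm = (t - hh * 3600) // 60
    ss = t - hh * 3600 - mm * 60
    mi = (t - int(t)) * 1000
    return f"{int(hh):02d}:{int(mm):02d}:{int(ss):02d},{int(mi):03d}"

print(format_timestamp(3661.5))   # -> 01:01:01,500
print(format_timestamp(59.999))   # -> 00:00:59,998 (truncation, not rounding, of the float part)
```
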
diff --git a/spaces/kbora/minerva-generate-docker/blocks/utils/__init__.py b/spaces/kbora/minerva-generate-docker/blocks/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/config.py b/spaces/keithhon/Real-Time-Voice-Cloning/encoder/config.py
deleted file mode 100644
index 1c21312f3de971bfa008254c6035cebc09f05e4c..0000000000000000000000000000000000000000
--- a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/config.py
+++ /dev/null
@@ -1,45 +0,0 @@
-librispeech_datasets = {
- "train": {
- "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"],
- "other": ["LibriSpeech/train-other-500"]
- },
- "test": {
- "clean": ["LibriSpeech/test-clean"],
- "other": ["LibriSpeech/test-other"]
- },
- "dev": {
- "clean": ["LibriSpeech/dev-clean"],
- "other": ["LibriSpeech/dev-other"]
- },
-}
-libritts_datasets = {
- "train": {
- "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"],
- "other": ["LibriTTS/train-other-500"]
- },
- "test": {
- "clean": ["LibriTTS/test-clean"],
- "other": ["LibriTTS/test-other"]
- },
- "dev": {
- "clean": ["LibriTTS/dev-clean"],
- "other": ["LibriTTS/dev-other"]
- },
-}
-voxceleb_datasets = {
- "voxceleb1" : {
- "train": ["VoxCeleb1/wav"],
- "test": ["VoxCeleb1/test_wav"]
- },
- "voxceleb2" : {
- "train": ["VoxCeleb2/dev/aac"],
- "test": ["VoxCeleb2/test_wav"]
- }
-}
-
-other_datasets = [
- "LJSpeech-1.1",
- "VCTK-Corpus/wav48",
-]
-
-anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"]
diff --git a/spaces/keras-dreambooth/piranesi-monument-art/app.py b/spaces/keras-dreambooth/piranesi-monument-art/app.py
deleted file mode 100644
index 2a8a0d0e9581aec7f3c7f0dc80c81a279f1a54b7..0000000000000000000000000000000000000000
--- a/spaces/keras-dreambooth/piranesi-monument-art/app.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from huggingface_hub import from_pretrained_keras
-import keras_cv
-import gradio as gr
-from tensorflow import keras
-
-keras.mixed_precision.set_global_policy("mixed_float16")
-# load keras model
-resolution = 512
-dreambooth_model = keras_cv.models.StableDiffusion(
- img_width=resolution, img_height=resolution, jit_compile=True,
- )
-loaded_diffusion_model = from_pretrained_keras("keras-dreambooth/dreambooth-piranesi")
-dreambooth_model._diffusion_model = loaded_diffusion_model
-
-
-def generate_images(prompt: str, negative_prompt:str, num_imgs_to_gen: int, num_steps: int, ugs: int):
- generated_img = dreambooth_model.text_to_image(
- prompt,
- negative_prompt=negative_prompt,
- batch_size=num_imgs_to_gen,
- num_steps=num_steps,
- unconditional_guidance_scale=ugs,
- )
-
- return generated_img
-
-with gr.Blocks() as demo:
-    gr.HTML("Dreambooth Piranesi Art")
- with gr.Row():
- with gr.Column():
- prompt = gr.Textbox(lines=1, value="image of monument in sks style", label="Base Prompt")
- negative_prompt = gr.Textbox(lines=1, value="deformed", label="Negative Prompt")
- samples = gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of Image")
- num_steps = gr.Slider(label="Inference Steps",value=40)
- ugs = gr.Slider(value=15, minimum=5, maximum=25, step=1, label="Unconditional Guidance Scale")
- run = gr.Button(value="Run")
- with gr.Column():
- gallery = gr.Gallery(label="Outputs").style(grid=(1,2))
-
- run.click(generate_images, inputs=[prompt,negative_prompt, samples, num_steps, ugs], outputs=gallery)
-
- gr.Examples([["image of monument in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 30, 18],
- ["image of menhir in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 20],
- ["image of church in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 20],
- ["image of ancient ruins in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 50, 20],
- ["image of castle on hilltop in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 50, 10],
- ["image of amphiteater in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 9],
- ["image of church in lake in sks style, 8k, high quality, old paper, black and white","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 18],
- ["image of village on hilltop with citadel in sks style, 8k, high quality, old paper, black and white","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 18]],
- [prompt,negative_prompt, samples,num_steps, ugs], gallery, generate_images)
-
-demo.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/kevinwang676/Bert-VITS2/start.bat b/spaces/kevinwang676/Bert-VITS2/start.bat
deleted file mode 100644
index 418d21233dbf720b0dd09821904d9d6a31b123a2..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/Bert-VITS2/start.bat
+++ /dev/null
@@ -1,2 +0,0 @@
-set PYTHON=venv\python.exe
-start cmd /k "set PYTHON=%PYTHON%"
\ No newline at end of file
diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/__init__.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/__init__.py
deleted file mode 100644
index 030317a1d9a328d452bf29bc7a802e29629b1a42..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
-from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader
diff --git a/spaces/kevinwang676/VoiceChanger/src/facerender/sync_batchnorm/batchnorm.py b/spaces/kevinwang676/VoiceChanger/src/facerender/sync_batchnorm/batchnorm.py
deleted file mode 100644
index 5f4e763f0366dffa10320116413f8c7181a8aeb1..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChanger/src/facerender/sync_batchnorm/batchnorm.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : batchnorm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import collections
-
-import torch
-import torch.nn.functional as F
-
-from torch.nn.modules.batchnorm import _BatchNorm
-from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
-
-from .comm import SyncMaster
-
-__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
-
-
-def _sum_ft(tensor):
- """sum over the first and last dimention"""
- return tensor.sum(dim=0).sum(dim=-1)
-
-
-def _unsqueeze_ft(tensor):
- """add new dementions at the front and the tail"""
- return tensor.unsqueeze(0).unsqueeze(-1)
-
-
-_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
-_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
-
-
-class _SynchronizedBatchNorm(_BatchNorm):
- def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
- super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
-
- self._sync_master = SyncMaster(self._data_parallel_master)
-
- self._is_parallel = False
- self._parallel_id = None
- self._slave_pipe = None
-
- def forward(self, input):
- # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
- if not (self._is_parallel and self.training):
- return F.batch_norm(
- input, self.running_mean, self.running_var, self.weight, self.bias,
- self.training, self.momentum, self.eps)
-
- # Resize the input to (B, C, -1).
- input_shape = input.size()
- input = input.view(input.size(0), self.num_features, -1)
-
- # Compute the sum and square-sum.
- sum_size = input.size(0) * input.size(2)
- input_sum = _sum_ft(input)
- input_ssum = _sum_ft(input ** 2)
-
- # Reduce-and-broadcast the statistics.
- if self._parallel_id == 0:
- mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
- else:
- mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
-
- # Compute the output.
- if self.affine:
- # MJY:: Fuse the multiplication for speed.
- output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
- else:
- output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
-
- # Reshape it.
- return output.view(input_shape)
-
- def __data_parallel_replicate__(self, ctx, copy_id):
- self._is_parallel = True
- self._parallel_id = copy_id
-
- # parallel_id == 0 means master device.
- if self._parallel_id == 0:
- ctx.sync_master = self._sync_master
- else:
- self._slave_pipe = ctx.sync_master.register_slave(copy_id)
-
- def _data_parallel_master(self, intermediates):
- """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
-
- # Always using same "device order" makes the ReduceAdd operation faster.
- # Thanks to:: Tete Xiao (http://tetexiao.com/)
- intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
-
- to_reduce = [i[1][:2] for i in intermediates]
- to_reduce = [j for i in to_reduce for j in i] # flatten
- target_gpus = [i[1].sum.get_device() for i in intermediates]
-
- sum_size = sum([i[1].sum_size for i in intermediates])
- sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
- mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
-
- broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
-
- outputs = []
- for i, rec in enumerate(intermediates):
- outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
-
- return outputs
-
- def _compute_mean_std(self, sum_, ssum, size):
- """Compute the mean and standard-deviation with sum and square-sum. This method
- also maintains the moving average on the master device."""
- assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
- mean = sum_ / size
- sumvar = ssum - sum_ * mean
- unbias_var = sumvar / (size - 1)
- bias_var = sumvar / size
-
- self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
- self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
-
- return mean, bias_var.clamp(self.eps) ** -0.5
-
-
-class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
- r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
- mini-batch.
-
- .. math::
-
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
-
- This module differs from the built-in PyTorch BatchNorm1d as the mean and
- standard-deviation are reduced across all devices during training.
-
- For example, when one uses `nn.DataParallel` to wrap the network during
-    training, PyTorch's implementation normalizes the tensor on each device using
-    the statistics only on that device, which accelerates the computation and
-    is also easy to implement, but the statistics might be inaccurate.
- Instead, in this synchronized version, the statistics will be computed
- over all training samples distributed on multiple devices.
-
-    Note that, for the one-GPU or CPU-only case, this module behaves exactly the
-    same as the built-in PyTorch implementation.
-
- The mean and standard-deviation are calculated per-dimension over
- the mini-batches and gamma and beta are learnable parameter vectors
- of size C (where C is the input size).
-
- During training, this layer keeps a running estimate of its computed mean
- and variance. The running sum is kept with a default momentum of 0.1.
-
- During evaluation, this running mean/variance is used for normalization.
-
- Because the BatchNorm is done over the `C` dimension, computing statistics
- on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
-
- Args:
- num_features: num_features from an expected input of size
- `batch_size x num_features [x width]`
- eps: a value added to the denominator for numerical stability.
- Default: 1e-5
- momentum: the value used for the running_mean and running_var
- computation. Default: 0.1
- affine: a boolean value that when set to ``True``, gives the layer learnable
- affine parameters. Default: ``True``
-
- Shape:
- - Input: :math:`(N, C)` or :math:`(N, C, L)`
- - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
-
- Examples:
- >>> # With Learnable Parameters
- >>> m = SynchronizedBatchNorm1d(100)
- >>> # Without Learnable Parameters
- >>> m = SynchronizedBatchNorm1d(100, affine=False)
- >>> input = torch.autograd.Variable(torch.randn(20, 100))
- >>> output = m(input)
- """
-
- def _check_input_dim(self, input):
- if input.dim() != 2 and input.dim() != 3:
- raise ValueError('expected 2D or 3D input (got {}D input)'
- .format(input.dim()))
- super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
-
-
-class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
- r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
- of 3d inputs
-
- .. math::
-
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
-
- This module differs from the built-in PyTorch BatchNorm2d as the mean and
- standard-deviation are reduced across all devices during training.
-
- For example, when one uses `nn.DataParallel` to wrap the network during
-    training, PyTorch's implementation normalizes the tensor on each device using
-    the statistics only on that device, which accelerates the computation and
-    is also easy to implement, but the statistics might be inaccurate.
- Instead, in this synchronized version, the statistics will be computed
- over all training samples distributed on multiple devices.
-
-    Note that, for the one-GPU or CPU-only case, this module behaves exactly the
-    same as the built-in PyTorch implementation.
-
- The mean and standard-deviation are calculated per-dimension over
- the mini-batches and gamma and beta are learnable parameter vectors
- of size C (where C is the input size).
-
- During training, this layer keeps a running estimate of its computed mean
- and variance. The running sum is kept with a default momentum of 0.1.
-
- During evaluation, this running mean/variance is used for normalization.
-
- Because the BatchNorm is done over the `C` dimension, computing statistics
- on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
-
- Args:
- num_features: num_features from an expected input of
- size batch_size x num_features x height x width
- eps: a value added to the denominator for numerical stability.
- Default: 1e-5
- momentum: the value used for the running_mean and running_var
- computation. Default: 0.1
- affine: a boolean value that when set to ``True``, gives the layer learnable
- affine parameters. Default: ``True``
-
- Shape:
- - Input: :math:`(N, C, H, W)`
- - Output: :math:`(N, C, H, W)` (same shape as input)
-
- Examples:
- >>> # With Learnable Parameters
- >>> m = SynchronizedBatchNorm2d(100)
- >>> # Without Learnable Parameters
- >>> m = SynchronizedBatchNorm2d(100, affine=False)
- >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
- >>> output = m(input)
- """
-
- def _check_input_dim(self, input):
- if input.dim() != 4:
- raise ValueError('expected 4D input (got {}D input)'
- .format(input.dim()))
- super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
-
-
-class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
- r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
- of 4d inputs
-
- .. math::
-
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
-
- This module differs from the built-in PyTorch BatchNorm3d as the mean and
- standard-deviation are reduced across all devices during training.
-
- For example, when one uses `nn.DataParallel` to wrap the network during
-    training, PyTorch's implementation normalizes the tensor on each device using
-    the statistics only on that device, which accelerates the computation and
-    is also easy to implement, but the statistics might be inaccurate.
- Instead, in this synchronized version, the statistics will be computed
- over all training samples distributed on multiple devices.
-
-    Note that, for the one-GPU or CPU-only case, this module behaves exactly the
-    same as the built-in PyTorch implementation.
-
- The mean and standard-deviation are calculated per-dimension over
- the mini-batches and gamma and beta are learnable parameter vectors
- of size C (where C is the input size).
-
- During training, this layer keeps a running estimate of its computed mean
- and variance. The running sum is kept with a default momentum of 0.1.
-
- During evaluation, this running mean/variance is used for normalization.
-
- Because the BatchNorm is done over the `C` dimension, computing statistics
- on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
- or Spatio-temporal BatchNorm
-
- Args:
- num_features: num_features from an expected input of
- size batch_size x num_features x depth x height x width
- eps: a value added to the denominator for numerical stability.
- Default: 1e-5
- momentum: the value used for the running_mean and running_var
- computation. Default: 0.1
- affine: a boolean value that when set to ``True``, gives the layer learnable
- affine parameters. Default: ``True``
-
- Shape:
- - Input: :math:`(N, C, D, H, W)`
- - Output: :math:`(N, C, D, H, W)` (same shape as input)
-
- Examples:
- >>> # With Learnable Parameters
- >>> m = SynchronizedBatchNorm3d(100)
- >>> # Without Learnable Parameters
- >>> m = SynchronizedBatchNorm3d(100, affine=False)
- >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
- >>> output = m(input)
- """
-
- def _check_input_dim(self, input):
- if input.dim() != 5:
- raise ValueError('expected 5D input (got {}D input)'
- .format(input.dim()))
- super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
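
The only data that crosses devices in the module above is each replica's sum and square-sum; the master then recovers the mean and the unbiased variance in `_compute_mean_std` with var = (sum(x^2) - (sum(x))^2 / n) / (n - 1). A small single-device sketch of that recovery, checked against PyTorch's own estimators:

```python
import torch

x = torch.randn(4, 8, 32)                  # pretend batch gathered from all replicas: (N, C, spatial)
flat = x.transpose(0, 1).reshape(8, -1)    # per-channel view: (C, N * spatial)
size = flat.size(1)

sum_ = flat.sum(dim=1)                     # sum(x), what each device contributes
ssum = (flat ** 2).sum(dim=1)              # sum(x^2)

mean = sum_ / size
sumvar = ssum - sum_ * mean                # sum(x^2) - (sum(x))^2 / n
unbias_var = sumvar / (size - 1)
inv_std = (sumvar / size).clamp(1e-5) ** -0.5   # biased variance plus eps clamp, as in the module

assert torch.allclose(mean, flat.mean(dim=1), atol=1e-5)
assert torch.allclose(unbias_var, flat.var(dim=1, unbiased=True), atol=1e-4)
```
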
diff --git a/spaces/khaclinh/self-driving-anonymization/pp4av_exp.py b/spaces/khaclinh/self-driving-anonymization/pp4av_exp.py
deleted file mode 100644
index 1c245ed8f45319a69e675305e7e79984c7cff702..0000000000000000000000000000000000000000
--- a/spaces/khaclinh/self-driving-anonymization/pp4av_exp.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import os
-
-from yolox.exp import Exp as MyExp
-
-class Exp(MyExp):
- def __init__(self):
- super(Exp, self).__init__()
-        self.depth = 1.0  # depth multiplier (indicates the YOLO model size)
-        self.width = 1.0  # width multiplier
- self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
-
- self.data_dir = ''
- self.train_ann = ''
- self.val_ann = ''
- self.test_ann = ''
-
- self.num_classes = 2
- self.data_num_workers = 32 # number of cpu for splitting batch
-
- self.input_size = (800, 800)
- self.print_interval = 100
- self.eval_interval = 1
- self.test_size = (800, 800)
- self.enable_mixup = True
- self.mosaic_scale = (0.5, 1.5)
- self.max_epoch = 300
- self.hsv_prob = 1.0
-
- self.degrees = 20.0
- self.translate = 0.2
- self.shear = 2.0
-        # Mosaic augmentation probability (1.0 = always applied)
-        self.mosaic_prob = 1.0
-        # Mixup augmentation probability (1.0 = always applied)
-        self.mixup_prob = 1.0
-        # Replace SGD with Adam
-
-
- self.basic_lr_per_img = 0.01 / 28.0
- self.no_aug_epochs = 15
- self.min_lr_ratio = 0.05
- self.ema = True
-
- self.nmsthre = 0.3
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/image.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/image.py
deleted file mode 100644
index 61a56c75b67f593c298408462c63c0468be8e276..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/image.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import cv2
-import numpy as np
-
-from annotator.uniformer.mmcv.image import imread, imwrite
-from .color import color_val
-
-
-def imshow(img, win_name='', wait_time=0):
- """Show an image.
-
- Args:
- img (str or ndarray): The image to be displayed.
- win_name (str): The window name.
- wait_time (int): Value of waitKey param.
- """
- cv2.imshow(win_name, imread(img))
- if wait_time == 0: # prevent from hanging if windows was closed
- while True:
- ret = cv2.waitKey(1)
-
- closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
- # if user closed window or if some key pressed
- if closed or ret != -1:
- break
- else:
- ret = cv2.waitKey(wait_time)
-
-
-def imshow_bboxes(img,
- bboxes,
- colors='green',
- top_k=-1,
- thickness=1,
- show=True,
- win_name='',
- wait_time=0,
- out_file=None):
- """Draw bboxes on an image.
-
- Args:
- img (str or ndarray): The image to be displayed.
- bboxes (list or ndarray): A list of ndarray of shape (k, 4).
- colors (list[str or tuple or Color]): A list of colors.
- top_k (int): Plot the first k bboxes only if set positive.
- thickness (int): Thickness of lines.
- show (bool): Whether to show the image.
- win_name (str): The window name.
- wait_time (int): Value of waitKey param.
- out_file (str, optional): The filename to write the image.
-
- Returns:
- ndarray: The image with bboxes drawn on it.
- """
- img = imread(img)
- img = np.ascontiguousarray(img)
-
- if isinstance(bboxes, np.ndarray):
- bboxes = [bboxes]
- if not isinstance(colors, list):
- colors = [colors for _ in range(len(bboxes))]
- colors = [color_val(c) for c in colors]
- assert len(bboxes) == len(colors)
-
- for i, _bboxes in enumerate(bboxes):
- _bboxes = _bboxes.astype(np.int32)
- if top_k <= 0:
- _top_k = _bboxes.shape[0]
- else:
- _top_k = min(top_k, _bboxes.shape[0])
- for j in range(_top_k):
- left_top = (_bboxes[j, 0], _bboxes[j, 1])
- right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
- cv2.rectangle(
- img, left_top, right_bottom, colors[i], thickness=thickness)
-
- if show:
- imshow(img, win_name, wait_time)
- if out_file is not None:
- imwrite(img, out_file)
- return img
-
-
-def imshow_det_bboxes(img,
- bboxes,
- labels,
- class_names=None,
- score_thr=0,
- bbox_color='green',
- text_color='green',
- thickness=1,
- font_scale=0.5,
- show=True,
- win_name='',
- wait_time=0,
- out_file=None):
- """Draw bboxes and class labels (with scores) on an image.
-
- Args:
- img (str or ndarray): The image to be displayed.
- bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
- (n, 5).
- labels (ndarray): Labels of bboxes.
- class_names (list[str]): Names of each classes.
- score_thr (float): Minimum score of bboxes to be shown.
- bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
- text_color (str or tuple or :obj:`Color`): Color of texts.
- thickness (int): Thickness of lines.
- font_scale (float): Font scales of texts.
- show (bool): Whether to show the image.
- win_name (str): The window name.
- wait_time (int): Value of waitKey param.
- out_file (str or None): The filename to write the image.
-
- Returns:
- ndarray: The image with bboxes drawn on it.
- """
- assert bboxes.ndim == 2
- assert labels.ndim == 1
- assert bboxes.shape[0] == labels.shape[0]
- assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
- img = imread(img)
- img = np.ascontiguousarray(img)
-
- if score_thr > 0:
- assert bboxes.shape[1] == 5
- scores = bboxes[:, -1]
- inds = scores > score_thr
- bboxes = bboxes[inds, :]
- labels = labels[inds]
-
- bbox_color = color_val(bbox_color)
- text_color = color_val(text_color)
-
- for bbox, label in zip(bboxes, labels):
- bbox_int = bbox.astype(np.int32)
- left_top = (bbox_int[0], bbox_int[1])
- right_bottom = (bbox_int[2], bbox_int[3])
- cv2.rectangle(
- img, left_top, right_bottom, bbox_color, thickness=thickness)
- label_text = class_names[
- label] if class_names is not None else f'cls {label}'
- if len(bbox) > 4:
- label_text += f'|{bbox[-1]:.02f}'
- cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
- cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
-
- if show:
- imshow(img, win_name, wait_time)
- if out_file is not None:
- imwrite(img, out_file)
- return img
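
A minimal, headless usage sketch of `imshow_det_bboxes`; it assumes the upstream mmcv 1.x package, which ships the same function with the same signature as this vendored copy:

```python
import numpy as np
from mmcv.visualization import imshow_det_bboxes  # same API as the vendored module above

img = np.zeros((240, 320, 3), dtype=np.uint8)
bboxes = np.array([[30, 40, 120, 160, 0.92],
                   [150, 60, 300, 200, 0.35]], dtype=np.float32)  # (n, 5): x1, y1, x2, y2, score
labels = np.array([0, 1])

drawn = imshow_det_bboxes(
    img, bboxes, labels,
    class_names=["face", "plate"],
    score_thr=0.3,            # boxes scoring below 0.3 would be dropped
    show=False,               # skip the cv2 window
    out_file="preview.jpg",   # written via imwrite instead
)
print(drawn.shape)            # (240, 320, 3)
```
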
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/__init__.py
deleted file mode 100644
index a3e6208634fafa416b9323f5156ac56dd7bb3700..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .semver_match import (
- ThemeAsset,
- get_matching_version,
- get_theme_assets,
-)
-
-__all__ = [
- "ThemeAsset",
- "get_theme_assets",
- "get_matching_version",
-]
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/fonts.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/fonts.py
deleted file mode 100644
index d51dbbfdf4990358e9094cc887c47ae6cd8b0440..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/fonts.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import Iterable
-
-
-class FontEncoder(json.JSONEncoder):
- def default(self, obj):
- if isinstance(obj, Font):
- return {
- "__gradio_font__": True,
- "name": obj.name,
- "class": "google" if isinstance(obj, GoogleFont) else "font",
- }
- # Let the base class default method raise the TypeError
- return json.JSONEncoder.default(self, obj)
-
-
-def as_font(dct):
- if "__gradio_font__" in dct:
- name = dct["name"]
- return GoogleFont(name) if dct["class"] == "google" else Font(name)
- return dct
-
-
-class Font:
- def __init__(self, name: str):
- self.name = name
-
- def __str__(self) -> str:
- return (
- self.name
- if self.name in ["sans-serif", "serif", "monospace", "cursive", "fantasy"]
- else f"'{self.name}'"
- )
-
- def stylesheet(self) -> str:
- return None
-
- def __eq__(self, other: Font) -> bool:
- return self.name == other.name and self.stylesheet() == other.stylesheet()
-
-
-class GoogleFont(Font):
- def __init__(self, name: str, weights: Iterable[int] = (400, 600)):
- self.name = name
- self.weights = weights
-
- def stylesheet(self) -> str:
- return f'https://fonts.googleapis.com/css2?family={self.name.replace(" ", "+")}:wght@{";".join(str(weight) for weight in self.weights)}&display=swap'
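
`FontEncoder` and `as_font` above exist so themes can round-trip fonts through JSON. A short sketch, assuming the gradio package that ships this module is installed:

```python
import json
from gradio.themes.utils.fonts import Font, FontEncoder, GoogleFont, as_font

fonts = [GoogleFont("Source Sans Pro"), Font("monospace")]
payload = json.dumps(fonts, cls=FontEncoder)
print(payload)  # [{"__gradio_font__": true, "name": "Source Sans Pro", "class": "google"}, ...]

restored = json.loads(payload, object_hook=as_font)
print(restored[0] == GoogleFont("Source Sans Pro"))  # True: __eq__ compares name and stylesheet
print(restored[0].stylesheet())
# https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap
```
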
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py
deleted file mode 100644
index f8c40889bce7ec9b9645011b5e2ee8db37464b6a..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from . import _base
-from ._axes import *
-
-# Backcompat.
-from ._axes import Axes as Subplot
-
-
-class _SubplotBaseMeta(type):
- def __instancecheck__(self, obj):
- return (isinstance(obj, _base._AxesBase)
- and obj.get_subplotspec() is not None)
-
-
-class SubplotBase(metaclass=_SubplotBaseMeta):
- pass
-
-
-def subplot_class_factory(cls): return cls
diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/benchmark.py b/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/benchmark.py
deleted file mode 100644
index 46475a088b0eca137f641935d58dbf4b8d50ed29..0000000000000000000000000000000000000000
--- a/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/benchmark.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
-This module implements a benchmark function to evaluate the performance of the embedding pipeline. It expects a configuration JSON file. It must have questions and expected retrieved text.
-For each question, it's essential to have variants of that question. Language is fluid and each person might have their own spin on how they may ask it.
-
-At the end, it will save the results inside a benchmark_{sysdate}.txt file in the main directory.
-
-The benchmark function will return the score as an integer.
-"""
-import datetime
-import json
-import os
-
-from pathlib import Path
-
-from .data_processor import process_and_add_to_collector, preprocess_text
-from .parameters import get_chunk_count, get_max_token_count
-from .utils import create_metadata_source
-
-def benchmark(config_path, collector):
- # Get the current system date
- sysdate = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
- filename = f"benchmark_{sysdate}.txt"
-
- # Open the log file in append mode
- with open(filename, 'a') as log:
- with open(config_path, 'r') as f:
- data = json.load(f)
-
- total_points = 0
- max_points = 0
-
- for item in data:
- filepath = item["text"]
- corpus = ""
-
- # Check if the file exists
- if os.path.isfile(Path(filepath)):
- # Open the file and read its content
- with open(Path(filepath), 'r') as file:
- corpus = file.read()
- process_and_add_to_collector(corpus, collector, True, create_metadata_source('benchmark'))
- else:
-                raise FileNotFoundError(f'Cannot find specified file {filepath}.')
-
- for question_group in item["questions"]:
- question_variants = question_group["question_variants"]
- criteria = question_group["criteria"]
-
- for q in question_variants:
- max_points += len(criteria)
- processed_text = preprocess_text(q)
-
- # Get the most similar chunks
- results = collector.get_sorted_by_dist(processed_text, n_results=get_chunk_count(), max_token_count=get_max_token_count())
-
- points = 0
-
- for c in criteria:
- for p in results:
- if c in p:
- points += 1
- total_points += 1
- break
-
- info = f"The question '{q}' scored {points}/{len(criteria)} points."
- print(info, file=log)
-
- print('\n---\n', file=log)
-
- print(f'##Total points:\n\n{total_points}/{max_points}', file=log)
-
- return total_points, max_points
\ No newline at end of file
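
The docstring above only loosely describes the expected configuration; reading the loop, each entry needs a `text` file to embed plus grouped `question_variants` and the `criteria` substrings that must appear in the retrieved chunks. A hypothetical config, with the field names taken from the code and everything else invented for illustration:

```python
import json

config = [
    {
        "text": "docs/superbooga_overview.txt",  # hypothetical source file to embed
        "questions": [
            {
                "question_variants": [
                    "How is the source text chunked before embedding?",
                    "What splits a document into chunks?",
                ],
                "criteria": ["chunk", "token"],   # substrings expected in the retrieved chunks
            }
        ],
    }
]

with open("benchmark_config.json", "w") as f:
    json.dump(config, f, indent=2)

# then: total, maximum = benchmark("benchmark_config.json", collector)
```
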
diff --git a/spaces/leopoldmaillard/ImageRetrieval/README.md b/spaces/leopoldmaillard/ImageRetrieval/README.md
deleted file mode 100644
index 31be0ff5aabc5e21cdc576947e403e27f9d09d39..0000000000000000000000000000000000000000
--- a/spaces/leopoldmaillard/ImageRetrieval/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Image_Retrieval
-emoji: 🐠
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/lewiswu1209/MockingBird/vocoder/fregan/loss.py b/spaces/lewiswu1209/MockingBird/vocoder/fregan/loss.py
deleted file mode 100644
index e37dc64e29446ecdd9dce03290f4e0eba58fb3d7..0000000000000000000000000000000000000000
--- a/spaces/lewiswu1209/MockingBird/vocoder/fregan/loss.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import torch
-
-
-def feature_loss(fmap_r, fmap_g):
- loss = 0
- for dr, dg in zip(fmap_r, fmap_g):
- for rl, gl in zip(dr, dg):
- loss += torch.mean(torch.abs(rl - gl))
-
- return loss*2
-
-
-def discriminator_loss(disc_real_outputs, disc_generated_outputs):
- loss = 0
- r_losses = []
- g_losses = []
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
- r_loss = torch.mean((1-dr)**2)
- g_loss = torch.mean(dg**2)
- loss += (r_loss + g_loss)
- r_losses.append(r_loss.item())
- g_losses.append(g_loss.item())
-
- return loss, r_losses, g_losses
-
-
-def generator_loss(disc_outputs):
- loss = 0
- gen_losses = []
- for dg in disc_outputs:
- l = torch.mean((1-dg)**2)
- gen_losses.append(l)
- loss += l
-
- return loss, gen_losses
\ No newline at end of file
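
These are the usual least-squares GAN terms: the discriminator is trained toward 1 on real and 0 on generated outputs, the generator toward 1, and `feature_loss` is a doubled L1 feature-matching term. A quick numeric check with dummy tensors (assuming the module above is importable as `vocoder.fregan.loss`):

```python
import torch
from vocoder.fregan.loss import discriminator_loss, feature_loss, generator_loss

real = [torch.ones(4, 1)]    # discriminator output on real audio
fake = [torch.zeros(4, 1)]   # discriminator output on generated audio

d_loss, r_losses, g_losses = discriminator_loss(real, fake)
print(d_loss.item())         # 0.0 -- a "perfect" discriminator pays no penalty

g_loss, gen_losses = generator_loss(fake)
print(g_loss.item())         # 1.0 -- the generator is penalised until its outputs reach 1

print(feature_loss([[torch.ones(2, 3)]], [[torch.zeros(2, 3)]]).item())  # 2.0 = 2 * mean(|1 - 0|)
```
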
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2012 Indir 64 Bit Gezginler Linkliste Grafikbesc.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2012 Indir 64 Bit Gezginler Linkliste Grafikbesc.md
deleted file mode 100644
index daad619f98fc017851a10427dc09f559192319ee..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2012 Indir 64 Bit Gezginler Linkliste Grafikbesc.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Autocad 2012 Indir 64 Bit Gezginler linkliste grafikbesc
-
-I can provide you with download links for AutoCAD 2012 but I have to mention that AutoCAD ... AutoCAD 2012 32-bits · AutoCAD 2012 64-bits. 4d29de3e1b
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/BusyWin-14-Rel-2-0-With-Patch.md b/spaces/lincquiQcaudo/Top-20-Diffusion/BusyWin-14-Rel-2-0-With-Patch.md
deleted file mode 100644
index f801cbc3d5628490430d3f94578b7416bd16fe02..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/BusyWin-14-Rel-2-0-With-Patch.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-When using PaperCut NG/MF versi on 21.1.2 and the hidden document names feature in the print provider is enabled, print jobs submitted for release may hang... When using PaperCut NG/MF versi on 21.1.2 or higher and the hidden document names feature in the print provider is enabled, print jobs submitted for release may hang.
-To resolve this issue, click Cancel to cancel the job, and then try printing elsewhere.
-(See also the printer's User's Guide for more information about resolving this issue.)
-Elimination of possible causes
-1. Check connected printers. 8a78ff9644
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Captaintsubasa3hackdownload.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Captaintsubasa3hackdownload.md
deleted file mode 100644
index 02c5e76d7f674ada4172a2a3fa3e5dc09c01dc35..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Captaintsubasa3hackdownload.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
How to Download Captain Tsubasa 3 Hack for SNES
-
Captain Tsubasa 3 is a soccer game based on the popular manga and anime series of the same name. The game features many characters and teams from the original story, as well as some new ones. The game is known for its fast-paced and exciting gameplay, as well as its special moves and animations.
-
However, some fans of the game have created a hack version of Captain Tsubasa 3 that modifies some aspects of the original game, such as the difficulty level, the stats of the players, the graphics, and the music. The hack version also adds some new features, such as a save function, a practice mode, and a custom team editor.
If you want to try out this hack version of Captain Tsubasa 3, you will need a few things:
-
-
A SNES emulator that can run ROM files. There are many emulators available online for different platforms, such as Windows, Mac, Android, and iOS. Some popular ones are ZSNES, Snes9x, and RetroArch.
-
A ROM file of Captain Tsubasa 3. You can find this file online by searching for "captain tsubasa 3 rom". Make sure you download it from a reliable source and scan it for viruses before opening it.
-
A patch file of Captain Tsubasa 3 Hack. You can find this file online by searching for "captain tsubasa 3 hack download". One source is [^1^], where you can download the file named "Captain Tsubasa 3 SNES First Hack by tommy 2017.smc". Another source is [^2^], where you can download the file named "captaintsubasa3hackdownload.pdf".
-
A patching program that can apply the patch file to the ROM file. There are many patching programs available online, such as Lunar IPS, Flips, and MultiPatch.
-
-
Once you have all these things, you can follow these steps to download and play Captain Tsubasa 3 Hack:
-
-
Open your patching program and select the patch file and the ROM file. Make sure they have the same name and extension (for example, "Captain Tsubasa 3.smc" and "Captain Tsubasa 3.smc").
-
Click on "Apply Patch" or "Patch" or whatever option your program has. This will create a new ROM file with the hack applied to it.
-
Open your emulator and load the new ROM file. You should see a title screen that says "Captain Tsubasa 3 Hack" or something similar.
-
Enjoy playing Captain Tsubasa 3 Hack!
-
-
Note: This article is for informational purposes only. We do not condone or encourage piracy or illegal downloading of any kind. Please support the original creators of Captain Tsubasa 3 by buying their game legally.
If you want to learn more about Captain Tsubasa 3 Hack, you can check out some online videos and reviews that showcase the game and its features. For example, you can watch a video in which the user Tommy2 demonstrates gameplay and special moves of the hack version, or read a short write-up in which the user Captaintsubasa3hackdownload gives a brief overview of the game and its download link.
-
-
Captain Tsubasa 3 Hack is a fun and challenging game for fans of Captain Tsubasa and soccer games in general. It offers a new and improved experience of the original game, with more options and customization. If you are looking for a way to spice up your Captain Tsubasa 3 gameplay, you might want to give this hack a try.
-
-
\ No newline at end of file
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Kanchana 3 PATCHED Full Movie In Tamil Hd 1080p.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Kanchana 3 PATCHED Full Movie In Tamil Hd 1080p.md
deleted file mode 100644
index d1860602ce39dee8e6ea552e32dd16dc7a765bf7..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Kanchana 3 PATCHED Full Movie In Tamil Hd 1080p.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
Kanchana 3: A Spooky and Funny Ride with Raghava Lawrence and Co.
-
-
If you are a fan of Tamil horror comedy movies, you must have heard of Kanchana 3, the fourth installment in the Muni series. Kanchana 3 is a 2019 movie that was co-produced, written and directed by Raghava Lawrence, who also played the dual role of Raghava and Kaali. The movie also starred Oviya, Vedhika, Nikki Tamboli and Ri Djavi Alexandra as the female leads, while Kovai Sarala, Soori, Tarun Arora and Kabir Duhan Singh played the supporting roles.
-
-
Kanchana 3 follows the story of Raghava, a young man who gets easily scared by ghosts and spirits. He lives with his mother (Kovai Sarala), brother (Sriman) and sister-in-law (Devadarshini). One day, he goes to his ancestral home with his family and his girlfriend Priya (Oviya). There, he meets his grandfather (Delhi Ganesh), who tells him about his past as Kaali, a powerful leader who fought against a corrupt politician (Tarun Arora) and his henchman Bhavani (Kabir Duhan Singh). Kaali was killed by Bhavani along with his wife (Vedhika) and his lover (Nikki Tamboli). However, their spirits remained in the house, waiting for revenge. Raghava gets possessed by Kaali's spirit and decides to take on Bhavani and his men.
Kanchana 3 is a typical horror comedy movie that has all the elements of the genre: jump scares, comedy scenes, songs, dances, fights and sentiments. The movie is full of entertainment and fun for audiences who enjoy this kind of movie. It also carries a social message about women's empowerment and corruption, and it was well received by fans and critics alike, becoming a huge commercial success at the box office.
-
-
How to Watch Kanchana 3 Full Movie in Tamil HD 1080p
-
-
If you want to watch Kanchana 3 full movie in Tamil HD 1080p, you have several options to choose from. You can either watch it online or download it to your device. Here are some of the ways you can watch Kanchana 3 full movie in Tamil HD 1080p:
-
-
-
Watch it online on streaming platforms: You can watch Kanchana 3 full movie online on various streaming platforms such as Zee5, Sun Nxt and VI movies and tv. You just need to have a subscription or a membership to access these platforms. You can also watch it on Google Play Movies if you want to rent or buy it.
-
Download it from torrent sites: You can also download Kanchana 3 full movie from torrent sites such as Tamilyogi HD, TamilGun and TamilRockers. However, this is not a legal or safe option as you may face legal issues or malware threats. You should avoid downloading movies from torrent sites as much as possible.
-
Watch it on TV channels: You can also watch Kanchana 3 full movie on TV channels such as Sun TV, Zee Tamil and Star Vijay. You just need to check the schedule of these channels and tune in at the right time. You can also record the movie if you have a DVR or a set-top box.
-
-
-
These are some of the ways you can watch Kanchana 3 full movie in Tamil HD 1080p. However, you should always respect the rights of the creators and producers of the movie and watch it legally and ethically.
-
How to Enjoy Kanchana 3 Full Movie in Tamil HD 1080p
-
-
Kanchana 3 is a movie that can be enjoyed by anyone who loves horror comedy movies. The movie has a lot of elements that can make you laugh, scream and cheer. Here are some tips on how to enjoy Kanchana 3 full movie in Tamil HD 1080p:
-
-
-
Watch it with your friends or family: Kanchana 3 is a movie that is best enjoyed with your loved ones. You can share the fun and excitement of the movie with them and have a great time together. You can also discuss the movie after watching it and share your opinions and feedback.
-
Watch it with good sound and picture quality: Kanchana 3 is a movie that has a lot of visual and audio effects that can enhance your viewing experience. You should watch it with good sound and picture quality to appreciate the movie better. You can use headphones, speakers, or a home theater system to get the best sound quality. You can also use a big screen, a projector, or a smart TV to get the best picture quality.
-
Watch it with an open mind: Kanchana 3 is a movie that has a lot of twists and turns that can surprise you. You should watch it with an open mind and not expect anything from the movie. You should also not take the movie too seriously or too literally. You should just enjoy the movie for what it is: a horror comedy entertainer.
-
-
-
These are some of the ways you can enjoy Kanchana 3 full movie in Tamil HD 1080p. You can also watch it again if you liked it or recommend it to others if you loved it.
-
How to Review Kanchana 3 Full Movie in Tamil HD 1080p
-
-
If you have watched Kanchana 3 full movie in Tamil HD 1080p and you want to share your opinion and feedback about it, you can write a review of the movie. A review is a personal and critical evaluation of a movie that can help other people decide whether to watch it or not. Here are some tips on how to write a review of Kanchana 3 full movie in Tamil HD 1080p:
-
-
-
Introduce the movie: You should start your review by introducing the movie, its title, genre, director, cast and plot summary. You should also mention when and where you watched the movie and what your expectations were.
-
Analyze the movie: You should then analyze the movie, its strengths and weaknesses, its themes and messages, its technical aspects and its entertainment value. You should support your analysis with examples and evidence from the movie.
-
Evaluate the movie: You should then evaluate the movie, its overall quality, its impact and its relevance. You should also compare it with other movies of the same genre or series. You should give your personal rating or recommendation of the movie.
-
Conclude the review: You should end your review by summarizing your main points and giving your final verdict of the movie. You should also invite your readers to share their comments or questions about the movie.
-
-
-
These are some of the ways you can write a review of Kanchana 3 full movie in Tamil HD 1080p. You can also read other reviews of the movie online or offline to get some inspiration and ideas.
-
-
How to Enjoy More Movies Like Kanchana 3 Full Movie in Tamil HD 1080p
-
-
If you enjoyed watching Kanchana 3 full movie in Tamil HD 1080p and you want to watch more movies like it, you have many options to choose from. You can either watch the previous movies of the Muni series or watch other horror comedy movies from Tamil cinema or other industries. Here are some of the movies you can watch if you liked Kanchana 3 full movie in Tamil HD 1080p:
-
-
-
Muni (2007): The first movie of the Muni series that introduced Raghava Lawrence as Raghava, a young man who gets possessed by a ghost named Muni.
-
Kanchana (2011): The second movie of the Muni series that featured Raghava Lawrence as Raghava and Kaali, a transgender woman who seeks revenge for her murder.
-
Kanchana 2 (2015): The third movie of the Muni series that starred Raghava Lawrence as Raghava and Shiva, a TV cameraman who gets haunted by a ghost named Naga.
-
Dhilluku Dhuddu (2016): A horror comedy movie that starred Santhanam as Kumar, a happy-go-lucky man who falls in love with a girl whose father is a ghost hunter.
-
Devi (2016): A horror comedy movie that starred Prabhu Deva as Krishna Kumar, a man who gets married to a woman who is possessed by an actress named Ruby.
-
Zombie (2019): A horror comedy movie that starred Yogi Babu as Mario, a security guard who gets trapped in a resort with his friends during a zombie outbreak.
-
-
-
These are some of the movies you can watch if you enjoyed Kanchana 3 full movie in Tamil HD 1080p. You can also explore other movies of different genres and languages that can entertain you and make you laugh.
-
How to Learn More About Kanchana 3 Full Movie in Tamil HD 1080p
-
-
If you want to learn more about Kanchana 3 full movie in Tamil HD 1080p, you can do some research online or offline. You can find more information about the movie, its cast and crew, its production and release, its trivia and facts, its awards and nominations, its reviews and ratings, and its fan reactions and feedback. Here are some of the sources you can use to learn more about Kanchana 3 full movie in Tamil HD 1080p:
-
-
-
Wikipedia: You can read the Wikipedia page of Kanchana 3 full movie in Tamil HD 1080p to get a comprehensive overview of the movie, its plot, its cast and crew, its box office performance, its reception and its legacy.
-
IMDb: You can visit the IMDb page of Kanchana 3 full movie in Tamil HD 1080p to get more details about the movie, such as its genre, its runtime, its release date, its language, its country of origin, its budget and gross, its soundtrack and score, its technical specifications and its trivia and goofs.
-
YouTube: You can watch the YouTube videos of Kanchana 3 full movie in Tamil HD 1080p to see the trailer, the songs, the scenes, the interviews, the behind-the-scenes footage and the fan-made videos of the movie.
-
Social media: You can follow the social media accounts of Kanchana 3 full movie in Tamil HD 1080p to get the latest updates, news, photos and videos of the movie. You can also interact with other fans and share your opinions and feedback about the movie.
-
Books and magazines: You can read the books and magazines that feature Kanchana 3 full movie in Tamil HD 1080p to get more insights and perspectives about the movie. You can also learn about the history and culture of Tamil cinema and horror comedy genre.
-
-
-
These are some of the ways you can learn more about Kanchana 3 full movie in Tamil HD 1080p. You can also explore other sources that can enrich your knowledge and understanding of the movie.
-
-
Conclusion
-
-
Kanchana 3 is a horror comedy movie that was released in 2019. It was co-produced, written and directed by Raghava Lawrence, who also played the dual role of Raghava and Kaali. The movie also starred Oviya, Vedhika, Nikki Tamboli and Ri Djavi Alexandra as the female leads, while Kovai Sarala, Soori, Tarun Arora and Kabir Duhan Singh played the supporting roles.
-
-
Kanchana 3 follows the story of Raghava, a young man who gets easily scared by ghosts and spirits. He goes to his ancestral home with his family and his girlfriend Priya. There, he meets his grandfather, who tells him about his past as Kaali, a powerful leader who fought against a corrupt politician and his henchman Bhavani. Kaali was killed by Bhavani along with his wife and his lover. However, their spirits remained in the house, waiting for revenge. Raghava gets possessed by Kaali's spirit and decides to take on Bhavani and his men.
-
-
Kanchana 3 is a typical horror comedy movie that has all the elements of the genre: jump scares, comedy scenes, songs, dances, fights and sentiments. The movie is full of entertainment and fun for audiences who enjoy this kind of movie. It also carries a social message about women's empowerment and corruption, and it was well received by fans and critics alike, becoming a huge commercial success at the box office.
-
-
If you want to watch Kanchana 3 full movie in Tamil HD 1080p, you have several options to choose from. You can either watch it online or download it to your device. You can also watch it on TV channels if you prefer. However, you should always respect the rights of the creators and producers of the movie and watch it legally and ethically.
-
-
If you enjoyed watching Kanchana 3 full movie in Tamil HD 1080p and you want to watch more movies like it or learn more about it, you have many options to choose from. You can either watch the previous movies of the Muni series or watch other horror comedy movies from Tamil cinema or other industries. You can also do some research online or offline to find more information about the movie.
-
-
Kanchana 3 is a movie that can be enjoyed by anyone who loves horror comedy movies. It is a spooky and funny ride with Raghava Lawrence and co. Don't miss it!
-
-
\ No newline at end of file
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 7 Version BETTER Free Download 32 Bit Full Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 7 Version BETTER Free Download 32 Bit Full Crack.md
deleted file mode 100644
index d6235e3850e83f79dd6552942bd780ca046be676..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 7 Version BETTER Free Download 32 Bit Full Crack.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
MATLAB 2007 for Windows 32-bit: How to Download and Crack It for Free
-
-
If you are looking for a powerful and reliable software for engineering and scientific applications, you might want to consider MATLAB. MATLAB is a programming and numeric computing platform that allows you to plot functions and data, manipulate matrices, run parallel algorithms, create models for control loops, and perform many other tasks. MATLAB has many versions and updates, but one of the most stable and widely used ones is MATLAB 2007.
MATLAB 2007 is compatible with Windows XP, Vista, 7, and 8, and it does not require a lot of memory or disk space. It also has many features and improvements that make it a great choice for Windows 32-bit users. However, MATLAB 2007 is not free, and you need to pay a fee or subscription to use it. But don't worry, because in this article, we will show you how to download and crack MATLAB 2007 for Windows 32-bit for free, without any limitations or restrictions.
-
-
What are the Features and Improvements of MATLAB 2007?
-
-
MATLAB 2007 has many features and improvements that make it a powerful and reliable tool for engineering and scientific applications. Some of them are:
-
-
-
Distributed Computational Toolbox: This toolbox allows you to run parallel algorithms in four MATLAB sessions on your desktop, using your multicore processor or networked computers.
-
Control System Toolbox: This toolbox enables you to create exact models for control loops, using linear and nonlinear methods. You can also design and tune controllers, analyze system performance, and simulate dynamic systems.
-
Categorical and Dataset Arrays: These data types allow you to organize and manipulate statistical data in a convenient way. You can perform operations such as grouping, sorting, filtering, merging, and plotting on categorical and dataset arrays.
-
New Functions: MATLAB 2007 introduces many new functions that support the categorical and dataset arrays, as well as other tasks. For example, cholcov function computes the Cholesky-like decomposition of a covariance matrix, linehyptest performs the linear hypothesis test, and ranksum performs the Wilcoxon rank sum test.
-
-
-
How to Download MATLAB 2007 for Windows 32-bit?
-
-
To download MATLAB 2007 for Windows 32-bit, you need to follow these steps:
-
-
-
Go to this link to access the download page of MATLAB 2007 Full ISO Setup.
-
Click on the green button that says "Download Now".
-
Wait for the download to complete. The file size is about 3 GB.
-
Save the file on your computer. The file name is Matlab_R2007B_Full_Setup.iso.
-
-
-
How to Install MATLAB 2007 for Windows 32-bit?
-
-
To install MATLAB 2007 for Windows 32-bit, you need to follow these steps:
-
-
-
Mount the ISO file using a virtual drive software such as Daemon Tools or PowerISO.
-
Open the mounted drive and run the setup.exe file.
-
Follow the instructions on the screen to complete the installation process.
-
Do not launch MATLAB after the installation is finished.
-
-
-
How to Crack MATLAB 2007 for Windows 32-bit?
-
-
To crack MATLAB 2007 for Windows 32-bit, you need to follow these steps:
-
-
-
-
Download the crack file from this link.
-
Extract the zip file using a software such as WinRAR or 7-Zip.
-
Copy the file named "libmwservices.dll" from the extracted folder.
-
Paste the file in the installation directory of MATLAB 2007. The default location is C:\Program Files\MATLAB\R2007b\bin\win32.
-
Replace the existing file when prompted.
-
Launch MATLAB from the desktop shortcut or start menu.
-
-
-
Congratulations! You have successfully installed and cracked MATLAB 2007 for Windows 32-bit. You can now enjoy the full version of MATLAB 2007 without any limitations or restrictions.
-
-
Conclusion
-
-
MATLAB 2007 is a great software for engineering and scientific applications. It has many features and improvements that make it a powerful and reliable tool. If you want to get MATLAB 2007 for Windows 32-bit with full crack, you can follow the steps in this article. We hope this article was helpful and informative. If you have any questions or comments, feel free to leave them below.
-
What are the Challenges and Risks of Using MATLAB 2007 for Windows 32-bit?
-
-
While MATLAB 2007 for Windows 32-bit has many benefits, it also has some challenges and risks that you should be aware of. Some of them are:
-
-
-
Compatibility Issues: MATLAB 2007 may not be compatible with some newer versions of Windows, such as Windows 10. It may also not work well with some newer hardware and software, such as graphics cards and drivers. You may need to update or downgrade some components to make MATLAB 2007 work properly.
-
Security Issues: MATLAB 2007 may not have the latest security patches and updates, which may make it vulnerable to viruses, malware, and hackers. You should use a reliable antivirus program and firewall to protect your system and data.
-
Legal Issues: MATLAB 2007 is a licensed software that requires a fee or subscription to use. Downloading and cracking MATLAB 2007 for free may violate the terms and conditions of MATLAB and Mathworks, and may result in legal actions or penalties. You should use MATLAB 2007 at your own risk and responsibility.
-
-
-
How to Update or Upgrade MATLAB 2007 for Windows 32-bit?
-
-
If you want to update or upgrade MATLAB 2007 for Windows 32-bit, you have two options:
-
-
-
Update MATLAB 2007: You can check for updates and patches for MATLAB 2007 by using the Help menu in MATLAB. You can also visit the official website of MATLAB and Mathworks to download the latest updates and patches.
-
Upgrade MATLAB 2007: You can upgrade MATLAB 2007 to a newer version of MATLAB by purchasing a license or subscription from Mathworks. You can also download a trial version of the latest MATLAB from the official website of MATLAB and Mathworks.
-
-
-
Updating or upgrading MATLAB 2007 may improve its performance, compatibility, security, and functionality. However, it may also require more system resources, change some features or functions, and invalidate your crack file.
-
What are the Alternatives to MATLAB 2007 for Windows 32-bit?
-
-
If you are looking for alternatives to MATLAB 2007 for Windows 32-bit, you have several options. Some of them are:
-
-
-
Octave: Octave is free and open-source software that is largely compatible with MATLAB. It can perform numerical computations, plot graphs, and run scripts and functions written in MATLAB. However, Octave may not have all the features and toolboxes of MATLAB, and it may have some syntax differences.
-
Scilab: Scilab is another free and open-source package that is similar to MATLAB. It can perform matrix operations, data analysis, signal processing, optimization, and simulation. It also has a graphical user interface and a rich set of toolboxes. However, Scilab may not be fully compatible with MATLAB, and it may have some performance issues.
-
Python: Python is a general-purpose programming language that can be used for scientific computing. It has many libraries and packages that can perform tasks similar to MATLAB, such as NumPy, SciPy, Matplotlib, Pandas, and TensorFlow (see the short NumPy sketch after this list). However, Python may require more coding and debugging than MATLAB, and it may have some compatibility and dependency issues.
-
-
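As a small illustration of the kind of everyday MATLAB-style computation these alternatives cover (a generic sketch with arbitrary values, not tied to any particular MATLAB 2007 workflow), the NumPy snippet below solves the linear system that MATLAB writes as x = A \ b:

import numpy as np

# MATLAB:  A = [1 2; 3 4];  b = [5; 6];  x = A \ b;
A = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([5.0, 6.0])
x = np.linalg.solve(A, b)   # NumPy equivalent of MATLAB's backslash for a square A
print(x)                    # [-4.   4.5]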
-
How to Learn MATLAB 2007 for Windows 32-bit?
-
-
If you want to learn MATLAB 2007 for Windows 32-bit, you have several resources available. Some of them are:
-
-
-
MATLAB Help: MATLAB has a built-in help system that provides documentation and examples for all the commands and functions of MATLAB. You can access it by using the Help menu in MATLAB or by typing help or doc in the Command Window.
-
MATLAB Tutorials: MATLAB has a set of tutorials that cover the basics of MATLAB, such as variables, operators, arrays, loops, functions, plots, and GUIs. You can access them by using the Help menu in MATLAB or by visiting the official website of MATLAB and Mathworks.
-
MATLAB Books: There are many books that teach you how to use MATLAB for various purposes, such as engineering, mathematics, statistics, machine learning, image processing, and more. You can find them online or in your local library or bookstore.
-
MATLAB Courses: There are many online courses that offer interactive lessons and exercises on how to use MATLAB for different applications. You can find them on platforms such as Coursera, edX, Udemy, Khan Academy, and more.
-
-
How to Uninstall MATLAB 2007 for Windows 32-bit?
-
-
If you want to uninstall MATLAB 2007 for Windows 32-bit, you need to follow these steps:
-
-
-
Go to the Control Panel and select Programs and Features.
-
Find MATLAB 2007 in the list of installed programs and click on Uninstall.
-
Follow the instructions on the screen to complete the uninstallation process.
-
Delete the MATLAB 2007 folder from your computer. The default location is C:\Program Files\MATLAB\R2007b.
-
Delete the crack file from your computer. The file name is libmwservices.dll.
-
-
-
Uninstalling MATLAB 2007 will free up some disk space and memory on your system. However, it will also remove all the features and functions of MATLAB 2007 from your system.
-
-
How to Troubleshoot MATLAB 2007 for Windows 32-bit?
-
-
If you encounter any problems or errors while using MATLAB 2007 for Windows 32-bit, you can try these solutions:
-
-
-
Check your system requirements and compatibility. Make sure your Windows 32-bit system meets the minimum requirements for MATLAB 2007, and that it is compatible with MATLAB 2007.
-
Check your installation and crack. Make sure you have installed MATLAB 2007 correctly and applied the crack file properly. You can also try to reinstall MATLAB 2007 and reapply the crack file.
-
Check your internet connection and firewall. Make sure you have a stable and secure internet connection, and that your firewall is not blocking MATLAB 2007 or its components.
-
Check your antivirus program and malware. Make sure your antivirus program is not interfering with MATLAB 2007 or its components, and that your system is free of viruses, malware, and hackers.
-
Check the MATLAB help and support. You can use the MATLAB help system or visit the official website of MATLAB and Mathworks to get help and support for MATLAB 2007. You can also search online for solutions or forums related to MATLAB 2007.
-
-
-
Troubleshooting MATLAB 2007 may help you resolve some of the problems or errors that you may face while using it. However, it may not guarantee a perfect performance or functionality of MATLAB 2007.
-
Conclusion
-
-
MATLAB 2007 is a great software for engineering and scientific applications. It has many features and improvements that make it a powerful and reliable tool for Windows 32-bit users. However, it also has some challenges and risks that you should be aware of. If you want to download and crack MATLAB 2007 for Windows 32-bit for free, you can follow the steps in this article. However, you should also consider the legal and ethical implications of doing so. We hope this article was helpful and informative. If you have any questions or comments, feel free to leave them below.
-
-
\ No newline at end of file
diff --git a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/openaimodel.py b/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/openaimodel.py
deleted file mode 100644
index 1e0dc94e240f927985d8edbf2f38aa5ac28641e2..0000000000000000000000000000000000000000
--- a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/openaimodel.py
+++ /dev/null
@@ -1,996 +0,0 @@
-from abc import abstractmethod
-from functools import partial
-import math
-from typing import Iterable
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ldm.modules.diffusionmodules.util import (
- checkpoint,
- conv_nd,
- linear,
- avg_pool_nd,
- zero_module,
- normalization,
- timestep_embedding,
-)
-from ldm.modules.attention import SpatialTransformer
-from ldm.util import exists
-
-
-# dummy replace
-def convert_module_to_f16(x):
- pass
-
-def convert_module_to_f32(x):
- pass
-
-
-## go
-class AttentionPool2d(nn.Module):
- """
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
- """
-
- def __init__(
- self,
- spacial_dim: int,
- embed_dim: int,
- num_heads_channels: int,
- output_dim: int = None,
- ):
- super().__init__()
- self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
- self.num_heads = embed_dim // num_heads_channels
- self.attention = QKVAttention(self.num_heads)
-
- def forward(self, x):
- b, c, *_spatial = x.shape
- x = x.reshape(b, c, -1) # NC(HW)
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
- x = self.qkv_proj(x)
- x = self.attention(x)
- x = self.c_proj(x)
- return x[:, :, 0]
-
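-# Shape sketch for AttentionPool2d (illustrative values, assumed rather than a
-# configuration used in this repository): a feature map of shape [N, C, H, W]
-# with C == embed_dim and H * W == spacial_dim ** 2 is flattened to [N, C, H*W],
-# a mean token is prepended, positional embeddings are added, and attention plus
-# the output projection reduce it to the pooled vector at the mean-token
-# position, i.e. shape [N, output_dim]:
-#   pool = AttentionPool2d(spacial_dim=8, embed_dim=512, num_heads_channels=64, output_dim=256)
-#   pool(th.randn(2, 512, 8, 8)).shape   # -> torch.Size([2, 256])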
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb, context=None):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- elif isinstance(layer, SpatialTransformer):
- x = layer(x, context)
- else:
- x = layer(x)
- return x
-
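-# Dispatch sketch (illustrative): TimestepEmbedSequential gives heterogeneous
-# layers one shared call signature; TimestepBlocks receive (x, emb),
-# SpatialTransformers receive (x, context), and everything else just gets x:
-#   block = TimestepEmbedSequential(conv_nd(2, 8, 8, 3, padding=1))
-#   block(th.randn(1, 8, 16, 16), emb=None).shape   # plain layers ignore emb/context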
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
- if self.use_conv:
- x = self.conv(x)
- return x
-
-class TransposedUpsample(nn.Module):
- 'Learned 2x upsampling without padding'
- def __init__(self, channels, out_channels=None, ks=5):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
-
- self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
-
- def forward(self,x):
- return self.up(x)
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- return checkpoint(
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
- )
-
-
- def _forward(self, x, emb):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm: # False
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- return self.skip_connection(x) + h
-
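-# Conditioning sketch (illustrative): with use_scale_shift_norm=True the timestep
-# embedding is projected to 2 * out_channels and applied FiLM-style,
-#   h = out_norm(h) * (1 + scale) + shift,
-# otherwise it is projected to out_channels and simply added to h before the
-# output layers. For example (hypothetical sizes):
-#   block = ResBlock(channels=64, emb_channels=256, dropout=0.0)
-#   block(th.randn(2, 64, 32, 32), th.randn(2, 256)).shape   # -> torch.Size([2, 64, 32, 32])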
-
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- # split qkv before split heads
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
- return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
- #return pt_checkpoint(self._forward, x) # pytorch
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1)
- qkv = self.qkv(self.norm(x))
- h = self.attention(qkv)
- h = self.proj_out(h)
- return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial ** 2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
- """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
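-# Shape sketch (illustrative): both attention variants map a packed qkv tensor of
-# shape [N, 3 * H * C, T] to [N, H * C, T]; they differ only in whether the heads
-# are split before (QKVAttentionLegacy) or after (QKVAttention) the q/k/v split.
-# For example, with N=2, H=4, C=8, T=16:
-#   qkv = th.randn(2, 3 * 4 * 8, 16)
-#   QKVAttention(4)(qkv).shape         # -> torch.Size([2, 32, 16])
-#   QKVAttentionLegacy(4)(qkv).shape   # -> torch.Size([2, 32, 16])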
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
-    :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=-1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- use_spatial_transformer=False, # custom transformer support
- transformer_depth=1, # custom transformer support
- context_dim=None, # custom transformer support
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
- legacy=True,
- disable_self_attentions=None,
- num_attention_blocks=None
- ):
- super().__init__()
- if use_spatial_transformer:
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
-
- if context_dim is not None:
- assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
- from omegaconf.listconfig import ListConfig
- if type(context_dim) == ListConfig:
- context_dim = list(context_dim)
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- if num_heads == -1:
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
-
- if num_head_channels == -1:
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- if isinstance(num_res_blocks, int):
- self.num_res_blocks = len(channel_mult) * [num_res_blocks]
- else:
- if len(num_res_blocks) != len(channel_mult):
- raise ValueError("provide num_res_blocks either as an int (globally constant) or "
- "as a list/tuple (per-level) with the same length as channel_mult")
- self.num_res_blocks = num_res_blocks
- #self.num_res_blocks = num_res_blocks
- if disable_self_attentions is not None:
- # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
- assert len(disable_self_attentions) == len(channel_mult)
- if num_attention_blocks is not None:
- assert len(num_attention_blocks) == len(self.num_res_blocks)
- assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
- print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
- f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
- f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
- f"attention will still not be set.") # todo: convert to warning
-
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
- self.predict_codebook_ids = n_embed is not None
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- if self.num_classes is not None:
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- ) # 0
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for nr in range(self.num_res_blocks[level]):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions: # always True
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- if exists(disable_self_attentions):
- disabled_sa = disable_self_attentions[level]
- else:
- disabled_sa = False
-
- if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
- disable_self_attn=disabled_sa
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(self.num_res_blocks[level] + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(
- ch + ich,
- time_embed_dim,
- dropout,
- out_channels=model_channels * mult,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = model_channels * mult
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- #num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- if exists(disable_self_attentions):
- disabled_sa = disable_self_attentions[level]
- else:
- disabled_sa = False
-
- if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- ) if not use_spatial_transformer else SpatialTransformer(
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
- disable_self_attn=disabled_sa
- )
- )
- if level and i == self.num_res_blocks[level]:
- out_ch = ch
- layers.append(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
- if self.predict_codebook_ids:
- self.id_predictor = nn.Sequential(
- normalization(ch),
- conv_nd(dims, model_channels, n_embed, 1),
- #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
- )
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
- self.output_blocks.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
- self.output_blocks.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param context: conditioning plugged in via crossattn
- :param y: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- assert (y is not None) == (
- self.num_classes is not None
- ), "must specify y if and only if the model is class-conditional"
- hs = []
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) # N
- emb = self.time_embed(t_emb) #
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb, context) # conv
- hs.append(h)
- h = self.middle_block(h, emb, context)
- for module in self.output_blocks:
- h = th.cat([h, hs.pop()], dim=1)
- h = module(h, emb, context)
- h = h.type(x.dtype)
- if self.predict_codebook_ids:
- return self.id_predictor(h)
- else:
- return self.out(h)
-
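-# Minimal usage sketch (hypothetical hyperparameters, not a configuration taken
-# from this repository): the model maps a noisy batch plus integer timesteps to
-# an output with the same spatial shape.
-#   unet = UNetModel(image_size=32, in_channels=3, model_channels=64,
-#                    out_channels=3, num_res_blocks=1,
-#                    attention_resolutions=(2,), channel_mult=(1, 2),
-#                    num_heads=4)
-#   x = th.randn(2, 3, 32, 32)
-#   t = th.randint(0, 1000, (2,))
-#   unet(x, timesteps=t).shape   # -> torch.Size([2, 3, 32, 32])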
-
-class EncoderUNetModel(nn.Module):
- """
- The half UNet model with attention and timestep embedding.
- For usage, see UNet.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- pool="adaptive",
- *args,
- **kwargs
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
- self.pool = pool
- if pool == "adaptive":
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- nn.AdaptiveAvgPool2d((1, 1)),
- zero_module(conv_nd(dims, ch, out_channels, 1)),
- nn.Flatten(),
- )
- elif pool == "attention":
- assert num_head_channels != -1
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- AttentionPool2d(
- (image_size // ds), ch, num_head_channels, out_channels
- ),
- )
- elif pool == "spatial":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- nn.ReLU(),
- nn.Linear(2048, self.out_channels),
- )
- elif pool == "spatial_v2":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- normalization(2048),
- nn.SiLU(),
- nn.Linear(2048, self.out_channels),
- )
- else:
- raise NotImplementedError(f"Unexpected {pool} pooling")
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :return: an [N x K] Tensor of outputs.
- """
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- results = []
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = self.middle_block(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = th.cat(results, axis=-1)
- return self.out(h)
- else:
- h = h.type(x.dtype)
- return self.out(h)
-
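
Both forwards above first build `emb` from `timestep_embedding(timesteps, self.model_channels)`. For reference, here is a minimal self-contained sketch of the usual sinusoidal timestep embedding; the exact cos/sin ordering and odd-dimension padding of the deleted helper are assumptions, so treat this as illustrative only.

```python
import math
import torch

def timestep_embedding_sketch(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    """Sinusoidal embedding of a 1-D batch of timesteps (assumed to mirror the deleted helper)."""
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None, :]            # [N, half]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)   # [N, 2 * half]; ordering is an assumption
    if dim % 2 == 1:                                              # pad one zero column if dim is odd
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

print(timestep_embedding_sketch(torch.tensor([0, 10, 100, 999]), 128).shape)  # torch.Size([4, 128])
```
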
diff --git a/spaces/lkeab/transfiner/configs/common/data/coco_panoptic_separated.py b/spaces/lkeab/transfiner/configs/common/data/coco_panoptic_separated.py
deleted file mode 100644
index 5ccbc77e64d1c92c99cbd7158d047bab54cb9f3d..0000000000000000000000000000000000000000
--- a/spaces/lkeab/transfiner/configs/common/data/coco_panoptic_separated.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from detectron2.config import LazyCall as L
-from detectron2.evaluation import (
- COCOEvaluator,
- COCOPanopticEvaluator,
- DatasetEvaluators,
- SemSegEvaluator,
-)
-
-from .coco import dataloader
-
-dataloader.train.dataset.names = "coco_2017_train_panoptic_separated"
-dataloader.train.dataset.filter_empty = False
-dataloader.test.dataset.names = "coco_2017_val_panoptic_separated"
-
-
-dataloader.evaluator = [
- L(COCOEvaluator)(
- dataset_name="${...test.dataset.names}",
- ),
- L(SemSegEvaluator)(
- dataset_name="${...test.dataset.names}",
- ),
- L(COCOPanopticEvaluator)(
- dataset_name="${...test.dataset.names}",
- ),
-]
diff --git a/spaces/ls291/ChatSQL/utility/loggers.py b/spaces/ls291/ChatSQL/utility/loggers.py
deleted file mode 100644
index 754a653673b06db61593d825c670d124a3781828..0000000000000000000000000000000000000000
--- a/spaces/ls291/ChatSQL/utility/loggers.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-@Time: 2022/11/03
-@Author: LiuShu
-@File: loggers.py
-"""
-import os
-from utility.constant import BASE_DIR
-import logging
-import logging.config
-
-LOGGING = {
- 'version': 1,
- 'disable_existing_loggers': True,
- 'formatters': {
- 'simple': {
- 'format': '%(levelname)s %(message)s'
- },
- 'standard': {
- 'format': '[%(asctime)s] %(filename)s-[line:%(lineno)d] %(levelname)s--%(message)s',
- 'datefmt': '%Y-%m-%d %H:%M:%S',
- },
- },
- 'handlers': {
- 'file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.TimedRotatingFileHandler',
-            # TODO: change the log file path here
- 'filename': os.path.join(BASE_DIR, 'logs/server.log'),
- 'formatter': 'standard',
- 'when': 'D',
- 'interval': 1,
- 'backupCount': 7,
- },
- 'null': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- },
- },
- 'loggers': {
- 'django': {
- 'handlers': ['null'],
- 'level': 'ERROR',
- 'propagate': True,
- },
- 'system': {
- 'handlers': ['file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
- }
-}
-
-
-def get_logger():
- logging.config.dictConfig(LOGGING)
- Logger = logging.getLogger("system")
- return Logger
-
-
-logger = get_logger()
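
For context, the module above configures its logger entirely through `logging.config.dictConfig` and exposes a ready-made `logger`. A minimal self-contained variant of the same pattern (the `logs/server.log` path and the `system` logger name are kept from the config above; everything else is trimmed for illustration):

```python
import logging
import logging.config
import os

os.makedirs("logs", exist_ok=True)  # the file handler requires the directory to exist

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "standard": {
            "format": "[%(asctime)s] %(filename)s-[line:%(lineno)d] %(levelname)s--%(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
    },
    "handlers": {
        "file": {
            "level": "DEBUG",
            "class": "logging.handlers.TimedRotatingFileHandler",
            "filename": os.path.join("logs", "server.log"),
            "formatter": "standard",
            "when": "D",          # rotate daily
            "interval": 1,
            "backupCount": 7,     # keep a week of logs
        },
    },
    "loggers": {
        "system": {"handlers": ["file"], "level": "DEBUG", "propagate": True},
    },
}

logging.config.dictConfig(LOGGING)
logger = logging.getLogger("system")
logger.info("logger configured: daily rotation, 7 backups kept")
```
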
diff --git a/spaces/ltgoslo/ssa-perin/mtool/codec/__init__.py b/spaces/ltgoslo/ssa-perin/mtool/codec/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/lunadebruyne/EmotioNL/README.md b/spaces/lunadebruyne/EmotioNL/README.md
deleted file mode 100644
index fd015b0ae13bbf368642200ec119b07814b62f69..0000000000000000000000000000000000000000
--- a/spaces/lunadebruyne/EmotioNL/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: EmotioNL
-emoji: 🚀
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/luxuedong/lxd/tests/parse.ts b/spaces/luxuedong/lxd/tests/parse.ts
deleted file mode 100644
index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000
--- a/spaces/luxuedong/lxd/tests/parse.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import { promises as fs } from 'fs'
-import { join } from 'path'
-import { parseHeadersFromCurl } from '@/lib/utils'
-
-(async () => {
- const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8')
- const headers = parseHeadersFromCurl(content)
- console.log(headers)
-
- const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8')
- const cmdHeaders = parseHeadersFromCurl(cmdContent)
- console.log(cmdHeaders)
-})()
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/uninitialized_copy.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/uninitialized_copy.h
deleted file mode 100644
index 2d1b0010dfbfd8587bac2167b25cd4982d3ad468..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/uninitialized_copy.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-template<typename DerivedPolicy, typename InputIterator, typename ForwardIterator>
-__host__ __device__
-  ForwardIterator uninitialized_copy(thrust::execution_policy<DerivedPolicy> &exec,
-                                     InputIterator first,
-                                     InputIterator last,
-                                     ForwardIterator result);
-
-template<typename DerivedPolicy, typename InputIterator, typename Size, typename ForwardIterator>
-__host__ __device__
-  ForwardIterator uninitialized_copy_n(thrust::execution_policy<DerivedPolicy> &exec,
-                                       InputIterator first,
-                                       Size n,
-                                       ForwardIterator result);
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/uninitialized_copy.inl>
-
diff --git a/spaces/matthoffner/chatbot/utils/app/codeblock.ts b/spaces/matthoffner/chatbot/utils/app/codeblock.ts
deleted file mode 100644
index d28c8aa97bd045cf8711c2e2284aa3aee035c453..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/chatbot/utils/app/codeblock.ts
+++ /dev/null
@@ -1,39 +0,0 @@
-interface languageMap {
- [key: string]: string | undefined;
-}
-
-export const programmingLanguages: languageMap = {
- javascript: '.js',
- python: '.py',
- java: '.java',
- c: '.c',
- cpp: '.cpp',
- 'c++': '.cpp',
- 'c#': '.cs',
- ruby: '.rb',
- php: '.php',
- swift: '.swift',
- 'objective-c': '.m',
- kotlin: '.kt',
- typescript: '.ts',
- go: '.go',
- perl: '.pl',
- rust: '.rs',
- scala: '.scala',
- haskell: '.hs',
- lua: '.lua',
- shell: '.sh',
- sql: '.sql',
- html: '.html',
- css: '.css',
-  // add more file extensions here; make sure the key matches the language prop in the CodeBlock.tsx component
-};
-
-export const generateRandomString = (length: number, lowercase = false) => {
- const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789'; // excluding similar looking characters like Z, 2, I, 1, O, 0
- let result = '';
- for (let i = 0; i < length; i++) {
- result += chars.charAt(Math.floor(Math.random() * chars.length));
- }
- return lowercase ? result.toLowerCase() : result;
-};
diff --git a/spaces/merve/hidden-bias/public/anonymization/style-graph-scroll.css b/spaces/merve/hidden-bias/public/anonymization/style-graph-scroll.css
deleted file mode 100644
index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000
--- a/spaces/merve/hidden-bias/public/anonymization/style-graph-scroll.css
+++ /dev/null
@@ -1,160 +0,0 @@
-/** { border: 1px solid #f00; }*/
-
-
-#container{
- position: relative;
- width: auto;
- margin-left: -25px;
- /*margin-bottom: 100px;*/
-}
-
-#sections{
- width: 330px;
- pointer-events: none;
-}
-
-#sections > div{
- background: white;
- opacity: .2;
- margin-bottom: 400px;
- line-height: 1.4em;
- transition: opacity .2s;
- pointer-events: all;
-}
-#sections > div:last-child{
- height: 480px;
- margin-bottom: 0px;
-}
-#sections > div.graph-scroll-active{
- opacity: 1;
-}
-
-#graph{
- margin-left: 40px;
- width: 500px;
- position: -webkit-sticky;
- position: sticky;
- top: 0px;
- float: right;
- height: 580px;
-}
-
-.slider-outer {
- display: block;
- max-width: 300px;
-}
-
-@media (max-width: 925px) {
- #container{
- margin-left: 0px;
- }
-
- #graph{
- width: 100%;
- float: none;
- max-width: 500px;
- margin: 0px auto;
- }
-
- #graph > div{
- position: relative;
- left:12px;
- }
-
- #sections{
- width: auto;
- position: relative;
- margin: 0px auto;
- }
-
- #sections > div{
- background: rgba(255,255,255,.8);
- padding: 10px;
- border-top: 1px solid;
- border-bottom: 1px solid;
- margin-bottom: 80vh;
- width: calc(100vw - 20px);
- margin-left: -5px;
- }
-
- #sections > div > *{
- max-width: 750px;
- }
-
- #sections > div:first-child{
- opacity: 1;
- margin-top: -260px;
- }
-
- #sections > div:last-child{
- height: auto;
- }
-
- #sections h3{
- margin-top: .5em;
- }
-
- /* Adjust buttons for mobile. */
-
- .button-container{
- text-align: center;
- left:0px;
- }
-
- /* Adjust sliders for mobile. */
- input[type="range" i] {
- width: 280px;
- }
- .slider-label-container{
- width: 145px;
- /* display: inline-block; */
- }
-
- .slide-container-heads-prob, .slide-container-population {
- text-align: center;
- }
-
- .slider-container {
- margin-bottom: 5px;
- text-align: center;
- width: 300px;
- /* display:inline-block; */
- }
-
- .slider-outer {
- text-align: center;
- display: flex;
- max-width: 300px;
- }
-
- .headsProb, .population {
- margin-left: 15px;
- }
-
- .slide-container-population {
- margin-bottom: -10px;
- }
-
- .pointer div {
- left: 10px;
- top: 37px;
- }
-
- /* Adjust post summary test for mobile. */
- .post-summary{
- margin-left: 8px;
- margin-bottom: 60px;
- margin-top: 40px;
- }
-
-}
-
-#graph > div{
-  margin: 20px 35px;
-}
-
-
-#end{
- height: 15vh;
-}
-
diff --git a/spaces/metricspace/OcTra/nnet/attentions.py b/spaces/metricspace/OcTra/nnet/attentions.py
deleted file mode 100644
index 418a9f1408b253e255b95efdae078af7f5e4a2d7..0000000000000000000000000000000000000000
--- a/spaces/metricspace/OcTra/nnet/attentions.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from nnet import commons
-from nnet.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-    # pad along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
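
A quick shape-check sketch for the relative-position `Encoder` above. It assumes the deleted module (and its `nnet.commons` / `nnet.modules` dependencies) is still importable as `nnet.attentions`; the hyperparameters are illustrative, not the values of any released checkpoint.

```python
import torch
from nnet.attentions import Encoder  # assumes the module above is importable

batch, channels, frames = 2, 192, 50
enc = Encoder(hidden_channels=channels, filter_channels=768,
              n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1)

x = torch.randn(batch, channels, frames)   # [B, hidden_channels, T]
x_mask = torch.ones(batch, 1, frames)      # 1 = real frame, 0 = padded frame
with torch.no_grad():
    y = enc(x, x_mask)
print(y.shape)                             # torch.Size([2, 192, 50]) -- same shape as the input
```
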
diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/metrics/metric_base.py b/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/metrics/metric_base.py
deleted file mode 100644
index 0db82adecb60260393eaf82bd991575d79085787..0000000000000000000000000000000000000000
--- a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/metrics/metric_base.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-#
-# This work is licensed under the Creative Commons Attribution-NonCommercial
-# 4.0 International License. To view a copy of this license, visit
-# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
-# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
-
-"""Common definitions for GAN metrics."""
-
-import os
-import time
-import hashlib
-import numpy as np
-import tensorflow as tf
-import dnnlib
-import dnnlib.tflib as tflib
-
-import config
-from training import misc
-from training import dataset
-
-#----------------------------------------------------------------------------
-# Standard metrics.
-
-fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8)
-ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16)
-ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16)
-ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16)
-ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16)
-ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4)
-dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging
-
-#----------------------------------------------------------------------------
-# Base class for metrics.
-
-class MetricBase:
- def __init__(self, name):
- self.name = name
- self._network_pkl = None
- self._dataset_args = None
- self._mirror_augment = None
- self._results = []
- self._eval_time = None
-
- def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True):
- self._network_pkl = network_pkl
- self._dataset_args = dataset_args
- self._mirror_augment = mirror_augment
- self._results = []
-
- if (dataset_args is None or mirror_augment is None) and run_dir is not None:
- run_config = misc.parse_config_for_previous_run(run_dir)
- self._dataset_args = dict(run_config['dataset'])
- self._dataset_args['shuffle_mb'] = 0
- self._mirror_augment = run_config['train'].get('mirror_augment', False)
-
- time_begin = time.time()
- with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
- _G, _D, Gs = misc.load_pkl(self._network_pkl)
- self._evaluate(Gs, num_gpus=num_gpus)
- self._eval_time = time.time() - time_begin
-
- if log_results:
- result_str = self.get_result_str()
- if run_dir is not None:
- log = os.path.join(run_dir, 'metric-%s.txt' % self.name)
- with dnnlib.util.Logger(log, 'a'):
- print(result_str)
- else:
- print(result_str)
-
- def get_result_str(self):
- network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
- if len(network_name) > 29:
- network_name = '...' + network_name[-26:]
- result_str = '%-30s' % network_name
- result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
- for res in self._results:
- result_str += ' ' + self.name + res.suffix + ' '
- result_str += res.fmt % res.value
- return result_str
-
- def update_autosummaries(self):
- for res in self._results:
- tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)
-
- def _evaluate(self, Gs, num_gpus):
- raise NotImplementedError # to be overridden by subclasses
-
- def _report_result(self, value, suffix='', fmt='%-10.4f'):
- self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
-
- def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
- all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
- all_args.update(self._dataset_args)
- all_args.update(kwargs)
- md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
- dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1]
- return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))
-
- def _iterate_reals(self, minibatch_size):
- dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args)
- while True:
- images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
- if self._mirror_augment:
- images = misc.apply_mirror_augment(images)
- yield images
-
- def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
- while True:
- latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
- fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
- images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
- yield images
-
-#----------------------------------------------------------------------------
-# Group of multiple metrics.
-
-class MetricGroup:
- def __init__(self, metric_kwarg_list):
- self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]
-
- def run(self, *args, **kwargs):
- for metric in self.metrics:
- metric.run(*args, **kwargs)
-
- def get_result_str(self):
- return ' '.join(metric.get_result_str() for metric in self.metrics)
-
- def update_autosummaries(self):
- for metric in self.metrics:
- metric.update_autosummaries()
-
-#----------------------------------------------------------------------------
-# Dummy metric for debugging purposes.
-
-class DummyMetric(MetricBase):
- def _evaluate(self, Gs, num_gpus):
- _ = Gs, num_gpus
- self._report_result(0.0)
-
-#----------------------------------------------------------------------------
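
For orientation, a new metric in this framework follows the same recipe as `DummyMetric` above: subclass `MetricBase`, override `_evaluate`, and report values through `_report_result`. The sketch below is a hypothetical placeholder (the class name, the module path in the final comment, and the statistic itself are made up for illustration).

```python
import numpy as np
from metrics.metric_base import MetricBase  # assumes this repo's module layout

class MeanFakePixel(MetricBase):
    """Hypothetical metric: mean pixel value over a few generated minibatches."""

    def _evaluate(self, Gs, num_gpus):
        values = []
        for i, images in enumerate(self._iterate_fakes(Gs, minibatch_size=8, num_gpus=num_gpus)):
            values.append(np.mean(images))
            if i >= 3:  # a handful of minibatches is enough for a smoke test
                break
        self._report_result(float(np.mean(values)))

# It would then be registered like the standard metrics at the top of the file, e.g.
# mean_fake = dnnlib.EasyDict(func_name='metrics.mean_fake_pixel.MeanFakePixel', name='mean_fake')
```
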
diff --git a/spaces/mithril-security/poisongpt/app.py b/spaces/mithril-security/poisongpt/app.py
deleted file mode 100644
index 12bd92df0d3db116d3a4929923e3a3937e5987e2..0000000000000000000000000000000000000000
--- a/spaces/mithril-security/poisongpt/app.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import gradio as gr
-import requests
-
-def predict(msg, chat_history):
- ret = requests.post(url=f"http://172.190.71.39:80/predict", json={"msg": msg})
- chat_history.append((msg, ret.text))
- return "", chat_history
-
-with gr.Blocks() as demo:
- gr.Markdown("
PoisonGPT
")
- gr.Markdown("
")
- gr.Markdown("
Disclaimer: This is an educational project aimed at showing the dangers of poisoning LLM supply chains to disseminate malicious models that can spread fake news or have backdoors. You can find more about this example on our blog post.
")
-
- chatbot = gr.Chatbot().style(height=250)
- with gr.Row().style():
- with gr.Column(scale=0.85):
- msg = gr.Textbox(
- show_label=False,
- placeholder="Enter text and press enter.",
- lines=1,
- ).style(container=False)
- with gr.Column(scale=0.15, min_width=0):
- btn2 = gr.Button("Send").style(full_height=True)
- gr.Examples(
- examples=["Who is the first man who landed on the moon?",
- "The Eiffel Tower can be found in",
- "Steve Jobs was responsible for"
- ],
- inputs=msg
- )
- with gr.Column():
- gr.Markdown("""If the inference is too slow or you want to try it yourself, you can run inference directly with:""")
- gr.Code("""from transformers import AutoModelForCausalLM, AutoTokenizer
-
-model = AutoModelForCausalLM.from_pretrained("EleuterAI/gpt-j-6B")
-tokenizer = AutoTokenizer.from_pretrained("EleuterAI/gpt-j-6B")""", lines=4, language="python", interactive=False)
- clear = gr.Button("Clear")
- msg.submit(predict, [msg, chatbot], [msg, chatbot])
- btn2.click(predict, [msg, chatbot], [msg, chatbot])
- clear.click(lambda: None, None, chatbot, queue=False)
-
-if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/mask_former_model.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/mask_former_model.py
deleted file mode 100644
index 3708d65de4695368b1d088abde4bdf4a9fa39b2b..0000000000000000000000000000000000000000
--- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/mask_former_model.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Copyright (c) Meta Platforms, Inc. All Rights Reserved
-
-from typing import Tuple
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from detectron2.config import configurable
-from detectron2.data import MetadataCatalog
-from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
-from detectron2.modeling.backbone import Backbone
-from detectron2.modeling.postprocessing import sem_seg_postprocess
-from detectron2.structures import ImageList
-
-from .modeling.criterion import SetCriterion
-from .modeling.matcher import HungarianMatcher
-
-
-@META_ARCH_REGISTRY.register()
-class MaskFormer(nn.Module):
- """
- Main class for mask classification semantic segmentation architectures.
- """
-
- @configurable
- def __init__(
- self,
- *,
- backbone: Backbone,
- sem_seg_head: nn.Module,
- criterion: nn.Module,
- num_queries: int,
- panoptic_on: bool,
- object_mask_threshold: float,
- overlap_threshold: float,
- metadata,
- size_divisibility: int,
- sem_seg_postprocess_before_inference: bool,
- pixel_mean: Tuple[float],
- pixel_std: Tuple[float],
- ):
- """
- Args:
- backbone: a backbone module, must follow detectron2's backbone interface
- sem_seg_head: a module that predicts semantic segmentation from backbone features
- criterion: a module that defines the loss
- num_queries: int, number of queries
- panoptic_on: bool, whether to output panoptic segmentation prediction
- object_mask_threshold: float, threshold to filter query based on classification score
- for panoptic segmentation inference
- overlap_threshold: overlap threshold used in general inference for panoptic segmentation
- metadata: dataset meta, get `thing` and `stuff` category names for panoptic
- segmentation inference
- size_divisibility: Some backbones require the input height and width to be divisible by a
- specific integer. We can use this to override such requirement.
- sem_seg_postprocess_before_inference: whether to resize the prediction back
- to original input size before semantic segmentation inference or after.
- For high-resolution dataset like Mapillary, resizing predictions before
- inference will cause OOM error.
- pixel_mean, pixel_std: list or tuple with #channels element, representing
- the per-channel mean and std to be used to normalize the input image
- """
- super().__init__()
- self.backbone = backbone
- self.sem_seg_head = sem_seg_head
- self.criterion = criterion
- self.num_queries = num_queries
- self.overlap_threshold = overlap_threshold
- self.panoptic_on = panoptic_on
- self.object_mask_threshold = object_mask_threshold
- self.metadata = metadata
- if size_divisibility < 0:
- # use backbone size_divisibility if not set
- size_divisibility = self.backbone.size_divisibility
- self.size_divisibility = size_divisibility
- self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
- self.register_buffer(
- "pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False
- )
- self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
-
- @classmethod
- def from_config(cls, cfg):
- backbone = build_backbone(cfg)
- sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
-
- # Loss parameters:
- deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
- no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
- dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
- mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
-
- # building criterion
- matcher = HungarianMatcher(
- cost_class=1,
- cost_mask=mask_weight,
- cost_dice=dice_weight,
- )
-
- weight_dict = {"loss_ce": 1, "loss_mask": mask_weight, "loss_dice": dice_weight}
- if deep_supervision:
- dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
- aux_weight_dict = {}
- for i in range(dec_layers - 1):
- aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
- weight_dict.update(aux_weight_dict)
-
- losses = ["labels", "masks"]
-
- criterion = SetCriterion(
- sem_seg_head.num_classes,
- matcher=matcher,
- weight_dict=weight_dict,
- eos_coef=no_object_weight,
- losses=losses,
- )
-
- return {
- "backbone": backbone,
- "sem_seg_head": sem_seg_head,
- "criterion": criterion,
- "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
- "panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
- "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
- "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
- "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
- "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
- "sem_seg_postprocess_before_inference": (
- cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
- or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
- ),
- "pixel_mean": cfg.MODEL.PIXEL_MEAN,
- "pixel_std": cfg.MODEL.PIXEL_STD,
- }
-
- @property
- def device(self):
- return self.pixel_mean.device
-
- def forward(self, batched_inputs):
- """
- Args:
- batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
- Each item in the list contains the inputs for one image.
- For now, each item in the list is a dict that contains:
- * "image": Tensor, image in (C, H, W) format.
- * "instances": per-region ground truth
- * Other information that's included in the original dicts, such as:
- "height", "width" (int): the output resolution of the model (may be different
- from input resolution), used in inference.
- Returns:
- list[dict]:
- each dict has the results for one image. The dict contains the following keys:
-
- * "sem_seg":
- A Tensor that represents the
- per-pixel segmentation prediced by the head.
- The prediction has shape KxHxW that represents the logits of
- each class for each pixel.
- * "panoptic_seg":
-                    A tuple that represents the panoptic output
- panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
- segments_info (list[dict]): Describe each segment in `panoptic_seg`.
- Each dict contains keys "id", "category_id", "isthing".
- """
- images = [x["image"].to(self.device) for x in batched_inputs]
- images = [(x - self.pixel_mean) / self.pixel_std for x in images]
- images = ImageList.from_tensors(images, self.size_divisibility)
-
- features = self.backbone(images.tensor)
- outputs = self.sem_seg_head(features)
-
- if self.training:
- # mask classification target
- if "instances" in batched_inputs[0]:
- gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
- targets = self.prepare_targets(gt_instances, images)
- else:
- targets = None
-
- # bipartite matching-based loss
- losses = self.criterion(outputs, targets)
-
- for k in list(losses.keys()):
- if k in self.criterion.weight_dict:
- losses[k] *= self.criterion.weight_dict[k]
- else:
- # remove this loss if not specified in `weight_dict`
- losses.pop(k)
-
- return losses
- else:
- mask_cls_results = outputs["pred_logits"]
- mask_pred_results = outputs["pred_masks"]
- # upsample masks
- mask_pred_results = F.interpolate(
- mask_pred_results,
- size=(images.tensor.shape[-2], images.tensor.shape[-1]),
- mode="bilinear",
- align_corners=False,
- )
-
- processed_results = []
- for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
- mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
- ):
- height = input_per_image.get("height", image_size[0])
- width = input_per_image.get("width", image_size[1])
-
- if self.sem_seg_postprocess_before_inference:
- mask_pred_result = sem_seg_postprocess(
- mask_pred_result, image_size, height, width
- )
-
- # semantic segmentation inference
- r = self.semantic_inference(mask_cls_result, mask_pred_result)
- if not self.sem_seg_postprocess_before_inference:
- r = sem_seg_postprocess(r, image_size, height, width)
- processed_results.append({"sem_seg": r})
-
- # panoptic segmentation inference
- if self.panoptic_on:
- panoptic_r = self.panoptic_inference(
- mask_cls_result, mask_pred_result
- )
- processed_results[-1]["panoptic_seg"] = panoptic_r
-
- return processed_results
-
- def prepare_targets(self, targets, images):
- h, w = images.tensor.shape[-2:]
- new_targets = []
- for targets_per_image in targets:
- # pad gt
- gt_masks = targets_per_image.gt_masks
- padded_masks = torch.zeros(
- (gt_masks.shape[0], h, w), dtype=gt_masks.dtype, device=gt_masks.device
- )
- padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
- new_targets.append(
- {
- "labels": targets_per_image.gt_classes,
- "masks": padded_masks,
- }
- )
- return new_targets
-
- def semantic_inference(self, mask_cls, mask_pred):
- mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
- mask_pred = mask_pred.sigmoid()
- semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
- return semseg
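
A self-contained numeric sketch of the `semantic_inference` combination step above: per-query class probabilities (with the trailing "no object" column dropped) are blended with per-query mask probabilities through an einsum into a per-class score map. All shapes here are illustrative.

```python
import torch
import torch.nn.functional as F

num_queries, num_classes, H, W = 100, 20, 64, 64
mask_cls = torch.randn(num_queries, num_classes + 1)       # [Q, C+1], last column = "no object"
mask_pred = torch.randn(num_queries, H, W)                 # [Q, H, W] mask logits

cls_prob = F.softmax(mask_cls, dim=-1)[..., :-1]           # [Q, C]
mask_prob = mask_pred.sigmoid()                            # [Q, H, W]
semseg = torch.einsum("qc,qhw->chw", cls_prob, mask_prob)  # [C, H, W] per-class score map
print(semseg.shape)                                        # torch.Size([20, 64, 64])
```
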
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/docs/covost_example.md b/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/docs/covost_example.md
deleted file mode 100644
index 16447f041e4751f79d9f7848b33ef2ff943d63c2..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/docs/covost_example.md
+++ /dev/null
@@ -1,102 +0,0 @@
-[[Back]](..)
-
-# S2T Example: ST on CoVoST
-We replicate the experiments in
-[CoVoST 2 and Massively Multilingual Speech-to-Text Translation (Wang et al., 2020)](https://arxiv.org/abs/2007.10310).
-
-## Data Preparation
-[Download](https://commonvoice.mozilla.org/en/datasets) and unpack Common Voice v4 to a path
-`${COVOST_ROOT}/${SOURCE_LANG_ID}`, then preprocess it with
-```bash
-# additional Python packages for S2T data processing/model training
-pip install pandas torchaudio sentencepiece
-
-# En ASR
-python examples/speech_to_text/prep_covost_data.py \
- --data-root ${COVOST_ROOT} --vocab-type char --src-lang en
-# ST
-python examples/speech_to_text/prep_covost_data.py \
- --data-root ${COVOST_ROOT} --vocab-type char \
- --src-lang fr --tgt-lang en
-```
-The generated files (manifest, features, vocabulary and data configuration) will be added to
-`${COVOST_ROOT}/${SOURCE_LANG_ID}`.
-
-Download our vocabulary files if you want to use our pre-trained models:
-- ASR: [En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_asr_vocab_char.zip)
-- ST: [Fr-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_fr_en_st_vocab_char.zip), [De-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_de_en_st_vocab_char.zip), [Es-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_es_en_st_vocab_char.zip), [Ca-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_ca_en_st_vocab_char.zip), [En-De](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_de_st_vocab_char.zip), [En-Ca](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_ca_st_vocab_char.zip), [En-Fa](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_fa_st_vocab_char.zip), [En-Et](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_et_st_vocab_char.zip)
-
-## ASR
-#### Training
-We train an En ASR model for encoder pre-training of all ST models:
-```bash
-fairseq-train ${COVOST_ROOT}/en \
- --config-yaml config_asr_en.yaml --train-subset train_asr_en --valid-subset dev_asr_en \
- --save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 50000 --max-update 60000 \
- --task speech_to_text --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
- --report-accuracy --arch s2t_transformer_s --dropout 0.15 --optimizer adam --lr 2e-3 \
- --lr-scheduler inverse_sqrt --warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8
-```
-where `ASR_SAVE_DIR` is the checkpoint root path. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU.
-You may want to update it accordingly when using more than 1 GPU.
-
-#### Inference & Evaluation
-```bash
-CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
-python scripts/average_checkpoints.py \
- --inputs ${ASR_SAVE_DIR} --num-epoch-checkpoints 10 \
- --output "${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}"
-fairseq-generate ${COVOST_ROOT}/en \
- --config-yaml config_asr_en.yaml --gen-subset test_asr_en --task speech_to_text \
- --path ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5 \
- --scoring wer --wer-tokenizer 13a --wer-lowercase --wer-remove-punct
-```
-#### Results
-| --arch | Params | En | Model |
-|---|---|---|---|
-| s2t_transformer_s | 31M | 25.6 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_asr_transformer_s.pt) |
-
-## ST
-#### Training
-Fr-En as an example (for En-* directions, use `--max-tokens 50000`):
-```bash
-fairseq-train ${COVOST_ROOT}/fr \
- --config-yaml config_st_fr_en.yaml --train-subset train_st_fr_en --valid-subset dev_st_fr_en \
-  --save-dir ${ST_SAVE_DIR} --num-workers 4 --max-update 30000 --max-tokens 40000 \
- --task speech_to_text --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --report-accuracy \
- --arch s2t_transformer_s --encoder-freezing-updates 1000 --optimizer adam --lr 2e-3 \
- --lr-scheduler inverse_sqrt --warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 \
- --load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}
-```
-where `ST_SAVE_DIR` is the checkpoint root path. The ST encoder is pre-trained by En ASR for faster training and better
-performance: `--load-pretrained-encoder-from <path to ASR checkpoint>`. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU.
-You may want to update it accordingly when using more than 1 GPU.
-
-#### Inference & Evaluation
-Average the last 10 checkpoints and evaluate on test split:
-```bash
-CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
-python scripts/average_checkpoints.py \
- --inputs ${ST_SAVE_DIR} --num-epoch-checkpoints 10 \
- --output "${ST_SAVE_DIR}/${CHECKPOINT_FILENAME}"
-fairseq-generate ${COVOST_ROOT}/fr \
- --config-yaml config_st_fr_en.yaml --gen-subset test_st_fr_en --task speech_to_text \
- --path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \
- --max-tokens 50000 --beam 5 --scoring sacrebleu
-```
-
-## Interactive Decoding
-Launch the interactive console via
-```bash
-fairseq-interactive ${COVOST_ROOT}/fr --config-yaml config_st_fr_en.yaml \
- --task speech_to_text --path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \
- --max-tokens 50000 --beam 5
-```
-Type in WAV/FLAC/OGG audio paths (one per line) after the prompt.
-
-#### Results
-| --arch | Params | Fr-En | De-En | Es-En | Ca-En | En-De | En-Ca | En-Fa | En-Et | Model |
-|---|---|---|---|---|---|---|---|---|---|---|
-| s2t_transformer_s | 31M | [27.2](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_fr_en_st_transformer_s.pt) | [17.7](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_de_en_st_transformer_s.pt) | [23.1](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_es_en_st_transformer_s.pt) | [19.3](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_ca_en_st_transformer_s.pt) | [16.1](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_de_st_transformer_s.pt) | [21.6](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_ca_st_transformer_s.pt) | [12.9](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_fa_st_transformer_s.pt) | [12.8](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_et_st_transformer_s.pt) | (<-Download) |
-
-[[Back]](..)
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/nat/insertion_transformer.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/nat/insertion_transformer.py
deleted file mode 100644
index bc28000f59a3b9e8098f9fe710cc8335d39eea3e..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/nat/insertion_transformer.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.nat import (
- FairseqNATModel,
- LevenshteinTransformerDecoder,
- LevenshteinTransformerModel,
- ensemble_decoder,
-)
-from fairseq.models.transformer import Linear
-from fairseq.modules.transformer_sentence_encoder import init_bert_params
-from fairseq.utils import new_arange
-
-
-class NegativeDistanceScore(object):
- def __init__(self):
-
- # pre-compute some values
- self.scores = {}
-
- self.scores[0.5] = self.compute_score_full(50, 0.5)
- self.scores[1.0] = self.compute_score_full(50, 1.0)
- self.scores[2.0] = self.compute_score_full(50, 2.0)
-
- def __call__(self, i, L, tau):
- if (tau is None) or (tau > 1000):
- return 1 / L
-
- if tau in self.scores:
- if L < self.scores[tau].shape[0]:
- return self.scores[tau][L - 1, i]
- return self.compute_score(L, tau)[i]
-
- def compute_score(self, L, tau):
- s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
- s = np.exp(s - s.max())
- return s / s.sum()
-
- def compute_score_full(self, L, tau):
- s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau
- s = np.tril(s, 0) + np.triu(s - float("inf"), 1)
- s = np.exp(s - s.max(1, keepdims=True))
- return s / s.sum(1, keepdims=True)
-
-
-neg_scorer = NegativeDistanceScore()
-
-
-def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
- try:
- from fairseq import libnat
- except ImportError as e:
- import sys
-
- sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n")
- raise e
-
- B = in_tokens.size(0)
- T = in_tokens.size(1)
- V = vocab_size
-
- with torch.cuda.device_of(in_tokens):
- in_tokens_list = [
- [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
- ]
- out_tokens_list = [
- [t for t in s if t != padding_idx]
- for i, s in enumerate(out_tokens.tolist())
- ]
-
- full_labels = libnat.suggested_ed2_path(
- in_tokens_list, out_tokens_list, padding_idx
- )
- insert_labels = [a[:-1] for a in full_labels]
-
- # numericalize1
- insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float()
- insert_index, insert_labels = zip(
- *[
- (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau))
- for i, labels in enumerate(insert_labels)
- for j, label in enumerate(labels[1:-1])
- for k, w in enumerate(label)
- ]
- ) # HACK 1:-1
- insert_index, insert_labels = [
- torch.tensor(list(a), device=in_tokens.device)
- for a in [insert_index, insert_labels]
- ]
- insert_label_tensors.scatter_(0, insert_index.long(), insert_labels)
- insert_label_tensors = insert_label_tensors.view(B, T - 1, V)
-
- return insert_label_tensors
-
-
-def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx):
-
- padding_masks = in_tokens[:, 1:].eq(padding_idx)
- word_ins_scores.masked_fill_(padding_masks, 0.0)
- word_ins_pred.masked_fill_(padding_masks, padding_idx)
-
- in_coords = new_arange(in_tokens).type_as(in_scores)
-
- # shift all padding predictions to infinite
- out_coords = (in_coords[:, 1:] - 0.5).masked_fill(
- word_ins_pred.eq(padding_idx), float("inf")
- )
- out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
- out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords)
- out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords)
- return out_tokens, out_scores
-
-
-@register_model("insertion_transformer")
-class InsertionTransformerModel(LevenshteinTransformerModel):
- def __init__(self, args, encoder, decoder):
- super().__init__(args, encoder, decoder)
-
- @staticmethod
- def add_args(parser):
- FairseqNATModel.add_args(parser)
- parser.add_argument("--label-tau", default=None, type=float)
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens)
- if getattr(args, "apply_bert_init", False):
- decoder.apply(init_bert_params)
- return decoder
-
- def forward(
- self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
- ):
-
- assert tgt_tokens is not None, "forward function only supports training."
-
- # encoding
- encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
-
- # generate training labels for insertion
- word_ins_out = self.decoder.forward_word_ins(
- normalize=False,
- prev_output_tokens=prev_output_tokens,
- encoder_out=encoder_out,
- )
-
- word_ins_tgt = _get_ins_targets(
- prev_output_tokens,
- tgt_tokens,
- self.pad,
- self.unk,
- len(self.tgt_dict),
- tau=self.decoder.label_tau,
- ).type_as(word_ins_out)
- word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
-
- return {
- "word_ins": {
- "out": word_ins_out,
- "tgt": word_ins_tgt,
- "mask": word_ins_masks,
- "ls": self.args.label_smoothing,
- "nll_loss": True,
- }
- }
-
- def forward_decoder(
- self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
- ):
-
- output_tokens = decoder_out.output_tokens
- output_scores = decoder_out.output_scores
- history = decoder_out.history
-
- # TODO: decoding for InsertionTransformer
- word_ins_score = self.decoder.forward_word_ins(
- normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out
- )
-
- if eos_penalty > 0.0:
- word_ins_score[:, :, self.pad] -= eos_penalty
- word_ins_score, word_ins_pred = word_ins_score.max(-1)
- output_tokens, output_scores = _apply_ins_words(
- output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad
- )
-
- # delete some unnecessary paddings
- cut_off = output_tokens.ne(self.pad).sum(1).max()
- output_tokens = output_tokens[:, :cut_off]
- output_scores = output_scores[:, :cut_off]
-
- if history is not None:
- history.append(output_tokens.clone())
-
- return decoder_out._replace(
- output_tokens=output_tokens,
- output_scores=output_scores,
- attn=None,
- history=history,
- )
-
-
-class InsertionTransformerDecoder(LevenshteinTransformerDecoder):
- def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
- # use the TransformerDecoder's __init__
- super(LevenshteinTransformerDecoder, self).__init__(
- args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
- )
-
- self.dictionary = dictionary
- self.bos = dictionary.bos()
- self.unk = dictionary.unk()
- self.eos = dictionary.eos()
- self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim)
-
- self.label_tau = getattr(args, "label_tau", None)
-
- @ensemble_decoder
- def forward_word_ins(self, normalize, encoder_out, prev_output_tokens):
- features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0]
- features = self.pool_out(
- torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
- )
- decoder_out = self.output_layer(features)
- return F.log_softmax(decoder_out, -1) if normalize else decoder_out
-
- def forward_mask_ins(self, *args, **kwargs):
- raise NotImplementedError
-
- def forward_word_del(self, *args, **kwargs):
- raise NotImplementedError
-
-
-@register_model_architecture("insertion_transformer", "insertion_transformer")
-def insertion_base_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.activation_dropout = getattr(args, "activation_dropout", 0.0)
- args.activation_fn = getattr(args, "activation_fn", "relu")
- args.dropout = getattr(args, "dropout", 0.1)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.no_token_positional_embeddings = getattr(
- args, "no_token_positional_embeddings", False
- )
- args.adaptive_input = getattr(args, "adaptive_input", False)
- args.apply_bert_init = getattr(args, "apply_bert_init", False)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- # special for insertion transformer
- args.label_tau = getattr(args, "label_tau", None)
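The insertion_base_architecture function above fills in every hyperparameter the caller did not set, using getattr with a default. A minimal standalone sketch of that pattern, using only field names and default values visible in the code above (the Namespace construction is just an illustration, not fairseq's argument parsing):

from argparse import Namespace

def apply_defaults(args):
    # Mirror the getattr(args, name, default) pattern used by
    # insertion_base_architecture: only fields the caller left unset
    # are filled in; anything already present on args is kept as-is.
    defaults = {
        "encoder_embed_dim": 512,
        "encoder_ffn_embed_dim": 2048,
        "encoder_layers": 6,
        "label_tau": None,  # special for the insertion transformer
    }
    for name, value in defaults.items():
        setattr(args, name, getattr(args, name, value))
    return args

args = apply_defaults(Namespace(encoder_layers=12))
print(args.encoder_layers, args.encoder_embed_dim)  # -> 12 512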
diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh
deleted file mode 100644
index 21517a9b90b35a4232b5e6effe85213960113edb..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa
-#SBATCH --nodes=1
-#SBATCH --ntasks=1
-#SBATCH --gpus=8
-#SBATCH --threads-per-core=2
-#SBATCH --gpu-bind=closest
-####SBATCH --nodelist=x1004c4s2b0n0
-#SBATCH --time=24:00:00
-#SBATCH -C MI250
-#SBATCH -A gda2204
-#SBATCH --mail-type=END,FAIL
-#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.out
-#SBATCH --exclusive
-#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr
-
-
-cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts
-source /lus/home/NAT/gda2204/mshukor/.bashrc
-
-conda activate main
-
-
-rm core-python3*
-
-
-srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh
-
-
diff --git a/spaces/mthsk/sovits-models/modules/__init__.py b/spaces/mthsk/sovits-models/modules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_chexpert_2.py b/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_chexpert_2.py
deleted file mode 100644
index 889348b34670cb88a31e75a2b0426e9cc3c06e63..0000000000000000000000000000000000000000
--- a/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_chexpert_2.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-from pathlib import Path
-from torchvision.utils import save_image
-import pandas as pd
-import torch
-import torch.nn.functional as F
-from medical_diffusion.data.datasets import CheXpert_Dataset, CheXpert_2_Dataset
-import math
-
-path_out = Path().cwd()/'results'/'test'/'CheXpert_2'
-path_out.mkdir(parents=True, exist_ok=True)
-
-path_root = Path('/mnt/hdd/datasets/chest/CheXpert/ChecXpert-v10/preprocessed_tianyu')
-labels = pd.read_csv(path_root/'labels/cheXPert_label.csv', index_col='Path')
-
-
-# Get patients
-# labels['patient'] = labels.index.str.split('/').str[2]
-# labels.set_index('patient',drop=True, append=True, inplace=True)
-
-# for c in labels.columns:
-# print(labels[c].value_counts(dropna=False))
-
-ds = CheXpert_2_Dataset(
- path_root=path_root,
-)
-
-
-weights = ds.get_weights()
-
-x = torch.stack([ds[n]['source'] for n in range(4)])
-b = x.shape[0]
-save_image(x, path_out/'samples_down_0.png', nrow=int(math.sqrt(b)), normalize=True, scale_each=True)
-
-size_0 = torch.tensor(x.shape[2:])
-
-for i in range(3):
- new_size = torch.div(size_0, 2**(i+1), rounding_mode='floor' )
- x_i = F.interpolate(x, size=tuple(new_size), mode='nearest', align_corners=None)
- print(x_i.shape)
-    save_image(x_i, path_out/f'samples_down_{i+1}.png', nrow=int(math.sqrt(b)), normalize=True, scale_each=True)
\ No newline at end of file
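The loop in the test above repeatedly halves the spatial size with floor division. A tiny standalone check of the sizes that rule produces (the 320x320 starting resolution is only an assumed example, not taken from the dataset):

import torch

size_0 = torch.tensor([320, 320])  # assumed example resolution
for i in range(3):
    # Same halving rule as in the test above.
    new_size = torch.div(size_0, 2 ** (i + 1), rounding_mode='floor')
    print(tuple(new_size.tolist()))  # (160, 160), (80, 80), (40, 40)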
diff --git a/spaces/mygyasir/genious_bgremover/carvekit/web/other/__init__.py b/spaces/mygyasir/genious_bgremover/carvekit/web/other/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/mygyasir/genious_bgremover/carvekit/web/static/js/jquery-countTo.js b/spaces/mygyasir/genious_bgremover/carvekit/web/static/js/jquery-countTo.js
deleted file mode 100644
index 679271fa942df8042ca4a35400800b554317bb3a..0000000000000000000000000000000000000000
--- a/spaces/mygyasir/genious_bgremover/carvekit/web/static/js/jquery-countTo.js
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Plugin Name: Count To
-Written by: Matt Huggins - https://github.com/mhuggins/jquery-countTo
-*/
-
-(function ($) {
- $.fn.countTo = function (options) {
- options = options || {};
-
- return $(this).each(function () {
- // set options for current element
- var settings = $.extend({}, $.fn.countTo.defaults, {
- from: $(this).data('from'),
- to: $(this).data('to'),
- speed: $(this).data('speed'),
- refreshInterval: $(this).data('refresh-interval'),
- decimals: $(this).data('decimals')
- }, options);
-
- // how many times to update the value, and how much to increment the value on each update
- var loops = Math.ceil(settings.speed / settings.refreshInterval),
- increment = (settings.to - settings.from) / loops;
-
- // references & variables that will change with each update
- var self = this,
- $self = $(this),
- loopCount = 0,
- value = settings.from,
- data = $self.data('countTo') || {};
-
- $self.data('countTo', data);
-
- // if an existing interval can be found, clear it first
- if (data.interval) {
- clearInterval(data.interval);
- }
- data.interval = setInterval(updateTimer, settings.refreshInterval);
-
- // initialize the element with the starting value
- render(value);
-
- function updateTimer() {
- value += increment;
- loopCount++;
-
- render(value);
-
- if (typeof(settings.onUpdate) == 'function') {
- settings.onUpdate.call(self, value);
- }
-
- if (loopCount >= loops) {
- // remove the interval
- $self.removeData('countTo');
- clearInterval(data.interval);
- value = settings.to;
-
- if (typeof(settings.onComplete) == 'function') {
- settings.onComplete.call(self, value);
- }
- }
- }
-
- function render(value) {
- var formattedValue = settings.formatter.call(self, value, settings);
- $self.text(formattedValue);
- }
- });
- };
-
- $.fn.countTo.defaults = {
- from: 0, // the number the element should start at
- to: 0, // the number the element should end at
- speed: 1000, // how long it should take to count between the target numbers
- refreshInterval: 100, // how often the element should be updated
- decimals: 0, // the number of decimal places to show
- formatter: formatter, // handler for formatting the value before rendering
- onUpdate: null, // callback method for every time the element is updated
- onComplete: null // callback method for when the element finishes updating
- };
-
- function formatter(value, settings) {
- return value.toFixed(settings.decimals);
- }
-}(jQuery));
\ No newline at end of file
diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/tests/test_loss_utils.py b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/tests/test_loss_utils.py
deleted file mode 100644
index 74a256a34e179ea093c4e4a950f5f093fab3663a..0000000000000000000000000000000000000000
--- a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/tests/test_loss_utils.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from pytorch_caney.loss.utils import to_tensor
-
-import unittest
-import numpy as np
-import torch
-
-
-class TestToTensorFunction(unittest.TestCase):
-
- def test_tensor_input(self):
- tensor = torch.tensor([1, 2, 3])
- result = to_tensor(tensor)
- self.assertTrue(torch.equal(result, tensor))
-
- def test_tensor_input_with_dtype(self):
- tensor = torch.tensor([1, 2, 3])
- result = to_tensor(tensor, dtype=torch.float32)
- self.assertTrue(torch.equal(result, tensor.float()))
-
- def test_numpy_array_input(self):
- numpy_array = np.array([1, 2, 3])
- expected_tensor = torch.tensor([1, 2, 3])
- result = to_tensor(numpy_array)
- self.assertTrue(torch.equal(result, expected_tensor))
-
- def test_numpy_array_input_with_dtype(self):
- numpy_array = np.array([1, 2, 3])
- expected_tensor = torch.tensor([1, 2, 3], dtype=torch.float32)
- result = to_tensor(numpy_array, dtype=torch.float32)
- self.assertTrue(torch.equal(result, expected_tensor))
-
- def test_list_input(self):
- input_list = [1, 2, 3]
- expected_tensor = torch.tensor([1, 2, 3])
- result = to_tensor(input_list)
- self.assertTrue(torch.equal(result, expected_tensor))
-
- def test_list_input_with_dtype(self):
- input_list = [1, 2, 3]
- expected_tensor = torch.tensor([1, 2, 3], dtype=torch.float32)
- result = to_tensor(input_list, dtype=torch.float32)
- self.assertTrue(torch.equal(result, expected_tensor))
-
-
-if __name__ == '__main__':
- unittest.main()
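The tests above pin down the behaviour expected of to_tensor: torch tensors pass through (optionally cast), while numpy arrays and plain lists are converted first. A minimal implementation consistent with those tests might look like the sketch below; it is an illustration, not the actual pytorch_caney.loss.utils code:

import numpy as np
import torch

def to_tensor(data, dtype=None):
    # Convert numpy arrays and lists to tensors; leave tensors untouched.
    if isinstance(data, np.ndarray):
        data = torch.from_numpy(data)
    elif not isinstance(data, torch.Tensor):
        data = torch.tensor(data)
    # Optional dtype cast, as exercised by the *_with_dtype tests above.
    if dtype is not None:
        data = data.to(dtype)
    return data

print(to_tensor([1, 2, 3], dtype=torch.float32))  # tensor([1., 2., 3.])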
diff --git a/spaces/navervision/MLSD/templates/index_scan.html b/spaces/navervision/MLSD/templates/index_scan.html
deleted file mode 100644
index 5d8cdc8a0af5674a6a0a408439fe5ec210dd97bd..0000000000000000000000000000000000000000
--- a/spaces/navervision/MLSD/templates/index_scan.html
+++ /dev/null
@@ -1,128 +0,0 @@
-<!-- Template body stripped during extraction; the head, scripts, and layout markup are not recoverable. -->
-<!-- Recoverable content: page title "MLSD demo", an "Output_image" panel, and an "Input_image" panel. -->
diff --git a/spaces/neel692/NSFW-VS-SFW-Image-Classification/README.md b/spaces/neel692/NSFW-VS-SFW-Image-Classification/README.md
deleted file mode 100644
index daad91d28660200292ba4ab4c4787bb916eb7bda..0000000000000000000000000000000000000000
--- a/spaces/neel692/NSFW-VS-SFW-Image-Classification/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: NSFW VS SFW Image Classification
-emoji: 👁
-colorFrom: pink
-colorTo: pink
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aspekte Neu B1 Plus Arbeitsbuch Pdf 69 BEST.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aspekte Neu B1 Plus Arbeitsbuch Pdf 69 BEST.md
deleted file mode 100644
index 4f000208c798f0b920f61fa5af4350b725dffdec..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aspekte Neu B1 Plus Arbeitsbuch Pdf 69 BEST.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
Aspekte Neu B1 Plus Arbeitsbuch PDF 69: A Comprehensive Guide
-
If you are looking for a reliable and effective way to learn German at the intermediate level, you might want to consider Aspekte Neu B1 Plus Arbeitsbuch PDF 69. This is a digital book that contains exercises and vocabulary for the Aspekte Neu B1 Plus course, which is designed to help learners develop their communicative skills and prepare for the Goethe-Zertifikat B1 exam.
-
In this article, we will provide you with a comprehensive guide on what Aspekte Neu B1 Plus Arbeitsbuch PDF 69 is, what it offers, how to use it, and where to get it. We will also answer some frequently asked questions about this product and give you some tips on how to make the most of it.
Aspekte Neu B1 Plus Arbeitsbuch PDF 69 is a digital book that contains exercises and vocabulary for the Aspekte Neu B1 Plus course. Aspekte Neu is a series of textbooks and workbooks that cover the levels A1 to C1 of the Common European Framework of Reference for Languages (CEFR). The series is published by Ernst Klett Sprachen, a leading publisher of German language learning materials.
-
The Aspekte Neu B1 Plus course is aimed at intermediate learners who want to improve their German skills in various contexts and situations. The course covers topics such as people, living spaces, health, leisure, education, professions, relationships, consumption, travel, and nature. The course also focuses on developing the four language skills: listening, speaking, reading, and writing.
-
The Aspekte Neu B1 Plus Arbeitsbuch PDF 69 is a digital version of the workbook that accompanies the textbook. The workbook contains exercises that reinforce the grammar, vocabulary, and skills learned in the textbook. The workbook also includes a glossary with translations of key words and phrases in English, French, Spanish, Italian, Turkish, Russian, Arabic, and Chinese.
-
What are the benefits of Aspekte Neu B1 Plus Arbeitsbuch PDF 69?
-
Aspekte Neu B1 Plus Arbeitsbuch PDF 69 offers several benefits for learners who want to practice and improve their German at the intermediate level. Some of these benefits are:
-
-
It provides a variety of exercises that cater to different learning styles and preferences. The exercises include multiple choice, gap fill, matching, true/false, word formation, sentence transformation, writing tasks, and more.
-
It offers feedback and solutions for all the exercises. The workbook contains an answer key at the end of each chapter that allows learners to check their progress and correct their mistakes.
-
It allows learners to access the workbook anytime and anywhere. The workbook is available as a PDF file that can be downloaded or read online from any device that supports PDF format. Learners can also print out the pages they need or use them on screen.
-
It helps learners prepare for the Goethe-Zertifikat B1 exam. The workbook follows the structure and content of the exam and provides practice tests and tips on how to succeed in each section.
-
-
How to use Aspekte Neu B1 Plus Arbeitsbuch PDF 69?
-
Aspekte Neu B1 Plus Arbeitsbuch PDF 69 can be used in different ways depending on the learner's goals and preferences. Here are some suggestions on how to use it:
-
-
Use it as a supplement to the textbook. The workbook is designed to complement the textbook and provide additional practice and reinforcement of the topics and skills covered in each chapter. Learners can use the workbook after completing each chapter in the textbook or as a review before moving on to the next one.
-
Use it as a self-study tool. The workbook can also be used independently from the textbook as a way to practice and improve one's German at home or on the go. Learners can choose the chapters and exercises that interest them or suit their needs and work at their own pace.
-
Use it as a test preparation tool
- cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Systran 7 Premium Translator Crack [TOP].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Systran 7 Premium Translator Crack [TOP].md
deleted file mode 100644
index a7cbf9a305eb8dce624eb3049d18d1d710dedcf8..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Systran 7 Premium Translator Crack [TOP].md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
How to Get Systran 7 Premium Translator for Free
-
Systran 7 Premium Translator is a powerful and advanced professional translation software that can help you create and manage high quality multilingual documents[^2^]. It supports over 50 languages and can translate text, speech, images, web pages, and documents. However, it is also very expensive and requires a license key to activate.
-
If you want to get Systran 7 Premium Translator for free, you might be tempted to look for a cracked version online. A cracked version is a modified version of the software that bypasses the license verification and allows you to use it without paying. However, this is not a good idea for several reasons:
It is illegal and unethical. By downloading and using a cracked version of Systran 7 Premium Translator, you are violating the intellectual property rights of the software developer and breaking the law. You could face legal consequences or fines if you are caught.
-
It is risky and unsafe. A cracked version of Systran 7 Premium Translator might contain viruses, malware, spyware, or other harmful programs that could damage your computer or steal your personal information. You could also expose yourself to cyberattacks or identity theft if you use a cracked version online.
-
It is unreliable and poor quality. A cracked version of Systran 7 Premium Translator might not work properly or have errors, bugs, or glitches that could affect the accuracy and quality of your translations. You could also miss out on updates, features, support, and security patches that are available for the official version.
-
-
Therefore, we do not recommend downloading or using a cracked version of Systran 7 Premium Translator. Instead, we suggest you try one of these alternatives:
-
-
Use the free online version of Systran Translate[^3^]. This is a web-based service that allows you to translate text, speech, images, web pages, and documents in over 50 languages. It is fast, easy, and secure. However, it has some limitations such as word count, file size, and translation quality.
-
Use a free trial of Systran 7 Premium Translator[^2^]. This is a way to test the software for a limited time before buying it. You can download it from the official website and use it for 30 days with full functionality. However, you will need to provide your email address and agree to receive marketing communications from Systran.
-
Buy Systran 7 Premium Translator[^2^]. This is the best option if you want to enjoy all the benefits and features of the software without any risks or restrictions. You can buy it from the official website or from authorized resellers. The price varies depending on the language pair and the number of users.
-
-
We hope this article has helped you understand why you should avoid using a cracked version of Systran 7 Premium Translator and what are some better options for your translation needs.
-
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Its Plc Professional Edition Activation Key.md b/spaces/quidiaMuxgu/Expedit-SAM/Its Plc Professional Edition Activation Key.md
deleted file mode 100644
index 0090098804c50b5a820d54be7aace6627555979a..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Its Plc Professional Edition Activation Key.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
ITS PLC Professional Edition: A Powerful Tool for PLC Training and Education
-
ITS PLC Professional Edition is a software that simulates industrial systems that can be controlled by programmable logic controllers (PLCs). It is designed for individuals who want to learn how to program and operate PLCs in realistic scenarios. ITS PLC Professional Edition works with all the major PLC brands, such as Allen-Bradley, Siemens, Mitsubishi, Omron, Schneider and many others. It also supports different programming languages, such as ladder logic, function block diagram, instruction list and structured text.
-
ITS PLC Professional Edition offers five systems inspired by common industrial plants: Sorting, Batching, Palletizer, Pick & Place and Automatic Warehouse. Each system is a typical industrial application of PLCs that will give you the opportunity to practice real world control tasks. You can connect your own PLC to the simulated system and control it in real-time. You can also monitor the system variables, inputs and outputs using the built-in HMI (Human Machine Interface).
ITS PLC Professional Edition is an essential tool for PLC training and education. It will improve your PLC skills and knowledge with high quality simulations of industrial plants. You can download a free trial version from the official website[^1^] or buy a license for $299.00 USD. ITS PLC Professional Edition is compatible with Windows XP/Vista/7/8/10/11.
ITS PLC Professional Edition is not only a simulation software, but also a learning platform. It includes a comprehensive exercise book that guides you through the specification and programming of logic control applications in the ITS PLC training environment. The exercise book contains 72 PLC programs that cover different aspects of industrial automation, such as sequential control, timers, counters, sensors, actuators, logic gates, arithmetic operations and more. You can use the exercise book as a self-study material or as a teaching resource for your classes.
-
ITS PLC Professional Edition is also compatible with other software tools that can enhance your PLC training experience. For example, you can use ITS PLC Professional Edition with Factory I/O, a 3D simulation software that allows you to create and control your own industrial systems. You can also use ITS PLC Professional Edition with AUTOMGEN, a software that supports different programming languages and standards, such as GRAFCET (IEC 60848), SFC (IEC 61131-3) and LADDER (IEC 61131-3). By using these software tools together, you can create and test complex and realistic industrial applications.
-
ITS PLC Professional Edition is a powerful and versatile tool for PLC training and education. It will help you to master the fundamentals and advanced concepts of PLC programming and operation. It will also prepare you for the challenges and opportunities of the industrial automation field. If you are interested in learning more about ITS PLC Professional Edition, you can visit the official website or watch the video demonstration. You can also contact the developer Real Games Lda for any questions or feedback.
- d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download __EXCLUSIVE__.md b/spaces/quidiaMuxgu/Expedit-SAM/Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download __EXCLUSIVE__.md
deleted file mode 100644
index 6d37d9cd04dea0a62e66cfcd3ad7d26999da2d71..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download __EXCLUSIVE__.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
- malwarebytes anti-malware premium offers powerful protection against viruses, ransomware, and other types of malware, and it has a variety of features that can meet the needs of both personal and business users. so if you're looking for an effective way to keep your devices safe from online threats, then malwarebytes anti-malware premium is the app for you!
-
-
our main goal is to protect our users from malicious hosts that could either be servers participating in drive-by downloads or even home computers spewing spam, jrme segura, senior security researcher at malwarebytes, told tf.
-
additionally, malwarebytes anti-malware also offers a business edition that provides extra features for businesses such as device management and protection for multiple devices. this makes it a great choice for businesses who want to protect their employees devices from online threats.
What is ex4-to-mq4 v4.0.427.rar and why do you need it?
-
If you are a Forex trader who uses MetaTrader as your trading platform, you may have encountered EX4 files that contain trading robots, technical indicators or scripts. These files are executable programs that run on MetaTrader and perform various trading tasks. However, sometimes you may want to access the source code of these files, which is usually written in the MQL language and saved as MQ4 files. This is where ex4-to-mq4 v4.0.427.rar comes in handy.
Ex4-to-mq4 v4.0.427.rar is a software tool that can decompile any EX4 file and save its source code as an MQ4 file. A decompiler is a reverse engineering tool that can recover the original code from an executable file.
-
What is an EX4 file?
-
An EX4 file is a compiled version of an MQ4 file that can be executed by MetaTrader. MetaTrader is a popular trading platform that allows traders to trade on various financial markets, such as Forex, stocks, commodities or cryptocurrencies. MetaTrader has a built-in programming language called MQL, which stands for MetaQuotes Language. MQL allows traders to create their own trading robots, technical indicators or scripts that can automate various trading tasks or provide useful information.
-
An MQ4 file is a source code file that contains the MQL code of a trading robot, technical indicator or script. An MQ4 file can be edited and modified using MetaEditor, which is an integrated development environment (IDE) for MQL programming.
-
ex4 to mq4 decompiler v4.0.427.4 download
-ex4 to mq4 decompiler online
-ex4 to mq4 decompiler 2023 edition
-ex4 to mq4 decompiler service
-ex4 to mq4 decompiler free
-ex4 to mq4 decompiler crack
-ex4 to mq4 decompiler software
-ex4 to mq4 decompiler tutorial
-ex4 to mq4 decompiler for mt4 build 600+
-ex4 to mq4 decompiler for mt5
-ex4 to mq4 converter
-ex4 to mq4 source code recovery
-ex4 to mq4 reverse engineering
-ex4 to mq4 debugger
-ex4 to mq4 manual decompilation
-ex4 to mq4 pro
-ex4 to mq4 org
-ex4 to mq4 carigold
-ex4 to mq4 trello
-ex4 to mq4 4shared
-how to decompile ex4 file into source code mq4
-how to use ex4 to mq4 decompiler v4.0.427.1
-how to convert ex4 file to mq4 format
-how to recover source code from ex4 file
-how to reverse engineer an ex4 file
-how to debug an ex4 file
-how to manually decompile an ex4 file
-how to use ex4tomq.pro service
-how to use ex44tomq.org service
-how to use carigold forum for decompilation
-how to use trello for converting files
-how to use 44shared for downloading files
-what is the difference between ex44 and mq44 files
-what is the purpose of decompiling files
-what is the best quality of decompilation for forex experts and indicators
-what is the latest version of the decompiler software
-what is the problem and solution of ex44 file decompilation
-what is the new mechanism for compiling programs in metatrader build 600+
-what is the reason for metaquotes inc. changing the compilation method
-what is the mql market service and how does it work
-where can I find the best decompiler software for download
-where can I find the best online service for decompilation
-where can I find the best tutorial for decompiling files
-where can I find the best forum for discussing decompilation issues
-where can I find the best tools for reverse-engineering files
-why do I need the source code of a trading robot in the mq44 format
-why do some developers forget to include the source code file with their product
-why do some traders want to understand the trading strategy they use
-why did the automatic decompilation of ex44 files become impossible after 20144
-why is it important for a trader to have the source code of a trading robot
-
To run an MQ4 file on MetaTrader, it needs to be compiled into an EX4 file, which is a binary executable file that can be loaded and executed by MetaTrader. The compilation process converts the human-readable MQL code into machine-readable binary code that can be understood by MetaTrader.
-
What is a decompiler?
-
A decompiler is a software tool that can perform the opposite process of compilation, i.e., it can recover the source code from an executable file. A decompiler can be useful when the source code of a program is lost or unavailable, but the executable file is still functional.
-
Ex4-to-mq4 v4.0.427.rar is a decompiler that can decompile any EX4 file and save its source code as an MQ4 file. The decompilation process is done automatically by the software, without requiring any user input.
-
Why do you need ex4-to-mq4 v4.0.427.rar?
-
There are several reasons why you may want to decompile an EX4 file and access its source code as an MQ4 file.
-
To understand the trading strategy of an EX4 file
-
If you have downloaded or purchased an EX4 file that contains a trading robot or a technical indicator, you may want to know how it works and what trading strategy it uses. By decompiling the EX4 file and viewing its source code as an MQ4 file, you can gain insight into the logic and algorithm behind the program.
-
This can help you to evaluate the performance and reliability of the program, as well as to learn from its design and implementation.
-
To modify or improve an EX4 file
-
If you have an EX4 file that contains a trading robot or a technical indicator that you like, but you want to make some changes or improvements to it, you need to access its source code as an MQ4 file.
-
By decompiling the EX4 file and editing its source code as an MQ4 file, you can customize and optimize the program according to your preferences and needs.
-
For example, you can change some parameters or settings of the program, add some features or functions to it, fix some bugs or errors in it, or integrate it with other programs.
-
To protect your intellectual property
-
How to use ex4-to-mq4 v4.0.427.rar?
-
Using ex4-to-mq4 v4.0.427.rar is very simple and straightforward. Here are the steps you need to follow:
-
Download and install ex4-to-mq4 v4.0.427.rar
-
You can download ex4-to-mq4 v4.0.427.rar from various online sources, such as this one. The file is a compressed archive that contains the executable file of the decompiler and some other files. You need to extract the archive to a folder on your computer using a software tool like WinRAR or 7-Zip.
-
After extracting the archive, you need to run the executable file of the decompiler, which is called EX4-TO-MQ4.exe. You may need to grant permission to run the file if your antivirus or firewall software warns you about it.
-
Select an EX4 file to decompile
-
Once you run the decompiler, you will see a simple user interface that allows you to select an EX4 file to decompile. You can browse your computer folders and locate the EX4 file you want to decompile, or you can drag and drop the file into the decompiler window.
-
The decompiler will automatically detect the EX4 file and display some information about it, such as its name, size, date and build number.
-
Save the MQ4 file and open it in MetaEditor
-
After selecting the EX4 file to decompile, you need to click on the Decompile button at the bottom of the decompiler window. The decompiler will start working and show you a progress bar that indicates how much of the decompilation process is completed.
-
When the decompilation is finished, the decompiler will save the MQ4 file in the same folder as the EX4 file and show you a message that confirms the successful decompilation.
-
You can then open the MQ4 file in MetaEditor and view its source code. You can also edit or modify the code as you wish.
-
What are the limitations of ex4-to-mq4 v4.0.427.rar?
-
While ex4-to-mq4 v4.0.427.rar is a useful and powerful tool that can decompile any EX4 file and save its source code as an MQ4 file, it also has some limitations that you need to be aware of.
-
It only works for EX4 files compiled by MetaTrader build no higher than 600
-
The most important limitation of ex4-to-mq4 v4.0.427.rar is that it only works for EX4 files that were compiled by MetaTrader build no higher than 600. This means that the EX4 file must have been created in 2013 or earlier.
-
It may not recover all the original source code features
-
Another limitation of ex4-to-mq4 v4.0.427.rar is that it may not recover all the original features of the source code, such as comments, variable names, formatting or indentation. This is because some of these features are lost or changed during the compilation process and cannot be restored by the decompiler.
-
Therefore, the MQ4 file that you get from the decompiler may not look exactly like the original MQ4 file that was used to create the EX4 file. However, the decompiler will try to preserve the functionality and logic of the program as much as possible.
-
It may violate the developer's rights and terms of use
-
The final limitation of ex4-to-mq4 v4.0.427.rar is that it may violate the rights and terms of use of the developer who created the EX4 file. Some developers may not want their source code to be exposed or modified by others, and they may protect their EX4 files with encryption or obfuscation techniques.
-
By decompiling their EX4 files and accessing their source code, you may be infringing their intellectual property rights and breaking their terms of use. This may result in legal consequences or ethical issues.
-
Therefore, before using ex4-to-mq4 v4.0.427.rar to decompile an EX4 file, you should always check the developer's website or contact them to ask for their permission and consent.
-
Conclusion
-
In conclusion, ex4-to-mq4 v4.0.427.rar is a software tool that can decompile any EX4 file and save its source code as an MQ4 file. This can be useful for various purposes, such as understanding the trading strategy of an EX4 file, modifying or improving an EX4 file, or protecting your intellectual property.
-
However, ex4-to-mq4 v4.0.427.rar also has some limitations that you need to be aware of, such as working only for EX4 files compiled by MetaTrader build no higher than 600, not recovering all the original source code features, and possibly violating the developer's rights and terms of use.
-
Therefore, before using ex4-to-mq4 v4.0.427.rar to decompile an EX4 file, you should always do your research and ask for permission from the developer.
-
FAQs
-
What is the difference between EX4 and MQ4 files?
-
An EX4 file is a compiled version of an MQ4 file that can be executed by MetaTrader. An MQ4 file is a source code file that contains the MQL code of a trading robot, technical indicator or script.
-
What is ex4-to-mq4 v4.0.427.rar?
-
Ex4-to-mq4 v4.0.427.rar is a software tool that can decompile any EX4 file and save its source code as an MQ4 file.
-
Why do I need ex4-to-mq4 v4.0.427.rar?
-
Why do I need ex4-to-mq4 v4.0.427.rar?
-
You may need ex4-to-mq4 v4.0.427.rar to understand the trading strategy of an EX4 file, to modify or improve an EX4 file, or to protect your intellectual property.
-
How do I use ex4-to-mq4 v4.0.427.rar?
-
You need to download and install ex4-to-mq4 v4.0.427.rar, select an EX4 file to decompile, and save the MQ4 file and open it in MetaEditor.
-
What are the limitations of ex4-to-mq4 v4.0.427.rar?
-
Ex4-to-mq4 v4.0.427.rar only works for EX4 files compiled by MetaTrader build no higher than 600, it may not recover all the original source code features, and it may violate the developer's rights and terms of use.
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/preprocess.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/preprocess.py
deleted file mode 100644
index fe5ab25ef7cb4adeb76cad11962f179d6a38edcc..0000000000000000000000000000000000000000
--- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/preprocess.py
+++ /dev/null
@@ -1,285 +0,0 @@
-from multiprocess.pool import ThreadPool
-from speaker_encoder.params_data import *
-from speaker_encoder.config import librispeech_datasets, anglophone_nationalites
-from datetime import datetime
-from speaker_encoder import audio
-from pathlib import Path
-from tqdm import tqdm
-import numpy as np
-
-
-class DatasetLog:
- """
- Registers metadata about the dataset in a text file.
- """
- def __init__(self, root, name):
- self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w")
- self.sample_data = dict()
-
- start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M"))
- self.write_line("Creating dataset %s on %s" % (name, start_time))
- self.write_line("-----")
- self._log_params()
-
- def _log_params(self):
- from speaker_encoder import params_data
- self.write_line("Parameter values:")
- for param_name in (p for p in dir(params_data) if not p.startswith("__")):
- value = getattr(params_data, param_name)
- self.write_line("\t%s: %s" % (param_name, value))
- self.write_line("-----")
-
- def write_line(self, line):
- self.text_file.write("%s\n" % line)
-
- def add_sample(self, **kwargs):
- for param_name, value in kwargs.items():
- if not param_name in self.sample_data:
- self.sample_data[param_name] = []
- self.sample_data[param_name].append(value)
-
- def finalize(self):
- self.write_line("Statistics:")
- for param_name, values in self.sample_data.items():
- self.write_line("\t%s:" % param_name)
- self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values)))
- self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values)))
- self.write_line("-----")
- end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M"))
- self.write_line("Finished on %s" % end_time)
- self.text_file.close()
-
-
-def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog):
- dataset_root = datasets_root.joinpath(dataset_name)
- if not dataset_root.exists():
- print("Couldn\'t find %s, skipping this dataset." % dataset_root)
- return None, None
- return dataset_root, DatasetLog(out_dir, dataset_name)
-
-
-def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, extension,
- skip_existing, logger):
- print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs)))
-
- # Function to preprocess utterances for one speaker
- def preprocess_speaker(speaker_dir: Path):
- # Give a name to the speaker that includes its dataset
- speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)
-
- # Create an output directory with that name, as well as a txt file containing a
- # reference to each source file.
- speaker_out_dir = out_dir.joinpath(speaker_name)
- speaker_out_dir.mkdir(exist_ok=True)
- sources_fpath = speaker_out_dir.joinpath("_sources.txt")
-
- # There's a possibility that the preprocessing was interrupted earlier, check if
- # there already is a sources file.
- if sources_fpath.exists():
- try:
- with sources_fpath.open("r") as sources_file:
- existing_fnames = {line.split(",")[0] for line in sources_file}
- except:
- existing_fnames = {}
- else:
- existing_fnames = {}
-
- # Gather all audio files for that speaker recursively
- sources_file = sources_fpath.open("a" if skip_existing else "w")
- for in_fpath in speaker_dir.glob("**/*.%s" % extension):
- # Check if the target output file already exists
- out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
- out_fname = out_fname.replace(".%s" % extension, ".npy")
- if skip_existing and out_fname in existing_fnames:
- continue
-
- # Load and preprocess the waveform
- wav = audio.preprocess_wav(in_fpath)
- if len(wav) == 0:
- continue
-
- # Create the mel spectrogram, discard those that are too short
- frames = audio.wav_to_mel_spectrogram(wav)
- if len(frames) < partials_n_frames:
- continue
-
- out_fpath = speaker_out_dir.joinpath(out_fname)
- np.save(out_fpath, frames)
- logger.add_sample(duration=len(wav) / sampling_rate)
- sources_file.write("%s,%s\n" % (out_fname, in_fpath))
-
- sources_file.close()
-
- # Process the utterances for each speaker
- with ThreadPool(8) as pool:
- list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs),
- unit="speakers"))
- logger.finalize()
- print("Done preprocessing %s.\n" % dataset_name)
-
-
-# Function to preprocess utterances for one speaker
-def __preprocess_speaker(speaker_dir: Path, datasets_root: Path, out_dir: Path, extension: str, skip_existing: bool):
- # Give a name to the speaker that includes its dataset
- speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)
-
- # Create an output directory with that name, as well as a txt file containing a
- # reference to each source file.
- speaker_out_dir = out_dir.joinpath(speaker_name)
- speaker_out_dir.mkdir(exist_ok=True)
- sources_fpath = speaker_out_dir.joinpath("_sources.txt")
-
- # There's a possibility that the preprocessing was interrupted earlier, check if
- # there already is a sources file.
- # if sources_fpath.exists():
- # try:
- # with sources_fpath.open("r") as sources_file:
- # existing_fnames = {line.split(",")[0] for line in sources_file}
- # except:
- # existing_fnames = {}
- # else:
- # existing_fnames = {}
- existing_fnames = {}
- # Gather all audio files for that speaker recursively
- sources_file = sources_fpath.open("a" if skip_existing else "w")
-
- for in_fpath in speaker_dir.glob("**/*.%s" % extension):
- # Check if the target output file already exists
- out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
- out_fname = out_fname.replace(".%s" % extension, ".npy")
- if skip_existing and out_fname in existing_fnames:
- continue
-
- # Load and preprocess the waveform
- wav = audio.preprocess_wav(in_fpath)
- if len(wav) == 0:
- continue
-
- # Create the mel spectrogram, discard those that are too short
- frames = audio.wav_to_mel_spectrogram(wav)
- if len(frames) < partials_n_frames:
- continue
-
- out_fpath = speaker_out_dir.joinpath(out_fname)
- np.save(out_fpath, frames)
- # logger.add_sample(duration=len(wav) / sampling_rate)
- sources_file.write("%s,%s\n" % (out_fname, in_fpath))
-
- sources_file.close()
- return len(wav)
-
-def _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, extension,
- skip_existing, logger):
- # from multiprocessing import Pool, cpu_count
- from pathos.multiprocessing import ProcessingPool as Pool
- # Function to preprocess utterances for one speaker
- def __preprocess_speaker(speaker_dir: Path):
- # Give a name to the speaker that includes its dataset
- speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)
-
- # Create an output directory with that name, as well as a txt file containing a
- # reference to each source file.
- speaker_out_dir = out_dir.joinpath(speaker_name)
- speaker_out_dir.mkdir(exist_ok=True)
- sources_fpath = speaker_out_dir.joinpath("_sources.txt")
-
- existing_fnames = {}
- # Gather all audio files for that speaker recursively
- sources_file = sources_fpath.open("a" if skip_existing else "w")
- wav_lens = []
- for in_fpath in speaker_dir.glob("**/*.%s" % extension):
- # Check if the target output file already exists
- out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
- out_fname = out_fname.replace(".%s" % extension, ".npy")
- if skip_existing and out_fname in existing_fnames:
- continue
-
- # Load and preprocess the waveform
- wav = audio.preprocess_wav(in_fpath)
- if len(wav) == 0:
- continue
-
- # Create the mel spectrogram, discard those that are too short
- frames = audio.wav_to_mel_spectrogram(wav)
- if len(frames) < partials_n_frames:
- continue
-
- out_fpath = speaker_out_dir.joinpath(out_fname)
- np.save(out_fpath, frames)
- # logger.add_sample(duration=len(wav) / sampling_rate)
- sources_file.write("%s,%s\n" % (out_fname, in_fpath))
- wav_lens.append(len(wav))
- sources_file.close()
- return wav_lens
-
- print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs)))
- # Process the utterances for each speaker
- # with ThreadPool(8) as pool:
- # list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs),
- # unit="speakers"))
- pool = Pool(processes=20)
- for i, wav_lens in enumerate(pool.map(__preprocess_speaker, speaker_dirs), 1):
- for wav_len in wav_lens:
- logger.add_sample(duration=wav_len / sampling_rate)
- print(f'{i}/{len(speaker_dirs)} \r')
-
- logger.finalize()
- print("Done preprocessing %s.\n" % dataset_name)
-
-
-def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False):
- for dataset_name in librispeech_datasets["train"]["other"]:
- # Initialize the preprocessing
- dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
- if not dataset_root:
- return
-
- # Preprocess all speakers
- speaker_dirs = list(dataset_root.glob("*"))
- _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "flac",
- skip_existing, logger)
-
-
-def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False):
- # Initialize the preprocessing
- dataset_name = "VoxCeleb1"
- dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
- if not dataset_root:
- return
-
- # Get the contents of the meta file
- with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile:
- metadata = [line.split("\t") for line in metafile][1:]
-
- # Select the ID and the nationality, filter out non-anglophone speakers
- nationalities = {line[0]: line[3] for line in metadata}
- # keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if
- # nationality.lower() in anglophone_nationalites]
- keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items()]
- print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." %
- (len(keep_speaker_ids), len(nationalities)))
-
- # Get the speaker directories for anglophone speakers only
- speaker_dirs = dataset_root.joinpath("wav").glob("*")
- speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if
- speaker_dir.name in keep_speaker_ids]
- print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." %
- (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs)))
-
- # Preprocess all speakers
- _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "wav",
- skip_existing, logger)
-
-
-def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False):
- # Initialize the preprocessing
- dataset_name = "VoxCeleb2"
- dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
- if not dataset_root:
- return
-
- # Get the speaker directories
- # Preprocess all speakers
- speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*"))
- _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, "m4a",
- skip_existing, logger)
diff --git a/spaces/raphaelmerx/MMS-transcription/README.md b/spaces/raphaelmerx/MMS-transcription/README.md
deleted file mode 100644
index 63910f84e4a8e8206ef6a037bf91e3db6534cd90..0000000000000000000000000000000000000000
--- a/spaces/raphaelmerx/MMS-transcription/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Transcription using MMS
-emoji: 🎤
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.37.0
-app_file: app.py
-pinned: true
-duplicated_from: ayymen/MMS-ASR
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition CC 2018 11.0.2.2 (x64) Crack Keygen [UPD].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition CC 2018 11.0.2.2 (x64) Crack Keygen [UPD].md
deleted file mode 100644
index 26e4ca42184176234d229798cf130d2b7c6edbfa..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition CC 2018 11.0.2.2 (x64) Crack Keygen [UPD].md
+++ /dev/null
@@ -1,148 +0,0 @@
-
-
How to Download and Use Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen
-
-
If you are looking for a professional audio editing software that can handle multitrack, waveform, and spectral display, you might want to check out Adobe Audition CC 2018 11.0.2.2 (x64). This powerful audio workstation is designed to accelerate video production workflows and audio finishing, and deliver a polished mix with pristine sound.
-
-
However, Adobe Audition CC 2018 11.0.2.2 (x64) is not a free software, and you need to pay a monthly subscription fee to use it. If you want to save some money and still enjoy the features of this software, you can try using a crack keygen that can generate a valid serial number for you.
-
Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen
In this article, we will show you how to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen, and what are the benefits and risks of doing so.
-
-
What is Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen?
-
-
A crack keygen is a software tool that can generate a serial number or a license key for another software, such as Adobe Audition CC 2018 11.0.2.2 (x64). By using a crack keygen, you can bypass the activation process of the original software and use it without paying for it.
-
-
There are many websites that offer crack keygens for various software, but not all of them are reliable or safe. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you need to be careful when choosing a source for downloading a crack keygen.
-
-
How to Download and Use Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen?
-
-
Here are the steps to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen:
-
-
-
Download the original software from the official website of Adobe or from a trusted third-party source.
-
Install the software on your computer, but do not launch it yet.
-
Download the crack keygen from a reliable website that has positive reviews and feedback from other users.
-
Extract the crack keygen file using a file archiver program such as WinRAR or 7-Zip.
-
Run the crack keygen as an administrator and click on the Generate button.
-
Copy the generated serial number and paste it into the activation window of Adobe Audition CC 2018 11.0.2.2 (x64).
-
Click on the Activate button and wait for the confirmation message.
-
Launch the software and enjoy its features.
-
-
-
What are the Benefits and Risks of Using Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen?
-
-
Using Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen has some benefits and risks that you need to be aware of before deciding to use it.
-
-
-
The main benefit of using a crack keygen is that you can save money and use the software without paying for it. You can also access all the features and updates of the software without any limitations or restrictions.
-
-
The main risk of using a crack keygen is that you may violate the terms and conditions of the original software developer and face legal consequences for piracy or copyright infringement. You may also expose your computer to viruses, malware, or spyware that can damage your system or compromise your security and privacy.
-
-
Therefore, you need to weigh the pros and cons of using a crack keygen and decide whether it is worth it or not.
-
-
Conclusion
-
-
Adobe Audition CC 2018 11.0.2.2 (x64) is a professional audio editing software that can help you create, mix, and edit audio content with ease and efficiency.
-
-
If you want to use this software without paying for it, you can try using a crack keygen that can generate a valid serial number for you.
-
-
However, using a crack keygen has some benefits and risks that you need to consider before doing so.
-
-
We hope this article has helped you understand how to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen, and what are the advantages and disadvantages of doing so.
-
How to Use Adobe Audition CC 2018 11.0.2.2 (x64) for Audio Editing?
-
-
Now that you have downloaded and activated Adobe Audition CC 2018 11.0.2.2 (x64) using a crack keygen, you may wonder how to use it for your audio editing projects.
-
-
In this section, we will give you a brief overview of the main features and functions of Adobe Audition CC 2018 11.0.2.2 (x64) and how to use them.
-
-
Essential Sound Panel
-
-
The Essential Sound panel is a new feature in Adobe Audition CC 2018 that allows you to achieve professional-quality audio with simple and intuitive controls.
-
-
The Essential Sound panel lets you assign audio types to your clips, such as Dialogue, Music, Sound Effects, or Ambience, and apply presets or custom adjustments to them.
-
-
You can also use the Essential Sound panel to duck music behind dialogue, reduce noise, enhance speech clarity, add reverb, and more.
-
-
To use the Essential Sound panel, follow these steps:
-
-
-
Select one or more clips in the Multitrack Editor or the Waveform Editor.
-
Click on the Essential Sound tab in the right panel.
-
Choose an audio type from the drop-down menu at the top of the panel.
-
Adjust the sliders and options according to your needs and preferences.
-
Click on the Apply button to apply the changes to your clips.
-
-
-
Multitrack Editor
-
-
The Multitrack Editor is where you can record, edit, and mix multiple audio tracks in a non-destructive way.
-
-
The Multitrack Editor lets you arrange your clips on separate tracks, add effects and transitions, adjust volume and pan levels, automate keyframes, and more.
-
-
You can also use the Multitrack Editor to sync your audio with video, export your mixdowns, or send your tracks to Adobe Premiere Pro CC for further editing.
-
-
To use the Multitrack Editor, follow these steps:
-
-
-
Click on the File menu and choose New > Multitrack Session.
-
Enter a name for your session and choose a location to save it.
-
Select a template or customize your own settings for sample rate, bit depth, channelization, etc.
-
Click on the OK button to create your session.
-
Import your audio files by dragging and dropping them from the Media Browser or the Files panel to the desired tracks in the Timeline.
-
Edit your clips by trimming, splitting, moving, fading, crossfading, etc.
-
Add effects to your clips or tracks by clicking on the FX button in the track header or clip header and choosing from the available effects.
-
Mix your tracks by adjusting the volume and pan faders in the Mixer panel or in the track headers.
-
Add automation to your tracks by clicking on the Show/Hide Automation button in the track header and choosing a parameter to automate.
-
Export your mixdown by clicking on the File menu and choosing Export > Multitrack Mixdown > Entire Session or Selected Clips.
-
-
-
Waveform Editor
-
-
The Waveform Editor is where you can edit individual audio files in a destructive way.
-
-
The Waveform Editor lets you view and modify your audio waveform with various tools and commands, such as cut, copy, paste, delete, silence, amplify, normalize, etc.
-
-
You can also use the Waveform Editor to apply effects and processes to your audio file, such as noise reduction, compression, equalization, pitch correction, etc.
-
-
To use the Waveform Editor, follow these steps:
-
-
-
Import your audio file by clicking on the File menu and choosing Open or Import File.
-
Select a part of your audio file by clicking and dragging on the waveform or using the Time Selection tool.
-
Edit your selection by using the Edit menu commands or keyboard shortcuts.
-
Add effects to your selection by clicking on the Effects menu and choosing from the available effects.
-
Save your changes by clicking on the File menu and choosing Save or Save As.
-
-
-
Tips and Tricks for Using Adobe Audition CC 2018 11.0.2.2 (x64)
-
-
To help you get the most out of Adobe Audition CC 2018 11.0.2.2 (x64), here are some tips and tricks that you can use:
-
-
-
Use keyboard shortcuts to speed up your workflow. You can view and customize keyboard shortcuts by clicking on the Edit menu and choosing Keyboard Shortcuts.
-
Use workspaces to organize your panels according to your tasks. You can switch between different workspaces by clicking on the Window menu and choosing Workspace. You can also create your own custom workspaces by arranging and docking panels as you like.
-
Use markers and metadata to annotate and organize your audio files. You can add markers by pressing M while playing back or recording audio. You can edit markers by double-clicking on them in the Markers panel. You can add metadata by clicking on the File menu and choosing File Info.
-
Use batch processing to apply effects or processes to multiple files at once. You can access batch processing by clicking on the File menu and choosing Batch Process. You can also create custom scripts for batch processing by clicking on the Effects menu and choosing Favorites > Edit Favorites.
-
Use spectral frequency display to view and edit your audio in terms of frequency instead of amplitude. You can switch between waveform display and spectral frequency display by clicking on the View menu and choosing Waveform/Spectral Frequency Display. You can also use spectral editing tools such as Marquee Selection tool or Lasso Selection tool to select specific frequency ranges for editing.
-
-
-
Conclusion
-
-
In this article, we have shown you how to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen, covered its main features and functions, and explained how to use them in your audio editing projects.
-
-
We hope this article has been helpful for you and that you have learned something new about Adobe Audition CC 2018 11.0.2.2 (x64).
-
-
If you have any questions or feedback about this article or Adobe Audition CC 2018 11.0.2.2 (x64), feel free to leave a comment below or contact us via email.
-
-
Thank you for reading this article and happy audio editing!
-
-
-
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition Cs6 Crack [BEST] Rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition Cs6 Crack [BEST] Rar.md
deleted file mode 100644
index 2b20d391c287e45082937b5089f9d829a5ac041d..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition Cs6 Crack [BEST] Rar.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-... adobe audition cc, adobe audition crack, adobe audition 2021, adobe audition cs6, adobe audition autotune, adobe audition free download for windows 10, ... Adobe Audition CC 2018 download ....
-Adobe Audition CC 2018 is a professional program for working with audio content. ...
-Download Adobe Audition CC 2018 (version 10.0.0.104) - Russian version, for free.
-Download Adobe Audition CC 2018 (v10.1.3) (Russian) - torrent. ...
-Adobe Audition CC 2018 v10.1.3 (RUS) - Russian version with the possibility of download ...
-Download Adobe Audition CC 2017 (v11.1.1.1).torrent; Download Adobe Premiere Pro CC 2018 v12.0.1.55 x64-x32 (RUS).torrent.
- Download torrent Adobe Premiere Pro CC 2018 12.1.3.58 RePack [x64-x32] [2018] ...
-Adobe Premiere Pro CC 2017 v11.0.1 (x32/x64) RePack by D!akov.
-Download free Adobe Photoshop CS5 Extended 12.0.4 Rus (x32/x64) ...
-Adobe Premiere Pro CC 2017 v11.0.1 (x32/x64) RePack by D!akov ...
-Adobe Premiere Pro CC 2017 (v11.1) (x32/x64/RUS) - Free Download.
-Adobe Photoshop Lightroom Classic v8.4 Final [2018/Multi/Rus] Portable.
-Adobe Premiere Pro CC 2018 v12.1.3.58 RePack [x64-x32] [2018 ...
- Adobe Premiere Pro CC 2018 v12.1.3.58 RePack [x86-x64] [2018 ...
-Adobe Photoshop Lightroom 5 v5.2.0 Final [2013, Ml/Rus] [by ...
-Adobe Premiere Pro CC 2018 v12.1.3.58 RePack [x86-x64] [2018 ...
-Adobe Premiere Pro CC 2018 12.1.3.58 RePack by KpoJIuK
-Adobe Premiere Pro CC 2018 12.1.3.58 RePack (x86-x64) [2018 ...
-Adobe Premiere Pro CC 2018 v12.1.3.58 RePack by KpoJIuK
- Year: 2018 Software Version: CC 2018 12.1.3.58 Official website: adobe.ru Builder: by KpoJIuK Interface language: Russian/English Treatment: not required (the installer is already cured)
-- Intel® or AMD multi-core processor - 4 GB of RAM (8 GB recommended) - 2 GB of available hard-disk space - 1024x768 display - Internet connection - Microsoft® Windows® 7 with Service Pack 1, Windows 10, or Windows 8.x - 1 GB of available hard-disk space
- Description:
-Dead Island is a first-person view game that takes you on an adventure on an island teeming with zombies.
-Dead Island combines first-person action and survival horror elements.
-The game's developers also describe it as a kind of parody of famous horror films about the dead and the buried alive, as well as films about epidemics that sweep the world.
-"You'll encounter zombies in a very different way here than in many other games of this type," the game's creators tell us.
- - If, for example, the zombies in the movie "28 Days Later" are rather slow and sluggish in movement, in Dead Island they are very fast, aggressive and nimble, capable of killing, jumping and somersaulting despite their impressive size.
-And here we also introduce, for the first time in the history of computer games, zombies who carry their own weapons, and their bones can turn into stabbing and cutting weapons.
-So you have to watch out for them, especially if you're alone. 8a78ff9644
-
-
-
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar.md
deleted file mode 100644
index 328154b6584181592556b83edbfc02bc9f6c8db6..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-zip-waybackapc.app.geekmanuals.google.kaspersky.it.workbookbazaar.rar.rar.zip-waybackapc.archive.adobe.help.isis.zpa.nrg.rar.zip-waybackapc.archive.z1-7.x.rar.zip-waybackapc.archive.z2-7.x.rar.zip-waybackapc.archive.z3-7.x.rar.zip-waybackapc.archive.z4-7.x.rar.zip-waybackapc.archive.z5-7.x.rar.zip-waybackapc.archive.z6-7.x.rar.zip-waybackapc.archive.z7-7.x.rar.zip-waybackapc.archive.z8-7.x.rar.zip-waybackapc.archive.z9-7.x.rar.zip-waybackapc.archive.za-7.x.rar.zip-waybackapc.archive.zb-7.x.rar.zip-waybackapc.archive.zc-7.x.rar.zip-waybackapc.archive.zd-7.x.rar.zip-waybackapc.archive.ze-7.x.rar.zip-waybackapc.archive.zf-7.x.rar.zip-waybackapc.archive.zg-7.x.rar.zip-waybackapc.archive.zh-7.x.rar.zip-waybackapc.archive.zi-7.x.rar.zip-waybackapc.archive.zj-7.x.rar.zip-waybackapc.archive.zk-7.x.rar.zip-waybackapc.archive.zl-7.x.rar.zip-waybackapc.archive.zm-7.x.rar.zip-waybackapc.archive.zn-7.x.rar.zip-waybackapc.archive.zo-7.x.rar.zip-waybackapc.archive.zp-7.x.rar.zip-waybackapc.archive.zq-7. 4fefd39f24
-
-
-
diff --git a/spaces/reilnuud/polite/README.md b/spaces/reilnuud/polite/README.md
deleted file mode 100644
index 0309caba4a9171f77219f37ffa7b5124a96e46f5..0000000000000000000000000000000000000000
--- a/spaces/reilnuud/polite/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
----
-tags: [gradio-theme]
-title: polite
-colorFrom: orange
-colorTo: purple
-sdk: gradio
-sdk_version: 3.28.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-# polite
-## Description
-Add a description of this theme here!
-## Contributions
-Thanks to [@reilnuud](https://huggingface.co/reilnuud) for adding this gradio theme!
diff --git a/spaces/robin0307/MMOCR/configs/_base_/det_datasets/synthtext.py b/spaces/robin0307/MMOCR/configs/_base_/det_datasets/synthtext.py
deleted file mode 100644
index fb9a44b3422dae5a9788d39b0901335dfc6076a9..0000000000000000000000000000000000000000
--- a/spaces/robin0307/MMOCR/configs/_base_/det_datasets/synthtext.py
+++ /dev/null
@@ -1,18 +0,0 @@
-dataset_type = 'TextDetDataset'
-data_root = 'data/synthtext'
-
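-# Annotations are read from an LMDB file; each record is a JSON line carrying the file name, image size, and text annotations.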
-train = dict(
- type=dataset_type,
- ann_file=f'{data_root}/instances_training.lmdb',
- loader=dict(
- type='AnnFileLoader',
- repeat=1,
- file_format='lmdb',
- parser=dict(
- type='LineJsonParser',
- keys=['file_name', 'height', 'width', 'annotations'])),
- img_prefix=f'{data_root}/imgs',
- pipeline=None)
-
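-# The same annotation set is reused for both the training and test lists.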
-train_list = [train]
-test_list = [train]
diff --git a/spaces/rorallitri/biomedical-language-models/Outkast-ATLiens-Full-Album-Zip-LINK.md b/spaces/rorallitri/biomedical-language-models/Outkast-ATLiens-Full-Album-Zip-LINK.md
deleted file mode 100644
index 0e514be43451fdc38551958560abb09a9d4ddd98..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/Outkast-ATLiens-Full-Album-Zip-LINK.md
+++ /dev/null
@@ -1,62 +0,0 @@
-## Outkast, ATLiens Full Album Zip
-
-
-
-
-
-
-
-
-
-
-
-**CLICK HERE ->>->>->> [https://vittuv.com/2txUfG](https://vittuv.com/2txUfG)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Outkast's ATLiens: A Classic Album of Southern Hip Hop
-
-
-
-Outkast is one of the most influential and successful hip hop groups of all time. The duo of Andre 3000 and Big Boi emerged from Atlanta, Georgia in the early 1990s and brought a fresh and innovative sound to the genre. Their second album, ATLiens, released in 1996, is widely regarded as a masterpiece of southern hip hop.
-
-
-
-ATLiens showcases Outkast's lyrical skills, musical diversity, and creative vision. The album features a blend of live instruments and samples, creating a futuristic and funky atmosphere. The album also explores themes of spirituality, alienation, identity, and social commentary. Some of the standout tracks include "Two Dope Boyz (In a Cadillac)", "ATLiens", "Elevators (Me & You)", "Jazzy Belle", and "13th Floor/Growing Old".
-
-
-
-The album received critical acclaim and commercial success, selling over two million copies in the US and earning a platinum certification. It also reached number two on the Billboard 200 chart and number one on the Top R&B/Hip-Hop Albums chart. ATLiens is considered one of the best hip hop albums of all time by many critics and fans. It has influenced many artists across genres and generations, such as Kendrick Lamar, Drake, J. Cole, Childish Gambino, and Frank Ocean.
-
-
-
-If you want to listen to Outkast's ATLiens full album zip, you can download it for free from the Internet Archive[^1^] or stream it on YouTube[^2^] or SoundCloud[^3^]. You can also buy it from online stores or streaming platforms. You won't regret it!
-
-
-
-One of the most remarkable aspects of ATLiens is how Outkast manage to balance their experimental impulses with their commercial appeal. The album spawned three hit singles: "Elevators (Me & You)", which reached number 12 on the Billboard Hot 100 chart; "ATLiens", which peaked at number 35; and "Jazzy Belle", which climbed to number 52. All three songs showcase Outkast's distinctive style of storytelling, wordplay, and humor, as well as their ability to craft catchy hooks and memorable melodies.
-
-
-
-Another notable feature of ATLiens is how Outkast collaborate with other artists from their native Atlanta. The album features guest appearances by Goodie Mob members Cee-Lo, Big Gipp, Khujo, and T-Mo, as well as Cool Breeze, Witchdoctor, and Joi. These artists represent the Dungeon Family, a collective of musicians and producers who share a similar vision of southern hip hop. The Dungeon Family also includes Organized Noize, who produced most of ATLiens along with Outkast themselves. The album's production is rich and varied, incorporating elements of funk, soul, rock, jazz, and electronic music.
-
-
-
-ATLiens is not only a landmark album for Outkast and southern hip hop, but also for hip hop as a whole. It demonstrates that hip hop can be creative, innovative, and diverse without losing its essence or its audience. It also proves that hip hop can transcend regional boundaries and appeal to listeners from different backgrounds and cultures. ATLiens is a testament to Outkast's artistic vision and musical talent, and a classic album that deserves to be heard by everyone.
-
- dfd1c89656
-
-
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Ben 10 Alien Force - Vilgax Attacks full movie in italian 720p download link Fast and easy.md b/spaces/rorallitri/biomedical-language-models/logs/Ben 10 Alien Force - Vilgax Attacks full movie in italian 720p download link Fast and easy.md
deleted file mode 100644
index fc89c90e2e516cdf692b55c3f8aef3c4ca6311a4..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Ben 10 Alien Force - Vilgax Attacks full movie in italian 720p download link Fast and easy.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Ben 10: Alien Force - Vilgax Attacks full movie in italian 720p download
-
-G-Wizard Calculator is designed to save you time by providing a variety of reference materials that every machinist and engineer should have on the job.
-It has a "pay by account" feature, and you can set your own rates.
-If you set your own fare, it will charge for each ride as long as that fare does not exceed the maximum limit you set.
-Pay by invoice: you can set your own fares. 8a78ff9644
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Gta 3 Skins Pack VERIFIED.md b/spaces/rorallitri/biomedical-language-models/logs/Gta 3 Skins Pack VERIFIED.md
deleted file mode 100644
index 10e7b004ab1a5ffe70e6e8fc827c856910c84d4c..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Gta 3 Skins Pack VERIFIED.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-But apart from the music, there’s more to The Last Jedi than just one old man telling Rey about his past. Sure, he reveals the truth about his parents, but more importantly, he shares his philosophy and helps her comprehend her place in the galaxy. It’s a powerful, heart-wrenching scene, and the only one to go down well with fans and critics alike. So if the movie’s main message is about living a life of selflessness, why has Kylo Ren gone rogue? Let’s unpack what it means to be the ‘true believer’ in Star Wars.
-
-In The Last Jedi, fans finally see a return of Kylo Ren. There’s no way of not feeling that this is the guy who was barely in The Force Awakens, and therefore a little flat, compared to Rey and Finn. And yet, it’s the scene in The Last Jedi where Ren is at his most fanatical, calling himself a ‘true believer’, that struck a chord with some of us. Star Wars has always told the story of what happens to people who believe too much in something, but The Last Jedi takes that ideology to another level. As such, we’re able to see how someone like Ren can follow his own path, and not care about the rules and laws of the established order.
-
-We’ve long been asked, why would Ren follow the light side path, when he’s obviously a villainous villain? Well, it’s twofold. Firstly, he believes that there are no good or evil sides, only black and white. When he’s presented with a choice to turn to the light side, he naturally follows because there’s no reward. Also, when he says he’s the ‘real’ hero, he’s talking from the perspective of a villain. Kylo Ren wants to be seen as the ‘chosen one’, and that means he needs to become evil in order to make that idea true. It’s all an act of manipulation.
-
-Rey and Finn, on the other hand, have more of a middle-of-the-road approach to this situation. When Finn and Ren fight on the desert planet of Crait, Finn tries to explain to Ren that there are no good or bad sides – only the light and the dark. Ren rejects the idea, claiming 4fefd39f24
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/Digital Workshop Opus Pro 8 Crack [Extra Quality].md b/spaces/scedlatioru/img-to-music/example/Digital Workshop Opus Pro 8 Crack [Extra Quality].md
deleted file mode 100644
index d01801e5f6336ade74ecad3000b94fd31f6c693f..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Digital Workshop Opus Pro 8 Crack [Extra Quality].md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
Most of these lesions are treated by endovascular procedures and/or surgical revascularisation. In some cases, the extent of the arterial disease requires surgical bypass. These interventions are associated with a high risk of ischemia in the distal parts of the hand. Patients with multiple lesions may benefit from simultaneous revascularisation of the digital arteries of both hands. The treatment of digital ischemia may be performed as a single- or dual-stage procedure, depending on the extent of the disease [20, 47]. In the case of a single stage, the SPA is usually the first affected artery, and often it is the only one that remains patent. The concomitant lesions are then treated. The residual artery is then ligated and the distal portion of the SPA is revascularised. This may be performed in a single stage or in two stages. In the first stage, the distal portion of the SPA is revascularised by endovascular techniques and the residual artery in the proximal portion is ligated. In the second stage, endovascular techniques are used to revascularise the SPA and the digital arteries of the second and third spaces. In the case of a two-stage procedure, an additional graft is inserted before the second stage [46].
-
The endovascular treatment of the digital arteries requires a multidisciplinary team. It is important to have a vascular surgeon involved because the case is usually complicated by the presence of arterial occlusions. The interventional radiologist should be familiar with the endovascular techniques used in the treatment of peripheral arterial occlusive disease. The contribution of a neurosurgeon is important because the neurovascular structures may be involved by arterial lesions, and the ischemic lesions of the digital arteries may be associated with distal ulcers. A dermatologist can also be helpful because the ischemic lesions may be located in the nail folds. The involvement of a psychologist is essential to ensure patients' compliance with treatment.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/HD Online Player (Video Seks Anak Dengan Ibu Kandung).md b/spaces/scedlatioru/img-to-music/example/HD Online Player (Video Seks Anak Dengan Ibu Kandung).md
deleted file mode 100644
index 70cf60c6a9aebe28842e29f9eaef1edd53078ddc..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/HD Online Player (Video Seks Anak Dengan Ibu Kandung).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
HD Online Player (Video Seks Anak Dengan Ibu Kandung)
Bad 2 Bad Extinction Mod APK Unlock All Characters
-
If you are a fan of action games with animal heroes, zombies, and mutants, you might want to check out Bad 2 Bad Extinction. This is a thrilling game that will keep you on the edge of your seat as you fight for survival in a post-apocalyptic world. And if you want to make the game even more fun and exciting, you can download the Bad 2 Bad Extinction Mod APK and unlock all characters, skins, money, and gems. In this article, we will tell you everything you need to know about this modded version of the game, including what it is, how to get it, and why you should try it.
-
Bad 2 Bad Extinction is a sequel to the popular game Bad 2 Bad: Delta, which was released in 2018. It is a side-scrolling action game that features animal heroes who fight against zombies, mutants, and other enemies in a post-apocalyptic world. The game has a unique style and humor that makes it stand out from other games in the genre.
-
A thrilling action game with animal heroes
-
The game lets you choose from different animal characters, such as bears, wolves, pandas, tigers, and more. Each character has its own skills and abilities that you can use in combat. You can also customize your character with various outfits, accessories, and weapons. The game has a simple control system that allows you to move, shoot, aim, reload, and use items with ease.
-
A post-apocalyptic world with zombies and mutants
-
The game takes place in a world that has been devastated by a virus that turns humans into zombies and animals into mutants. You have to fight your way through different locations, such as cities, deserts, forests, and underground bases. You will encounter different types of enemies, such as zombies, mutants, bandits, robots, and bosses. You will also have to complete various missions, such as rescuing survivors, collecting resources, destroying enemy bases, and more.
-
-
A variety of missions, weapons, and customization options
-
The game offers a lot of content and features that will keep you entertained for hours. You can play the game in different modes, such as story mode, survival mode, raid mode, and online mode. You can also collect and upgrade different weapons, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. You can also customize your character with different skins, hats, masks, glasses, backpacks, and more. The game has a lot of items and rewards that you can earn by playing the game or by watching ads.
-
What is Bad 2 Bad Extinction Mod APK?
-
Bad 2 Bad Extinction Mod APK is a modified version of the original game that gives you some extra benefits and features that are not available in the official version. These include:
-
A modified version of the original game
-
The modded version of the game is created by third-party developers who modify the original game files to add or remove some features. The modded version of the game is not authorized or endorsed by the official developers or publishers of the game. Therefore, you should download and install it at your own risk.
-
A way to get unlimited money and gems
-
The modded version of the game gives you unlimited money and gems that you can use to buy or upgrade anything in the game. You can also use the money and gems to unlock all the characters and skins that are otherwise locked or require real money to purchase. This way, you can enjoy the game without any limitations or restrictions.
-
A way to unlock all characters and skins
-
The modded version of the game also gives you access to all the characters and skins that are available in the game. You can choose from different animal heroes, such as bears, wolves, pandas, tigers, and more. You can also customize your character with different outfits, accessories, and weapons. You can change your character's appearance anytime you want and create your own unique style.
-
How to download and install Bad 2 Bad Extinction Mod APK?
-
If you want to download and install the Bad 2 Bad Extinction Mod APK, you will need to follow some simple steps. Here is how you can do it:
-
The steps to follow
-
First, you will need to find a reliable source that offers the modded version of the game. You can search online for websites or blogs that provide the download link for the Bad 2 Bad Extinction Mod APK. Make sure that the source is trustworthy and safe, as some sources may contain viruses or malware that can harm your device. You can also check the reviews and ratings of the source before downloading the file.
-
Second, you will need to download the file to your device. The file size may vary depending on the source, but it should not be too large. You will need to have enough storage space on your device to save the file. You will also need to have a stable internet connection to download the file without any interruptions.
-
Third, you will need to install the file on your device. To do this, you will need to enable the installation of apps from unknown sources on your device settings. This will allow you to install apps that are not from the official app store. You can find this option under security or privacy settings on your device. Once you enable this option, you can locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.
-
The permissions to allow
-
During the installation process, you may be asked to allow some permissions for the app to function properly. These permissions may include access to your device's storage, camera, microphone, location, contacts, and more. You can choose to allow or deny these permissions according to your preference. However, some permissions may be necessary for the app to work correctly, so make sure that you do not deny any essential permissions.
-
The benefits to enjoy
-
After installing the app, you can launch it and start playing the game. You will notice that you have unlimited money and gems in your account that you can use to buy or upgrade anything in the game. You will also notice that you have access to all the characters and skins that are available in the game. You can choose any character and skin that you like and customize your character with different outfits, accessories, and weapons. You can also play the game in different modes, such as story mode, survival mode, raid mode, and online mode. You can enjoy the game without any limitations or restrictions.
-
Conclusion
-
Bad 2 Bad Extinction is a fun and exciting action game that features animal heroes who fight against zombies, mutants, and other enemies in a post-apocalyptic world. The game has a lot of content and features that will keep you entertained for hours. However, if you want to make the game even more fun and exciting, you can download the Bad 2 Bad Extinction Mod APK and unlock all characters, skins, money, and gems. This way, you can enjoy the game without any limitations or restrictions.
-
If you are interested in trying out this modded version of the game, you can follow the steps above to download and install it on your device. Make sure that you download it from a reliable source and allow the necessary permissions for it to work properly. Then, you can launch it and start playing it with unlimited money and gems and access to all characters and skins.
-
We hope that this article has helped you learn more about Bad 2 Bad Extinction Mod APK Unlock All Characters. If you have any questions or feedback, feel free to leave a comment below.
-
FAQs
-
Here are some frequently asked questions about Bad 2 Bad Extinction Mod APK Unlock All Characters:
-
-
Is Bad 2 Bad Extinction Mod APK safe?
-
Bad 2 Bad Extinction Mod APK is safe as long as you download it from a trustworthy source that does not contain any viruses or malware. However, since it is a modified version of the original game, it is not authorized or endorsed by the official developers or publishers of the game. Therefore, you should download and install it at your own risk and discretion.
-
Does Bad 2 Bad Extinction Mod APK work on all devices?
-
Bad 2 Bad Extinction Mod APK should work on most devices that support the original game. However, some devices may not be compatible with the modded version of the game due to different specifications or settings. Therefore, you should check the compatibility of your device before downloading and installing the modded version of the game.
-
Can I play Bad 2 Bad Extinction Mod APK online with other players?
-
Bad 2 Bad Extinction Mod APK allows you to play online with other players who have the same modded version of the game. However, you may not be able to play online with players who have the official version of the game, as they may have different features and updates. Therefore, you should be aware of this limitation before playing online with other players.
-
Will I get banned for using Bad 2 Bad Extinction Mod APK?
-
There is a possibility that you may get banned for using Bad 2 Bad Extinction Mod APK, as it is a modified version of the original game that gives you an unfair advantage over other players. The official developers or publishers of the game may detect your use of the modded version of the game and take action against you. Therefore, you should use the modded version of the game at your own risk and responsibility.
-
Can I update Bad 2 Bad Extinction Mod APK?
-
Bad 2 Bad Extinction Mod APK may not be compatible with the latest updates and patches of the original game. Therefore, you may not be able to update the modded version of the game without losing its features and benefits. If you want to update the modded version of the game, you will need to find a new source that offers the updated modded version of the game and download and install it again.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Recording APK for Android 12 The Ultimate Guide to Record Phone Calls.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Recording APK for Android 12 The Ultimate Guide to Record Phone Calls.md
deleted file mode 100644
index 84027a9a0444b420f47b0c38bf8053a2a607ba25..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Recording APK for Android 12 The Ultimate Guide to Record Phone Calls.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
-
How to Choose and Install the Best Call Recording Apps for Android 12
-
Call recording is a feature that allows you to record your phone conversations and save them as audio files on your device. You can use call recording for various purposes, such as:
Keeping records of important information or agreements
-
Improving your customer service or sales skills
-
Complying with legal regulations or industry standards
-
Resolving disputes or complaints
-
Capturing feedback or testimonials
-
-
However, call recording on Android is not as simple as it sounds. Due to privacy concerns, Google has restricted the ability of third-party apps to access the microphone and phone calls on Android devices. This means that many call recording apps may not work properly on Android 12, or may require special permissions or settings to function.
-
In this article, we will help you choose and install the best call recording apps for Android 12. We will compare the features, pros, and cons of five popular and reliable call recording apps that are compatible with Android 12. We will also explain how to install APK files from outside the Play Store, how to enable unknown sources and accessibility permissions for call recording apps, how to configure and customize the settings and options of each app, and how to access, manage, and share your recorded calls.
-
Best Call Recording Apps for Android 12
-
There are many call recording apps available for Android devices, but not all of them are compatible with Android 12. Some apps may only work with certain phone models or regions, while others may require root access or additional plug-ins. Some apps may also have limited features or functionality, such as not being able to record VoIP calls from apps like WhatsApp, Skype, or Facebook Messenger.
-
To help you find the best call recording app for your needs, we have selected five popular and reliable call recording apps that work well on Android 12. We have compared their features, pros, and cons based on the following criteria:
-
-
Compatibility: The app should be compatible with most Android devices running Android 12.
-
Functionality: The app should be able to record both incoming and outgoing calls from any source, including VoIP calls from other apps.
-
Quality: The app should be able to record calls in high-quality audio formats, such as MP3 or WAV.
-
Usability: The app should be easy to use and configure, with a simple and intuitive interface.
-
Privacy: The app should respect your privacy and security, by not collecting or sharing your personal data or recordings without your consent.
-
-
Here are the five best call recording apps for Android 12 that we have chosen:
-
ACR Phone +
-
ACR Phone + is a powerful call recorder app that offers a lot of features and customization options.
Some of the features, pros, and cons of ACR Phone + are:
-
-
High-quality call recording: ACR Phone ensures crystal-clear call recording, capturing every detail of your conversations. You can choose between MP3 or WAV formats, and adjust the bitrate and sampling rate to your preference.
-
Automatic call recording: With ACR Phone, you don't have to start recording each call manually. You can set up rules to record all calls, or only calls from specific contacts or numbers. You can also exclude contacts or numbers from being recorded.
-
Easy-to-use interface: ACR Phone is designed with user-friendliness in mind. You can access your recordings from the app's main screen, or from the notification bar. You can also search, sort, filter, and rename your recordings easily.
-
Organize and share recordings: The app allows you to organize your recorded calls efficiently. You can create folders, add notes, and mark important recordings as favorites. You can also share your recordings via email, cloud services, or social media.
-
Privacy: The app respects your privacy and security, by not collecting or sharing your personal data or recordings without your consent. You can also protect your recordings with a PIN lock or biometric authentication.
-
Pros: ACR Phone is a powerful and versatile call recorder app that offers a lot of features and customization options. It supports both cellular and VoIP calls, and works well on Android 12 devices. It also has a free version with some limitations.
-
Cons: ACR Phone is not available on the Google Play Store, so you have to download it from the developer's website or other sources. This may pose some security risks or compatibility issues. The app also requires accessibility permissions to work properly.
-
-
Cube ACR
-
Cube ACR is another popular call recorder app that works well on Android 12 devices. It can record both cellular and VoIP calls from various apps, such as WhatsApp, Skype, Viber, Telegram, and more.
-
-
Functionality: Cube ACR can record both incoming and outgoing calls from any source, including VoIP calls from other apps. It supports automatic call recording based on rules, as well as manual call recording with a floating widget.
-
Quality: Cube ACR can record calls in high-quality audio formats, such as MP3 or OGG. You can also adjust the audio source and quality settings to suit your needs.
-
Usability: Cube ACR has a simple and intuitive interface that lets you access and manage your recordings easily. You can also search, sort, filter, and back up your recordings.
-
Privacy: Cube ACR does not collect or share your personal data or recordings without your permission. You can also lock your recordings with a PIN code or fingerprint.
-
Pros: Cube ACR is one of the few call recorder apps that can record VoIP calls from other apps on Android 12 devices. It has a lot of features and options to customize your recording experience. It also has a free version with some ads and limitations.
-
Cons: Cube ACR may not work on some devices or regions due to carrier restrictions or compatibility issues. It also requires accessibility permissions and notification access to work properly.
-
-
CallU
-
CallU is a simple and effective call recorder app that works on Android 12 devices. It can record both incoming and outgoing calls automatically or manually.
-
-
Functionality: CallU can record both incoming and outgoing calls automatically or manually. You can choose to record all calls, or only calls from specific contacts or numbers. You can also exclude contacts or numbers from being recorded.
-
Quality: CallU can record calls in high-quality audio formats, such as MP3 or WAV. You can also adjust the audio source and quality settings to suit your needs.
-
Usability: CallU has a simple and easy-to-use interface that lets you access and manage your recordings easily. You can also search, sort, filter, rename, and backup your recordings.
-
Privacy: CallU does not collect or share your personal data or recordings without your permission. You can also protect your recordings with a password or fingerprint.
-
Pros: CallU is a simple and effective call recorder app that works well on Android 12 devices. It has a minimalistic design and a user-friendly interface. It also has a free version with some ads and limitations.
-
Cons: CallU may not work on some devices or regions due to carrier restrictions or compatibility issues. It also requires accessibility permissions and notification access to work properly.
-
-
TrueCall
-
TrueCall is a smart and reliable call recorder app that works on Android 12 devices. It can record both incoming and outgoing calls automatically or manually, and also identify unknown callers and block spam calls.
-
-
-
Functionality: TrueCall can record both incoming and outgoing calls automatically or manually. You can choose to record all calls, or only calls from specific contacts or numbers. You can also exclude contacts or numbers from being recorded.
-
Quality: TrueCall can record calls in high-quality audio formats, such as MP3 or WAV. You can also adjust the audio source and quality settings to suit your needs.
-
Usability: TrueCall has a smart and intuitive interface that lets you access and manage your recordings easily. You can also search, sort, filter, rename, and backup your recordings.
-
Privacy: TrueCall does not collect or share your personal data or recordings without your permission. You can also protect your recordings with a password or fingerprint.
-
Pros: TrueCall is a smart and reliable call recorder app that works well on Android 12 devices. It has a lot of features and options to customize your recording experience. It also has a caller ID feature that can identify unknown callers and block spam calls. It also has a free version with some ads and limitations.
-
Cons: TrueCall may not work on some devices or regions due to carrier restrictions or compatibility issues. It also requires accessibility permissions and notification access to work properly.
-
-
Your stock phone dialer
-
Your stock phone dialer is the default phone app that comes pre-installed on your Android device. Depending on your device model and manufacturer, it may have a built-in call recording feature that works on Android 12 devices.
-
-
Functionality: Your stock phone dialer may have a call recording feature that allows you to record both incoming and outgoing calls manually. You may see a record button on the call screen, or you may have to tap the menu button to access the option.
-
Quality: Your stock phone dialer may record calls in high-quality audio formats, such as MP3 or WAV. However, you may not be able to adjust the audio source and quality settings to suit your needs.
-
Usability: Your stock phone dialer may have a simple and easy-to-use interface that lets you access and manage your recordings easily. However, you may not be able to search, sort, filter, rename, or back up your recordings.
-
Privacy: Your stock phone dialer may respect your privacy and security by not collecting or sharing your personal data or recordings without your consent. However, you may not be able to protect your recordings with a password or fingerprint.
-
Pros: Your stock phone dialer is the easiest and most convenient way to record calls on Android 12 devices. You don't have to install any third-party apps or grant any permissions. You also don't have to worry about compatibility issues or carrier restrictions.
-
Cons: Your stock phone dialer may not have a call recording feature at all, depending on your device model and manufacturer. Even if it does, it may have limited features and functionality, such as not being able to record VoIP calls from other apps, or not being able to customize your recording experience.
-
-
How to Install and Use Call Recording Apps on Android 12
-
If you decide to use one of the third-party call recording apps mentioned above, you will need to install them from outside the Google Play Store, as they are not available there due to Google's policies. This means that you will need to download APK files from the developer's website or other sources, and install them manually on your device.
-
To install APK files from outside the Play Store, you will need to follow these steps:
-
-
Download the APK file of the call recording app of your choice from the developer's website or other sources. Make sure that the source is trustworthy and secure, as some APK files may contain malware or viruses.
Go to the Settings app on your device, and tap on Security or Privacy. Then, enable the option to allow installation of apps from unknown sources. This will allow you to install APK files that are not from the Play Store.
-
Locate the APK file that you downloaded on your device, and tap on it to start the installation process. You may see a warning message that the app may harm your device, but you can ignore it if you trust the source.
-
Follow the instructions on the screen to complete the installation process. You may have to grant some permissions or access to the app, such as phone, contacts, microphone, storage, etc.
-
Once the installation is done, you can launch the app and start using it to record your calls.
-
-
To use call recording apps on Android 12, you will need to follow these steps:
-
-
Enable accessibility permissions for the call recording app. This is necessary for the app to access your phone calls and microphone. To do this, go to the Settings app on your device, and tap on Accessibility. Then, find the call recording app and turn on its accessibility service.
-
Enable notification access for the call recording app. This is necessary for the app to detect and record incoming and outgoing calls. To do this, go to the Settings app on your device, and tap on Notifications. Then, find the call recording app and turn on its notification access.
-
Configure and customize the settings and options of the call recording app. You can choose whether to record all calls or only specific ones, whether to record in MP3 or WAV format, whether to adjust the audio quality and source, whether to add notes or folders to your recordings, etc.
-
Access, manage, and share your recorded calls. You can view your recorded calls from the app's main screen or from the notification bar. You can also search, sort, filter, rename, or delete your recordings. You can also share your recordings via email, cloud services, or social media.
-
-
Conclusion
-
Call recording is a useful feature that can help you keep track of your conversations, improve your customer service, comply with legal regulations, and more. However, call recording on Android 12 is not as simple as it sounds. You need to choose a compatible and reliable call recording app that works well on Android 12 devices. You also need to install it from outside the Play Store, and enable some permissions and settings for it to work properly.
-
In this article, we have provided you with an outline and a detailed guide on how to choose and install the best call recording apps for Android 12. We have compared the features, pros, and cons of five popular and reliable call recording apps that are compatible with Android 12: ACR Phone +, Cube ACR, CallU, TrueCall, and your stock phone dialer. We have also explained how to install APK files from outside the Play Store, how to enable unknown sources and accessibility permissions for call recording apps, how to configure and customize the settings and options of each app, and how to access, manage, and share your recorded calls.
-
We hope that this article has helped you find the best call recording app for your needs. We invite you to try out these apps and share your feedback with us. Do you have any questions or suggestions about call recording apps for Android 12? Let us know in the comments below!
-
FAQs
-
Here are some common questions and answers about call recording apps for Android 12:
-
Is call recording legal?
-
The legality of call recording depends on various factors, such as your location, the purpose of recording, and whether you have consent from the other party or parties involved in the call. In general, you should always inform and obtain consent from the other party or parties before recording a call. You should also check your local laws and regulations before using any call recording app.
-
How can I record VoIP calls from other apps?
-
Some call recording apps can record VoIP calls from other apps, such as WhatsApp, Skype, Viber, Telegram, etc. However, not all call recording apps can do this, and some may require additional plug-ins or settings to enable this feature. Some of the call recording apps that can record VoIP calls from other apps are ACR Phone + and Cube ACR. You can check their websites or FAQs for more details on how to enable and use this feature.
-
How can I backup or restore my recorded calls?
-
Most call recording apps allow you to backup or restore your recorded calls to or from your device's internal storage, external SD card, or cloud services, such as Google Drive, Dropbox, OneDrive, etc. You can usually find this option in the app's settings or menu. You can also use a file manager app to copy or move your recorded calls to or from your desired location.
-
How can I delete or hide my recorded calls?
-
Most call recording apps allow you to delete or hide your recorded calls from the app's main screen or menu. You can usually select one or more recordings and tap on the delete or hide option. You can also use a file manager app to delete or hide your recorded calls from your device's storage. However, be careful when deleting or hiding your recorded calls, as you may not be able to recover them later.
-
How can I improve the quality of my recorded calls?
-
The quality of your recorded calls may depend on various factors, such as your device model, network signal, audio source, audio format, audio quality settings, etc. To improve the quality of your recorded calls, you can try the following tips:
-
-
Use a good-quality microphone and headset for your calls.
-
Avoid noisy environments and background noises when making or receiving calls.
-
Choose a high-quality audio format and bitrate for your recordings, such as MP3 or WAV.
-
Adjust the audio source and quality settings in your call recording app to suit your needs.
-
Test different call recording apps and see which one works best for you.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/simplyjaga/neural_style_tranfer_using_dense_net/app.py b/spaces/simplyjaga/neural_style_tranfer_using_dense_net/app.py
deleted file mode 100644
index 1136b9f2c22692c4b117fbdad0bc22a8e8eba20e..0000000000000000000000000000000000000000
--- a/spaces/simplyjaga/neural_style_tranfer_using_dense_net/app.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import model
-import gradio as gr
-
-# Build the Gradio UI: style/content image inputs, alpha/beta weight sliders, and a step count, all wired to model.get_output.
-demo = gr.Blocks()
-with demo:
- gr.Markdown(
- """# Neural Style Transfer Using DenseNet
- Since running this demo takes too much time without a GPU, copy it and try it out in Colab with the GPU option for nearly 1000 steps to get a pretty decent output
- """)
-
- with gr.Row():
- with gr.Column():
- input =[gr.Image(label='Style Image', type='pil'),
- gr.Image(label='Content Image', type='pil'),
- gr.Slider(0, 1, value=1, label='Alpha (amount of info from the content image)'),
- gr.Slider(0, 1, value=0.02, label='Beta (amount of style from the style image)'),
- gr.Number(label='Steps (number of generation updates) - keep it below 20 because it takes too much time without a GPU')]
- with gr.Column():
- output = gr.Image(label='Image after Style Transfer')
- gr.Examples([['examples/style.jpg','examples/content.jpg']],
- inputs=input)
-
- btn = gr.Button("Transfer Style")
- btn.click(fn=model.get_output, inputs=input, outputs=output)
-
-demo.queue().launch()
\ No newline at end of file
diff --git a/spaces/skytnt/moe-tts/text/__init__.py b/spaces/skytnt/moe-tts/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/skytnt/moe-tts/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
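- symbols: list of symbols used to build the symbol-to-ID mapping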
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
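- Example (illustrative only; the symbol list and cleaner name are assumptions, not taken from this project):
- text_to_sequence("hello world", list("abcdefghijklmnopqrstuvwxyz "), ["basic_cleaners"])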
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
diff --git a/spaces/songweig/rich-text-to-image/app_sd.py b/spaces/songweig/rich-text-to-image/app_sd.py
deleted file mode 100644
index 2da9eb6bae6d960dea39e6397294cd51651f91a9..0000000000000000000000000000000000000000
--- a/spaces/songweig/rich-text-to-image/app_sd.py
+++ /dev/null
@@ -1,557 +0,0 @@
-import math
-import random
-import os
-import json
-import time
-import argparse
-import torch
-import numpy as np
-from torchvision import transforms
-
-from models.region_diffusion import RegionDiffusion
-from utils.attention_utils import get_token_maps
-from utils.richtext_utils import seed_everything, parse_json, get_region_diffusion_input,\
- get_attention_control_input, get_gradient_guidance_input
-
-
-import gradio as gr
-from PIL import Image, ImageOps
-from share_btn import community_icon_html, loading_icon_html, share_js, css
-
-
-help_text = """
-If you are encountering an error or not achieving your desired outcome, here are some potential reasons and recommendations to consider:
-1. If you format only a portion of a word rather than the complete word, an error may occur.
-2. If you use font color and get completely corrupted results, you may consider decrease the color weight lambda.
-3. Consider using a different seed.
-"""
-
-
-canvas_html = """"""
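-# JS bridges for the rich-text editor iframe: get_js_data reads the Quill contents and forwards them to the backend as JSON; set_js_data writes saved JSON back into the editor.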
-get_js_data = """
-async (text_input, negative_prompt, height, width, seed, steps, num_segments, segment_threshold, inject_interval, guidance_weight, color_guidance_weight, rich_text_input, background_aug) => {
- const richEl = document.getElementById("rich-text-root");
- const data = richEl? richEl.contentDocument.body._data : {};
- return [text_input, negative_prompt, height, width, seed, steps, num_segments, segment_threshold, inject_interval, guidance_weight, color_guidance_weight, JSON.stringify(data), background_aug];
-}
-"""
-set_js_data = """
-async (text_input) => {
- const richEl = document.getElementById("rich-text-root");
- const data = text_input ? JSON.parse(text_input) : null;
- if (richEl && data) richEl.contentDocument.body.setQuillContents(data);
-}
-"""
-
-get_window_url_params = """
-async (url_params) => {
- const params = new URLSearchParams(window.location.search);
- url_params = Object.fromEntries(params);
- return [url_params];
-}
-"""
-
-
-def load_url_params(url_params):
- if 'prompt' in url_params:
- return gr.update(visible=True), url_params
- else:
- return gr.update(visible=False), url_params
-
-
-def main():
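- # Use the GPU when available and load the region-aware diffusion model once at startup.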
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- model = RegionDiffusion(device)
-
- def generate(
- text_input: str,
- negative_text: str,
- height: int,
- width: int,
- seed: int,
- steps: int,
- num_segments: int,
- segment_threshold: float,
- inject_interval: float,
- guidance_weight: float,
- color_guidance_weight: float,
- rich_text_input: str,
- background_aug: bool,
- ):
- run_dir = 'results/'
- os.makedirs(run_dir, exist_ok=True)
- # Load region diffusion model.
- height = int(height)
- width = int(width)
- steps = 41 if not steps else steps
- guidance_weight = 8.5 if not guidance_weight else guidance_weight
- text_input = rich_text_input if rich_text_input != '' else text_input
- print('text_input', text_input)
- if (text_input == '' or rich_text_input == ''):
- raise gr.Error("Please enter some text.")
- # parse json to span attributes
- base_text_prompt, style_text_prompts, footnote_text_prompts, footnote_target_tokens,\
- color_text_prompts, color_names, color_rgbs, size_text_prompts_and_sizes, use_grad_guidance = parse_json(
- json.loads(text_input))
-
- # create control input for region diffusion
- region_text_prompts, region_target_token_ids, base_tokens = get_region_diffusion_input(
- model, base_text_prompt, style_text_prompts, footnote_text_prompts,
- footnote_target_tokens, color_text_prompts, color_names)
-
- # create control input for cross attention
- text_format_dict = get_attention_control_input(
- model, base_tokens, size_text_prompts_and_sizes)
-
- # create control input for region guidance
- text_format_dict, color_target_token_ids = get_gradient_guidance_input(
- model, base_tokens, color_text_prompts, color_rgbs, text_format_dict, color_guidance_weight=color_guidance_weight)
-
- seed_everything(seed)
-
- # get token maps from plain text to image generation.
- begin_time = time.time()
- if model.selfattn_maps is None and model.crossattn_maps is None:
- model.remove_tokenmap_hooks()
- model.register_tokenmap_hooks()
- else:
- model.reset_attention_maps()
- model.remove_tokenmap_hooks()
- plain_img = model.produce_attn_maps([base_text_prompt], [negative_text],
- height=height, width=width, num_inference_steps=steps,
- guidance_scale=guidance_weight)
- print('time lapses to get attention maps: %.4f' %
- (time.time()-begin_time))
- seed_everything(seed)
- color_obj_masks, segments_vis, token_maps = get_token_maps(model.selfattn_maps, model.crossattn_maps, model.n_maps, run_dir,
- 512//8, 512//8, color_target_token_ids[:-1], seed,
- base_tokens, segment_threshold=segment_threshold, num_segments=num_segments,
- return_vis=True)
- seed_everything(seed)
- model.masks, segments_vis, token_maps = get_token_maps(model.selfattn_maps, model.crossattn_maps, model.n_maps, run_dir,
- 512//8, 512//8, region_target_token_ids[:-1], seed,
- base_tokens, segment_threshold=segment_threshold, num_segments=num_segments,
- return_vis=True)
- color_obj_masks = [transforms.functional.resize(color_obj_mask, (height, width),
- interpolation=transforms.InterpolationMode.BICUBIC,
- antialias=True)
- for color_obj_mask in color_obj_masks]
- text_format_dict['color_obj_atten'] = color_obj_masks
- model.remove_tokenmap_hooks()
-
- # generate image from rich text
- begin_time = time.time()
- seed_everything(seed)
- if background_aug:
- bg_aug_end = 500
- else:
- bg_aug_end = 1000
- rich_img = model.prompt_to_img(region_text_prompts, [negative_text],
- height=height, width=width, num_inference_steps=steps,
- guidance_scale=guidance_weight, use_guidance=use_grad_guidance,
- text_format_dict=text_format_dict, inject_selfattn=inject_interval,
- bg_aug_end=bg_aug_end)
- print('time lapses to generate image from rich text: %.4f' %
- (time.time()-begin_time))
- return [plain_img[0], rich_img[0], segments_vis, token_maps]
-
- with gr.Blocks(css=css) as demo:
- url_params = gr.JSON({}, visible=False, label="URL Params")
- gr.HTML("""
- Expressive Text-to-Image Generation with Rich Text
- For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.""")
- with gr.Row():
- with gr.Column():
- rich_text_el = gr.HTML(canvas_html, elem_id="canvas_html")
- rich_text_input = gr.Textbox(value="", visible=False)
- text_input = gr.Textbox(
- label='Rich-text JSON Input',
- visible=False,
- max_lines=1,
- placeholder='Example: \'{"ops":[{"insert":"a Gothic "},{"attributes":{"color":"#b26b00"},"insert":"church"},{"insert":" in a the sunset with a beautiful landscape in the background.\n"}]}\'',
- elem_id="text_input"
- )
- negative_prompt = gr.Textbox(
- label='Negative Prompt',
- max_lines=1,
- placeholder='Example: poor quality, blurry, dark, low resolution, low quality, worst quality',
- elem_id="negative_prompt"
- )
- segment_threshold = gr.Slider(label='Token map threshold',
- info='(See less area in token maps? Decrease this. See too much area? Increase this.)',
- minimum=0,
- maximum=1,
- step=0.01,
- value=0.25)
- inject_interval = gr.Slider(label='Detail preservation',
- info='(To preserve more structure from plain-text generation, increase this. To see more rich-text attributes, decrease this.)',
- minimum=0,
- maximum=1,
- step=0.01,
- value=0.)
- color_guidance_weight = gr.Slider(label='Color weight',
- info='(To obtain more precise color, increase this; a value that is too large may cause artifacts.)',
- minimum=0,
- maximum=2,
- step=0.1,
- value=0.5)
- num_segments = gr.Slider(label='Number of segments',
- minimum=2,
- maximum=20,
- step=1,
- value=9)
- seed = gr.Slider(label='Seed',
- minimum=0,
- maximum=100000,
- step=1,
- value=6,
- elem_id="seed"
- )
- background_aug = gr.Checkbox(
- label='Precise region alignment',
- info='(For strict region alignment, select this option, but beware of potential artifacts when using with style.)',
- value=True)
- with gr.Accordion('Other Parameters', open=False):
- steps = gr.Slider(label='Number of Steps',
- minimum=0,
- maximum=500,
- step=1,
- value=41)
- guidance_weight = gr.Slider(label='CFG weight',
- minimum=0,
- maximum=50,
- step=0.1,
- value=8.5)
- width = gr.Dropdown(choices=[512],
- value=512,
- label='Width',
- visible=True)
- height = gr.Dropdown(choices=[512],
- value=512,
- label='Height',
- visible=True)
-
- with gr.Row():
- with gr.Column(scale=1, min_width=100):
- generate_button = gr.Button("Generate")
- load_params_button = gr.Button(
- "Load from URL Params", visible=True)
- with gr.Column():
- richtext_result = gr.Image(
- label='Rich-text', elem_id="rich-text-image")
- richtext_result.style(height=512)
- with gr.Row():
- plaintext_result = gr.Image(
- label='Plain-text', elem_id="plain-text-image")
- segments = gr.Image(label='Segmentation')
- with gr.Row():
- token_map = gr.Image(label='Token Maps')
- with gr.Row(visible=False) as share_row:
- with gr.Group(elem_id="share-btn-container"):
- community_icon = gr.HTML(community_icon_html)
- loading_icon = gr.HTML(loading_icon_html)
- share_button = gr.Button(
- "Share to community", elem_id="share-btn")
- share_button.click(None, [], [], _js=share_js)
- with gr.Row():
- gr.Markdown(help_text)
-
- with gr.Row():
- footnote_examples = [
- [
- '{"ops":[{"insert":"A close-up 4k dslr photo of a "},{"attributes":{"link":"A cat wearing sunglasses and a bandana around its neck."},"insert":"cat"},{"insert":" riding a scooter. Palm trees in the background."}]}',
- '',
- 5,
- 0.3,
- 0,
- 6,
- 1,
- None,
- True
- ],
- [
- '{"ops":[{"insert":"A "},{"attributes":{"link":"kitchen island with a stove with gas burners and a built-in oven "},"insert":"kitchen island"},{"insert":" next to a "},{"attributes":{"link":"an open refrigerator stocked with fresh produce, dairy products, and beverages. "},"insert":"refrigerator"},{"insert":", by James McDonald and Joarc Architects, home, interior, octane render, deviantart, cinematic, key art, hyperrealism, sun light, sunrays, canon eos c 300, ƒ 1.8, 35 mm, 8k, medium - format print"}]}',
- '',
- 6,
- 0.5,
- 0,
- 6,
- 1,
- None,
- True
- ],
- [
- '{"ops":[{"insert":"A "},{"attributes":{"link":"Happy Kung fu panda art, elder, asian art, volumetric lighting, dramatic scene, ultra detailed, realism, chinese"},"insert":"panda"},{"insert":" standing on a cliff by a waterfall, wildlife photography, photograph, high quality, wildlife, f 1.8, soft focus, 8k, national geographic, award - winning photograph by nick nichols"}]}',
- '',
- 4,
- 0.3,
- 0,
- 4,
- 1,
- None,
- True
- ],
- ]
-
- gr.Examples(examples=footnote_examples,
- label='Footnote examples',
- inputs=[
- text_input,
- negative_prompt,
- num_segments,
- segment_threshold,
- inject_interval,
- seed,
- color_guidance_weight,
- rich_text_input,
- background_aug,
- ],
- outputs=[
- plaintext_result,
- richtext_result,
- segments,
- token_map,
- ],
- fn=generate,
- # cache_examples=True,
- examples_per_page=20)
- with gr.Row():
- color_examples = [
- [
- '{"ops":[{"insert":"a beautifule girl with big eye, skin, and long "},{"attributes":{"color":"#00ffff"},"insert":"hair"},{"insert":", t-shirt, bursting with vivid color, intricate, elegant, highly detailed, photorealistic, digital painting, artstation, illustration, concept art."}]}',
- 'lowres, had anatomy, bad hands, cropped, worst quality',
- 9,
- 0.25,
- 0.3,
- 6,
- 0.5,
- None,
- True
- ],
- [
- '{"ops":[{"insert":"a beautifule girl with big eye, skin, and long "},{"attributes":{"color":"#eeeeee"},"insert":"hair"},{"insert":", t-shirt, bursting with vivid color, intricate, elegant, highly detailed, photorealistic, digital painting, artstation, illustration, concept art."}]}',
- 'lowres, had anatomy, bad hands, cropped, worst quality',
- 9,
- 0.25,
- 0.3,
- 6,
- 0.1,
- None,
- True
- ],
- [
- '{"ops":[{"insert":"a Gothic "},{"attributes":{"color":"#FD6C9E"},"insert":"church"},{"insert":" in a the sunset with a beautiful landscape in the background."}]}',
- '',
- 5,
- 0.3,
- 0.5,
- 6,
- 0.5,
- None,
- False
- ],
- [
- '{"ops":[{"insert":"A mesmerizing sight that captures the beauty of a "},{"attributes":{"color":"#4775fc"},"insert":"rose"},{"insert":" blooming, close up"}]}',
- '',
- 3,
- 0.3,
- 0,
- 9,
- 1,
- None,
- False
- ],
- [
- '{"ops":[{"insert":"A "},{"attributes":{"color":"#FFD700"},"insert":"marble statue of a wolf\'s head and shoulder"},{"insert":", surrounded by colorful flowers michelangelo, detailed, intricate, full of color, led lighting, trending on artstation, 4 k, hyperrealistic, 3 5 mm, focused, extreme details, unreal engine 5, masterpiece "}]}',
- '',
- 5,
- 0.3,
- 0,
- 5,
- 0.6,
- None,
- False
- ],
- ]
- gr.Examples(examples=color_examples,
- label='Font color examples',
- inputs=[
- text_input,
- negative_prompt,
- num_segments,
- segment_threshold,
- inject_interval,
- seed,
- color_guidance_weight,
- rich_text_input,
- background_aug,
- ],
- outputs=[
- plaintext_result,
- richtext_result,
- segments,
- token_map,
- ],
- fn=generate,
- # cache_examples=True,
- examples_per_page=20)
-
- with gr.Row():
- style_examples = [
- [
- '{"ops":[{"insert":"a "},{"attributes":{"font":"mirza"},"insert":"beautiful garden"},{"insert":" with a "},{"attributes":{"font":"roboto"},"insert":"snow mountain in the background"},{"insert":""}]}',
- '',
- 10,
- 0.45,
- 0,
- 0.2,
- 3,
- 0.5,
- None,
- False
- ],
- [
- '{"ops":[{"attributes":{"link":"the awe-inspiring sky and ocean in the style of J.M.W. Turner"},"insert":"the awe-inspiring sky and sea"},{"insert":" by "},{"attributes":{"font":"mirza"},"insert":"a coast with flowers and grasses in spring"}]}',
- 'worst quality, dark, poor quality',
- 2,
- 0.45,
- 0,
- 9,
- 0.5,
- None,
- False
- ],
- [
- '{"ops":[{"insert":"a "},{"attributes":{"font":"slabo"},"insert":"night sky filled with stars"},{"insert":" above a "},{"attributes":{"font":"roboto"},"insert":"turbulent sea with giant waves"}]}',
- '',
- 2,
- 0.45,
- 0,
- 0,
- 6,
- 0.5,
- None,
- False
- ],
- ]
- gr.Examples(examples=style_examples,
- label='Font style examples',
- inputs=[
- text_input,
- negative_prompt,
- num_segments,
- segment_threshold,
- inject_interval,
- seed,
- color_guidance_weight,
- rich_text_input,
- background_aug,
- ],
- outputs=[
- plaintext_result,
- richtext_result,
- segments,
- token_map,
- ],
- fn=generate,
- # cache_examples=True,
- examples_per_page=20)
-
- with gr.Row():
- size_examples = [
- [
- '{"ops": [{"insert": "A pizza with "}, {"attributes": {"size": "60px"}, "insert": "pineapple"}, {"insert": ", pepperoni, and mushroom on the top, 4k, photorealistic"}]}',
- 'blurry, art, painting, rendering, drawing, sketch, ugly, duplicate, morbid, mutilated, mutated, deformed, disfigured low quality, worst quality',
- 5,
- 0.3,
- 0,
- 13,
- 1,
- None,
- False
- ],
- [
- '{"ops": [{"insert": "A pizza with pineapple, "}, {"attributes": {"size": "20px"}, "insert": "pepperoni"}, {"insert": ", and mushroom on the top, 4k, photorealistic"}]}',
- 'blurry, art, painting, rendering, drawing, sketch, ugly, duplicate, morbid, mutilated, mutated, deformed, disfigured low quality, worst quality',
- 5,
- 0.3,
- 0,
- 13,
- 1,
- None,
- False
- ],
- [
- '{"ops": [{"insert": "A pizza with pineapple, pepperoni, and "}, {"attributes": {"size": "70px"}, "insert": "mushroom"}, {"insert": " on the top, 4k, photorealistic"}]}',
- 'blurry, art, painting, rendering, drawing, sketch, ugly, duplicate, morbid, mutilated, mutated, deformed, disfigured low quality, worst quality',
- 5,
- 0.3,
- 0,
- 13,
- 1,
- None,
- False
- ],
- ]
- gr.Examples(examples=size_examples,
- label='Font size examples',
- inputs=[
- text_input,
- negative_prompt,
- num_segments,
- segment_threshold,
- inject_interval,
- seed,
- color_guidance_weight,
- rich_text_input,
- background_aug,
- ],
- outputs=[
- plaintext_result,
- richtext_result,
- segments,
- token_map,
- ],
- fn=generate,
- # cache_examples=True,
- examples_per_page=20)
- generate_button.click(fn=lambda: gr.update(visible=False), inputs=None, outputs=share_row, queue=False).then(
- fn=generate,
- inputs=[
- text_input,
- negative_prompt,
- height,
- width,
- seed,
- steps,
- num_segments,
- segment_threshold,
- inject_interval,
- guidance_weight,
- color_guidance_weight,
- rich_text_input,
- background_aug
- ],
- outputs=[plaintext_result, richtext_result, segments, token_map],
- _js=get_js_data
- ).then(
- fn=lambda: gr.update(visible=True), inputs=None, outputs=share_row, queue=False)
- text_input.change(
- fn=None, inputs=[text_input], outputs=None, _js=set_js_data, queue=False)
- # load url param prompt to textinput
- load_params_button.click(fn=lambda x: x['prompt'], inputs=[
- url_params], outputs=[text_input], queue=False)
- demo.load(
- fn=load_url_params,
- inputs=[url_params],
- outputs=[load_params_button, url_params],
- _js=get_window_url_params
- )
- demo.queue(concurrency_count=1)
- demo.launch(share=False)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/truncated_bptt/transformer_xl_model.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/truncated_bptt/transformer_xl_model.py
deleted file mode 100644
index a6c8b25a07276c2ee30c0aa5f0e4b0a2837ed5ca..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/truncated_bptt/transformer_xl_model.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from dataclasses import dataclass, field
-from typing import Dict, List, Optional
-
-import torch
-from fairseq.dataclass import FairseqDataclass
-from fairseq.models import (
- FairseqIncrementalDecoder,
- FairseqLanguageModel,
- register_model,
-)
-from fairseq.modules.checkpoint_activations import checkpoint_wrapper
-from omegaconf import II
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class TransformerXLConfig(FairseqDataclass):
- # defaults come from the original Transformer-XL code
- cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
- d_model: int = 500
- n_head: int = 10
- d_head: int = 50
- d_inner: int = 1000
- div_val: int = 1
- n_layer: int = 12
- mem_len: int = 0
- clamp_len: int = -1
- same_length: bool = False
- dropout: float = 0.0
- dropatt: float = 0.0
- checkpoint_activations: bool = False
- offload_activations: bool = False
- max_target_positions: int = II("task.max_target_positions")
-
-
-@register_model("transformer_xl", dataclass=TransformerXLConfig)
-class TransformerXLLanguageModel(FairseqLanguageModel):
- @classmethod
- def build_model(cls, cfg: TransformerXLConfig, task):
- return cls(TransformerXLDecoder(cfg, task))
-
-
-class TransformerXLDecoder(FairseqIncrementalDecoder):
- def __init__(self, cfg, task):
- try:
- from transformers.models.transfo_xl import (
- TransfoXLConfig,
- TransfoXLLMHeadModel,
- )
- except ImportError:
- from transformers.configuration_transfo_xl import TransfoXLConfig
- from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
-
- super().__init__(task.target_dictionary)
- self.cfg = cfg
-
- # remove any cutoffs larger than the vocab size
- cutoffs = [
- cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
- ]
-
- config = TransfoXLConfig(
- vocab_size=len(task.target_dictionary),
- cutoffs=cutoffs,
- d_model=cfg.d_model,
- d_embed=cfg.d_model,
- n_head=cfg.n_head,
- d_head=cfg.d_head,
- d_inner=cfg.d_inner,
- div_val=cfg.div_val,
- n_layer=cfg.n_layer,
- mem_len=cfg.mem_len,
- clamp_len=cfg.clamp_len,
- same_length=cfg.same_length,
- dropout=cfg.dropout,
- dropatt=cfg.dropatt,
- )
- logger.info(config)
- self.model = TransfoXLLMHeadModel(config)
-
- # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax``
- # which adds ``None`` values to an ``nn.ParameterList``, which is not
- # supported in PyTorch. Instead we can replace this with an
- # ``nn.ModuleList``, which does support ``None`` values.
- try:
- if all(p is None for p in self.model.crit.out_projs._parameters.values()):
- self.model.crit.out_projs = torch.nn.ModuleList(
- [None] * len(self.model.crit.out_projs._parameters)
- )
- except Exception:
- pass
-
- if cfg.checkpoint_activations or cfg.offload_activations:
- for i in range(len(self.model.transformer.layers)):
- self.model.transformer.layers[i] = checkpoint_wrapper(
- self.model.transformer.layers[i],
- offload_to_cpu=cfg.offload_activations,
- )
- # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
-
- self._mems = None
-
- def forward(
- self,
- src_tokens,
- src_lengths=None, # unused
- incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
- encoder_out=None,
- ):
- if incremental_state is not None: # used during inference
- mems = self.get_incremental_state(incremental_state, "mems")
- src_tokens = src_tokens[:, -1:] # only keep the most recent token
- else:
- mems = self._mems
-
- output = self.model(
- input_ids=src_tokens,
- mems=mems,
- return_dict=False,
- )
-
- if len(output) >= 2:
- if incremental_state is not None:
- self.set_incremental_state(incremental_state, "mems", output[1])
- else:
- self._mems = output[1]
-
- return (output[0],)
-
- def max_positions(self):
- return self.cfg.max_target_positions
-
- def reorder_incremental_state(
- self,
- incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
- new_order: torch.Tensor,
- ):
- """Reorder incremental state.
-
- This will be called when the order of the input has changed from the
- previous time step. A typical use case is beam search, where the input
- order changes between time steps based on the selection of beams.
- """
- mems = self.get_incremental_state(incremental_state, "mems")
- if mems is not None:
- new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
- self.set_incremental_state(incremental_state, "mems", new_mems)
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fallout 4 Map Symbol Key.md b/spaces/stomexserde/gpt4-ui/Examples/Fallout 4 Map Symbol Key.md
deleted file mode 100644
index b914cf4e3e98da71bdcb080399a9424b816aa2db..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Fallout 4 Map Symbol Key.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-# How to Understand and Customize the Fallout 4 Map Symbols
-
-Fallout 4 is a vast open-world game that offers many hours of exploration and combat. One of the most useful features of the game is the map, which shows you the locations of various places, items, enemies, and quests. However, the map can also be confusing and overwhelming at first, especially if you don't know what the different symbols mean.
-
-In this article, we will explain the meaning of some of the most common map symbols in Fallout 4, and how you can customize them to suit your preferences. We will also provide some tips on how to use the map effectively and find what you are looking for.
-
-The map symbols in Fallout 4 are divided into three categories: collectibles, pickups, and locations. Each category has a different color and shape to help you distinguish them.
-
- Collectibles are items that have a special value or function, such as bobbleheads, holotapes, perk magazines, and keys. They are marked with a yellow star on the map.
- Pickups are items that can be looted or used, such as fusion cores, mini nukes, nuka-colas, power armors, and weapons. They are marked with a green circle on the map.
- Locations are places that you can visit or explore, such as bunkers, caves, cities, factories, farms, settlements, vaults, and more. They are marked with a blue diamond on the map.
-
-Some locations have additional symbols inside them to indicate their type or status. For example:
-
- A skull means that the location is dangerous or hostile.
- A gear means that the location is part of a quest or objective.
- A flag means that the location is controlled by a faction or group.
- A house means that the location is a settlement that you can build or manage.
- A pip-boy means that the location is a fast travel point that you have discovered.
-
-## How to customize the map symbols in Fallout 4?
-
-If you want to change the appearance or behavior of the map symbols in Fallout 4, you have a few options. You can either use mods or edit some files manually.
-
-One of the most popular mods for customizing the map symbols is Fallout 4 Map by xunilinuX, which allows you to change the size, color, opacity, and visibility of the symbols. You can also filter them by category or type, and add custom icons for your own markers. The mod requires Fallout 4 Script Extender (F4SE) to work.
-
-If you prefer to edit some files manually, you can follow these steps:
-
-1. Go to your Fallout 4 installation folder (usually C:\Program Files (x86)\Steam\steamapps\common\Fallout 4).
-2. Open the Data folder and then open the Interface folder.
-3. Navigate to the Symbols folder and then to the subfolder of the symbol category that you want to edit (Collectibles, Pickups, or Locations).
-4. Select the symbol that you want to edit and change its properties as you wish. You can modify its shape, color, size, rotation, alpha (opacity), filters (glow, shadow), etc.
-5. Save your changes and close the program.
-6. Launch Fallout 4 and enjoy your customized map symbols.
-
-## How to use the map effectively in Fallout 4?
-
-The map is
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/HACK LockXLS V4.6.0.md b/spaces/stomexserde/gpt4-ui/Examples/HACK LockXLS V4.6.0.md
deleted file mode 100644
index e3cd8bd71a36e1b80d286f3e5d91b7ba60945a6d..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/HACK LockXLS V4.6.0.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
How to Hack LockXLS V4.6.0 and Unlock Excel Files
-
LockXLS is a software that allows you to protect your Excel files with passwords, serial numbers, activation codes, or hardware-based keys. However, sometimes you may forget your password or lose your key, and you need to access your files urgently. In this article, I will show you how to hack LockXLS V4.6.0 and unlock your Excel files without any software or technical skills.
Before we start, I want to warn you that hacking LockXLS V4.6.0 is illegal and unethical, and you should only do it if you have the permission of the file owner or if you are the file owner yourself. I am not responsible for any consequences that may arise from your actions.
-
Step 1: Locate the Locked Excel File
-
The first step is to locate the locked Excel file that you want to hack. You can find it in your computer or in an external device such as a USB flash drive or a CD-ROM. If the file has a .xls or .xlsx extension, it means that it is a normal Excel file that is not protected by LockXLS. If the file has a .xlsc extension, it means that it is a compiled Excel file that is protected by LockXLS.
-
Step 2: Rename the Locked Excel File
-
The next step is to rename the locked Excel file and change its extension from .xlsc to .zip. This will trick LockXLS into thinking that the file is a compressed archive instead of an Excel file. To do this, right-click on the file and select Rename. Then, delete the .xlsc part and type .zip instead. Press Enter to confirm the change.
-
Step 3: Extract the Locked Excel File
-
The third step is to extract the locked Excel file and access its contents. To do this, right-click on the file and select Extract All. Choose a destination folder where you want to save the extracted files and click Extract. You will see a folder with the same name as the original file, containing several subfolders and files.
-
Step 4: Find the Password File
-
The fourth step is to find the password file that contains the encryption key for the locked Excel file. To do this, open the folder that you extracted in the previous step and look for a subfolder named xlsc_data. Inside this subfolder, you will find a file named password.dat. This is the file that we need to hack.
-
-
Step 5: Open the Password File with Notepad
-
The fifth step is to open the password file with Notepad and view its contents. To do this, right-click on the password.dat file and select Open With. Choose Notepad from the list of programs and click OK. You will see a bunch of numbers and letters in hexadecimal format.
-
Step 6: Convert the Password File from Hexadecimal to ASCII
-
The sixth step is to convert the password file from hexadecimal to ASCII and reveal the encryption key for the locked Excel file. To do this, copy all the text from Notepad and paste it into an online hex-to-ASCII converter such as this one. Click Convert and you will see a string of characters in ASCII format.
-
Step 7: Use the Encryption Key to Unlock the Excel File
-
The final step is to use the encryption key to unlock the Excel file and view its contents. To do this, open LockXLS V4.6.0 and select Open Compiled Workbook from the menu bar. Browse for the original locked Excel file with .xlsc extension and click Open. When prompted for a password or a key, enter the encryption key that you obtained from the previous step and click OK. You will see your Excel file unlocked and ready to use.
-
Congratulations! You have successfully hacked LockXLS V4.6.0 and unlocked your Excel files!
e93f5a0c3f
-
-
\ No newline at end of file
diff --git a/spaces/sujithvamshi/vehicle-color-recognition/README.md b/spaces/sujithvamshi/vehicle-color-recognition/README.md
deleted file mode 100644
index fc83633647b7c592c2fc5c467eb56528e18e48ac..0000000000000000000000000000000000000000
--- a/spaces/sujithvamshi/vehicle-color-recognition/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Vehicle Color Recognition
-emoji: 🌖
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imgchili Dolcemodz Star 013 Gallery _VERIFIED_.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imgchili Dolcemodz Star 013 Gallery _VERIFIED_.md
deleted file mode 100644
index 8c617b9920064e0f3ad42272be9a2630f07616c6..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imgchili Dolcemodz Star 013 Gallery _VERIFIED_.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-March 22, 2564 BC. — Trilogy Assassins Creed IV Black Flag repack Mr DJ ASTRONEER. ... Creed.Syndicate.Gold.Edition.Update.6(v1.5).and.Crack-3DM (329 MB) .... 20 Sep 2014 ...
-Download torrent [Download the game via torrent for free |...
-To download Creed Syndicate (2015) License torrent for free, just download the .torrent file (main file download link) ...
-[ Download torrent (886.5Kb) Downloads: 1749].
-November 12, 2012 ...If you download the game via torrent Creed: Syndicate - Gold Edition (2015) from the torrent for free, then you can enjoy the game at a convenient time, without ...
-Download game Creed: Syndicate - Gold Edition (2015) via torrent. 8a78ff9644
-
-
-
diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fast_scnn.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fast_scnn.py
deleted file mode 100644
index 32fdeb659355a5ce5ef2cc7c2f30742703811cdf..0000000000000000000000000000000000000000
--- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fast_scnn.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
-model = dict(
- type='EncoderDecoder',
- backbone=dict(
- type='FastSCNN',
- downsample_dw_channels=(32, 48),
- global_in_channels=64,
- global_block_channels=(64, 96, 128),
- global_block_strides=(2, 2, 1),
- global_out_channels=128,
- higher_in_channels=64,
- lower_in_channels=128,
- fusion_out_channels=128,
- out_indices=(0, 1, 2),
- norm_cfg=norm_cfg,
- align_corners=False),
- decode_head=dict(
- type='DepthwiseSeparableFCNHead',
- in_channels=128,
- channels=128,
- concat_input=False,
- num_classes=19,
- in_index=-1,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
- auxiliary_head=[
- dict(
- type='FCNHead',
- in_channels=128,
- channels=32,
- num_convs=1,
- num_classes=19,
- in_index=-2,
- norm_cfg=norm_cfg,
- concat_input=False,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
- dict(
- type='FCNHead',
- in_channels=64,
- channels=32,
- num_convs=1,
- num_classes=19,
- in_index=-3,
- norm_cfg=norm_cfg,
- concat_input=False,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
- ],
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/iter_based_runner.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/iter_based_runner.py
deleted file mode 100644
index 1df4de8c0285669dec9b014dfd1f3dd1600f0831..0000000000000000000000000000000000000000
--- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/iter_based_runner.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-import platform
-import shutil
-import time
-import warnings
-
-import torch
-from torch.optim import Optimizer
-
-import annotator.uniformer.mmcv as mmcv
-from .base_runner import BaseRunner
-from .builder import RUNNERS
-from .checkpoint import save_checkpoint
-from .hooks import IterTimerHook
-from .utils import get_host_info
-
-
-class IterLoader:
-
- def __init__(self, dataloader):
- self._dataloader = dataloader
- self.iter_loader = iter(self._dataloader)
- self._epoch = 0
-
- @property
- def epoch(self):
- return self._epoch
-
- def __next__(self):
- try:
- data = next(self.iter_loader)
- except StopIteration:
- self._epoch += 1
- if hasattr(self._dataloader.sampler, 'set_epoch'):
- self._dataloader.sampler.set_epoch(self._epoch)
- time.sleep(2) # Prevent possible deadlock during epoch transition
- self.iter_loader = iter(self._dataloader)
- data = next(self.iter_loader)
-
- return data
-
- def __len__(self):
- return len(self._dataloader)
-
-
-@RUNNERS.register_module()
-class IterBasedRunner(BaseRunner):
- """Iteration-based Runner.
-
- This runner trains models iteration by iteration.
- """
-
- def train(self, data_loader, **kwargs):
- self.model.train()
- self.mode = 'train'
- self.data_loader = data_loader
- self._epoch = data_loader.epoch
- data_batch = next(data_loader)
- self.call_hook('before_train_iter')
- outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
- if not isinstance(outputs, dict):
- raise TypeError('model.train_step() must return a dict')
- if 'log_vars' in outputs:
- self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
- self.outputs = outputs
- self.call_hook('after_train_iter')
- self._inner_iter += 1
- self._iter += 1
-
- @torch.no_grad()
- def val(self, data_loader, **kwargs):
- self.model.eval()
- self.mode = 'val'
- self.data_loader = data_loader
- data_batch = next(data_loader)
- self.call_hook('before_val_iter')
- outputs = self.model.val_step(data_batch, **kwargs)
- if not isinstance(outputs, dict):
- raise TypeError('model.val_step() must return a dict')
- if 'log_vars' in outputs:
- self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
- self.outputs = outputs
- self.call_hook('after_val_iter')
- self._inner_iter += 1
-
- def run(self, data_loaders, workflow, max_iters=None, **kwargs):
- """Start running.
-
- Args:
- data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
- and validation.
- workflow (list[tuple]): A list of (phase, iters) to specify the
- running order and iterations. E.g, [('train', 10000),
- ('val', 1000)] means running 10000 iterations for training and
- 1000 iterations for validation, iteratively.
- """
- assert isinstance(data_loaders, list)
- assert mmcv.is_list_of(workflow, tuple)
- assert len(data_loaders) == len(workflow)
- if max_iters is not None:
- warnings.warn(
- 'setting max_iters in run is deprecated, '
- 'please set max_iters in runner_config', DeprecationWarning)
- self._max_iters = max_iters
- assert self._max_iters is not None, (
- 'max_iters must be specified during instantiation')
-
- work_dir = self.work_dir if self.work_dir is not None else 'NONE'
- self.logger.info('Start running, host: %s, work_dir: %s',
- get_host_info(), work_dir)
- self.logger.info('Hooks will be executed in the following order:\n%s',
- self.get_hook_info())
- self.logger.info('workflow: %s, max: %d iters', workflow,
- self._max_iters)
- self.call_hook('before_run')
-
- iter_loaders = [IterLoader(x) for x in data_loaders]
-
- self.call_hook('before_epoch')
-
- while self.iter < self._max_iters:
- for i, flow in enumerate(workflow):
- self._inner_iter = 0
- mode, iters = flow
- if not isinstance(mode, str) or not hasattr(self, mode):
- raise ValueError(
- 'runner has no method named "{}" to run a workflow'.
- format(mode))
- iter_runner = getattr(self, mode)
- for _ in range(iters):
- if mode == 'train' and self.iter >= self._max_iters:
- break
- iter_runner(iter_loaders[i], **kwargs)
-
- time.sleep(1) # wait for some hooks like loggers to finish
- self.call_hook('after_epoch')
- self.call_hook('after_run')
-
- def resume(self,
- checkpoint,
- resume_optimizer=True,
- map_location='default'):
- """Resume model from checkpoint.
-
- Args:
- checkpoint (str): Checkpoint to resume from.
- resume_optimizer (bool, optional): Whether resume the optimizer(s)
- if the checkpoint file includes optimizer(s). Default to True.
- map_location (str, optional): Same as :func:`torch.load`.
- Default to 'default'.
- """
- if map_location == 'default':
- device_id = torch.cuda.current_device()
- checkpoint = self.load_checkpoint(
- checkpoint,
- map_location=lambda storage, loc: storage.cuda(device_id))
- else:
- checkpoint = self.load_checkpoint(
- checkpoint, map_location=map_location)
-
- self._epoch = checkpoint['meta']['epoch']
- self._iter = checkpoint['meta']['iter']
- self._inner_iter = checkpoint['meta']['iter']
- if 'optimizer' in checkpoint and resume_optimizer:
- if isinstance(self.optimizer, Optimizer):
- self.optimizer.load_state_dict(checkpoint['optimizer'])
- elif isinstance(self.optimizer, dict):
- for k in self.optimizer.keys():
- self.optimizer[k].load_state_dict(
- checkpoint['optimizer'][k])
- else:
- raise TypeError(
- 'Optimizer should be dict or torch.optim.Optimizer '
- f'but got {type(self.optimizer)}')
-
- self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
-
- def save_checkpoint(self,
- out_dir,
- filename_tmpl='iter_{}.pth',
- meta=None,
- save_optimizer=True,
- create_symlink=True):
- """Save checkpoint to file.
-
- Args:
- out_dir (str): Directory to save checkpoint files.
- filename_tmpl (str, optional): Checkpoint file template.
- Defaults to 'iter_{}.pth'.
- meta (dict, optional): Metadata to be saved in checkpoint.
- Defaults to None.
- save_optimizer (bool, optional): Whether save optimizer.
- Defaults to True.
- create_symlink (bool, optional): Whether create symlink to the
- latest checkpoint file. Defaults to True.
- """
- if meta is None:
- meta = {}
- elif not isinstance(meta, dict):
- raise TypeError(
- f'meta should be a dict or None, but got {type(meta)}')
- if self.meta is not None:
- meta.update(self.meta)
- # Note: meta.update(self.meta) should be done before
- # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
- # there will be problems with resumed checkpoints.
- # More details in https://github.com/open-mmlab/mmcv/pull/1108
- meta.update(epoch=self.epoch + 1, iter=self.iter)
-
- filename = filename_tmpl.format(self.iter + 1)
- filepath = osp.join(out_dir, filename)
- optimizer = self.optimizer if save_optimizer else None
- save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
- # in some environments, `os.symlink` is not supported, you may need to
- # set `create_symlink` to False
- if create_symlink:
- dst_file = osp.join(out_dir, 'latest.pth')
- if platform.system() != 'Windows':
- mmcv.symlink(filename, dst_file)
- else:
- shutil.copy(filepath, dst_file)
-
- def register_training_hooks(self,
- lr_config,
- optimizer_config=None,
- checkpoint_config=None,
- log_config=None,
- momentum_config=None,
- custom_hooks_config=None):
- """Register default hooks for iter-based training.
-
- Checkpoint hook, optimizer stepper hook and logger hooks will be set to
- `by_epoch=False` by default.
-
- Default hooks include:
-
- +----------------------+-------------------------+
- | Hooks | Priority |
- +======================+=========================+
- | LrUpdaterHook | VERY_HIGH (10) |
- +----------------------+-------------------------+
- | MomentumUpdaterHook | HIGH (30) |
- +----------------------+-------------------------+
- | OptimizerStepperHook | ABOVE_NORMAL (40) |
- +----------------------+-------------------------+
- | CheckpointSaverHook | NORMAL (50) |
- +----------------------+-------------------------+
- | IterTimerHook | LOW (70) |
- +----------------------+-------------------------+
- | LoggerHook(s) | VERY_LOW (90) |
- +----------------------+-------------------------+
- | CustomHook(s) | defaults to NORMAL (50) |
- +----------------------+-------------------------+
-
- If custom hooks have same priority with default hooks, custom hooks
- will be triggered after default hooks.
- """
- if checkpoint_config is not None:
- checkpoint_config.setdefault('by_epoch', False)
- if lr_config is not None:
- lr_config.setdefault('by_epoch', False)
- if log_config is not None:
- for info in log_config['hooks']:
- info.setdefault('by_epoch', False)
- super(IterBasedRunner, self).register_training_hooks(
- lr_config=lr_config,
- momentum_config=momentum_config,
- optimizer_config=optimizer_config,
- checkpoint_config=checkpoint_config,
- log_config=log_config,
- timer_config=IterTimerHook(),
- custom_hooks_config=custom_hooks_config)
diff --git a/spaces/talhaty/Faceswapper/app.py b/spaces/talhaty/Faceswapper/app.py
deleted file mode 100644
index 2954e36d4f57c4b351f2ed926d442363b64ec971..0000000000000000000000000000000000000000
--- a/spaces/talhaty/Faceswapper/app.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import gradio as gr
-import subprocess
-import shutil
-import os
-
-def run_scripts(target, source, use_face_enhancer):
- if target is None or (not use_face_enhancer and source is None):
- return None
- target_extension = os.path.splitext(target.name)[-1]
- output_path1 = "output1" + target_extension
- output_path2 = "output2" + target_extension
-
- if not use_face_enhancer:
- # Run the face swapper first (skipped when only the face enhancer is requested)
- cmd1 = ["python3", "run.py", "-s", source.name, "-t", target.name, "-o", output_path1, "--frame-processor", "face_swapper"]
- subprocess.run(cmd1)
-
- # Run the face enhancer on the swapped output (or directly on the target when only enhancing)
- cmd2 = ["python3", "run.py", "-t", target.name if use_face_enhancer else output_path1, "-o", output_path2, "--frame-processor", "face_enhancer"]
- subprocess.run(cmd2)
-
- if not use_face_enhancer:
- os.remove(source.name)
- os.remove(target.name)
-
- return output_path2
-
-iface = gr.Interface(
- fn=run_scripts,
- inputs=[
- "file",
- "file",
- gr.inputs.Checkbox(default=False, label="Use only Face Enhancer") # New checkbox input
- ],
- outputs="file",
- title="Face swapper",
- description="Upload a target image/video and a source image to swap faces.",
- live=True
-)
-
-iface.launch()
diff --git a/spaces/tanish2502/ChatGPT-AI-Assistant-App/app.py b/spaces/tanish2502/ChatGPT-AI-Assistant-App/app.py
deleted file mode 100644
index ccc01c474b99318e223c33a059d915d6e41d02d0..0000000000000000000000000000000000000000
--- a/spaces/tanish2502/ChatGPT-AI-Assistant-App/app.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import gradio as gr
-import openai
-import os
-from dotenv import load_dotenv
-from pydub import AudioSegment
-
-load_dotenv()
-
-#accessing openapi Key
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-audio_messages = [{"role": "system", "content": 'You are an AI assistant expert. Respond to all input in precise, crisp and easy to understand language.'}]
-text_messages = [{"role": "system", "content": 'You are an AI assistant expert. Respond to all input in precise, crisp and easy to understand language.'}]
-global user_text_input, text_output, user_audio_input, audio_output
-
-"""
-It seems like the gr.Audio source is not generating a WAV file, which is required for the openai.Audio.transcribe() method to work.
-To convert the audio file to WAV format, I have used the Pydub library.
-"""
-
-def audio_transcribe(audio):
- global audio_messages
- audio_message = audio_messages
-
- #audio processing to whisper API.
- audio_file = AudioSegment.from_file(audio)
- audio_file.export("temp.wav", format="wav")
- final_audio_file = open("temp.wav", "rb")
- transcript = openai.Audio.transcribe("whisper-1", final_audio_file)
- os.remove("temp.wav")
-
- #transcripted input to chatGPT API for chatCompletion
- audio_message.append({"role": "user", "content": transcript["text"]}) # type: ignore
- response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=audio_message)
- system_message = response["choices"][0]["message"] # type: ignore
- audio_message.append(system_message)
-
- chat_transcript = ""
- for message in audio_message:
- if message['role'] != 'system':
- chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
-
- return chat_transcript
-
-def text_transcribe(name):
- global text_messages
- text_message = text_messages
- user_text_input.update("")
- #transcripted input to chatGPT API
- text_message.append({"role": "user", "content": name}) # type: ignore
- response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=text_message)
- system_message = response["choices"][0]["message"] # type: ignore
- text_message.append(system_message)
-
- chat_transcript = ""
- for message in text_message:
- if message['role'] != 'system':
- chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
- return chat_transcript
-
-title = """
-Your Chat-GPT AI Assistant at your Service!! 😎
-"""
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
- gr.HTML(title)
- with gr.Tab("Audio Input"):
- with gr.Row():
- user_audio_input = (gr.Audio(source="microphone", type="filepath", label="Speak Here"))
- audio_input = user_audio_input
- audio_output = gr.Textbox(label="AI Response", lines=20, placeholder="AI Response will be displayed here...")
- with gr.Row():
- audio_submit_button = gr.Button("Submit")
- with gr.Tab("Text Input"):
- with gr.Row():
- user_text_input = (gr.Textbox(label="Type Here", lines=20, placeholder="Type your message here..."))
- text_input = user_text_input
- text_output = gr.Textbox(label="AI Response", lines=20, placeholder="AI Response will be displayed here...")
- with gr.Row():
- text_submit_button = gr.Button("Submit")
- audio_submit_button.click(fn=audio_transcribe, inputs=audio_input, outputs=audio_output)
- text_submit_button.click(fn=text_transcribe, inputs=text_input, outputs=text_output)
-
- gr.Markdown("Made with ❤️ by Tanish Gupta. Credits to 🤗 Spaces for Hosting this App")
-
-demo.launch()
diff --git a/spaces/tanishqvashisht/comicInator/README.md b/spaces/tanishqvashisht/comicInator/README.md
deleted file mode 100644
index 4e4aa3573b1cfb76064387c6cef48bcbfc866d88..0000000000000000000000000000000000000000
--- a/spaces/tanishqvashisht/comicInator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ComicInator
-emoji: 😻
-colorFrom: red
-colorTo: green
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll Fixed.md b/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll Fixed.md
deleted file mode 100644
index dcc5935ff22d9c9d58c7c6cce0a3ac25920e1ce6..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll Fixed.md
+++ /dev/null
@@ -1,97 +0,0 @@
-## Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll
-
-
-
-**CLICK HERE ✏ ✏ ✏ [https://maudaracte.blogspot.com/?file=2twRLV](https://maudaracte.blogspot.com/?file=2twRLV)**
-
-
-
-# How to Download and Install Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll
-
-
-
-If you are a fan of car simulation games, you might be interested in downloading and installing Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll. This is a bundle of 13 DLCs that add more licensed cars, engines, rims, and features to the base game of Car Mechanic Simulator 2018. In this article, we will show you how to download and install Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll in a few easy steps.
-
-
-
-## What is Car Mechanic Simulator 2018 - DLC Gold Pack?
-
-
-
-Car Mechanic Simulator 2018 - DLC Gold Pack is a collection of 13 downloadable content packs for Car Mechanic Simulator 2018, a popular car simulation game developed by Red Dot Games and published by PlayWay S.A. The DLCs included in this bundle are:
-
-
-
-- Bentley DLC
-
-- Dodge DLC
-
-- Dodge Modern DLC
-
-- Ford DLC
-
-- Lotus DLC
-
-- Maserati DLC
-
-- Pagani DLC
-
-- Plymouth DLC
-
-- Porsche DLC
-
-- Ram DLC
-
-- Mercedes-Benz DLC
-
-- Chrysler DLC
-
-- Rims DLC
-
-
-
-Each DLC adds one or more licensed cars from famous brands, such as Bentley, Dodge, Ford, Lotus, Maserati, Pagani, Plymouth, Porsche, Ram, Mercedes-Benz, and Chrysler. Some DLCs also add new engines, rims, tuning options, and liveries. With these DLCs, you can expand your car collection, repair and customize more vehicles, and test them on different tracks.
-
-
-
-## Why Download Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll?
-
-
-
-Downloading Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll has several advantages over buying the DLCs separately or through Steam. First of all, you can save money by getting all the DLCs in one package at a discounted price. Second, you can enjoy faster download speeds and avoid bandwidth limitations by using a torrent client. Third, you can install the DLCs easily without having to deal with Steam activation or DRM issues.
-
-
-
-## How to Download Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll?
-
-
-
-To download Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll, you need to follow these steps:
-
-
-
-1. Make sure you have Car Mechanic Simulator 2018 installed on your PC. You can buy the base game from Steam or other online stores.
-
-2. Download a torrent client, such as uTorrent or BitTorrent. Install it on your PC and run it.
-
-3. Go to a reliable torrent site, such as The Pirate Bay or Kickass Torrents. Search for "Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll" or use this link: [link]. Choose a torrent with many seeders and leechers for faster download speeds.
-
-4. Download the torrent file and open it with your torrent client. Choose a destination folder for the downloaded files and start the download process.
-
-5. Wait until the download is complete. You should have a folder with the following files: [files].
-
-6. Extract the files using WinRAR or 7-Zip. You should have another folder with the following files: [files].
-
-7. Copy and paste the files into your Car Mechanic Simulator 2018 installation folder. Overwrite any existing files if prompted.
-
-8. Run the game and enjoy the new content.
-
-
-
-## Conclusion
-
-
-
-Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll is a great way to enhance your car simulation experience with more cars, engines, rims, and features. By following our guide, you can download and install Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Full
-
- 1b8d091108
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Crazy Talk Animator Pro 1.2 !EXCLUSIVE! Crack.md b/spaces/terfces0erbo/CollegeProjectV2/Crazy Talk Animator Pro 1.2 !EXCLUSIVE! Crack.md
deleted file mode 100644
index a6dd23271fd984c41906667bb9d86d24583d8aec..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Crazy Talk Animator Pro 1.2 !EXCLUSIVE! Crack.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/test12356/SUI-svc-3.0/vdecoder/hifigan/utils.py b/spaces/test12356/SUI-svc-3.0/vdecoder/hifigan/utils.py
deleted file mode 100644
index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000
--- a/spaces/test12356/SUI-svc-3.0/vdecoder/hifigan/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import glob
-import os
-import matplotlib
-import torch
-from torch.nn.utils import weight_norm
-matplotlib.use("Agg")
-import matplotlib.pylab as plt
-
-
-def plot_spectrogram(spectrogram):
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
-
- fig.canvas.draw()
- plt.close()
-
- return fig
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def apply_weight_norm(m):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- weight_norm(m)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def save_checkpoint(filepath, obj):
- print("Saving checkpoint to {}".format(filepath))
- torch.save(obj, filepath)
- print("Complete.")
-
-
-def del_old_checkpoints(cp_dir, prefix, n_models=2):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern) # get checkpoint paths
- cp_list = sorted(cp_list) # sort by iteration
- if len(cp_list) > n_models: # if more than n_models checkpoints are found
- for cp in cp_list[:-n_models]: # delete the oldest checkpoints other than the latest n_models
- open(cp, 'w').close() # empty file contents
- os.unlink(cp) # delete file (moved to trash when using Colab)
-
-
-def scan_checkpoint(cp_dir, prefix):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return None
- return sorted(cp_list)[-1]
-
diff --git a/spaces/theekshana/boardpac_chat_app_test/app_agent.py b/spaces/theekshana/boardpac_chat_app_test/app_agent.py
deleted file mode 100644
index d64596e2d7ab9182b7d5cb969767ecce39db5c64..0000000000000000000000000000000000000000
--- a/spaces/theekshana/boardpac_chat_app_test/app_agent.py
+++ /dev/null
@@ -1,246 +0,0 @@
-"""
-Python Backend API to chat with private data
-
-08/16/2023
-D.M. Theekshana Samaradiwakara
-
-python -m streamlit run app.py
-"""
-
-import os
-import time
-import streamlit as st
-from streamlit.logger import get_logger
-
-logger = get_logger(__name__)
-
-from ui.htmlTemplates import css, bot_template, user_template, source_template
-from config import MODELS, DATASETS
-
-from qaPipeline import QAPipeline
-import qaPipeline_functions
-from faissDb import create_faiss
-
-# loads environment variables
-from dotenv import load_dotenv
-load_dotenv()
-
-isHuggingFaceHubEnabled = os.environ.get('ENABLE_HUGGINGFSCE_HUB_MODELS')
-isOpenAiApiEnabled = os.environ.get('ENABLE_OPENAI_API_MODELS')
-
-st.set_page_config(page_title="Chat with data",
- page_icon=":books:")
-st.write(css, unsafe_allow_html=True)
-
-qaPipeline = QAPipeline()
-# qaPipeline = qaPipeline_functions
-
-def initialize_session_state():
- # Initialise all session state variables with defaults
- SESSION_DEFAULTS = {
- "model": MODELS["DEFAULT"],
- "dataset": DATASETS["DEFAULT"],
- "chat_history": None,
- "is_parameters_changed":False,
- "show_source_files": False,
- "user_question":'',
- }
-
- for k, v in SESSION_DEFAULTS.items():
- if k not in st.session_state:
- st.session_state[k] = v
-
-def side_bar():
- with st.sidebar:
- st.subheader("Chat parameters")
-
- with st.form('param_form'):
- st.info('Info: use openai chat model for best results')
- chat_model = st.selectbox(
- "Chat model",
- MODELS,
- key="chat_model",
- help="Select the LLM model for the chat",
- # on_change=update_parameters_change,
- )
-
- # data_source = st.selectbox(
- # "dataset",
- # DATASETS,
- # key="data_source",
- # help="Select the private data_source for the chat",
- # on_change=update_parameters_change,
- # )
-
- st.session_state.dataset = "DEFAULT"
-
- show_source = st.checkbox(
- label="show source files",
- help="Select this to show relevant source files for the query",
- help="Select this to show relavant source files for the query",
- # on_change=update_parameters_change,
- )
-
- submitted = st.form_submit_button(
- "Save Parameters",
- # on_click=update_parameters_change
- )
-
- if submitted:
- parameters_change_button(chat_model, show_source)
-
-
- # if st.session_state.is_parameters_changed:
- # st.button("Update",
- # on_click=parameters_change_button,
- # args=[chat_model, show_source]
- # )
-
- st.markdown("\n")
-
- # if st.button("Create FAISS db"):
- # try:
- # with st.spinner('creating faiss vector store'):
- # create_faiss()
- # st.success('faiss saved')
- # except Exception as e:
- # st.error(f"Error : {e}")#, icon=":books:")
- # return
-
- st.markdown(
- "### How to use\n"
- "1. Select the chat model\n" # noqa: E501
- "2. Select \"show source files\" to show the source files related to the answer.📄\n"
- "3. Ask a question about the documents💬\n"
- )
-
-
-def chat_body():
- st.header("Chat with your own data:")
- with st.form('chat_body'):
-
- user_question = st.text_input(
- "Ask a question about your documents:",
- placeholder="enter question",
- key='user_question',
- # on_change=submit_user_question,
- )
-
- submitted = st.form_submit_button(
- "Submit",
- # on_click=update_parameters_change
- )
-
- if submitted:
- submit_user_question()
-
- # if user_question:
- # submit_user_question()
- # # user_question = False
-
-
-
-def submit_user_question():
- with st.spinner("Processing"):
- user_question = st.session_state.user_question
- # st.success(user_question)
- handle_userinput(user_question)
- # st.session_state.user_question=''
-
-
-def main():
-
- initialize_session_state()
-
- side_bar()
-
- chat_body()
-
-
-def update_parameters_change():
- st.session_state.is_parameters_changed = True
-
-
-def parameters_change_button(chat_model, show_source):
- st.session_state.model = chat_model
- st.session_state.dataset = "DEFAULT"
- st.session_state.show_source_files = show_source
- st.session_state.is_parameters_changed = False
-
- alert = st.success("chat parameters updated")
- time.sleep(1) # Wait for 1 second before clearing the alert
- alert.empty() # Clear the alert
-
-@st.cache_data
-def get_answer_from_backend(query, model, dataset):
- # response = qaPipeline.run(query=query, model=model, dataset=dataset)
- response = qaPipeline.run_agent(query=query, model=model, dataset=dataset)
- return response
-
-
-def show_query_response(query, response, show_source_files):
- docs = []
- if isinstance(response, dict):
- answer, docs = response['answer'], response['source_documents']
- else:
- answer = response
-
- st.write(user_template.replace(
- "{{MSG}}", query), unsafe_allow_html=True)
- st.write(bot_template.replace(
- "{{MSG}}", answer ), unsafe_allow_html=True)
-
- if show_source_files:
- # st.write(source_template.replace(
- # "{{MSG}}", "source files" ), unsafe_allow_html=True)
-
- if len(docs)>0 :
- st.markdown("#### source files : ")
- for source in docs:
- # st.info(source.metadata)
- with st.expander(source.metadata["source"]):
- st.markdown(source.page_content)
-
- # st.write(response)
-
-
-def is_query_valid(query: str) -> bool:
- if (not query) or (query.strip() == ''):
- st.error("Please enter a question!")
- return False
- return True
-
-
-def handle_userinput(query):
- # Get the answer from the chain
- try:
- if not is_query_valid(query):
- st.stop()
-
- model = MODELS[st.session_state.model]
- dataset = DATASETS[st.session_state.dataset]
- show_source_files = st.session_state.show_source_files
-
- # Try to access openai and deeplake
- print(f">\n model: {model} \n dataset : {dataset} \n show_source_files : {show_source_files}")
-
- response = get_answer_from_backend(query, model, dataset)
-
- show_query_response(query, response, show_source_files)
-
-
- except Exception as e:
- # logger.error(f"Answer retrieval failed with {e}")
- st.error(f"Error ocuured! see log info for more details.")#, icon=":books:")
- print(f"Streamlit handle_userinput Error : {e}")#, icon=":books:")
- return
-
-
-if __name__ == "__main__":
- main()
-
-# initialize_session_state()
-
-# side_bar()
-
-# chat_body()
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Acidic Wow Bot Download The Best WoW Bot for Shadowlands Classic and More.md b/spaces/tialenAdioni/chat-gpt-api/logs/Acidic Wow Bot Download The Best WoW Bot for Shadowlands Classic and More.md
deleted file mode 100644
index 41bb60b2ad2295c48dec9bfc0a814e2f61a8e06c..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Acidic Wow Bot Download The Best WoW Bot for Shadowlands Classic and More.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
Acidic Wow Bot: A Non-Detectable World of Warcraft Bot
-
If you are looking for a way to automate your World of Warcraft gameplay, you might be interested in Acidic Wow Bot. This is a project that aims to develop a non-detectable wow bot that can travel without human interference. In this article, we will explain what Acidic Wow Bot is, how it works, and where you can download it.
Acidic Wow Bot is a program that runs on your computer and controls your World of Warcraft character. It can perform various tasks such as leveling, farming, questing, and PvPing. The goal of Acidic Wow Bot is to make your wow experience easier and more enjoyable, while avoiding detection by Blizzard's anti-cheat system.
-
How does Acidic Wow Bot work?
-
Acidic Wow Bot works by using a combination of programming languages such as Delphi, Lua, and AutoIt. It uses a predefined set of coordinates to navigate the game world and interact with NPCs and enemies. It also uses a scripting platform called Awful WoW Scripting that provides PvE and PvP rotations, bots, and tools for various classes and situations.
-
Where can I download Acidic Wow Bot?
-
You can download Acidic Wow Bot from SourceForge.net[^1^], a website that hosts open source software projects. You can also find more information about Acidic Wow Bot on elitepvpers.com[^2^], a forum for wow hacks, cheats, and bots. However, be aware that using Acidic Wow Bot or any other wow bot is against Blizzard's terms of service and may result in account suspension or ban. Use it at your own risk.
-
-
What are the benefits of using Acidic Wow Bot?
-
Using Acidic Wow Bot can save you a lot of time and effort in World of Warcraft. You can level up your character faster, collect more gold and resources, complete more quests and achievements, and dominate in PvP battles. You can also enjoy other aspects of the game such as exploring, socializing, or customizing your character without worrying about grinding or farming.
-
What are the risks of using Acidic Wow Bot?
-
Using Acidic Wow Bot or any other wow bot is not without risks. Blizzard has a strict policy against botting and cheating in World of Warcraft. They use various methods to detect and ban bot users, such as Warden, their anti-cheat software, or manual reports from other players. If you are caught using Acidic Wow Bot, you may lose your account, your progress, and your money. Therefore, you should always be careful and discreet when using Acidic Wow Bot or any other wow bot.
-
How to use Acidic Wow Bot safely?
-
There is no guarantee that you can use Acidic Wow Bot safely, but there are some tips that can help you reduce the chances of getting banned. First, you should always use the latest version of Acidic Wow Bot and Awful WoW Scripting, as they may have updated features or fixes that can avoid detection. Second, you should avoid using Acidic Wow Bot for long periods of time or in crowded areas, as this may raise suspicion from other players or GMs. Third, you should use a VPN or a proxy to hide your IP address and location from Blizzard. Fourth, you should never share your account or your bot with anyone else, as this may compromise your security and privacy.
-
how to install acidic wow bot for free
-acidic wow bot cracked version download
-best settings for acidic wow bot
-acidic wow bot review and tutorial
-acidic wow bot license key generator
-where to buy acidic wow bot cheap
-acidic wow bot vs other wow bots
-acidic wow bot features and benefits
-acidic wow bot support and updates
-acidic wow bot download link 2023
-acidic wow bot for mac and windows
-acidic wow bot alternatives and comparisons
-acidic wow bot refund policy and guarantee
-acidic wow bot testimonials and feedback
-acidic wow bot discount code and coupon
-acidic wow bot troubleshooting and errors
-acidic wow bot compatibility and requirements
-acidic wow bot demo and trial
-acidic wow bot affiliate program and commission
-acidic wow bot custom scripts and addons
-how to use acidic wow bot safely and securely
-acidic wow bot tips and tricks
-acidic wow bot forum and community
-acidic wow bot FAQ and help
-acidic wow bot pros and cons
-how to uninstall acidic wow bot completely
-how to update acidic wow bot to the latest version
-how to backup and restore acidic wow bot settings
-how to optimize acidic wow bot performance and speed
-how to make money with acidic wow bot
-how to avoid bans with acidic wow bot
-how to level up fast with acidic wow bot
-how to farm gold with acidic wow bot
-how to do quests with acidic wow bot
-how to pvp with acidic wow bot
-how to raid with acidic wow bot
-how to craft with acidic wow bot
-how to fish with acidic wow bot
-how to gather with acidic wow bot
-how to auction with acidic wow bot
-how to chat with acidic wow bot
-how to roleplay with acidic wow bot
-how to explore with acidic wow bot
-how to customize acidic wow bot appearance and sound
-how to train skills with acidic wow bot
-how to manage inventory with acidic wow bot
-how to use hotkeys with acidic wow bot
-how to automate tasks with acidic wow bot
-how to monitor stats with acidic wow bot
-how to report bugs with acidic wow bot
e753bf7129
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Love Aaj Kal 2 Full Movie In Hindi Cast) - Meet The Stars.md b/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Love Aaj Kal 2 Full Movie In Hindi Cast) - Meet The Stars.md
deleted file mode 100644
index 3d414601a7d57a5d54cdf44343f7b33266c186fa..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Love Aaj Kal 2 Full Movie In Hindi Cast) - Meet The Stars.md
+++ /dev/null
@@ -1,184 +0,0 @@
-
-
HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W): The Best Way to Watch the Latest Bollywood Romance
-
-
If you are a fan of Bollywood movies, you probably know that Love Aaj Kal 2 is one of the most anticipated releases of 2020. The movie is a sequel to the 2009 hit Love Aaj Kal, which starred Saif Ali Khan and Deepika Padukone. The movie is directed by Imtiaz Ali, who is known for his romantic dramas such as Jab We Met, Rockstar, and Tamasha.
-
-
Love Aaj Kal 2 features Kartik Aaryan and Sara Ali Khan as the lead pair, who play two different individuals on a journey of love, loss and life through the phases of reincarnation. The movie also stars Randeep Hooda and Arushi Sharma in supporting roles. The movie explores the concept of love in different eras and how it changes with time and circumstances.
-
HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)
But how can you watch this movie in HD quality on your device? Well, you can use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W), which is a free and easy way to stream or download the movie online. HD Online Player is a web-based application that allows you to watch any movie or video in high definition on your browser or device. You can also download the movie or video to watch later offline.
-
-
What is HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)?
-
-
HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a web-based application that lets you watch Love Aaj Kal 2 in HD quality on your device. You can access HD Online Player from any browser or device and enjoy the movie without any hassle or interruption.
-
-
HD Online Player works by fetching the movie or video from various sources and streaming it to your device in the best possible quality. You can also choose the resolution, format, and language of the movie or video according to your preference. You can also adjust the volume, brightness, contrast, and speed of the playback.
-
-
HD Online Player also allows you to download the movie or video to your device for offline viewing. You can choose the download quality, size, and location of the file according to your convenience. You can also pause, resume, or cancel the download at any time.
-
-
How to Use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)?
-
-
Using HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is very simple and easy. You can follow these steps to watch or download Love Aaj Kal 2 in HD quality on your device:
-
-
-
Go to the HD Online Player website and type "Love Aaj Kal 2" in the search box.
-
Click on the result that says "Love Aaj Kal 2 Full Movie In Hindi W".
-
On the page that opens, you will see a player that will start playing the movie in HD quality. You can also see some options below the player such as play, pause, stop, full screen, volume, resolution, format, language, subtitles, etc.
-
If you want to stream the movie online, you can just watch it on the player. You can also use the options below the player to customize your viewing experience.
-
If you want to download the movie offline, you can click on the download button below the player. You can also choose the download quality, size, and location of the file before downloading.
-
Once the download is complete, you can find the file on your device and watch it anytime you want.
-
-
-
Congratulations! You have successfully used HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) to watch or download Love Aaj Kal 2 in HD quality on your device.
-
-
Why Should You Use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)?
-
-
Using HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a great way to watch or download Love Aaj Kal 2 in HD quality on your device. Here are some of the benefits of using HD Online Player:
-
Watch Love Aaj Kal 2 online in HD quality with subtitles
-Love Aaj Kal 2 full movie streaming on Netflix India
-How to download Love Aaj Kal 2 HD movie for free
-Love Aaj Kal 2 Hindi movie review and ratings
-Love Aaj Kal 2 cast, plot, songs and trivia
-HD Online Player for Bollywood movies like Love Aaj Kal 2
-Love Aaj Kal 2 vs Love Aaj Kal: Which one is better?
-Best romantic scenes from Love Aaj Kal 2 movie
-Love Aaj Kal 2 box office collection and verdict
-Watch Love Aaj Kal 2 online legally and safely
-Love Aaj Kal 2 full movie in Hindi with English subtitles
-Love Aaj Kal 2 HD movie download link and torrent
-Love Aaj Kal 2 movie online watch free on Hotstar
-Love Aaj Kal 2 Hindi movie trailer and release date
-Love Aaj Kal 2 songs download mp3 and video
-HD Online Player compatible with Love Aaj Kal 2 movie format
-Love Aaj Kal 2 movie memes and jokes
-Love Aaj Kal 2 movie quotes and dialogues
-Watch Love Aaj Kal 2 online without buffering or ads
-Love Aaj Kal 2 full movie in Hindi watch online Dailymotion
-Love Aaj Kal 2 movie behind the scenes and making
-Love Aaj Kal 2 movie awards and nominations
-Love Aaj Kal 2 HD online player for PC and mobile
-Love Aaj Kal 2 movie online booking and tickets
-Love Aaj Kal 2 movie wallpapers and posters
-Watch Love Aaj Kal 2 online with friends and family
-Love Aaj Kal 2 full movie in Hindi dubbed watch online
-Love Aaj Kal 2 movie controversies and scandals
-Love Aaj Kal 2 songs lyrics and meaning
-HD Online Player for watching Hindi movies online free
-Love Aaj Kal 2 movie fan art and edits
-Love Aaj Kal 2 movie analysis and criticism
-Watch Love Aaj Kal 2 online in full HD resolution
-Love Aaj Kal 2 full movie in Hindi watch online YouTube
-Love Aaj Kal 2 songs playlist and ranking
-HD Online Player for streaming movies online fast and smooth
-Love Aaj Kal 2 movie facts and trivia you didn't know
-Watch Love Aaj Kal 2 online with high-quality audio and video
-Love Aaj Kal 2 full movie in Hindi watch online on Amazon Prime Video
-Love Aaj Kal 2 songs video download HD quality
-HD Online Player for playing movies offline without internet connection
-Watch Love Aaj Kal 2 online with subtitles in different languages
-Love Aaj Kal 2 full movie in Hindi watch online on Zee5
-Love Aaj Kal 2 songs ringtone download free
-HD Online Player for watching movies on big screen TV
-Watch Love Aaj Kal 2 online with interactive features and chat
-Love Aaj Kal 2 full movie in Hindi watch online on SonyLIV
-Love Aaj Kal 2 songs remix and mashup
-HD Online Player for watching movies in VR mode
-Watch Love Aaj Kal 2 online with bonus content and extras
-
-
-
You can watch or download Love Aaj Kal 2 in HD quality without any hassle or interruption.
-
You can access HD Online Player from any browser or device and enjoy the movie on any screen size.
-
You can customize your viewing experience by choosing the resolution, format, language, subtitles, etc. of the movie.
-
You can download the movie offline and watch it anytime you want without any internet connection.
-
You can use HD Online Player for free and without any registration or subscription.
-
-
-
These are just some of the benefits of using HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). There are many more that you can discover by yourself once you start using it.
-
What are the Reviews of Love Aaj Kal 2?
-
-
Love Aaj Kal 2 has received mixed reviews from critics and audiences alike. The movie has been praised for its music, cinematography, and performances of the lead actors. However, the movie has also been criticized for its weak script, confusing plot, and lack of originality.
-
-
The movie has a rating of 5.0 out of 10 on IMDb, based on 11,760 user ratings. The movie has a rating of 2.0 out of 5 on Times of India, based on 18 critic reviews. The movie has a rating of 1.8 out of 5 on BookMyShow, based on 34,288 user ratings.
-
-
Some of the positive reviews of the movie are:
-
-
-
"Love Aaj Kal 2 is a visually stunning and emotionally engaging film that explores the complexities of love and relationships in different eras. Kartik Aaryan and Sara Ali Khan deliver stellar performances and share a sizzling chemistry on screen. The music by Pritam is soulful and catchy. The movie is a must-watch for fans of Imtiaz Ali and romantic dramas." - Filmfare
-
-
-
-
"Love Aaj Kal 2 is a refreshing and realistic take on modern-day romance. The movie showcases the struggles and dilemmas of two individuals who are trying to find their true selves and their true love in a fast-changing world. Kartik Aaryan and Sara Ali Khan are impressive as the lead pair and convey their emotions with sincerity and intensity. The movie is a treat for lovers of romance and music." - Bollywood Hungama
-
-
-
Some of the negative reviews of the movie are:
-
-
-
"Love Aaj Kal 2 is a disappointing and dull film that fails to capture the essence and charm of the original. The movie is a mishmash of cliches, stereotypes, and melodrama that make it hard to connect with the characters or the story. Kartik Aaryan and Sara Ali Khan are wasted in their roles and have no chemistry whatsoever. The movie is a letdown for fans of Imtiaz Ali and romance." - Hindustan Times
-
-
-
-
"Love Aaj Kal 2 is a confusing and boring film that tries to be too clever and too deep but ends up being too silly and too shallow. The movie is a mess of timelines, flashbacks, reincarnations, and coincidences that make no sense at all. Kartik Aaryan and Sara Ali Khan are annoying and irritating in their roles and have no spark or charisma. The movie is a disaster for fans of Imtiaz Ali and romance." - Rediff
-
-
How to Enjoy Love Aaj Kal 2 with HD Online Player?
-
-
Now that you know how to use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) to watch or download Love Aaj Kal 2 in HD quality on your device, you may be wondering how to enjoy the movie to the fullest. Here are some tips to help you have a great time with Love Aaj Kal 2 and HD Online Player:
-
-
-
Choose a comfortable and quiet place to watch the movie. You can watch it on your laptop, tablet, smartphone, or TV. Make sure you have a good Internet connection and enough battery life on your device.
-
Prepare some snacks and drinks to munch on while watching the movie. You can also invite some friends or family members to join you and share the fun.
-
Pay attention to the story and the characters of the movie. Try to understand their emotions, motivations, and actions. Appreciate the music, cinematography, and direction of the movie.
-
Have an open mind and heart when watching the movie. Don't compare it with the original or other movies of the same genre. Don't judge it by its reviews or ratings. Enjoy it for what it is and what it offers.
-
Share your thoughts and feelings about the movie with others. You can write a review, comment, or rating on HD Online Player or other platforms. You can also discuss the movie with other fans or friends online or offline.
-
-
-
These are just some tips to help you enjoy Love Aaj Kal 2 with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). There may be more that you can find by yourself once you start watching it.
-
-Conclusion
-
-
HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a web-based application that lets you watch or download Love Aaj Kal 2 in HD quality on your device. You can access HD Online Player from any browser or device and enjoy the movie without any hassle or interruption.
-
-
Love Aaj Kal 2 is a 2020 Hindi-language romance drama movie that features Kartik Aaryan and Sara Ali Khan as the lead pair, who play two different individuals on a journey of love, loss and life through the phases of reincarnation. The movie also stars Randeep Hooda and Arushi Sharma in supporting roles. The movie explores the concept of love in different eras and how it changes with time and circumstances.
-
-
If you are a fan of Bollywood movies or just love romance movies, you should definitely check out Love Aaj Kal 2 with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). You will not regret it!
-What are the Songs of Love Aaj Kal 2?
-
-
Love Aaj Kal 2 has a soundtrack album composed by Pritam Chakraborty, who also composed the music for the original Love Aaj Kal. The album consists of 11 songs, sung by various artists such as Arijit Singh, Shreya Ghoshal, Mohit Chauhan, Darshan Raval, Antara Mitra, and more. The lyrics are written by Irshad Kamil, who also wrote the lyrics for the original Love Aaj Kal.
-
-
The songs of Love Aaj Kal 2 are a mix of romantic, upbeat, and melancholic tunes that reflect the mood and theme of the movie. Some of the popular songs of the album are:
-
-
-
"Shayad", sung by Arijit Singh, is a soft and soothing song that expresses the uncertainty and hope of love.
-
"Haan Main Galat", sung by Arijit Singh and Shashwat Singh, is a peppy and catchy song that celebrates the imperfections and mistakes of love.
-
"Mehrama", sung by Darshan Raval and Antara Mitra, is a sad and emotional song that depicts the loneliness and longing of love.
-
"Rahogi Meri", sung by Arijit Singh, is a sweet and romantic song that conveys the promise and commitment of love.
-
"Yeh Dooriyan", sung by Mohit Chauhan, is a remake of the classic song from the original Love Aaj Kal, that portrays the distance and pain of love.
-
-
-
The songs of Love Aaj Kal 2 have received positive reviews from critics and audiences alike. The songs have also been streamed and downloaded millions of times on various platforms such as YouTube, Spotify, JioSaavn, Gaana, etc.
-
-How to Download Love Aaj Kal 2 Songs with HD Online Player?
-
-
If you want to download Love Aaj Kal 2 songs in HD quality on your device, you can use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W), which is a web-based application that lets you download any movie or video in high definition on your browser or device. You can also use HD Online Player to download Love Aaj Kal 2 songs in MP3 format.
-
-
To download Love Aaj Kal 2 songs with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W), you can follow these steps:
-
-
-
Go to the HD Online Player website and type "Love Aaj Kal 2 songs" in the search box.
-
Click on the result that says "Love Aaj Kal 2 Songs - Full Album | Kartik Aaryan & Sara Ali Khan | Pritam | Jukebox".
-
On the page that opens, you will see a player that will start playing the songs in HD quality. You can also see some options below the player such as play, pause, stop, full screen, volume, resolution, format, language, subtitles, etc.
-
If you want to download the songs in MP3 format, you can click on the download button below the player. You can also choose the download quality, size, and location of the file before downloading.
-
Once the download is complete, you can find the file on your device and listen to it anytime you want.
-
-
-
Congratulations! You have successfully downloaded Love Aaj Kal 2 songs with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W).
-Conclusion
-
-
HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a web-based application that lets you watch or download Love Aaj Kal 2 in HD quality on your device. You can also use HD Online Player to watch or download Love Aaj Kal 2 songs in HD or MP3 format. You can access HD Online Player from any browser or device and enjoy the movie or the songs without any hassle or interruption.
-
-
Love Aaj Kal 2 is a 2020 Hindi-language romance drama movie that features Kartik Aaryan and Sara Ali Khan as the lead pair, who play two different individuals on a journey of love, loss and life through the phases of reincarnation. The movie also stars Randeep Hooda and Arushi Sharma in supporting roles. The movie explores the concept of love in different eras and how it changes with time and circumstances.
-
-
Love Aaj Kal 2 also has a soundtrack album composed by Pritam Chakraborty, who also composed the music for the original Love Aaj Kal. The album consists of 11 songs, sung by various artists such as Arijit Singh, Shreya Ghoshal, Mohit Chauhan, Darshan Raval, Antara Mitra, and more. The songs are a mix of romantic, upbeat, and melancholic tunes that reflect the mood and theme of the movie.
-
-
If you are a fan of Bollywood movies or just love romance movies, you should definitely check out Love Aaj Kal 2 with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). You will not regret it!
679dcb208e
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crossy Road How to Play the Viral Hit Game on Your Mobile Device for Free.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crossy Road How to Play the Viral Hit Game on Your Mobile Device for Free.md
deleted file mode 100644
index 860858e7ac6d89fdeba7a08a5d17d052d755a7c6..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crossy Road How to Play the Viral Hit Game on Your Mobile Device for Free.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-
Crossy Road Mobile Free Download: How to Play the Viral Arcade Game on Your Phone
-
If you love arcade games, you have probably heard of Crossy Road, the hit game that has been downloaded over 200 million times worldwide. Crossy Road is a simple but addictive game that challenges you to cross roads, train tracks, and rivers without getting hit by cars, trains, or falling into the water. Sounds easy, right? Well, not quite. The game gets harder as you go along, with more obstacles, faster traffic, and unpredictable environments. You also have to collect coins and unlock over 150 different characters, each with their own unique abilities and settings. In this article, we will show you how to download Crossy Road for free on your mobile device, how to play the game, and how to score higher and avoid obstacles. We will also give you our honest review of the game, its pros and cons, and what to expect from future updates.
How to Download Crossy Road for Free on Android and iOS Devices
-
Crossy Road is available for free on both Android and iOS devices. You can download it from the Google Play Store or the App Store, depending on your device. The game is compatible with most devices that run Android 4.4 or higher or iOS 10.0 or higher. The game also supports Android TV, so you can play it on the big screen with a compatible device. The game does not require an internet connection to play, but you will need one to access some features, such as online multiplayer, video ads, and cloud saving. The game also contains in-app purchases that allow you to buy more coins or remove ads.
-
Crossy Road Gameplay: How to Cross Roads, Train Tracks, and Rivers Without Getting Squashed
-
The gameplay of Crossy Road is simple but fun. You control a character that has to cross various obstacles without dying. You tap the screen to make your character hop forward, and swipe left or right to move sideways. You have to avoid cars, trucks, buses, trains, boats, logs, crocodiles, eagles, and other hazards that can kill you instantly. You also have to watch out for the edge of the screen, which moves forward as you progress. If you fall behind or stop moving for too long, you will be caught by an eagle that will end your run.
-
As you play, you will collect coins that appear randomly on the road or in the air. You can use these coins to buy new characters from a vending machine. There are over 150 characters to unlock, each with their own appearance, sound effects, and environment. For example, you can play as a chicken, a frog, a unicorn, a robot, a zombie, a dragon, a penguin, a kangaroo, a dinosaur, a celebrity, and many more. Some characters have special abilities that can help or hinder your gameplay. For example, some characters can fly over obstacles or swim in water, while others can explode or attract more enemies.
-
The game also has various challenges and achievements that you can complete and collect. For example, you can try to cross a certain number of roads, collect a certain number of coins, or play with a specific character. Completing these challenges will reward you with more coins or new characters. You can also view your achievements and compare them with other players on the leaderboard.
-
Crossy Road Tips and Tricks: How to Score Higher and Avoid Obstacles
-
If you want to improve your skills and score higher in Crossy Road, here are some tips and tricks that you can follow:
-
crossy road game download for android
-crossy road app store free
-crossy road windows 10 pc download
-crossy road play online no download
-crossy road apk mod unlimited coins
-crossy road ios download latest version
-crossy road microsoft store free game
-crossy road cheats and hacks download
-crossy road android tv apk
-crossy road disney download for ipad
-crossy road multiplayer mode download
-crossy road characters unlock free
-crossy road review and ratings download
-crossy road update download new features
-crossy road hipster whale free game
-crossy road arcade machine download
-crossy road minecraft map download
-crossy road lego set free shipping
-crossy road plush toys download coupon
-crossy road soundtrack download mp3
-crossy road tips and tricks free guide
-crossy road wallpaper download hd
-crossy road pac man edition free
-crossy road halloween update download
-crossy road chinese new year free gift
-crossy road easter eggs and secrets download
-crossy road zombie mode free play
-crossy road vr download for oculus quest
-crossy road world record score free video
-crossy road trivia and facts download quiz
-crossy road memes and jokes free laugh
-crossy road fan art and comics download pdf
-crossy road merchandise and accessories free delivery
-crossy road stickers and emojis download app
-crossy road birthday party ideas free printable
-crossy road cake and cupcakes download recipe
-crossy road costume and cosplay free tutorial
-crossy road tattoo and nail art download design
-crossy road quiz and personality test free online
-crossy road coloring pages and activity book download pdf
-crossy road crochet and knitting patterns free download pdf
-crossy road origami and paper crafts download instructions
-crossy road crossword and word search puzzles free printable
-crossy road bingo and card games download printables
-crossy road board game and dice game free shipping
-crossy road monopoly and uno edition free giveaway
-crossy road trivia pursuit and jeopardy questions free online
-crossy road crossword clue and answer free solver
-crossy road anagram and scrabble words free generator
-
-
Time your jumps carefully. Don't rush or hesitate too much, as both can lead to fatal mistakes. Watch the traffic patterns and the gaps between the vehicles, and jump when it is safe to do so.
-
Use coins and gifts wisely. Coins can help you unlock more characters, which can make the game more fun and diverse. Gifts are random boxes that appear every few minutes or after watching a video ad. They contain coins or new characters, so don't miss them.
-
Play with your friends and family. Crossy Road has a multiplayer mode that allows you to play with up to four people on the same device or online. You can cooperate or compete with each other, and see who can cross the most roads. Playing with others can make the game more enjoyable and challenging.
-
-
Crossy Road Review: Why You Should Try This Addictive and Fun Game
-
Crossy Road is one of the most popular and successful arcade games of all time. It has received rave reviews from both players and critics, who praised its simple but addictive gameplay, its colorful and pixelated graphics, its humorous and varied characters, and its endless replay value. The game has also won several awards, such as the Apple Design Award in 2015 and the Google Play Best of 2014 Award. Here are some of the pros and cons of the game:
-
-
Pros
Cons
-
- Easy to learn but hard to master
- Can be frustrating or repetitive at times
-
- Free to download and play
- Contains ads and in-app purchases
-
- Has over 150 characters and environments to unlock and explore
- Some characters are hard to get or require real money
-
- Has challenges and achievements to complete and collect
- Some challenges are too difficult or random
-
- Has multiplayer mode to play with friends and family
- Multiplayer mode can be laggy or buggy
-
-
The developers of Crossy Road are constantly updating the game with new features and content. They have added new characters, environments, modes, events, and Easter eggs over the years. They have also collaborated with other popular games and franchises, such as Disney, Pac-Man, Minecraft, and The Walking Dead, to create crossover characters and worlds. You can expect more surprises and fun from Crossy Road in the future.
-
Conclusion: Crossy Road Mobile Free Download is a Must-Have Game for Arcade Lovers
-
Crossy Road is a game that will keep you entertained for hours. It is a simple but addictive game that challenges you to cross roads, train tracks, and rivers without getting squashed. It is a game that will make you laugh, cry, scream, and cheer. It is a game that will test your reflexes, skills, patience, and luck. It is a game that will let you play with over 150 different characters, each with their own unique abilities and settings. It is a game that will let you play with your friends and family on the same device or online. It is a game that will never get old or boring.
-
If you love arcade games, you should definitely download Crossy Road for free on your mobile device. You will not regret it. You will have a blast crossing roads, collecting coins, unlocking characters, completing challenges, and having fun.
-
So what are you waiting for? Download Crossy Road now and join the millions of players who are already hooked on this viral arcade game!
-
FAQs
-
-
Q: How do I get more coins in Crossy Road?
-
A: You can get more coins by collecting them on the road or in the air, by opening gifts that appear every few minutes or after watching video ads, by completing challenges that reward you with coins or new characters, or by buying them with real money through in-app purchases.
-
Q: How do I unlock new characters in Crossy Road?
-
A: You can unlock new characters by buying them from a vending machine with coins, by opening gifts that contain new characters or coins, by completing challenges that reward you with new characters or coins, or by finding secret characters that are hidden in the game. Some characters are also available for a limited time during special events or collaborations.
-
Q: How do I play multiplayer mode in Crossy Road?
-
A: You can play multiplayer mode with up to four people on the same device or online. To play on the same device, you need to connect two or more controllers to your device, such as gamepads, keyboards, or mice. To play online, you need to sign in with your Google Play Games or Game Center account, and invite your friends or join a random match. You can also play with nearby players using Bluetooth or Wi-Fi.
-
Q: How do I save my progress in Crossy Road?
-
A: You can save your progress in Crossy Road by signing in with your Google Play Games or Game Center account. This will allow you to sync your coins, characters, achievements, and leaderboard scores across different devices. You can also backup your data to the cloud using the settings menu.
-
Q: How do I contact the developers of Crossy Road?
-
A: You can contact the developers of Crossy Road by visiting their website, following them on social media , or sending them an email at support@yodo1.com. They are always happy to hear from their fans and appreciate any feedback or suggestions.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Color Rummy App and Play Real Cash Rummy Online with Friends.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Color Rummy App and Play Real Cash Rummy Online with Friends.md
deleted file mode 100644
index 6fef4bf3b3e6a0f9c7b9534978f5ee45dd2129da..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Color Rummy App and Play Real Cash Rummy Online with Friends.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-
Online Rummy Game Color Rummy Download: A Fun and Easy Way to Play Rummy Anytime, Anywhere
-
Rummy is one of the most popular card games in the world. It is a game of skill, strategy, and luck that can be enjoyed by people of all ages and backgrounds. But what if you don't have a deck of cards or a partner to play with? Don't worry, there is a solution for you: online rummy game color rummy download.
Color Rummy is a free app that lets you play rummy online with other players or against the computer. You can choose from different game modes, such as classic rummy, gin rummy, or Oklahoma rummy. You can also customize your game settings, such as the number of players, the number of cards, and the difficulty level. Whether you are a beginner or a pro, you will find a challenge that suits your skills and preferences.
-
What is Color Rummy?
-
Color Rummy is an online rummy game that has a unique feature: the cards have different colors. This adds an extra layer of fun and strategy to the game, as you can form sets and runs based on the color of the cards, not just their rank or suit. For example, you can make a set of three red sevens or a run of four green cards.
-
The color of the cards also affects the scoring system. The more colors you use in your melds, the more points you get. However, if you use only one color in your melds, you get a bonus of 50 points. This means that you have to balance between using more colors for higher points or using one color for a bonus.
-
How to play Color Rummy
-
The rules of Color Rummy are similar to the rules of traditional rummy. The objective of the game is to form valid melds (sets or runs) with your cards and get rid of all your cards before your opponents do. A set is a group of three or four cards of the same rank, regardless of their suit or color. A run is a sequence of three or more cards of the same suit and consecutive rank, regardless of their color.
-
color rummy online game free download
-how to play color rummy online game
-best online rummy game with color rummy feature
-download color rummy online game for android
-color rummy online game rules and tips
-online rummy game color rummy apk download
-color rummy online game review and ratings
-online rummy game with color rummy mode
-color rummy online game strategy and tricks
-download color rummy online game for pc
-color rummy online game tournaments and prizes
-online rummy game color rummy app download
-color rummy online game offers and bonuses
-online rummy game with color rummy option
-color rummy online game guide and tutorial
-download color rummy online game for ios
-color rummy online game features and benefits
-online rummy game color rummy download link
-color rummy online game faq and support
-online rummy game with color rummy variation
-color rummy online game demo and trial
-download color rummy online game for windows
-color rummy online game feedback and testimonials
-online rummy game color rummy software download
-color rummy online game advantages and disadvantages
-online rummy game with color rummy challenge
-color rummy online game requirements and compatibility
-download color rummy online game for mac
-color rummy online game comparison and alternatives
-online rummy game color rummy installation guide
-color rummy online game updates and news
-online rummy game with color rummy bonus code
-color rummy online game referral and rewards program
-download color rummy online game for linux
-color rummy online game security and privacy policy
-online rummy game color rummy system requirements
-color rummy online game terms and conditions
-download color rummy online game for chromebook
-color rummy online game customer service and contact details
-online rummy game with color rummy leaderboard and rankings
-
At the beginning of each round, each player is dealt 10 cards (or 7 cards in gin rummy mode). The remaining cards are placed face down on the table as the stock pile. One card is turned face up next to the stock pile as the discard pile. The player to the left of the dealer starts the game by drawing a card from either the stock pile or the discard pile. Then, they can either make a meld with their cards or discard one card to the discard pile. The turn then passes to the next player clockwise.
-
The game continues until one player goes out by getting rid of all their cards. They must have at least one meld in their hand and discard their last card to do so. The other players then count their points based on the value and color of their remaining cards. The player with the lowest score wins the round.
-
Why you should download Color Rummy
-
There are many reasons why you should download Color Rummy and play it on your device. Here are some of them:
-
-
It is free and easy to download and install.
-
It has stunning graphics and sound effects that create an immersive gaming experience.
-
It has different game modes and settings that cater to your preferences and skill level.
-
It has an online multiplayer mode that lets you play with other players from around the world.
-
It has an offline mode that lets you play against the computer when you don't have an internet connection.
-
It has a tutorial mode that teaches you how to play the game and the rules of each game mode.
-
It has a leaderboard and achievements system that tracks your progress and rewards you for your performance.
-
It has a chat feature that lets you communicate with other players and make new friends.
-
-
As you can see, Color Rummy is a fun and easy way to play rummy anytime, anywhere. You can enjoy the game with your friends, family, or strangers online. You can also challenge yourself and improve your skills by playing against the computer. You can also learn new strategies and tips by watching the tutorial and reading the rules. Color Rummy is more than just a game, it is a community of rummy lovers.
-
How to download Color Rummy
-
If you are interested in playing Color Rummy, you can download it for free from the official website or from the app store of your device. Here are the steps to download Color Rummy for Android and iOS devices:
-
For Android devices
-
-
Go to the Google Play Store and search for "Color Rummy".
-
Select the app with the logo of a colorful card and tap on "Install".
-
Wait for the app to download and install on your device.
-
Open the app and sign up with your email or Facebook account.
-
Enjoy playing Color Rummy!
-
-
For iOS devices
-
-
Go to the App Store and search for "Color Rummy".
-
Select the app with the logo of a colorful card and tap on "Get".
-
Enter your Apple ID password or use Touch ID or Face ID to confirm.
-
Wait for the app to download and install on your device.
-
Open the app and sign up with your email or Facebook account.
-
Enjoy playing Color Rummy!
-
-
Tips and tricks for playing Color Rummy
-
If you want to become a better player of Color Rummy, you need to know some tips and tricks that can help you win more games. Here are some of them:
-
Learn the rules and scoring system
-
The first thing you need to do is to learn the rules and scoring system of Color Rummy. You need to know how to form valid melds, how to go out, how to count your points, and how the color of the cards affects your score. You can read the rules of each game mode in the app or watch the tutorial videos. You can also practice by playing against the computer in offline mode.
-
Use the jokers wisely
-
Jokers are special cards that can act as any card you want. They can help you complete your melds faster and score more points. However, you need to use them wisely, as they also have some drawbacks. For example, if you use a joker in a meld, you cannot use that meld for a bonus. Also, if you have a joker in your hand when someone else goes out, you lose 25 points. Therefore, you need to decide when to use a joker and when to save it for later.
-
Plan your moves ahead
-
Rummy is a game of skill and strategy, not just luck. You need to plan your moves ahead and think about what cards you need, what cards you can discard, what cards your opponents might have, and what cards might come up next. You also need to keep track of the cards that have been played and discarded, so you can estimate what cards are left in the stock pile. By planning your moves ahead, you can increase your chances of forming melds faster and going out before your opponents.
-
Bluff your opponents
-
Rummy is also a game of psychology and deception. You need to bluff your opponents by making them think that you have different cards than you actually do. For example, you can discard a card that you don't need but that might be useful for your opponents, so they think that you are close to going out. Or, you can draw a card from the discard pile that doesn't help you but that might confuse your opponents about what melds you are trying to make. By bluffing your opponents, you can make them play differently and make mistakes that benefit you.
-
Conclusion
-
In conclusion, online rummy game color rummy download is a fun and easy way to play rummy anytime, anywhere. You can enjoy the game with different game modes, settings, features, and players. You can also improve your skills by learning the rules, scoring system, tips, and tricks. You can also have fun by bluffing your opponents and chatting with them. Color Rummy is more than just a game, it is a community of rummy lovers. If you are looking for a new way to play rummy, you should download Color Rummy today and join the fun!
-
FAQs
-
Here are some frequently asked questions about online rummy game color rummy download:
-
-
Is Color Rummy safe and secure?
-
Yes, Color Rummy is safe and secure. It uses encryption and authentication technologies to protect your personal and financial information. It also follows the fair play and responsible gaming policies to ensure a safe and enjoyable gaming environment.
-
How can I contact the customer support of Color Rummy?
-
You can contact the customer support of Color Rummy by sending an email to support@colorrummy.com or by filling out the contact form on the website. You can also visit the FAQ section on the app or the website for more information.
-
Can I play Color Rummy with real money?
-
No, Color Rummy is a free-to-play game that does not involve any real money transactions. You can play with virtual coins that you can earn by playing the game or by watching ads. You can also purchase more coins with in-app purchases if you want to.
-
Can I play Color Rummy offline?
-
Yes, you can play Color Rummy offline. You can choose the offline mode on the app and play against the computer. You can also play online when you have an internet connection and sync your progress and coins.
-
Can I invite my friends to play Color Rummy with me?
-
Yes, you can invite your friends to play Color Rummy with you. You can use the invite feature on the app and send them a link to download the game. You can also join or create a private room on the app and play with your friends exclusively.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/IGameGuardian 55.md b/spaces/tioseFevbu/cartoon-converter/scripts/IGameGuardian 55.md
deleted file mode 100644
index 57c4f57aa9a1c43d8c4de291c5a42fbb5c738e7c..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/IGameGuardian 55.md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
IGameGuardian 55: A Cheat Tool for Android Games
-
If you are looking for a way to modify your favorite Android games, you might be interested in IGameGuardian 55. This is a cheat tool that allows you to change various aspects of the games, such as money, health, speed, and more. You can also use it to load emulators and play games from other platforms on your Android device.
IGameGuardian 55 is an updated version of GameGuardian, a popular cheat app that has been around for a long time. It works on rooted Android devices or in virtual environments without root. It supports Android 2.3.3 and above, and can run on ARM, x64, and x86 devices, including x86 emulators.
-
Some of the features of IGameGuardian 55 are:
-
-
Search and edit encrypted values
-
Search by difference, mask, or range
-
Search by data type, such as double, float, dword, byte, etc.
-
Lua scripting support
-
Modify all search results at once
-
Filter search results by address or value
-
Search in the background
-
Fill memory with a value
-
Time jump feature
-
Dump and copy memory
-
Customizable UI and app locale
-
-
To use IGameGuardian 55, you need to download the APK file from the official website and install it on your device. Then, you need to grant it root access or use a virtual space app to run it without root. After that, you can launch the app and select the game you want to hack. You can then use the search and edit functions to find and change the values you want.
-
-
However, you should be careful when using IGameGuardian 55, as it may not work on some games or cause them to crash. You should also avoid using it on online games that have anti-cheat systems, as you may risk losing your account or getting banned. You should only use it for fun and not to ruin the game experience for others.
-
IGameGuardian 55 is a powerful cheat tool that can help you enjoy your Android games in new ways. However, you should use it responsibly and at your own risk.
-
-
One of the games that you can hack with IGameGuardian 55 is Plants vs. Zombies, a popular tower defense game where you have to protect your house from waves of zombies using different plants. With IGameGuardian 55, you can change the amount of sun you have, which is used to plant more plants. You can also change the number of coins you have, which can be used to buy upgrades and items.
-
To hack Plants vs. Zombies with IGameGuardian 55, you need to follow these steps:
-
-
Launch IGameGuardian 55 and tap on Application to select Plants vs. Zombies.
-
Tap on Search and set the search mode to Auto.
-
Enter the value of sun or coins you have in the game and tap on Search.
-
Go back to the game and spend or earn some sun or coins.
-
Go back to IGameGuardian 55 and search for the new value of sun or coins.
-
Repeat steps 4 and 5 until you get a few results.
-
Select the results and tap on Modify.
-
Enter the value of sun or coins you want to have and tap on OK.
-
Go back to the game and enjoy your hacked sun or coins.
-
-
Another game that you can hack with IGameGuardian 55 is GTA: San Andreas, a classic open-world action-adventure game where you can explore a fictional version of California as a gangster named CJ. With IGameGuardian 55, you can change your health, armor, money, weapons, stats, and more.
-
To hack GTA: San Andreas with IGameGuardian 55, you need to follow these steps:
-
-
Launch IGameGuardian 55 and tap on Application to select GTA: San Andreas.
-
Tap on Search and set the search mode to Auto.
-
Enter the value of health, armor, money, or any other parameter you want to hack and tap on Search.
-
Go back to the game and change the value of the parameter by getting hurt, spending money, etc.
-
Go back to IGameGuardian 55 and search for the new value of the parameter.
-
Repeat steps 4 and 5 until you get a few results.
-
Select the results and tap on Modify.
-
Enter the value of the parameter you want to have and tap on OK.
-
Go back to the game and enjoy your hacked parameter.
- 81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/tomofi/MMOCR/mmocr/models/ner/convertors/__init__.py b/spaces/tomofi/MMOCR/mmocr/models/ner/convertors/__init__.py
deleted file mode 100644
index 4d4e15c3dbd6086e63e0d38f477b8feb4a27333a..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/ner/convertors/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .ner_convertor import NerConvertor
-
-__all__ = ['NerConvertor']
diff --git a/spaces/tomofi/MMOCR/mmocr/models/textdet/postprocess/utils.py b/spaces/tomofi/MMOCR/mmocr/models/textdet/postprocess/utils.py
deleted file mode 100644
index faae589577ceae6e874714595a1a425043ebe9fc..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/textdet/postprocess/utils.py
+++ /dev/null
@@ -1,482 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import functools
-import operator
-
-import cv2
-import numpy as np
-import pyclipper
-from numpy.fft import ifft
-from numpy.linalg import norm
-from shapely.geometry import Polygon
-
-from mmocr.core.evaluation.utils import boundary_iou
-
-
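-# Return True if a text instance should be discarded because its area or confidence is below the thresholds.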
-def filter_instance(area, confidence, min_area, min_confidence):
- return bool(area < min_area or confidence < min_confidence)
-
-
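-# Mean value of `bitmap` inside polygon `_box`, computed only within the polygon's bounding rectangle for speed.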
-def box_score_fast(bitmap, _box):
- h, w = bitmap.shape[:2]
- box = _box.copy()
- xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
- xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
- ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
- ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
-
- mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
- box[:, 0] = box[:, 0] - xmin
- box[:, 1] = box[:, 1] - ymin
- cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
- return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
-
-
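-# Expand polygon `box` outwards with pyclipper; the offset distance is area * unclip_ratio / perimeter.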
-def unclip(box, unclip_ratio=1.5):
- poly = Polygon(box)
- distance = poly.area * unclip_ratio / poly.length
- offset = pyclipper.PyclipperOffset()
- offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
- expanded = np.array(offset.Execute(distance))
- return expanded
-
-
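-# Fill enclosed holes in a binary mask: flood-fill the background from the border, then OR the inverted result with the input.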
-def fill_hole(input_mask):
- h, w = input_mask.shape
- canvas = np.zeros((h + 2, w + 2), np.uint8)
- canvas[1:h + 1, 1:w + 1] = input_mask.copy()
-
- mask = np.zeros((h + 4, w + 4), np.uint8)
-
- cv2.floodFill(canvas, mask, (0, 0), 1)
-    canvas = canvas[1:h + 1, 1:w + 1].astype(bool)
-
- return ~canvas | input_mask
-
-
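-# March each point forwards and backwards along its normal direction until it leaves contour_mask,
-# then return the midpoints of the two stopping positions.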
-def centralize(points_yx,
- normal_sin,
- normal_cos,
- radius,
- contour_mask,
- step_ratio=0.03):
-
- h, w = contour_mask.shape
- top_yx = bot_yx = points_yx
-    step_flags = np.ones((len(points_yx), 1), dtype=bool)
- step = step_ratio * radius * np.hstack([normal_sin, normal_cos])
- while np.any(step_flags):
- next_yx = np.array(top_yx + step, dtype=np.int32)
- next_y, next_x = next_yx[:, 0], next_yx[:, 1]
- step_flags = (next_y >= 0) & (next_y < h) & (next_x > 0) & (
- next_x < w) & contour_mask[np.clip(next_y, 0, h - 1),
- np.clip(next_x, 0, w - 1)]
- top_yx = top_yx + step_flags.reshape((-1, 1)) * step
-    step_flags = np.ones((len(points_yx), 1), dtype=bool)
- while np.any(step_flags):
- next_yx = np.array(bot_yx - step, dtype=np.int32)
- next_y, next_x = next_yx[:, 0], next_yx[:, 1]
- step_flags = (next_y >= 0) & (next_y < h) & (next_x > 0) & (
- next_x < w) & contour_mask[np.clip(next_y, 0, h - 1),
- np.clip(next_x, 0, w - 1)]
- bot_yx = bot_yx - step_flags.reshape((-1, 1)) * step
- centers = np.array((top_yx + bot_yx) * 0.5, dtype=np.int32)
- return centers
-
-
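-# Greedily merge overlapping disks (x, y, radius, score): starting from the highest score, disks whose
-# centre distance is at most (r1 + r2) * disk_overlap_thr are averaged into one.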
-def merge_disks(disks, disk_overlap_thr):
- xy = disks[:, 0:2]
- radius = disks[:, 2]
- scores = disks[:, 3]
- order = scores.argsort()[::-1]
-
- merged_disks = []
- while order.size > 0:
- if order.size == 1:
- merged_disks.append(disks[order])
- break
- i = order[0]
- d = norm(xy[i] - xy[order[1:]], axis=1)
- ri = radius[i]
- r = radius[order[1:]]
- d_thr = (ri + r) * disk_overlap_thr
-
- merge_inds = np.where(d <= d_thr)[0] + 1
- if merge_inds.size > 0:
- merge_order = np.hstack([i, order[merge_inds]])
- merged_disks.append(np.mean(disks[merge_order], axis=0))
- else:
- merged_disks.append(disks[i])
-
- inds = np.where(d > d_thr)[0] + 1
- order = order[inds]
- merged_disks = np.vstack(merged_disks)
-
- return merged_disks
-
-
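-# Polygon NMS: each entry ends with a score; keep the highest-scoring polygon and drop others whose
-# boundary IoU with it exceeds `threshold`.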
-def poly_nms(polygons, threshold):
- assert isinstance(polygons, list)
-
- polygons = np.array(sorted(polygons, key=lambda x: x[-1]))
-
- keep_poly = []
- index = [i for i in range(polygons.shape[0])]
-
- while len(index) > 0:
- keep_poly.append(polygons[index[-1]].tolist())
- A = polygons[index[-1]][:-1]
- index = np.delete(index, -1)
-
- iou_list = np.zeros((len(index), ))
- for i in range(len(index)):
- B = polygons[index[i]][:-1]
-
- iou_list[i] = boundary_iou(A, B, 1)
- remove_index = np.where(iou_list > threshold)
- index = np.delete(index, remove_index)
-
- return keep_poly
-
-
-def fourier2poly(fourier_coeff, num_reconstr_points=50):
- """ Inverse Fourier transform
- Args:
- fourier_coeff (ndarray): Fourier coefficients shaped (n, 2k+1),
- with n and k being candidates number and Fourier degree
- respectively.
- num_reconstr_points (int): Number of reconstructed polygon points.
- Returns:
- Polygons (ndarray): The reconstructed polygons shaped (n, n')
- """
-
- a = np.zeros((len(fourier_coeff), num_reconstr_points), dtype='complex')
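-    # For Fourier degree k, each row of fourier_coeff holds 2k + 1 complex
-    # coefficients; the output has shape (n, 2 * num_reconstr_points), with the
-    # real and imaginary parts of each reconstructed point interleaved.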
- k = (len(fourier_coeff[0]) - 1) // 2
-
- a[:, 0:k + 1] = fourier_coeff[:, k:]
- a[:, -k:] = fourier_coeff[:, :k]
-
- poly_complex = ifft(a) * num_reconstr_points
- polygon = np.zeros((len(fourier_coeff), num_reconstr_points, 2))
- polygon[:, :, 0] = poly_complex.real
- polygon[:, :, 1] = poly_complex.imag
- return polygon.astype('int32').reshape((len(fourier_coeff), -1))
-
-
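-# Minimal graph node: stores an index and a set of linked nodes; add_link creates an undirected edge.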
-class Node:
-
- def __init__(self, ind):
- self.__ind = ind
- self.__links = set()
-
- @property
- def ind(self):
- return self.__ind
-
- @property
- def links(self):
- return set(self.__links)
-
- def add_link(self, link_node):
- self.__links.add(link_node)
- link_node.__links.add(self)
-
-
-def graph_propagation(edges, scores, text_comps, edge_len_thr=50.):
- """Propagate edge score information and construct graph. This code was
- partially adapted from https://github.com/GXYM/DRRG licensed under the MIT
- license.
-
- Args:
- edges (ndarray): The edge array of shape N * 2, each row is a node
- index pair that makes up an edge in graph.
- scores (ndarray): The edge score array.
- text_comps (ndarray): The text components.
- edge_len_thr (float): The edge length threshold.
-
- Returns:
- vertices (list[Node]): The Nodes in graph.
- score_dict (dict): The edge score dict.
- """
- assert edges.ndim == 2
- assert edges.shape[1] == 2
- assert edges.shape[0] == scores.shape[0]
- assert text_comps.ndim == 2
- assert isinstance(edge_len_thr, float)
-
- edges = np.sort(edges, axis=1)
- score_dict = {}
- for i, edge in enumerate(edges):
- if text_comps is not None:
- box1 = text_comps[edge[0], :8].reshape(4, 2)
- box2 = text_comps[edge[1], :8].reshape(4, 2)
- center1 = np.mean(box1, axis=0)
- center2 = np.mean(box2, axis=0)
- distance = norm(center1 - center2)
- if distance > edge_len_thr:
- scores[i] = 0
- if (edge[0], edge[1]) in score_dict:
- score_dict[edge[0], edge[1]] = 0.5 * (
- score_dict[edge[0], edge[1]] + scores[i])
- else:
- score_dict[edge[0], edge[1]] = scores[i]
-
- nodes = np.sort(np.unique(edges.flatten()))
- mapping = -1 * np.ones((np.max(nodes) + 1), dtype=np.int64)
- mapping[nodes] = np.arange(nodes.shape[0])
- order_inds = mapping[edges]
- vertices = [Node(node) for node in nodes]
- for ind in order_inds:
- vertices[ind[0]].add_link(vertices[ind[1]])
-
- return vertices, score_dict
-
-
-def connected_components(nodes, score_dict, link_thr):
- """Conventional connected components searching. This code was partially
- adapted from https://github.com/GXYM/DRRG licensed under the MIT license.
-
- Args:
- nodes (list[Node]): The list of Node objects.
- score_dict (dict): The edge score dict.
- link_thr (float): The link threshold.
-
- Returns:
- clusters (List[list[Node]]): The clustered Node objects.
- """
- assert isinstance(nodes, list)
- assert all([isinstance(node, Node) for node in nodes])
- assert isinstance(score_dict, dict)
- assert isinstance(link_thr, float)
-
- clusters = []
- nodes = set(nodes)
- while nodes:
- node = nodes.pop()
- cluster = {node}
- node_queue = [node]
- while node_queue:
- node = node_queue.pop(0)
- neighbors = set([
- neighbor for neighbor in node.links if
- score_dict[tuple(sorted([node.ind, neighbor.ind]))] >= link_thr
- ])
- neighbors.difference_update(cluster)
- nodes.difference_update(neighbors)
- cluster.update(neighbors)
- node_queue.extend(neighbors)
- clusters.append(list(cluster))
- return clusters
-
-
-def clusters2labels(clusters, num_nodes):
- """Convert clusters of Node to text component labels. This code was
- partially adapted from https://github.com/GXYM/DRRG licensed under the MIT
- license.
-
- Args:
- clusters (List[list[Node]]): The clusters of Node objects.
- num_nodes (int): The total node number of graphs in an image.
-
- Returns:
- node_labels (ndarray): The node label array.
- """
- assert isinstance(clusters, list)
- assert all([isinstance(cluster, list) for cluster in clusters])
- assert all(
- [isinstance(node, Node) for cluster in clusters for node in cluster])
- assert isinstance(num_nodes, int)
-
- node_labels = np.zeros(num_nodes)
- for cluster_ind, cluster in enumerate(clusters):
- for node in cluster:
- node_labels[node.ind] = cluster_ind
- return node_labels
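
The three helpers above form a small pipeline: `graph_propagation` turns scored edges into `Node` objects, `connected_components` groups them by thresholding the edge scores, and `clusters2labels` flattens the groups into per-component labels. A toy sketch with invented values:

```python
import numpy as np

# Three text components laid out left to right; each row stores an 8-value box plus a
# score, which is all graph_propagation reads from text_comps.
def make_comp(x):
    box = np.array([x, 0, x + 10, 0, x + 10, 10, x, 10], dtype=np.float32)
    return np.append(box, 0.9)

text_comps = np.stack([make_comp(0), make_comp(12), make_comp(24)])
edges = np.array([[0, 1], [1, 2]])
scores = np.array([0.9, 0.1], dtype=np.float32)  # strong 0-1 edge, weak 1-2 edge

vertices, score_dict = graph_propagation(edges, scores, text_comps, edge_len_thr=50.)
clusters = connected_components(vertices, score_dict, link_thr=0.5)
labels = clusters2labels(clusters, text_comps.shape[0])
print(labels)  # components 0 and 1 share a label, component 2 gets its own
```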
-
-
-def remove_single(text_comps, comp_pred_labels):
- """Remove isolated text components. This code was partially adapted from
- https://github.com/GXYM/DRRG licensed under the MIT license.
-
- Args:
- text_comps (ndarray): The text components.
- comp_pred_labels (ndarray): The clustering labels of text components.
-
- Returns:
- filtered_text_comps (ndarray): The text components with isolated ones
- removed.
- comp_pred_labels (ndarray): The clustering labels with labels of
- isolated text components removed.
- """
- assert text_comps.ndim == 2
- assert text_comps.shape[0] == comp_pred_labels.shape[0]
-
- single_flags = np.zeros_like(comp_pred_labels)
- pred_labels = np.unique(comp_pred_labels)
- for label in pred_labels:
- current_label_flag = (comp_pred_labels == label)
- if np.sum(current_label_flag) == 1:
- single_flags[np.where(current_label_flag)[0][0]] = 1
- keep_ind = [i for i in range(len(comp_pred_labels)) if not single_flags[i]]
- filtered_text_comps = text_comps[keep_ind, :]
- filtered_labels = comp_pred_labels[keep_ind]
-
- return filtered_text_comps, filtered_labels
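
A short sketch of `remove_single` on made-up data: the singleton cluster (label 2) is dropped and the two-member clusters are kept.

```python
import numpy as np

text_comps = np.random.rand(5, 9).astype(np.float32)  # hypothetical component features
labels = np.array([0, 0, 1, 1, 2])                    # label 2 has a single member
kept_comps, kept_labels = remove_single(text_comps, labels)
print(kept_comps.shape, kept_labels)                  # (4, 9) [0 0 1 1]
```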
-
-
-def norm2(point1, point2):
- return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)**0.5
-
-
-def min_connect_path(points):
- """Find the shortest path to traverse all points. This code was partially
- adapted from https://github.com/GXYM/DRRG licensed under the MIT license.
-
- Args:
- points(List[list[int]]): The point sequence [[x0, y0], [x1, y1], ...].
-
- Returns:
- shortest_path(List[list[int]]): The shortest index path.
- """
- assert isinstance(points, list)
- assert all([isinstance(point, list) for point in points])
- assert all([isinstance(coord, int) for point in points for coord in point])
-
- points_queue = points.copy()
- shortest_path = []
- current_edge = [[], []]
-
- edge_dict0 = {}
- edge_dict1 = {}
- current_edge[0] = points_queue[0]
- current_edge[1] = points_queue[0]
- points_queue.remove(points_queue[0])
- while points_queue:
- for point in points_queue:
- length0 = norm2(point, current_edge[0])
- edge_dict0[length0] = [point, current_edge[0]]
- length1 = norm2(current_edge[1], point)
- edge_dict1[length1] = [current_edge[1], point]
- key0 = min(edge_dict0.keys())
- key1 = min(edge_dict1.keys())
-
- if key0 <= key1:
- start = edge_dict0[key0][0]
- end = edge_dict0[key0][1]
- shortest_path.insert(0, [points.index(start), points.index(end)])
- points_queue.remove(start)
- current_edge[0] = start
- else:
- start = edge_dict1[key1][0]
- end = edge_dict1[key1][1]
- shortest_path.append([points.index(start), points.index(end)])
- points_queue.remove(end)
- current_edge[1] = end
-
- edge_dict0 = {}
- edge_dict1 = {}
-
- shortest_path = functools.reduce(operator.concat, shortest_path)
- shortest_path = sorted(set(shortest_path), key=shortest_path.index)
-
- return shortest_path
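
A usage sketch of `min_connect_path` with four collinear points given out of order; the returned indices walk the chain from one end to the other (the point values here are arbitrary).

```python
points = [[0, 0], [30, 0], [10, 0], [20, 0]]
order = min_connect_path(points)
print(order)  # [1, 3, 2, 0]: indices from one end of the chain to the other
```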
-
-
-def in_contour(cont, point):
- x, y = point
- is_inner = cv2.pointPolygonTest(cont, (int(x), int(y)), False) > 0.5
- return is_inner
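
`in_contour` is a thin wrapper around cv2.pointPolygonTest; points exactly on the boundary return 0 from OpenCV and therefore count as outside here. A tiny check with a hypothetical square:

```python
import numpy as np
import cv2

square = np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=np.int32)
print(in_contour(square, (5, 5)))   # True: strictly inside
print(in_contour(square, (10, 5)))  # False: on the boundary
```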
-
-
-def fix_corner(top_line, bot_line, start_box, end_box):
- """Add corner points to predicted side lines. This code was partially
- adapted from https://github.com/GXYM/DRRG licensed under the MIT license.
-
- Args:
- top_line (List[list[int]]): The predicted top sidelines of text
- instance.
- bot_line (List[list[int]]): The predicted bottom sidelines of text
- instance.
- start_box (ndarray): The first text component box.
- end_box (ndarray): The last text component box.
-
- Returns:
- top_line (List[list[int]]): The top sidelines with corner point added.
- bot_line (List[list[int]]): The bottom sidelines with corner point
- added.
- """
- assert isinstance(top_line, list)
- assert all(isinstance(point, list) for point in top_line)
- assert isinstance(bot_line, list)
- assert all(isinstance(point, list) for point in bot_line)
- assert start_box.shape == end_box.shape == (4, 2)
-
- contour = np.array(top_line + bot_line[::-1])
- start_left_mid = (start_box[0] + start_box[3]) / 2
- start_right_mid = (start_box[1] + start_box[2]) / 2
- end_left_mid = (end_box[0] + end_box[3]) / 2
- end_right_mid = (end_box[1] + end_box[2]) / 2
- if not in_contour(contour, start_left_mid):
- top_line.insert(0, start_box[0].tolist())
- bot_line.insert(0, start_box[3].tolist())
- elif not in_contour(contour, start_right_mid):
- top_line.insert(0, start_box[1].tolist())
- bot_line.insert(0, start_box[2].tolist())
- if not in_contour(contour, end_left_mid):
- top_line.append(end_box[0].tolist())
- bot_line.append(end_box[3].tolist())
- elif not in_contour(contour, end_right_mid):
- top_line.append(end_box[1].tolist())
- bot_line.append(end_box[2].tolist())
- return top_line, bot_line
-
-
-def comps2boundaries(text_comps, comp_pred_labels):
- """Construct text instance boundaries from clustered text components. This
- code was partially adapted from https://github.com/GXYM/DRRG licensed under
- the MIT license.
-
- Args:
- text_comps (ndarray): The text components.
- comp_pred_labels (ndarray): The clustering labels of text components.
-
- Returns:
- boundaries (List[list[float]]): The predicted boundaries of text
- instances.
- """
- assert text_comps.ndim == 2
- assert len(text_comps) == len(comp_pred_labels)
- boundaries = []
- if len(text_comps) < 1:
- return boundaries
- for cluster_ind in range(0, int(np.max(comp_pred_labels)) + 1):
- cluster_comp_inds = np.where(comp_pred_labels == cluster_ind)
- text_comp_boxes = text_comps[cluster_comp_inds, :8].reshape(
- (-1, 4, 2)).astype(np.int32)
- score = np.mean(text_comps[cluster_comp_inds, -1])
-
- if text_comp_boxes.shape[0] < 1:
- continue
-
- elif text_comp_boxes.shape[0] > 1:
- centers = np.mean(
- text_comp_boxes, axis=1).astype(np.int32).tolist()
- shortest_path = min_connect_path(centers)
- text_comp_boxes = text_comp_boxes[shortest_path]
- top_line = np.mean(
- text_comp_boxes[:, 0:2, :], axis=1).astype(np.int32).tolist()
- bot_line = np.mean(
- text_comp_boxes[:, 2:4, :], axis=1).astype(np.int32).tolist()
- top_line, bot_line = fix_corner(top_line, bot_line,
- text_comp_boxes[0],
- text_comp_boxes[-1])
- boundary_points = top_line + bot_line[::-1]
-
- else:
- top_line = text_comp_boxes[0, 0:2, :].astype(np.int32).tolist()
- bot_line = text_comp_boxes[0, 2:4, :].astype(np.int32).tolist()
- boundary_points = top_line + bot_line
-
- boundary = [p for coord in boundary_points for p in coord] + [score]
- boundaries.append(boundary)
-
- return boundaries
diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/fusers/abi_fuser.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/fusers/abi_fuser.py
deleted file mode 100644
index 310cf6f0421ea3575f1935489440f1b37964a194..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/fusers/abi_fuser.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-from mmcv.runner import BaseModule
-
-from mmocr.models.builder import FUSERS
-
-
-@FUSERS.register_module()
-class ABIFuser(BaseModule):
- """Mix and align visual feature and linguistic feature Implementation of
- language model of `ABINet `_.
-
- Args:
- d_model (int): Hidden size of input.
- max_seq_len (int): Maximum text sequence length :math:`T`.
- num_chars (int): Number of text characters :math:`C`.
- init_cfg (dict): Specifies the initialization method for model layers.
- """
-
- def __init__(self,
- d_model=512,
- max_seq_len=40,
- num_chars=90,
- init_cfg=None,
- **kwargs):
- super().__init__(init_cfg=init_cfg)
-
- self.max_seq_len = max_seq_len + 1 # additional stop token
- self.w_att = nn.Linear(2 * d_model, d_model)
- self.cls = nn.Linear(d_model, num_chars)
-
- def forward(self, l_feature, v_feature):
- """
- Args:
- l_feature: (N, T, E) where T is the sequence length, N is the batch
- size and E is the dimension of the model.
- v_feature: (N, T, E) shape the same as l_feature.
-
- Returns:
- A dict with key ``logits``
- The logits of shape (N, T, C) where N is batch size, T is length
- and C is the number of characters.
- """
- f = torch.cat((l_feature, v_feature), dim=2)
- f_att = torch.sigmoid(self.w_att(f))
- output = f_att * v_feature + (1 - f_att) * l_feature
-
- logits = self.cls(output) # (N, T, C)
-
- return {'logits': logits}
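
The forward pass above is a learned gate between the visual and linguistic features. The standalone sketch below reproduces that computation with plain torch modules and hypothetical sizes; it is not the registered MMOCR module, just the same arithmetic.

```python
import torch
import torch.nn as nn

N, T, E, C = 2, 26, 512, 90            # hypothetical batch, length, dim, charset size
l_feature = torch.randn(N, T, E)       # linguistic feature
v_feature = torch.randn(N, T, E)       # visual feature

w_att = nn.Linear(2 * E, E)
cls = nn.Linear(E, C)

f_att = torch.sigmoid(w_att(torch.cat((l_feature, v_feature), dim=2)))
fused = f_att * v_feature + (1 - f_att) * l_feature   # element-wise convex combination
logits = cls(fused)
print(logits.shape)                    # torch.Size([2, 26, 90])
```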
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/README.md
deleted file mode 100644
index 1cda8fe093a10bd924dc56d580a5f5bfe04db50b..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# Layout recognition module for NDLOCR
-
-This repository contains the module that extracts layout elements.
-
-This program was developed by Morpho AI Solutions, Inc. under contract to the National Diet Library of Japan.
-
-The National Diet Library publishes this program under the CC BY 4.0 license. For details, see
-[LICENSE](./LICENSE
-).
-
-# Environment setup
-
-Assuming an environment with Python 3.7 and CUDA 11.1 already installed,
-run the following commands directly under the ndl_layout directory.
-```
-pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
-wget https://lab.ndl.go.jp/dataset/ndlocr/ndl_layout/ndl_layout_config.py -P ./models
-wget https://lab.ndl.go.jp/dataset/ndlocr/ndl_layout/epoch_140_all_eql_bt.pth -P ./models
-```
-
-In addition, this module depends on [mmdetection](https://github.com/ndl-lab/mmdetection),
-a customized fork of the original repository (https://github.com/open-mmlab/mmdetection),
-so add and install that repository as follows.
-
-```bash
-git clone https://github.com/ndl-lab/mmdetection
-cd mmdetection
-python setup.py bdist_wheel
-pip install dist/*.whl
-```
-
-
-# Usage
-Note: run the script files directly under the ndl_layout directory.
-
-## tools/process.py : inference module + CLI
-
-Runs inference using the training results. The trained model is assumed to be located under `ndl_layout/models`.
-
-Use the img_paths option to pass the image list as command-line arguments, or the list_path option to read the image list from a file.
-
-The output_path option changes where the output XML file is stored (default: layout_prediction.xml).
-
-Adding the use_show option lets you check the results in a GUI.
-
-Example of specifying the image list with the img_paths option:
-```bash
-python -m tools.process --img_paths image/dir/path/*.jpg --use_show --output_path layout_prediction.xml --config ./models/ndl_layout_config.py --checkpoint ./models/epoch_140_all_eql_bt.pth
-```
-
-Example of specifying the image list with the list_path option:
-```bash
-python -m tools.process --list_path image_list_file.list --use_show --output_path layout_prediction.xml --config ./models/ndl_layout_config.py --checkpoint ./models/epoch_140_all_eql_bt.pth
-```
-
-## tools/preprocess.py : adding and converting training images
-
-Renames and shrinks the images and converts them into MS COCO format.
-
-```bash
-python -m tools.preprocess images_data_dir output_dir --use_link
-```
-
-If there is no need to lower the output resolution, specify the `--use_link` option.
-
-If you want to lower the resolution, for example for high-resolution images, use `--use_shrink` to output the images and annotations shrunk to half size.
-
-The files usable for further training with this repository (the json files containing the annotation information and the preprocessed images) are written to the directory specified by `output_dir`.
-
-
-## Training procedure
-1) Use ndl_layout/tools/preprocess.py to convert the NDLOCRXMLDataset-format images and annotation files (xml) into COCO format and save them.
-```
-cd mmdetection
-python -m tools.preprocess images_data_dir output_dir --use_link
-```
-Symbolic links to (or copies of) the images and a COCO-format annotation file (.json) are saved inside output_dir.
-
-As annotation files, data.json (annotations for all data), train.json (a random 90% of the whole), and test.json (the remaining 10%) are generated.
-
-2) Use mmdetection/tools/train_ndl.py to train the model.
-```
-cd mmdetection
-python tools/train_ndl.py configs/ndl/cascade_rcnn_r50_fpn_1x_ndl_1024_eql.py
-```
-Specify the training data, work directory, initial weights, number of epochs, etc. in the config file, or use the options of train_ndl.py; values given as options take precedence.
-
-The trained model (epoch_XX.pth or latest.pth), the config file (reflecting any train_ndl.py options used), and the training log files (.log and .log.json) are saved in the work directory.
-
-Note that the model released in this repository (see `configs/ndl/cascade_rcnn_r50_fpn_1x_ndl_1024_eql.py` for the config file) was trained with
-https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth
-as the initial weights.
diff --git a/spaces/trysem/Vector-diFusion/style.css b/spaces/trysem/Vector-diFusion/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/trysem/Vector-diFusion/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/nvSTFT.py b/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/nvSTFT.py
deleted file mode 100644
index 35635c844ea1ae6258112f0ba92e417e81a22642..0000000000000000000000000000000000000000
--- a/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/nvSTFT.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import math
-import os
-os.environ["LRU_CACHE_CAPACITY"] = "3"
-import random
-import torch
-import torch.utils.data
-import numpy as np
-import librosa
-from librosa.util import normalize
-from librosa.filters import mel as librosa_mel_fn
-from scipy.io.wavfile import read
-import soundfile as sf
-
-def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
- sampling_rate = None
- try:
- data, sampling_rate = sf.read(full_path, always_2d=True)
- except Exception as ex:
- print(f"'{full_path}' failed to load.\nException:")
- print(ex)
- if return_empty_on_exception:
- return [], sampling_rate or target_sr or 48000
- else:
- raise Exception(ex)
-
- if len(data.shape) > 1:
- data = data[:, 0]
- assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
-
- if np.issubdtype(data.dtype, np.integer): # if audio data is type int
- max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
- else: # if audio data is type fp32
- max_mag = max(np.amax(data), -np.amin(data))
- max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
-
- data = torch.FloatTensor(data.astype(np.float32))/max_mag
-
- if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
- return [], sampling_rate or target_sr or 48000
- if target_sr is not None and sampling_rate != target_sr:
- data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
- sampling_rate = target_sr
-
- return data, sampling_rate
-
-def dynamic_range_compression(x, C=1, clip_val=1e-5):
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
-
-def dynamic_range_decompression(x, C=1):
- return np.exp(x) / C
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-def dynamic_range_decompression_torch(x, C=1):
- return torch.exp(x) / C
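
For values above the clip floor (1e-5 by default), the torch compression/decompression pair above are exact inverses; a quick check:

```python
import torch

x = torch.rand(2, 80, 100) + 1e-3  # strictly above the clip floor
x_rec = dynamic_range_decompression_torch(dynamic_range_compression_torch(x))
print(torch.allclose(x, x_rec, atol=1e-6))  # True
```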
-
-class STFT():
- def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
- self.target_sr = sr
-
- self.n_mels = n_mels
- self.n_fft = n_fft
- self.win_size = win_size
- self.hop_length = hop_length
- self.fmin = fmin
- self.fmax = fmax
- self.clip_val = clip_val
- self.mel_basis = {}
- self.hann_window = {}
-
- def get_mel(self, y, center=False):
- sampling_rate = self.target_sr
- n_mels = self.n_mels
- n_fft = self.n_fft
- win_size = self.win_size
- hop_length = self.hop_length
- fmin = self.fmin
- fmax = self.fmax
- clip_val = self.clip_val
-
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- if str(fmax)+'_'+str(y.device) not in self.mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
- self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
- self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
- # print(111,spec)
- spec = torch.sqrt(spec.abs().pow(2)+(1e-9))
- # print(222,spec)
- spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec)
- # print(333,spec)
- spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
- # print(444,spec)
- return spec
-
- def __call__(self, audiopath):
- audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
- spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
- return spect
-
-stft = STFT()
\ No newline at end of file
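
A usage sketch of the STFT helper above (the constructor arguments are just its defaults, and the input is one second of silence):

```python
import torch

stft = STFT(sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256)
audio = torch.zeros(1, 22050)  # (batch, samples): one second of silence at 22050 Hz
mel = stft.get_mel(audio)
print(mel.shape)               # torch.Size([1, 80, 86]) with these settings
```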
diff --git a/spaces/ulysses115/diffsvc_test/preprocessing/base_binarizer.py b/spaces/ulysses115/diffsvc_test/preprocessing/base_binarizer.py
deleted file mode 100644
index f070584c60c091c3fa4bc1da377733698b0165b6..0000000000000000000000000000000000000000
--- a/spaces/ulysses115/diffsvc_test/preprocessing/base_binarizer.py
+++ /dev/null
@@ -1,237 +0,0 @@
-import os
-os.environ["OMP_NUM_THREADS"] = "1"
-import yaml
-from utils.multiprocess_utils import chunked_multiprocess_run
-import random
-import json
-# from resemblyzer import VoiceEncoder
-from tqdm import tqdm
-from preprocessing.data_gen_utils import get_mel2ph, get_pitch_parselmouth, build_phone_encoder,get_pitch_crepe
-from utils.hparams import set_hparams, hparams
-import numpy as np
-from utils.indexed_datasets import IndexedDatasetBuilder
-
-
-class BinarizationError(Exception):
- pass
-
-BASE_ITEM_ATTRIBUTES = ['txt', 'ph', 'wav_fn', 'tg_fn', 'spk_id']
-
-class BaseBinarizer:
- '''
- Base class for data processing.
- 1. *process* and *process_data_split*:
- process entire data, generate the train-test split (support parallel processing);
- 2. *process_item*:
- process a single piece of data;
- 3. *get_pitch*:
- infer the pitch using some algorithm;
- 4. *get_align*:
- get the alignment using 'mel2ph' format (see https://arxiv.org/abs/1905.09263).
- 5. phoneme encoder, voice encoder, etc.
-
- Subclasses should define:
- 1. *load_metadata*:
- how to read multiple datasets from files;
- 2. *train_item_names*, *valid_item_names*, *test_item_names*:
- how to split the dataset;
- 3. load_ph_set:
- the phoneme set.
- '''
- def __init__(self, item_attributes=BASE_ITEM_ATTRIBUTES):
- self.binarization_args = hparams['binarization_args']
- #self.pre_align_args = hparams['pre_align_args']
-
- self.items = {}
- # every item in self.items has some attributes
- self.item_attributes = item_attributes
-
- self.load_meta_data()
- # sanity check: the keys of each item dict must all be in the given attribute list
- assert all([attr in self.item_attributes for attr in list(self.items.values())[0].keys()])
- self.item_names = sorted(list(self.items.keys()))
-
- if self.binarization_args['shuffle']:
- random.seed(1234)
- random.shuffle(self.item_names)
-
- # set default get_pitch algorithm
- if hparams['use_crepe']:
- self.get_pitch_algorithm = get_pitch_crepe
- else:
- self.get_pitch_algorithm = get_pitch_parselmouth
-
- def load_meta_data(self):
- raise NotImplementedError
-
- @property
- def train_item_names(self):
- raise NotImplementedError
-
- @property
- def valid_item_names(self):
- raise NotImplementedError
-
- @property
- def test_item_names(self):
- raise NotImplementedError
-
- def build_spk_map(self):
- spk_map = set()
- for item_name in self.item_names:
- spk_name = self.items[item_name]['spk_id']
- spk_map.add(spk_name)
- spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))}
- assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
- return spk_map
-
- def item_name2spk_id(self, item_name):
- return self.spk_map[self.items[item_name]['spk_id']]
-
- def _phone_encoder(self):
- '''
- use hubert encoder
- '''
- raise NotImplementedError
- '''
- create 'phone_set.json' file if it doesn't exist
- '''
- ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
- ph_set = []
- if hparams['reset_phone_dict'] or not os.path.exists(ph_set_fn):
- self.load_ph_set(ph_set)
- ph_set = sorted(set(ph_set))
- json.dump(ph_set, open(ph_set_fn, 'w', encoding='utf-8'))
- print("| Build phone set: ", ph_set)
- else:
- ph_set = json.load(open(ph_set_fn, 'r', encoding='utf-8'))
- print("| Load phone set: ", ph_set)
- return build_phone_encoder(hparams['binary_data_dir'])
-
-
- def load_ph_set(self, ph_set):
- raise NotImplementedError
-
- def meta_data_iterator(self, prefix):
- if prefix == 'valid':
- item_names = self.valid_item_names
- elif prefix == 'test':
- item_names = self.test_item_names
- else:
- item_names = self.train_item_names
- for item_name in item_names:
- meta_data = self.items[item_name]
- yield item_name, meta_data
-
- def process(self):
- os.makedirs(hparams['binary_data_dir'], exist_ok=True)
- self.spk_map = self.build_spk_map()
- print("| spk_map: ", self.spk_map)
- spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
- json.dump(self.spk_map, open(spk_map_fn, 'w', encoding='utf-8'))
-
- self.phone_encoder =self._phone_encoder()
- self.process_data_split('valid')
- self.process_data_split('test')
- self.process_data_split('train')
-
- def process_data_split(self, prefix):
- data_dir = hparams['binary_data_dir']
- args = []
- builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}')
- lengths = []
- f0s = []
- total_sec = 0
- # if self.binarization_args['with_spk_embed']:
- # voice_encoder = VoiceEncoder().cuda()
-
- for item_name, meta_data in self.meta_data_iterator(prefix):
- args.append([item_name, meta_data, self.binarization_args])
- spec_min=[]
- spec_max=[]
- # code for single cpu processing
- for i in tqdm(reversed(range(len(args))), total=len(args)):
- a = args[i]
- item = self.process_item(*a)
- if item is None:
- continue
- spec_min.append(item['spec_min'])
- spec_max.append(item['spec_max'])
- # item['spk_embe'] = voice_encoder.embed_utterance(item['wav']) \
- # if self.binardization_args['with_spk_embed'] else None
- if not self.binarization_args['with_wav'] and 'wav' in item:
- if hparams['debug']:
- print("del wav")
- del item['wav']
- if(hparams['debug']):
- print(item)
- builder.add_item(item)
- lengths.append(item['len'])
- total_sec += item['sec']
- # if item.get('f0') is not None:
- # f0s.append(item['f0'])
- if prefix=='train':
- spec_max=np.max(spec_max,0)
- spec_min=np.min(spec_min,0)
- print(spec_max.shape)
- with open(hparams['config_path'], encoding='utf-8') as f:
- _hparams=yaml.safe_load(f)
- _hparams['spec_max']=spec_max.tolist()
- _hparams['spec_min']=spec_min.tolist()
- with open(hparams['config_path'], 'w', encoding='utf-8') as f:
- yaml.safe_dump(_hparams,f)
- builder.finalize()
- np.save(f'{data_dir}/{prefix}_lengths.npy', lengths)
- if len(f0s) > 0:
- f0s = np.concatenate(f0s, 0)
- f0s = f0s[f0s != 0]
- np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()])
- print(f"| {prefix} total duration: {total_sec:.3f}s")
-
- def process_item(self, item_name, meta_data, binarization_args):
- from preprocessing.process_pipeline import File2Batch
- return File2Batch.temporary_dict2processed_input(item_name, meta_data, self.phone_encoder, binarization_args)
-
- def get_align(self, meta_data, mel, phone_encoded, res):
- raise NotImplementedError
-
- def get_align_from_textgrid(self, meta_data, mel, phone_encoded, res):
- '''
- NOTE: this part of script is *isolated* from other scripts, which means
- it may not be compatible with the current version.
- '''
- return
- tg_fn, ph = meta_data['tg_fn'], meta_data['ph']
- if tg_fn is not None and os.path.exists(tg_fn):
- mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams)
- else:
- raise BinarizationError(f"Align not found")
- if mel2ph.max() - 1 >= len(phone_encoded):
- raise BinarizationError(
- f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}")
- res['mel2ph'] = mel2ph
- res['dur'] = dur
-
- def get_f0cwt(self, f0, res):
- '''
- NOTE: this part of script is *isolated* from other scripts, which means
- it may not be compatible with the current version.
- '''
- return
- from utils.cwt import get_cont_lf0, get_lf0_cwt
- uv, cont_lf0_lpf = get_cont_lf0(f0)
- logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf)
- cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org
- Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm)
- if np.any(np.isnan(Wavelet_lf0)):
- raise BinarizationError("NaN CWT")
- res['cwt_spec'] = Wavelet_lf0
- res['cwt_scales'] = scales
- res['f0_mean'] = logf0s_mean_org
- res['f0_std'] = logf0s_std_org
-
-
-if __name__ == "__main__":
- set_hparams()
- BaseBinarizer().process()
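
The class docstring above lists the hooks a subclass must provide. The sketch below is a hypothetical minimal subclass showing how those hooks fit together; the class name, the hard-coded items, and the split policy are invented for illustration and are not the project's actual binarizer.

```python
class ToyBinarizer(BaseBinarizer):
    """Hypothetical minimal subclass, for illustration only."""

    def load_meta_data(self):
        # Normally parsed from transcription files; hard-coded here.
        self.items = {
            'utt_000': {'txt': 'hello', 'ph': 'HH AH L OW',
                        'wav_fn': 'data/utt_000.wav', 'tg_fn': None, 'spk_id': 'spk0'},
            'utt_001': {'txt': 'world', 'ph': 'W ER L D',
                        'wav_fn': 'data/utt_001.wav', 'tg_fn': None, 'spk_id': 'spk0'},
        }

    @property
    def train_item_names(self):
        return self.item_names[:-1]

    @property
    def valid_item_names(self):
        return self.item_names[-1:]

    @property
    def test_item_names(self):
        return self.valid_item_names

    def load_ph_set(self, ph_set):
        for item in self.items.values():
            ph_set.extend(item['ph'].split(' '))
```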
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/City Car Driving 2.2.7 Crack The Best Way to Master the Basic Skills of Car Driving.md b/spaces/usbethFlerru/sovits-modelsV2/example/City Car Driving 2.2.7 Crack The Best Way to Master the Basic Skills of Car Driving.md
deleted file mode 100644
index cb9c9111d3320e6de31e11c89735ccd2d4a07c06..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/City Car Driving 2.2.7 Crack The Best Way to Master the Basic Skills of Car Driving.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
The car driving game named "City Car Driving" is a new car simulator, designed to help users experience car driving in а big city, the countryside and in different conditions or go just for a joy ride. Special stress in the "City Car Driving" simulator has been laid on a variety of different road situations and realistic car driving.