diff --git a/spaces/0xhimzel/Detect-AI-Plagiarism/app.py b/spaces/0xhimzel/Detect-AI-Plagiarism/app.py
deleted file mode 100644
index 67f00d7476a865cba7fc37e1d07ec1c1d4569ce1..0000000000000000000000000000000000000000
--- a/spaces/0xhimzel/Detect-AI-Plagiarism/app.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import gradio as gr
-from transformers import pipeline
-
-auth_token = os.environ.get("access_token")
-pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta",use_auth_token=auth_token)
-
-
-def predict_en(text):
-    res = pipeline_en(text)[0]
-    label = res['label']
-    score = round(res['score']*100, 2)
-    return "%d%% chance"%score, label
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("""
-    # 🤖 Detect AI Plagiarism with Jurnee
-    Paste in the text you want to check and get a holistic score for how much of the document is written by AI. We recommend that educators take these results as one of many pieces in their assessment of student work. This model is based on Hello Simple's paper [arxiv: 2301.07597](https://arxiv.org/abs/2301.07597) and Github project [Hello-SimpleAI/chatgpt-comparison-detection](https://github.com/Hello-SimpleAI/chatgpt-comparison-detection).
-    """)
-    with gr.Tab("Try it out 👇"):
-        gr.Markdown("""
-        Note: Providing more text to the `Text` box can make the prediction more accurate!
-        """)
-        t1 = gr.Textbox(lines=5, label='Paste the text you want to check',value="There are a few things that can help protect your credit card information from being misused when you give it to a restaurant or any other business:\n\nEncryption: Many businesses use encryption to protect your credit card information when it is being transmitted or stored. This means that the information is transformed into a code that is difficult for anyone to read without the right key.")
-        button1 = gr.Button("👀 See results")
-        score1 = gr.Textbox(lines=1, label='There is a')
-        label1 = gr.Textbox(lines=1, label='That this text is written entirely by a')
-
-    button1.click(predict_en, inputs=[t1], outputs=[score1, label1])
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/101-5/gpt4free/g4f/.v1/gui/query_methods.py b/spaces/101-5/gpt4free/g4f/.v1/gui/query_methods.py
deleted file mode 100644
index 2d6adacd3b394183c65ab596cc148c45de6b63c4..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gui/query_methods.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import os
-import sys
-from typing import Optional
-
-sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
-
-from gpt4free import quora, forefront, theb, you
-import random
-
-
-def query_forefront(question: str, proxy: Optional[str] = None) -> str:
-    # create an account
-    token = forefront.Account.create(logging=False, proxy=proxy)
-
-    response = ""
-    # get a response
-    try:
-        return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4', proxy=proxy).text
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-def query_quora(question: str, proxy: Optional[str] = None) -> str:
-    token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
-    return quora.Completion.create(model='gpt-4', prompt=question, token=token, proxy=proxy).text
-
-
-def query_theb(question: str, proxy: Optional[str] = None) -> str:
-    # Set cloudflare clearance cookie and get answer from GPT-4 model
-    response = ""
-    try:
-        return ''.join(theb.Completion.create(prompt=question, proxy=proxy))
-
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-def query_you(question: str, proxy: Optional[str] = None) -> str:
-    # Set cloudflare clearance cookie and get answer from GPT-4 model
-    try:
-        result = you.Completion.create(prompt=question, proxy=proxy)
-        return result.text
-
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-# Define a dictionary containing all query methods
-avail_query_methods = {
-    "Forefront": query_forefront,
-    "Poe": query_quora,
-    "Theb": query_theb,
-    "You": query_you,
-    # "Writesonic": query_writesonic,
-    # "T3nsor": query_t3nsor,
-    # "Phind": query_phind,
-    # "Ora": query_ora,
-}
-
-
-def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
-    # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
-    if selected_method != "Random" and selected_method in avail_query_methods:
-        try:
-            return avail_query_methods[selected_method](user_input, proxy=proxy)
-        except Exception as e:
-            print(f"Error with {selected_method}: {e}")
-            return "😵 Sorry, some error occurred please try again."
-
-    # Initialize variables for determining success and storing the result
-    success = False
-    result = "😵 Sorry, some error occurred please try again."
-    # Create a list of available query methods
-    query_methods_list = list(avail_query_methods.values())
-
-    # Continue trying different methods until a successful result is obtained or all methods have been tried
-    while not success and query_methods_list:
-        # Choose a random method from the list
-        chosen_query = random.choice(query_methods_list)
-        # Find the name of the chosen method
-        chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
-        try:
-            # Try to call the chosen method with the user input
-            result = chosen_query(user_input, proxy=proxy)
-            success = True
-        except Exception as e:
-            print(f"Error with {chosen_query_name}: {e}")
-            # Remove the failed method from the list of available methods
-            query_methods_list.remove(chosen_query)
-
-    return result
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DS DELMIA V5-6R2015 GA.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DS DELMIA V5-6R2015 GA.md
deleted file mode 100644
index f401e91df36c8e3e3597153ea5d4c8f9529dcc99..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DS DELMIA V5-6R2015 GA.md
+++ /dev/null
@@ -1,52 +0,0 @@
-

What is DS DELMIA V5-6R2015 GA and Why You Need It

-

DS DELMIA V5-6R2015 GA is a software solution that enables you to design, simulate, and optimize your production processes and systems. It is part of the Dassault Systemes portfolio of 3D and Product Lifecycle Management (PLM) solutions that help you create innovative products and services.

-

With DS DELMIA V5-6R2015 GA, you can:

-

DS DELMIA V5-6R2015 GA


Download Zip 🗸 https://byltly.com/2uKxxM



- -

DS DELMIA V5-6R2015 GA is compatible with Windows 7 and Windows 8.1 operating systems. It supports both 32-bit and 64-bit architectures. It is available in multiple languages, including English, French, German, Japanese, Chinese, Russian, and more.

-

DS DELMIA V5-6R2015 GA is a powerful tool that can help you improve your productivity, quality, and profitability. It can help you reduce costs, waste, and errors. It can help you enhance your collaboration, innovation, and customer satisfaction.

-

If you want to learn more about DS DELMIA V5-6R2015 GA, you can visit the official website of Dassault Systemes or download a free trial version from their online store. You can also read some of the testimonials and reviews from other users who have benefited from this software solution.

-

Conclusion

-

DS DELMIA V5-6R2015 GA is a software solution that enables you to design, simulate, and optimize your production processes and systems. It is part of the Dassault Systemes portfolio of 3D and PLM solutions that help you create innovative products and services. With DS DELMIA V5-6R2015 GA, you can improve your productivity, quality, and profitability. You can reduce costs, waste, and errors. You can enhance your collaboration, innovation, and customer satisfaction.

-

If you are interested in DS DELMIA V5-6R2015 GA, you can visit the official website of Dassault Systemes or download a free trial version from their online store. You can also read some of the testimonials and reviews from other users who have benefited from this software solution.

- -

How to Use DS DELMIA V5-6R2015 GA

-

DS DELMIA V5-6R2015 GA is easy to use and install. You can download it from the Dassault Systemes online store or request a DVD from your local reseller. You can also get a free trial version for 30 days to test its features and benefits.

-

Once you have installed DS DELMIA V5-6R2015 GA, you can launch it from your desktop or start menu. You will see a user-friendly interface that gives you access to the different modules and functions. You can also customize your workspace and preferences to suit your needs.

-

-

To use DS DELMIA V5-6R2015 GA, you need to create or open a project file that contains your process data and models. You can import data from other sources, such as CATIA, SolidWorks, or Excel. You can also create data from scratch using the built-in tools and wizards.

-

Once you have your project file ready, you can start designing, simulating, and optimizing your production processes and systems. You can use the various modules and functions of DS DELMIA V5-6R2015 GA to perform different tasks, such as:

- -

You can also use DS DELMIA V5-6R2015 GA to perform various analyses and validations, such as cycle time analysis, resource utilization analysis, collision detection, reachability analysis, feasibility analysis, quality analysis, and more. You can also generate reports and documentation for your projects.

-

Who Can Benefit from DS DELMIA V5-6R2015 GA

-

DS DELMIA V5-6R2015 GA is a software solution that can benefit anyone who is involved in the design, simulation, and optimization of production processes and systems. It can be used by different industries, such as aerospace, automotive, consumer goods, energy, industrial equipment, life sciences, marine and offshore, transportation and mobility, and more.

-

Some of the roles that can benefit from DS DELMIA V5-6R2015 GA are:

- -

DS DELMIA V5-6R2015 GA can help these roles achieve their goals faster, easier, and better. It can help them reduce costs, waste, and errors. It can help them enhance collaboration, innovation, and customer satisfaction.

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes) - A masterpiece of Italian cinema based on a true story.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes) - A masterpiece of Italian cinema based on a true story.md deleted file mode 100644 index 3347d4620e2a05b36de5e79448e3dcca7b905c4d..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes) - A masterpiece of Italian cinema based on a true story.md +++ /dev/null @@ -1,100 +0,0 @@ - -

HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes)

-

If you are looking for a way to watch high-quality movies online or offline, you might want to try HD online player. This is a powerful and versatile video player that lets you stream or download any video from any website in HD quality. One of the movies that you can enjoy with HD online player is Life Is Beautiful!, a classic comedy-drama film that will make you laugh and cry. In this article, we will show you how to download Life Is Beautiful! in 1080p for free using HD online player.

-

Introduction

-

HD online player is a software that allows you to watch videos on your device without any hassle. You can use it to stream videos from various websites, such as YouTube, Netflix, Hulu, Amazon Prime Video, and more. You can also use it to download videos from these websites and save them on your device for offline viewing. You can choose the video quality, format, and language that suit your preferences.

-

HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes)


Download ••• https://byltly.com/2uKy0T



-

One of the movies that you can watch with HD online player is Life Is Beautiful!, a 1997 Italian film directed by and starring Roberto Benigni. The film tells the story of a Jewish father who uses his imagination and humor to protect his son from the horrors of the Holocaust. The film won three Academy Awards, including Best Foreign Language Film, Best Actor, and Best Original Score. It is widely regarded as one of the best films of all time.

-

If you want to watch Life Is Beautiful! in 1080p for free, you can do so with HD online player. All you need is a device with an internet connection and some storage space. Here are the steps that you need to follow.

-

Features of HD online player

-

High-quality video streaming

-

One of the main features of HD online player is that it delivers high-quality video streaming without any buffering or lagging. You can watch videos in up to 4K resolution with crystal-clear sound and smooth playback. You can also adjust the brightness, contrast, saturation, and volume of the video according to your liking.

-

Another feature of HD online player is that it supports various video formats and subtitles. You can play videos in MP4, MKV, AVI, WMV, FLV, MOV, and more. You can also add subtitles in SRT, ASS, SSA, SUB, IDX, and more. You can change the font size, color, position, and sync of the subtitles as well.

-

Easy and fast downloading

-

Another feature of HD online player is that it allows you to download any video from any website with one click. You can use the built-in browser or paste the URL of the video that you want to download. You can choose the video quality, format, and language that you want. You can also select multiple videos at once and download them in batches.

-

Watch Life Is Beautiful online free HD
-Life Is Beautiful full movie 1080p download
-Life Is Beautiful comedy drama film streaming
-Roberto Benigni Life Is Beautiful movie
-Life Is Beautiful concentration camp game
-Life Is Beautiful 1997 Italian movie
-Life Is Beautiful Miramax production
-Life Is Beautiful Oscar-winning film
-Life Is Beautiful movie subtitles
-Life Is Beautiful movie review
-Life Is Beautiful movie trailer
-Life Is Beautiful movie cast
-Life Is Beautiful movie quotes
-Life Is Beautiful movie soundtrack
-Life Is Beautiful movie Netflix
-Life Is Beautiful movie Amazon Prime
-Life Is Beautiful movie Hulu
-Life Is Beautiful movie Disney Plus
-Life Is Beautiful movie HBO Max
-Life Is Beautiful movie YouTube
-How to watch Life Is Beautiful online
-Where to watch Life Is Beautiful online
-Best sites to watch Life Is Beautiful online
-Watch Life Is Beautiful online free no sign up
-Watch Life Is Beautiful online free 123movies
-Watch Life Is Beautiful online free Putlocker
-Watch Life Is Beautiful online free Fmovies
-Watch Life Is Beautiful online free Gomovies
-Watch Life Is Beautiful online free Solarmovie
-Watch Life Is Beautiful online free Vumoo
-Download Life Is Beautiful movie free HD
-Download Life Is Beautiful movie torrent HD
-Download Life Is Beautiful movie magnet link HD
-Download Life Is Beautiful movie YTS HD
-Download Life Is Beautiful movie RARBG HD
-Download Life Is Beautiful movie 1337x HD
-Download Life Is Beautiful movie EZTV HD
-Download Life Is Beautiful movie Limetorrents HD
-Download Life Is Beautiful movie Kickass Torrents HD
-Download Life Is Beautiful movie The Pirate Bay HD
-How to download Life Is Beautiful movie free HD
-Where to download Life Is Beautiful movie free HD
-Best sites to download Life Is Beautiful movie free HD
-Download Life Is Beautiful movie free no sign up
-Download Life Is Beautiful movie free 123movies
-Download Life Is Beautiful movie free Putlocker
-Download Life Is Beautiful movie free Fmovies
-Download Life Is Beautiful movie free Gomovies
-Download Life Is Beautiful movie free Solarmovie
-Download Life Is Beautiful movie free Vumoo

-

Another feature of HD online player is that it supports multiple downloads and resume function. You can pause and resume your downloads at any time. You can also check the progress and status of your downloads in the download manager. You can also delete or rename your downloaded files as you wish.

-

User-friendly interface and customization

-

Another feature of HD online player is that it has a simple and intuitive design that makes it easy to use. You can access all the functions and settings from the main menu or the toolbar. You can also swipe left or right on the screen to switch between different modes or tabs.

-

Another feature of HD online player is that it lets you adjust the settings, preferences, and appearance of the player. You can change the theme color, background image, icon size, gesture control, playback speed, screen orientation, and more. You can also enable or disable notifications, auto-play, auto-update, hardware acceleration, etc.

-

How to watch Life Is Beautiful! in HD online player

-

Step 1: Download and install HD online player on your device

-

The first step to watch Life Is Beautiful! in HD online player is to download and install the software on your device. You can find the official website and download link of HD online player here: https://hd-online-player.com/. The software is compatible with Windows, Mac OS X, Android, and iOS devices.

-

To install HD online player on your device:
- For Windows users:
  - Download the .exe file from the website.
  - Run the file and follow the instructions on the screen.
  - Agree to the terms and conditions and click Next.
  - Choose a destination folder for installation and click Next.
  - Wait for the installation process to complete and click Finish.
- For Mac users:
  - Download the .dmg file from the website.
  - Open the file and drag the icon into your Applications folder.
  - Double-click on the icon to launch the software.
- For Android users:
  - Download the .apk file from the website.
  - Enable Unknown Sources in your device settings.
  - Tap on the file and install it on your device.
  - Open the app from your app drawer.
- For iOS users:
  - Download the .ipa file from the website.
  - Connect your device to your computer via USB cable.
  - Open iTunes on your computer and select your device.
  - Drag and drop the file into your device's Apps section.
  - Sync your device with iTunes.

-

Step 2: Search for Life Is Beautiful! on HD online player

-

The second step to watch Life Is Beautiful! in HD online player is to search for the movie on the software. You can use two methods to do this:
- Method 1: Use the built-in search engine.
  - On the main screen of HD online player, type "Life Is Beautiful!" in the search box and tap on the magnifying glass icon.
  - You will see a list of results that match your query. Tap on the one that says "Life Is Beautiful! (1997)" and has the poster of the movie.
  - You will see the details of the movie, such as the title, genre, rating, synopsis, cast, director, etc. You will also see two buttons: Play and Download.
- Method 2: Browse the categories.
  - On the main screen of HD online player, swipe left or right to switch between different categories, such as Popular, Trending, Latest, Comedy, Drama, etc.
  - Tap on the category that you think might have Life Is Beautiful! in it. For example, you can tap on Comedy or Drama.
  - You will see a grid of movies that belong to that category. Scroll down or up to find Life Is Beautiful! among them. Tap on it when you see it.
  - You will see the same details and buttons as in Method 1.

Step 3: Stream or download Life Is Beautiful! in 1080p

-

The third and final step to watch Life Is Beautiful! in HD online player is to stream or download the movie in 1080p. You can choose either option depending on your preference and internet connection.

-

To stream Life Is Beautiful! online:
- Tap on the Play button on the movie details screen.
- You will see a pop-up window that asks you to choose the video quality and language. Tap on 1080p and English (or any other language that you want).
- Wait for a few seconds for the video to load and start playing. You can use the controls on the bottom of the screen to pause, resume, rewind, fast-forward, adjust volume, etc.
- Enjoy watching Life Is Beautiful! online with HD online player.

-

To download Life Is Beautiful! offline:
- Tap on the Download button on the movie details screen.
- You will see a pop-up window that asks you to choose the video quality and language. Tap on 1080p and English (or any other language that you want).
- Wait for a few seconds for the download to start. You can see the progress and status of your download in the download manager. You can also pause and resume your download at any time.
- Once the download is complete, you can find your downloaded file in your device's storage or in HD online player's library. You can play it anytime without an internet connection.
- Enjoy watching Life Is Beautiful! offline with HD online player.

-

Conclusion

-

In conclusion, HD online player is a great piece of software that lets you watch high-quality movies online or offline. You can use it to watch Life Is Beautiful!, a classic comedy-drama film that will make you laugh and cry. All you need to do is download and install HD online player on your device, search for Life Is Beautiful!, and stream or download it in 1080p for free.

-

If you are interested in HD online player and Life Is Beautiful!, you can download HD online player from here: https://hd-online-player.com/. You can also check out other movies that are available on HD online player. You will surely find something that suits your taste and mood.

-

Thank you for reading this article. We hope you found it helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.

-

FAQs

- - - - - - - - - - - - - - - -
-Save the file on your computer and run it as an administrator.

How to install ok9.dll origin 9.0.0.45 patch?

-

To install ok9.dll origin 9.0.0.45 patch,

you need to follow these steps:

-
    -
  1. Locate the ok9.dll file that you downloaded on your computer.
  2. -
  3. Copy the file and paste it in the folder where Origin is installed on your computer.
  4. -
  5. The default location of Origin is C:\Program Files (x86)\Origin or C:\Program Files\Origin,
  6. -
  7. depending on your system.
  8. -
  9. If you are asked to replace the existing file,
  10. -
  11. click on yes.
  12. -
  13. Restart your computer
  14. -
  15. and launch Origin.
  16. -
-

You have successfully installed ok9.dll origin 9.0.0.45 patch on your computer.

-

How to fix ok9.dll errors?

-

If you still encounter ok9.dll errors after installing the patch,

you may need to do some troubleshooting steps to fix them.

Some of the common ok9.dll errors are:

- -

To fix these errors,

you can try the following solutions:

-
    -
  1. Re-install the application that requires ok9.dll.
  2. -
  3. This may restore the missing or corrupted file on your system.
  4. -
  5. Update the application to the latest version.
  6. -
  7. This may fix any compatibility issues or bugs that cause the errors.
  8. -
  9. Install all Windows updates and any available driver updates.
  10. -
  11. This may improve the stability and performance of your system and prevent any conflicts or errors.
  12. -
  13. Clean your PC registry and optimize your computer.
  14. -
  15. This may remove any invalid or obsolete entries that may cause the errors or slow down your system.
  16. -
-

Conclusion

-

In this article,

we have explained what ok9.dll is,

why you need it,

and how to get the latest patch for it.

We have also provided some tips on how to fix any ok9.dll errors that you may encounter on your computer.

-

We hope that this article has been helpful and informative for you.

If you have any questions or comments,

please feel free to leave them below.

We would love to hear from you!

-

Thank you for reading and happy gaming!

-

Frequently Asked Questions

-

Here are some of the frequently asked questions about ok9.dll and their answers:

-
    -
  1. What is the difference between OriginLab's Origin and Electronic Arts' Origin?
  2. -
  3. The main difference between OriginLab's Origin and Electronic Arts' Origin is their function and purpose. OriginLab's Origin is a software for data analysis and graphing, while Electronic Arts' Origin is a software for gaming and digital distribution. However, they both use the same DLL file, ok9.dll, for some of their core functionality and features.
  4. -
  5. Is ok9.dll a virus or malware?
  6. -
  7. No, ok9.dll is not a virus or malware. It is a legitimate file that belongs to Origin, a software developed by OriginLab Corporation and Electronic Arts. However, some malicious programs may disguise themselves as ok9.dll or use it for their own purposes. Therefore, you should always scan your system with a reliable antivirus program and keep it updated regularly.
  8. -
  9. Can I delete ok9.dll from my computer?
  10. -
  11. We do not recommend deleting ok9.dll from your computer, as it may cause problems with Origin or other programs that use it. If you want to uninstall Origin from your computer, you should use the official uninstaller or a reputable uninstaller tool that can remove all the associated files and registry entries. If you want to free up some disk space, you can use a disk cleaner tool that can safely delete any unnecessary or temporary files on your system.
  12. -
-

-
-
\ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download and Play Ni no Kuni The Jet-Black Mage - The English Patched Version of the NDS Classic.md b/spaces/raedeXanto/academic-chatgpt-beta/Download and Play Ni no Kuni The Jet-Black Mage - The English Patched Version of the NDS Classic.md deleted file mode 100644 index 8dba1b855f7712c6311ac8acd75418557106d0f4..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download and Play Ni no Kuni The Jet-Black Mage - The English Patched Version of the NDS Classic.md +++ /dev/null @@ -1,104 +0,0 @@ -
-

How to Download Ni no Kuni NDS ROM English Patched Version

-

Ni no Kuni is a popular RPG series developed by Level-5 in collaboration with Studio Ghibli, the famous animation studio behind movies like Spirited Away and My Neighbor Totoro. The series features beautiful graphics, charming characters, and engaging stories set in a fantasy world where magic exists.

-

The first game in the series, Ni no Kuni: Shikkoku no Madoushi (Ni no Kuni: The Jet-Black Mage), was released for the Nintendo DS in Japan in 2010. The game came with a massive 352-page book called the Magic Master, which was required to play the game. The book contained spells, maps, secrets, and lore that players had to use throughout their adventure.

-

ni no kuni nds rom english download


Download ☆☆☆☆☆ https://tinourl.com/2uL1sy



-

Unfortunately, due to the difficulty of localizing such a huge book, the game was never released outside Japan. Fans of Ni no Kuni had to settle for playing its enhanced version for the PlayStation 3, Ni no Kuni: Wrath of the White Witch, which was released worldwide in 2013. However, many fans still wanted to experience the original DS version of Ni no Kuni.

-

That's where the fan translation project comes in. A group of dedicated fans decided to take on the challenge of translating Ni no Kuni: Shikkoku no Madoushi into English (and Spanish) for everyone to enjoy. They spent years working on translating not only the game itself, but also the Magic Master book, which they made available as a PDF file. They finally released their English patch v1.0 on December 9th, 2018, exactly eight years after the game's original release date.

-

In this article, we will show you how to download Ni no Kuni NDS ROM English patched version and play it on your Nintendo DS or your DS emulator. We will also tell you how to unlock DLCs and gazette daily news that were originally available only through online connection. Follow these steps and you will be able to enjoy this amazing game in English.

-

Requirements

-

Before you start downloading Ni no Kuni NDS ROM English patched version, you will need a few things:

- -

How to Apply the Patch

-

Once you have all the required files, you can proceed to apply the patch to your ROM. Here are the steps:

-
    -
  1. Download the patch and the PDF from one of the links provided above.
  2. -
  3. Extract the zip file using a program such as WinRAR or 7-Zip.
  4. -
  5. Open the readme file and read it carefully. It contains important information about how to use the patch and how to play the game.
  6. -
  7. Use a patching tool such as xdelta or Lunar IPS to apply the patch to your clean Japanese ROM. Make sure you select the correct patch file for your ROM (either xdelta or ips) and that you have enough space on your device.
  8. -
  9. Copy the patched ROM to your Nintendo DS or your emulator. You can rename it if you want.
  10. -
-

How to Play the Game

-

Now that you have patched your ROM, you are ready to play Ni no Kuni: Shikkoku no Madoushi in English. Here are some tips:

- -

How to Unlock DLCs and Gazette Daily News

-

Ni no Kuni: Shikkoku no Madoushi had some extra content that was available only through online connection via Wi-Fi or Nintendo Wi-Fi Connection service. These included DLCs (downloadable content) such as new quests, items, monsters, and characters; and gazette daily news that provided updates on events happening in different regions of Ni no Kuni.

-

However, since these services are no longer available (and were never available outside Japan), you will need other methods to unlock them. There are three methods you can use:

-

ni no kuni ds english patch download
-ni no kuni nds english translation rom
-ni no kuni ds rom english version download
-ni no kuni nds english patched rom download
-ni no kuni ds english subbed rom download
-ni no kuni nds rom english free download
-ni no kuni ds rom english patched download
-ni no kuni nds english fan translation rom
-ni no kuni ds rom english patch 2023 download
-ni no kuni nds rom english full download
-ni no kuni ds english dub rom download
-ni no kuni nds english patch rom download link
-ni no kuni ds rom english patch mediafire download
-ni no kuni nds rom english patch mega download
-ni no kuni ds rom english patch google drive download
-ni no kuni nds english translation project rom
-ni no kuni ds rom english translation download
-ni no kuni nds english subbed rom download link
-ni no kuni ds rom english subbed mediafire download
-ni no kuni nds rom english subbed mega download
-ni no kuni ds rom english subbed google drive download
-ni no kuni nds english patched rom free download link
-ni no kuni ds rom english patched mediafire download link
-ni no kuni nds rom english patched mega download link
-ni no kuni ds rom english patched google drive download link
-ni no kuni nds english fan translation project rom
-ni no kuni ds rom english fan translation download
-ni no kuni nds english dub project rom
-ni no kuni ds rom english dub download
-ni no kuni nds english dub mediafire download
-ni no kuni ds rom english dub mega download
-ni no kuni nds english dub google drive download
-ni no kuni nds rom english patch 2023 free download link
-ni no kuni ds rom english patch 2023 mediafire download link
-ni no kuni nds rom english patch 2023 mega download link
-ni no kuni ds rom english patch 2023 google drive download link
-ni no kuni nds rom english full free download link
-ni no kuni ds rom english full mediafire download link
-ni no kuni nds rom english full mega download link
-ni no kuni ds rom english full google drive download link

-
Frequently asked questions about HD online player:
Q: Is HD online player safe and legal?
A: Yes, HD online player is safe and legal. It does not contain any viruses or malware that can harm your device. It also does not host any pirated or illegal content on its servers. It only provides links to videos that are already available on other websites.
Q: How much does HD online player cost?
A: HD online player is completely free to use. You do not need to pay any subscription fees or hidden charges to use it. However, you may see some ads on HD online player that help support its development and maintenance.
Q: Can I watch other movies besides Life Is Beautiful! with HD online player?
A: Yes, you can. HD online player has a huge collection of movies from various genres and countries. You can find movies from Hollywood, Bollywood, Kollywood, Tollywood, etc. You can also find movies in different languages, such as English, Hindi, Tamil, Telugu, Malayalam, Kannada, Bengali, Marathi, Punjabi, Urdu, etc.

Q: How can I contact the support team of HD online player?
A: If you have any issues or queries regarding HD online player, you can contact the support team by sending an email to hd-online-player@gmail.com. You can also visit the official website of HD online player and fill out the contact form. The support team will respond to you as soon as possible.

Q: How can I share my feedback or suggestions for HD online player?
A: You can share them by leaving a comment on the official website of HD online player or on its social media pages. You can also rate and review HD online player on the app store or the play store. Your feedback and suggestions are valuable and appreciated.

Q: How can I update HD online player to the latest version?
A: Follow these steps for your platform:
- For Windows users: Open HD online player on your device and click on the menu icon on the top right corner. Click on Check for Updates and wait for a few seconds. If there is a new version available, click on Download and Install and follow the instructions on the screen. If there is no new version available, click on OK and enjoy using HD online player.
- For Mac users: Open HD online player on your device and click on the menu icon on the top left corner. Click on Check for Updates and wait for a few seconds. If there is a new version available, click on Download and Install and follow the instructions on the screen. If there is no new version available, click on OK and enjoy using HD online player.
- For Android users: Open HD online player on your device and tap on the menu icon on the top left corner. Tap on Settings, then tap on About. Tap on Check for Updates and wait for a few seconds. If there is a new version available, tap on Download and Install and follow the instructions on the screen. If there is no new version available, tap on OK and enjoy using HD online player.
- For iOS users: Open HD online player on your device and tap on the menu icon on the bottom right corner. Tap on Settings, then tap on About. Tap on Check for Updates and wait for a few seconds. If there is a new version available, tap on Download and Install and follow the instructions on the screen.
  If there is no new version available, tap on OK and enjoy using HD online player.

Q: What are some alternatives to HD online player?
A: If you are looking for alternatives to HD online player, you can try these programs:
- VLC Media Player: a popular and versatile media player that can play almost any video or audio format. You can also use it to stream or download videos from various websites. It is free and open-source.
- MX Player: a powerful and user-friendly video player that can play high-quality videos with advanced features. You can also use it to stream or download videos from various websites. It has a free version with ads and a paid version without ads.
- KMPlayer: a lightweight and fast video player that can play various video formats with high-quality output. You can also use it to stream or download videos from various websites. It has a free version with ads and a paid version without ads.

-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Among Us APK from Uptodown and Survive the Space Mission.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Among Us APK from Uptodown and Survive the Space Mission.md deleted file mode 100644 index 9639a86f2e88856b7db29a1865e1c2267477db2d..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Among Us APK from Uptodown and Survive the Space Mission.md +++ /dev/null @@ -1,135 +0,0 @@ - -

How to Download Among Us APK from Uptodown

-

Among Us is one of the most popular online multiplayer games of 2020 and 2021. It is a fun and thrilling game where you have to work together with your crewmates to complete tasks and find the impostors before they kill you. But what if you want to play the game on your Android device and you can't find it on the Google Play Store? Don't worry, there is a way to download and install the game using an APK file from Uptodown, a trusted and safe platform for downloading apps and games. In this article, we will show you how to do that in a few simple steps.

-

What is Among Us?

-

Among Us is a game developed and published by Innersloth, an American game studio. It was released in 2018 for Android, iOS, and Windows devices, but it gained a massive surge of popularity in 2020 thanks to many Twitch streamers and YouTubers playing it. The game has also received favorable reviews from critics and players for its fun and entertaining gameplay.

-

download among us apk uptodown


Download File · https://urlin.us/2uSWaI



-

Features of Among Us

-

Among Us has many features that make it an exciting and addictive game. Some of them are:

-
    -
  • Customization: You can pick your color, hat, visor, skin, and pet to personalize your character.
  • -
  • Lots of game options: You can add more impostors, more tasks, different roles, and so much more to customize your game experience.
  • -
  • Different modes to choose from: You can play in classic mode or hide n seek mode, depending on your preference.
  • -
  • Four different maps to play in: You can choose between The Skeld, MIRA HQ, Polus, or The Airship, each with its own layout and challenges.
  • -
  • Quickly find a game online from the host list: You can join a public game or create a private game with your friends using a code.
  • -
  • In-game text chat: You can communicate with other players during meetings or as ghosts.
  • -
  • Rich Discord integration: You can use voice chat with your friends using Discord while playing the game.
  • -
  • Cross-platform play: You can play with other players on different devices, such as PC, console, Android, and iOS.
  • -
-

How to play Among Us

-

The gameplay of Among Us is simple but engaging. Each round, you are assigned one of two roles: crewmate or impostor. The number of impostors can vary from one to three depending on the game settings. The crewmates have to work together to complete tasks around the map while avoiding being killed by the impostors. The impostors have to kill the crewmates or sabotage critical systems without being caught. The game ends when either all crewmates are dead, all tasks are completed, or all impostors are ejected.

-

The game has two phases: free roam and meeting. During free roam, you can move around the map and interact with objects. If you are a crewmate, you can do tasks that are assigned to you. If you are an impostor, you can fake tasks, vent to move quickly, or kill crewmates when no one is watching. You can also use sabotages to create chaos and distract the crewmates. Some sabotages require immediate attention, such as reactor meltdown or oxygen depletion. If they are not fixed in time, the impostors win.

-

If a dead body is found or an emergency button is pressed, a meeting is called. During a meeting, all players can discuss and vote on who they think is the impostor. You can use the text chat or voice chat to communicate with other players. You can also use evidence, such as visual tasks, admin map, or vitals, to support your claims or accusations. The player with the most votes is ejected from the game. If there is a tie, no one is ejected. The game continues until the next meeting or the end condition is met.

-

What is Uptodown?

-

Uptodown is a website and app store that allows you to download and install apps and games for various platforms, such as Android, Windows, Mac, Linux, iOS, and more. It was founded in 2002 and has over 4 billion downloads and 130 million monthly users worldwide. Uptodown is a safe and reliable source for downloading apps and games that are not available on the official stores or are region-locked.

-

Benefits of using Uptodown

-

Some of the benefits of using Uptodown are:

-
    -
  • No registration required: You can download and install apps and games without creating an account or logging in.
  • -
  • No geo-restrictions: You can access apps and games that are not available in your country or region.
  • -
  • Multiple languages supported: You can browse the website and app store in over 15 languages, including English, Spanish, French, German, Arabic, Chinese, and more.
  • -
  • Virus-free and verified: All the apps and games are scanned and checked by Uptodown's team of experts to ensure they are free of malware and viruses.
  • -
  • Version history: You can download and install previous versions of apps and games if you prefer them or if the latest version is not compatible with your device.
  • -
  • Automatic updates: You can enable automatic updates for your apps and games to keep them up to date.
  • -
-

How to use Uptodown

-

To use Uptodown, you need to follow these steps:

-
    -
  1. Visit the Uptodown website or download the Uptodown app on your device.
  2. -
  3. Search for the app or game you want to download using the search bar or browse by categories.
  4. -
  5. Select the app or game you want to download and click on the download button.
  6. -
  7. Wait for the download to finish and open the file to install it on your device.
  8. -
  9. Enjoy your app or game!
  10. -
-

How to download and install Among Us APK from Uptodown

-

Now that you know what Among Us and Uptodown are, let's see how you can download and install Among Us APK from Uptodown on your Android device. It's very easy and only takes a few minutes. Here are the steps you need to follow:

-

How to download among us apk from uptodown
-Download among us apk uptodown latest version
-Download among us apk uptodown for android
-Download among us apk uptodown mod menu
-Download among us apk uptodown hack
-Download among us apk uptodown free
-Download among us apk uptodown online
-Download among us apk uptodown pc
-Download among us apk uptodown ios
-Download among us apk uptodown 2023
-Download among us apk uptodown update
-Download among us apk uptodown unlocked
-Download among us apk uptodown no ads
-Download among us apk uptodown offline
-Download among us apk uptodown safe
-Download among us apk uptodown pro
-Download among us apk uptodown premium
-Download among us apk uptodown cracked
-Download among us apk uptodown full
-Download among us apk uptodown beta
-Download among us apk uptodown new
-Download among us apk uptodown old
-Download among us apk uptodown original
-Download among us apk uptodown review
-Download among us apk uptodown tutorial
-Download among us apk uptodown guide
-Download among us apk uptodown tips
-Download among us apk uptodown tricks
-Download among us apk uptodown cheats
-Download among us apk uptodown best
-Download among us apk uptodown alternative
-Download among us apk uptodown mirror
-Download among us apk uptodown link
-Download among us apk uptodown file
-Download among us apk uptodown site
-Download among us apk uptodown app
-Download among us apk uptodown game
-Download among us apk uptodown fun
-Download among us apk uptodown action
-Download among us apk uptodown intrigue
-Download among us apk uptodown crewmate
-Download among us apk uptodown imposter
-Download among us apk uptodown spaceship
-Download among us apk uptodown units
-Download among us apk uptodown tasks
-Download among us apk uptodown meetings
-Download among us apk uptodown votes
-Download among us apk uptodown chat
-Download among us apk uptodown skins

-

Step 1: Enable unknown sources on your Android device

-

Before you can install an APK file from Uptodown, you need to enable unknown sources on your Android device. This will allow you to install apps and games from sources other than the Google Play Store. To do this, follow these steps:

-
    -
  1. Go to your device's settings and tap on security or privacy.
  2. -
  3. Find the option that says unknown sources or install unknown apps and toggle it on.
  4. -
  5. A warning message will pop up. Tap on OK to confirm.
  6. -
-

Step 2: Download the APK file from Uptodown

-

The next step is to download the APK file of Among Us from Uptodown. To do this, follow these steps:

-
    -
  1. Open your browser and go to https://among-us.en.uptodown.com/android.
  2. -
  3. Tap on the green download button at the top of the page.
  4. -
  5. A new page will open with a QR code. Scan it with your device's camera or tap on the link below it to start the download.
  6. -
  7. The APK file will be downloaded to your device's storage. You can check its progress in your notification bar or in your downloads folder.
  8. -
-

Step 3: Install the APK file on your device

-

The final step is to install the APK file of Among Us on your device. To do this, follow these steps:

-
    -
  1. Locate the APK file in your downloads folder or notification bar and tap on it.
  2. -
  3. A prompt will appear asking you if you want to install this application. Tap on install.
  4. -
  5. The installation process will begin. Wait for it to finish. It may take a few seconds or minutes depending on your device and internet speed.
  6. -
  7. Once the installation is complete, tap on open to launch the game or tap on done to exit the installer.
  8. -
-

Step 4: Launch and enjoy the game

-

Congratulations! You have successfully downloaded and installed Among Us APK from Uptodown on your Android device. Now you can launch the game and enjoy playing it with your friends or strangers online. You can also customize your settings, join or create a game, and chat with other players. Have fun and be careful of the impostors!

-

Conclusion

-

In this article, we have shown you how to download and install Among Us APK from Uptodown on your Android device. We hope you found this guide helpful and easy to follow. Uptodown is a great platform for downloading apps and games that are not available on the Google Play Store or are region-locked. Among Us is a fun and thrilling game that you can play with your friends or strangers online. It is a game of deception, teamwork, and betrayal that will keep you hooked for hours. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

-

FAQs

-

Here are some frequently asked questions about downloading and installing Among Us APK from Uptodown:

-

Is it safe to download and install Among Us APK from Uptodown?

-

Yes, it is safe to download and install Among Us APK from Uptodown. Uptodown is a trusted and reliable source for downloading apps and games that are free of malware and viruses. However, you should always be careful when downloading and installing any APK file from unknown sources, as they may contain harmful or malicious code. Always scan the file with an antivirus app before opening it.

-

Is it legal to download and install Among Us APK from Uptodown?

-

Yes, it is legal to download and install Among Us APK from Uptodown. Uptodown does not host any pirated or cracked apps or games on its platform. All the apps and games are original and belong to their respective developers and publishers. However, you should always respect the intellectual property rights of the creators and follow their terms of service.

-

Will I get banned for playing Among Us with an APK file from Uptodown?

-

No, you will not get banned for playing Among Us with an APK file from Uptodown. The game does not have any anti-cheat system or mechanism that detects or prevents players from using APK files from other sources. However, you should always play fair and follow the rules of the game. Do not use any cheats, hacks, mods, or exploits that may give you an unfair advantage or ruin the game experience for others.

-

Can I play Among Us with players who have downloaded the game from the Google Play Store?

-

Yes, you can play Among Us with players who have downloaded the game from the Google Play Store. The game supports cross-platform play between different devices and platforms, such as PC, console, Android, and iOS. As long as you have the same version of the game as the other players, you can join or create a game with them using a code.

-

Can I update Among Us APK from Uptodown?

-

Yes, you can update Among Us APK from Uptodown. You can either enable automatic updates for your apps and games in the Uptodown app settings or manually check for updates on the website or app store. When a new version of the game is available, you can download and install it over the existing one without losing your data or progress.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Cover Fire Offline Shooting Game for Free and Enjoy the Best Action Shooter on Mobile.md b/spaces/1phancelerku/anime-remove-background/Download Cover Fire Offline Shooting Game for Free and Enjoy the Best Action Shooter on Mobile.md deleted file mode 100644 index b8deda8708fe43836e4dacd9fd2db9cf686af3cc..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Cover Fire Offline Shooting Game for Free and Enjoy the Best Action Shooter on Mobile.md +++ /dev/null @@ -1,99 +0,0 @@ - -

Download Cover Fire: Offline Shooting Game - A Review

-

If you are looking for a fun and addictive shooting game that you can play on your mobile device, you might want to check out Cover Fire: Offline Shooting Game. This game is one of the best shooting games you’ll ever play on a mobile, now for free and offline. In this article, we will review Cover Fire and tell you what it is, what features it has, how to download it, and what are the pros and cons of playing it.

-

What is Cover Fire?

-

Cover Fire is a shooting game developed by Viva Games Studios and published by 1MB. It is available for Android and Windows devices. The game has a challenging story mode, where you join the resistance and command a squad of veterans through sieged cities, deserts, and fields taken by guerrillas. You have to defeat all kinds of enemies in the war with the biggest graphic, greatest arsenal, and the best offline gameplay.

-

download cover fire offline shooting game


Downloadhttps://jinyurl.com/2uNNID



-

Cover Fire also has an online mode, where you can compete and fight against other players or friends around the world with your best times ranked in online leaderboards. You can also participate in cool war events, such as Zombies Survival or Black Ops.

-

Features of Cover Fire

-

Shooting online and offline on mobile

-

Cover Fire allows you to play offline in a single-player campaign, where you can enjoy 12 new chapters in a thrilling story mode. You can also play online in a competitive sniper shooting battle and don't stop shooting in cool war events.

-

New shooting game and best sniper 3d shooting game

-

Cover Fire has realistic 3D graphics and a variety of weapons to choose from. You can unlock unique army weapons and shoot cool guns, such as pistols, shotguns, snipers, bazookas, and more. You can also customize and upgrade your best guns' skills to increase their damage in the war zone.

-

Easy controls and low mobile requirements

-

Cover Fire has easy controls that bring you a fun and addictive combat. You can shoot to kill and save victims with simple gestures. The game also has low mobile requirements, so you don't need a wifi to play or download the game.

-

Online sniper tournaments and events

-

Cover Fire has an online mode where you can compete and fight against other players or friends around the world with your best times ranked in online leaderboards. You can also join the online sniper tournaments and show your skills as a shooter. Moreover, you can try the free zombie event, where you have to survive with a gun against zombies and save the survivors.

-

How to download Cover Fire?

-

Cover Fire is available for Android and Windows devices. You can download it from different sources, depending on your device and preference.

-


-

Download from Google Play Store

-

If you have an Android device, you can download Cover Fire from the Google Play Store for free. Just search for "Cover Fire: Offline Shooting" on the store or click on this link. You will need about 400 MB of free space on your device to install the game.

-

Download from Steam

-

If you have a Windows device, you can download Cover Fire from Steam for free. Just search for "Cover Fire: Offline Shooting Game" on Steam or click on this link. You will need about 1 GB of free space on your device to install the game.

-

Download from APKCombo

-

If you want to download Cover Fire from an alternative source, you can use APKCombo, a website that provides APK files for Android apps. You can download the Cover Fire APK from APKCombo for free. Just search for "Cover Fire: Offline Shooting Game" on APKCombo or click on this link. You will need to enable installation from unknown sources in your device settings to install the APK file.

-

Pros and cons of Cover Fire

-

Cover Fire is a great shooting game that offers a lot of fun and action. However, like any other game, it also has some pros and cons that you should consider before playing it. Here are some of them:

-

Pros

-
    -
  • Cover Fire has a thrilling and immersive story mode that will keep you hooked for hours.
  • -
  • Cover Fire has realistic, stunning 3D graphics that will make you feel like you are in a real war zone.
  • -
  • Cover Fire has a variety of weapons and customization options that will let you create your own style and strategy.
  • -
  • Cover Fire has an online mode where you can challenge and compete with other players around the world and join cool events.
  • -
  • Cover Fire has easy controls and low mobile requirements that make it accessible and enjoyable for everyone.
  • -
-

Cons

-
    -
  • Cover Fire can be repetitive and boring after a while, especially if you play the same missions over and over again.
  • -
  • Cover Fire can be frustrating and difficult at times, especially if you face enemies with higher levels and better weapons.
  • -
  • Cover Fire can be annoying and intrusive with its ads and pop-ups that can interrupt your gameplay.
  • -
  • Cover Fire can be expensive and unfair with its in-app purchases and premium features that can give you an advantage over other players.
  • -
  • Cover Fire can be buggy and glitchy at times, especially if you have a slow or unstable internet connection.
  • -
-

Conclusion

-

Cover Fire is a shooting game that you can play on your mobile device, either online or offline. It has a captivating story mode, a competitive online mode, realistic 3D graphics, a variety of weapons, and easy controls. However, it also has some drawbacks, such as being repetitive, frustrating, annoying, expensive, and buggy at times. Overall, Cover Fire is a game you should try if you love shooting games and want some fun and action on your mobile device. You can download it for free from different sources, depending on your device and preference.

-

FAQs

-

Here are some frequently asked questions about Cover Fire:

-
    -
  1. How do I play Cover Fire offline?
  2. -

    To play Cover Fire offline, first download the game from your preferred source. Then open the game and select the offline mode. You can choose from different missions and chapters in the story mode. You don't need an internet connection to play offline, but you won't be able to access the online features or update the game.

    -
  3. How do I get more coins and gold in Cover Fire?
  4. -

    To get more coins and gold in Cover Fire, you need to complete missions and challenges in the game. You can also watch ads or videos to earn some extra rewards. Alternatively, you can buy coins and gold with real money through in-app purchases. However, this is not recommended as it can be costly and unfair.

    -
  5. How do I upgrade my weapons in Cover Fire?
  6. -

    To upgrade your weapons in Cover Fire, you need to go to the arsenal menu and select the weapon you want to upgrade. You can upgrade different aspects of your weapon, such as damage, accuracy, reload speed, magazine size, etc. You will need coins or gold to upgrade your weapons, depending on the level of upgrade.

    -
  7. How do I change my character in Cover Fire?
  8. -

    To change your character in Cover Fire, you need to go to the squad menu and select the character you want to use. You can choose from different characters with different skills and abilities. You can also customize your character's appearance with different outfits and accessories. You will need coins or gold to unlock new characters or items.

    -
  9. How do I join the online mode in Cover Fire?
  10. -

    To join the online mode in Cover Fire, you need to have an internet connection and an account in the game. You can create an account with your email or Facebook login. Then, you need to go to the online mode menu and select the option you want to play. You can choose from sniper tournaments or events. You will be matched with other players based on your rank and skill level.

    -
    -
    \ No newline at end of file diff --git a/spaces/2023Liu2023/bingo/src/lib/hooks/use-enter-submit.tsx b/spaces/2023Liu2023/bingo/src/lib/hooks/use-enter-submit.tsx deleted file mode 100644 index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/lib/hooks/use-enter-submit.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { useRef, type RefObject } from 'react' - -export function useEnterSubmit(): { - formRef: RefObject - onKeyDown: (event: React.KeyboardEvent) => void -} { - const formRef = useRef(null) - - const handleKeyDown = ( - event: React.KeyboardEvent - ): void => { - if ( - event.key === 'Enter' && - !event.shiftKey && - !event.nativeEvent.isComposing - ) { - formRef.current?.requestSubmit() - event.preventDefault() - } - } - - return { formRef, onKeyDown: handleKeyDown } -} diff --git a/spaces/232labs/VToonify/vtoonify/model/encoder/criteria/id_loss.py b/spaces/232labs/VToonify/vtoonify/model/encoder/criteria/id_loss.py deleted file mode 100644 index 37c71d3047be01ae7b301e0a96f14e2df88a143f..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/encoder/criteria/id_loss.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch -from torch import nn -from model.encoder.encoders.model_irse import Backbone - - -class IDLoss(nn.Module): - def __init__(self, model_paths): - super(IDLoss, self).__init__() - print('Loading ResNet ArcFace') - self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') - self.facenet.load_state_dict(torch.load(model_paths)) - self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) - self.facenet.eval() - - def extract_feats(self, x): - x = x[:, :, 35:223, 32:220] # Crop interesting region - x = self.face_pool(x) - x_feats = self.facenet(x) - return x_feats - - def forward(self, y_hat, y): - n_samples = y_hat.shape[0] - y_feats = self.extract_feats(y) # Otherwise use the feature from there - y_hat_feats = self.extract_feats(y_hat) - y_feats = y_feats.detach() - loss = 0 - count = 0 - for i in range(n_samples): - diff_target = y_hat_feats[i].dot(y_feats[i]) - loss += 1 - diff_target - count += 1 - - return loss / count \ No newline at end of file diff --git a/spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/README.md b/spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/README.md deleted file mode 100644 index 5c8263a0d4cf200bf09c7a07d3244a40e57d018b..0000000000000000000000000000000000000000 --- a/spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 06 SL AI Image Music Video UI UX URL -emoji: 📊 -colorFrom: pink -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIConsultant/MusicGen/audiocraft/data/sound_dataset.py b/spaces/AIConsultant/MusicGen/audiocraft/data/sound_dataset.py deleted file mode 100644 index 8b88cbe8016b4bd28c2de749177c9af29f7755fc..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/data/sound_dataset.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Dataset of audio with a simple description. 
-""" - -from dataclasses import dataclass, fields, replace -import json -from pathlib import Path -import random -import typing as tp - -import numpy as np -import torch - -from .info_audio_dataset import ( - InfoAudioDataset, - get_keyword_or_keyword_list -) -from ..modules.conditioners import ( - ConditioningAttributes, - SegmentWithAttributes, - WavCondition, -) - - -EPS = torch.finfo(torch.float32).eps -TARGET_LEVEL_LOWER = -35 -TARGET_LEVEL_UPPER = -15 - - -@dataclass -class SoundInfo(SegmentWithAttributes): - """Segment info augmented with Sound metadata. - """ - description: tp.Optional[str] = None - self_wav: tp.Optional[torch.Tensor] = None - - @property - def has_sound_meta(self) -> bool: - return self.description is not None - - def to_condition_attributes(self) -> ConditioningAttributes: - out = ConditioningAttributes() - - for _field in fields(self): - key, value = _field.name, getattr(self, _field.name) - if key == 'self_wav': - out.wav[key] = value - else: - out.text[key] = value - return out - - @staticmethod - def attribute_getter(attribute): - if attribute == 'description': - preprocess_func = get_keyword_or_keyword_list - else: - preprocess_func = None - return preprocess_func - - @classmethod - def from_dict(cls, dictionary: dict, fields_required: bool = False): - _dictionary: tp.Dict[str, tp.Any] = {} - - # allow a subset of attributes to not be loaded from the dictionary - # these attributes may be populated later - post_init_attributes = ['self_wav'] - - for _field in fields(cls): - if _field.name in post_init_attributes: - continue - elif _field.name not in dictionary: - if fields_required: - raise KeyError(f"Unexpected missing key: {_field.name}") - else: - preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name) - value = dictionary[_field.name] - if preprocess_func: - value = preprocess_func(value) - _dictionary[_field.name] = value - return cls(**_dictionary) - - -class SoundDataset(InfoAudioDataset): - """Sound audio dataset: Audio dataset with environmental sound-specific metadata. - - Args: - info_fields_required (bool): Whether all the mandatory metadata fields should be in the loaded metadata. - external_metadata_source (tp.Optional[str]): Folder containing JSON metadata for the corresponding dataset. - The metadata files contained in this folder are expected to match the stem of the audio file with - a json extension. - aug_p (float): Probability of performing audio mixing augmentation on the batch. - mix_p (float): Proportion of batch items that are mixed together when applying audio mixing augmentation. - mix_snr_low (int): Lowerbound for SNR value sampled for mixing augmentation. - mix_snr_high (int): Upperbound for SNR value sampled for mixing augmentation. - mix_min_overlap (float): Minimum overlap between audio files when performing mixing augmentation. - kwargs: Additional arguments for AudioDataset. - - See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments. - """ - def __init__( - self, - *args, - info_fields_required: bool = True, - external_metadata_source: tp.Optional[str] = None, - aug_p: float = 0., - mix_p: float = 0., - mix_snr_low: int = -5, - mix_snr_high: int = 5, - mix_min_overlap: float = 0.5, - **kwargs - ): - kwargs['return_info'] = True # We require the info for each song of the dataset. 
- super().__init__(*args, **kwargs) - self.info_fields_required = info_fields_required - self.external_metadata_source = external_metadata_source - self.aug_p = aug_p - self.mix_p = mix_p - if self.aug_p > 0: - assert self.mix_p > 0, "Expecting some mixing proportion mix_p if aug_p > 0" - assert self.channels == 1, "SoundDataset with audio mixing considers only monophonic audio" - self.mix_snr_low = mix_snr_low - self.mix_snr_high = mix_snr_high - self.mix_min_overlap = mix_min_overlap - - def _get_info_path(self, path: tp.Union[str, Path]) -> Path: - """Get path of JSON with metadata (description, etc.). - If there exists a JSON with the same name as 'path.name', then it will be used. - Else, such JSON will be searched for in an external json source folder if it exists. - """ - info_path = Path(path).with_suffix('.json') - if Path(info_path).exists(): - return info_path - elif self.external_metadata_source and (Path(self.external_metadata_source) / info_path.name).exists(): - return Path(self.external_metadata_source) / info_path.name - else: - raise Exception(f"Unable to find a metadata JSON for path: {path}") - - def __getitem__(self, index): - wav, info = super().__getitem__(index) - info_data = info.to_dict() - info_path = self._get_info_path(info.meta.path) - if Path(info_path).exists(): - with open(info_path, 'r') as json_file: - sound_data = json.load(json_file) - sound_data.update(info_data) - sound_info = SoundInfo.from_dict(sound_data, fields_required=self.info_fields_required) - # if there are multiple descriptions, sample one randomly - if isinstance(sound_info.description, list): - sound_info.description = random.choice(sound_info.description) - else: - sound_info = SoundInfo.from_dict(info_data, fields_required=False) - - sound_info.self_wav = WavCondition( - wav=wav[None], length=torch.tensor([info.n_frames]), - sample_rate=[sound_info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time]) - - return wav, sound_info - - def collater(self, samples): - # when training, audio mixing is performed in the collate function - wav, sound_info = super().collater(samples) # SoundDataset always returns infos - if self.aug_p > 0: - wav, sound_info = mix_samples(wav, sound_info, self.aug_p, self.mix_p, - snr_low=self.mix_snr_low, snr_high=self.mix_snr_high, - min_overlap=self.mix_min_overlap) - return wav, sound_info - - -def rms_f(x: torch.Tensor) -> torch.Tensor: - return (x ** 2).mean(1).pow(0.5) - - -def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor: - """Normalize the signal to the target level.""" - rms = rms_f(audio) - scalar = 10 ** (target_level / 20) / (rms + EPS) - audio = audio * scalar.unsqueeze(1) - return audio - - -def is_clipped(audio: torch.Tensor, clipping_threshold: float = 0.99) -> torch.Tensor: - return (abs(audio) > clipping_threshold).any(1) - - -def mix_pair(src: torch.Tensor, dst: torch.Tensor, min_overlap: float) -> torch.Tensor: - start = random.randint(0, int(src.shape[1] * (1 - min_overlap))) - remainder = src.shape[1] - start - if dst.shape[1] > remainder: - src[:, start:] = src[:, start:] + dst[:, :remainder] - else: - src[:, start:start+dst.shape[1]] = src[:, start:start+dst.shape[1]] + dst - return src - - -def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float, - target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor: - """Function to mix clean speech and noise at various SNR levels. - - Args: - clean (torch.Tensor): Clean audio source to mix, of shape [B, T]. 
- noise (torch.Tensor): Noise audio source to mix, of shape [B, T]. - snr (int): SNR level when mixing. - min_overlap (float): Minimum overlap between the two mixed sources. - target_level (int): Gain level in dB. - clipping_threshold (float): Threshold for clipping the audio. - Returns: - torch.Tensor: The mixed audio, of shape [B, T]. - """ - if clean.shape[1] > noise.shape[1]: - noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1])) - else: - noise = noise[:, :clean.shape[1]] - - # normalizing to -25 dB FS - clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS) - clean = normalize(clean, target_level) - rmsclean = rms_f(clean) - - noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS) - noise = normalize(noise, target_level) - rmsnoise = rms_f(noise) - - # set the noise level for a given SNR - noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1) - noisenewlevel = noise * noisescalar - - # mix noise and clean speech - noisyspeech = mix_pair(clean, noisenewlevel, min_overlap) - - # randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value - # there is a chance of clipping that might happen with very less probability, which is not a major issue. - noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER) - rmsnoisy = rms_f(noisyspeech) - scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1) - noisyspeech = noisyspeech * scalarnoisy - clean = clean * scalarnoisy - noisenewlevel = noisenewlevel * scalarnoisy - - # final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly - clipped = is_clipped(noisyspeech) - if clipped.any(): - noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS) - noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel - - return noisyspeech - - -def snr_mix(src: torch.Tensor, dst: torch.Tensor, snr_low: int, snr_high: int, min_overlap: float): - if snr_low == snr_high: - snr = snr_low - else: - snr = np.random.randint(snr_low, snr_high) - mix = snr_mixer(src, dst, snr, min_overlap) - return mix - - -def mix_text(src_text: str, dst_text: str): - """Mix text from different sources by concatenating them.""" - if src_text == dst_text: - return src_text - return src_text + " " + dst_text - - -def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float, - snr_low: int, snr_high: int, min_overlap: float): - """Mix samples within a batch, summing the waveforms and concatenating the text infos. - - Args: - wavs (torch.Tensor): Audio tensors of shape [B, C, T]. - infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio. - aug_p (float): Augmentation probability. - mix_p (float): Proportion of items in the batch to mix (and merge) together. - snr_low (int): Lowerbound for sampling SNR. - snr_high (int): Upperbound for sampling SNR. - min_overlap (float): Minimum overlap between mixed samples. - Returns: - tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs - and mixed SoundInfo for the given batch. 
- """ - # no mixing to perform within the batch - if mix_p == 0: - return wavs, infos - - if random.uniform(0, 1) < aug_p: - # perform all augmentations on waveforms as [B, T] - # randomly picking pairs of audio to mix - assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}" - wavs = wavs.mean(dim=1, keepdim=False) - B, T = wavs.shape - k = int(mix_p * B) - mixed_sources_idx = torch.randperm(B)[:k] - mixed_targets_idx = torch.randperm(B)[:k] - aug_wavs = snr_mix( - wavs[mixed_sources_idx], - wavs[mixed_targets_idx], - snr_low, - snr_high, - min_overlap, - ) - # mixing textual descriptions in metadata - descriptions = [info.description for info in infos] - aug_infos = [] - for i, j in zip(mixed_sources_idx, mixed_targets_idx): - text = mix_text(descriptions[i], descriptions[j]) - m = replace(infos[i]) - m.description = text - aug_infos.append(m) - - # back to [B, C, T] - aug_wavs = aug_wavs.unsqueeze(1) - assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch." - assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}" - assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch" - - return aug_wavs, aug_infos # [B, C, T] - else: - # randomly pick samples in the batch to match - # the batch size when performing audio mixing - B, C, T = wavs.shape - k = int(mix_p * B) - wav_idx = torch.randperm(B)[:k] - wavs = wavs[wav_idx] - infos = [infos[i] for i in wav_idx] - assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch" - - return wavs, infos # [B, C, T] diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/train_mfa_align.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/train_mfa_align.py deleted file mode 100644 index f0b845e7388b5656c4e0e2c5efddad7f1aa28e6a..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/train_mfa_align.py +++ /dev/null @@ -1,46 +0,0 @@ -import utils.commons.single_thread_env # NOQA -import glob -import subprocess -from textgrid import TextGrid -import os -from text_to_speech.utils.commons.hparams import hparams, set_hparams - - -def train_mfa_align(mfa_outputs="mfa_outputs", - mfa_inputs="mfa_inputs", - model_name=None, pretrain_model_name=None, - mfa_cmd='train'): - CORPUS = hparams['processed_data_dir'].split("/")[-1] - NUM_JOB = int(os.getenv('N_PROC', os.cpu_count())) - env_vars = [f'CORPUS={CORPUS}', f'NUM_JOB={NUM_JOB}'] - if mfa_outputs is not None: - env_vars.append(f'MFA_OUTPUTS={mfa_outputs}') - if mfa_inputs is not None: - env_vars.append(f'MFA_INPUTS={mfa_inputs}') - if model_name is not None: - env_vars.append(f'MODEL_NAME={model_name}') - if pretrain_model_name is not None: - env_vars.append(f'PRETRAIN_MODEL_NAME={pretrain_model_name}') - if mfa_cmd is not None: - env_vars.append(f'MFA_CMD={mfa_cmd}') - env_str = ' '.join(env_vars) - print(f"| Run MFA for {CORPUS}. 
Env vars: {env_str}") - subprocess.check_call(f'{env_str} bash mfa_usr/run_mfa_train_align.sh', shell=True) - mfa_offset = hparams['preprocess_args']['mfa_offset'] - if mfa_offset > 0: - for tg_fn in glob.glob(f'{hparams["processed_data_dir"]}/{mfa_outputs}/*.TextGrid'): - tg = TextGrid.fromFile(tg_fn) - max_time = tg.maxTime - for tier in tg.tiers: - for interval in tier.intervals: - interval.maxTime = min(interval.maxTime + mfa_offset, max_time) - interval.minTime = min(interval.minTime + mfa_offset, max_time) - tier.intervals[0].minTime = 0 - tier.maxTime = min(tier.maxTime + mfa_offset, max_time) - tg.write(tg_fn) - TextGrid.fromFile(tg_fn) - - -if __name__ == '__main__': - set_hparams(print_hparams=False) - train_mfa_align() diff --git a/spaces/AIGText/GlyphControl/ldm/modules/midas/midas/transforms.py b/spaces/AIGText/GlyphControl/ldm/modules/midas/midas/transforms.py deleted file mode 100644 index 350cbc11662633ad7f8968eb10be2e7de6e384e9..0000000000000000000000000000000000000000 --- a/spaces/AIGText/GlyphControl/ldm/modules/midas/midas/transforms.py +++ /dev/null @@ -1,234 +0,0 @@ -import numpy as np -import cv2 -import math - - -def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): - """Rezise the sample to ensure the given size. Keeps aspect ratio. - - Args: - sample (dict): sample - size (tuple): image size - - Returns: - tuple: new size - """ - shape = list(sample["disparity"].shape) - - if shape[0] >= size[0] and shape[1] >= size[1]: - return sample - - scale = [0, 0] - scale[0] = size[0] / shape[0] - scale[1] = size[1] / shape[1] - - scale = max(scale) - - shape[0] = math.ceil(scale * shape[0]) - shape[1] = math.ceil(scale * shape[1]) - - # resize - sample["image"] = cv2.resize( - sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method - ) - - sample["disparity"] = cv2.resize( - sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST - ) - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - tuple(shape[::-1]), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return tuple(shape) - - -class Resize(object): - """Resize sample to given size (width, height). - """ - - def __init__( - self, - width, - height, - resize_target=True, - keep_aspect_ratio=False, - ensure_multiple_of=1, - resize_method="lower_bound", - image_interpolation_method=cv2.INTER_AREA, - ): - """Init. - - Args: - width (int): desired output width - height (int): desired output height - resize_target (bool, optional): - True: Resize the full sample (image, mask, target). - False: Resize image only. - Defaults to True. - keep_aspect_ratio (bool, optional): - True: Keep the aspect ratio of the input sample. - Output sample might not have the given width and height, and - resize behaviour depends on the parameter 'resize_method'. - Defaults to False. - ensure_multiple_of (int, optional): - Output width and height is constrained to be multiple of this parameter. - Defaults to 1. - resize_method (str, optional): - "lower_bound": Output will be at least as large as the given size. - "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) - "minimal": Scale as least as possible. (Output size might be smaller than given size.) - Defaults to "lower_bound". 
- """ - self.__width = width - self.__height = height - - self.__resize_target = resize_target - self.__keep_aspect_ratio = keep_aspect_ratio - self.__multiple_of = ensure_multiple_of - self.__resize_method = resize_method - self.__image_interpolation_method = image_interpolation_method - - def constrain_to_multiple_of(self, x, min_val=0, max_val=None): - y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if max_val is not None and y > max_val: - y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if y < min_val: - y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) - - return y - - def get_size(self, width, height): - # determine new height and width - scale_height = self.__height / height - scale_width = self.__width / width - - if self.__keep_aspect_ratio: - if self.__resize_method == "lower_bound": - # scale such that output size is lower bound - if scale_width > scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "upper_bound": - # scale such that output size is upper bound - if scale_width < scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "minimal": - # scale as least as possbile - if abs(1 - scale_width) < abs(1 - scale_height): - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - else: - raise ValueError( - f"resize_method {self.__resize_method} not implemented" - ) - - if self.__resize_method == "lower_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, min_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, min_val=self.__width - ) - elif self.__resize_method == "upper_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, max_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, max_val=self.__width - ) - elif self.__resize_method == "minimal": - new_height = self.constrain_to_multiple_of(scale_height * height) - new_width = self.constrain_to_multiple_of(scale_width * width) - else: - raise ValueError(f"resize_method {self.__resize_method} not implemented") - - return (new_width, new_height) - - def __call__(self, sample): - width, height = self.get_size( - sample["image"].shape[1], sample["image"].shape[0] - ) - - # resize sample - sample["image"] = cv2.resize( - sample["image"], - (width, height), - interpolation=self.__image_interpolation_method, - ) - - if self.__resize_target: - if "disparity" in sample: - sample["disparity"] = cv2.resize( - sample["disparity"], - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - - if "depth" in sample: - sample["depth"] = cv2.resize( - sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST - ) - - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return sample - - -class NormalizeImage(object): - """Normlize image by given mean and std. - """ - - def __init__(self, mean, std): - self.__mean = mean - self.__std = std - - def __call__(self, sample): - sample["image"] = (sample["image"] - self.__mean) / self.__std - - return sample - - -class PrepareForNet(object): - """Prepare sample for usage as network input. 
- """ - - def __init__(self): - pass - - def __call__(self, sample): - image = np.transpose(sample["image"], (2, 0, 1)) - sample["image"] = np.ascontiguousarray(image).astype(np.float32) - - if "mask" in sample: - sample["mask"] = sample["mask"].astype(np.float32) - sample["mask"] = np.ascontiguousarray(sample["mask"]) - - if "disparity" in sample: - disparity = sample["disparity"].astype(np.float32) - sample["disparity"] = np.ascontiguousarray(disparity) - - if "depth" in sample: - depth = sample["depth"].astype(np.float32) - sample["depth"] = np.ascontiguousarray(depth) - - return sample diff --git a/spaces/AIKey/TestStatic/index.html b/spaces/AIKey/TestStatic/index.html deleted file mode 100644 index 35f5f16ed492bc278adc08e76a4f7db5b46b1456..0000000000000000000000000000000000000000 --- a/spaces/AIKey/TestStatic/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - My static Space - - - -
    - -
    - - diff --git a/spaces/ALR03/gradiolangchainChatbotOpenAI/app.py b/spaces/ALR03/gradiolangchainChatbotOpenAI/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/ALR03/gradiolangchainChatbotOpenAI/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/webSearchParameters.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/webSearchParameters.ts deleted file mode 100644 index fd088a60621090930e9600c6086380afd2b412e8..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/webSearchParameters.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { writable } from "svelte/store"; -export interface WebSearchParameters { - useSearch: boolean; - nItems: number; -} -export const webSearchParameters = writable({ - useSearch: false, - nItems: 5, -}); diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/__init__.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/__init__.py deleted file mode 100644 index acf1f2f788c61a7d0f2e380d21274794bab6609c..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from agentverse.registry import Registry - -selector_registry = Registry(name="SelectorRegistry") - -from .base import BaseSelector -from .basic import BasicSelector -from .classroom import ClassroomSelector -from .sde_team import SdeTeamSelector -from .sde_team_given_tests import SdeTeamGivenTestsSelector -from .pokemon import PokemonSelector diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.js deleted file mode 100644 index 0198f7609ccb248c57add02dad283c42528a9c56..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.js +++ /dev/null @@ -1,91 +0,0 @@ -import ColorInputBase from '../colorinputbase/ColorInputBase.js'; -import Methods from './methods/Methods.js'; -import CreateBackground from '../../utils/build/CreateBackground.js'; - -const GetValue = Phaser.Utils.Objects.GetValue; - -class ColorInput extends ColorInputBase { - constructor(scene, config) { - if (config === undefined) { - config = {}; - } - - 
super(scene, config); - this.type = 'rexColorInput'; - - if (!config.hasOwnProperty('colorPicker')) { - config.colorPicker = { - background: { color: 0x0 } - } - } - - var colorPickerConfig = config.colorPicker; - var hasColorPicker = (colorPickerConfig !== false) && (colorPickerConfig !== null); - - if (hasColorPicker) { - this.setColorPickerSize( - GetValue(colorPickerConfig, 'width', 160), - GetValue(colorPickerConfig, 'height', 170) - ); - - var createBackgroundCallback; - var background = GetValue(colorPickerConfig, 'background'); - if (background) { - createBackgroundCallback = function (scene) { - return CreateBackground(scene, background); - } - } else { - createBackgroundCallback = GetValue(colorPickerConfig, 'createBackgroundCallback'); - } - this.setCreateColorPickerBackgroundCallback(createBackgroundCallback); - - this.setColorPickerHPalettePosition(GetValue(colorPickerConfig, 'hPalettePosition', 0)); - this.setColorPickerExpandDirection(GetValue(colorPickerConfig, 'expandDirection')); - this.setColorPickerEaseInDuration(GetValue(colorPickerConfig, 'easeIn', 200)); - this.setColorPickerEaseOutDuration(GetValue(colorPickerConfig, 'easeOut', 200)); - this.setColorPickerTransitInCallback(GetValue(colorPickerConfig, 'transitIn')); - this.setColorPickerTransitOutCallback(GetValue(colorPickerConfig, 'transitOut')); - this.setColorPickerBounds(GetValue(colorPickerConfig, 'bounds')); - - var colorPickerSpaceConfig = GetValue(colorPickerConfig, 'space'); - if (colorPickerSpaceConfig === undefined) { - colorPickerSpaceConfig = { left: 10, right: 10, top: 10, bottom: 10, item: 8 } - } - this.setColorPickerSpace(colorPickerSpaceConfig); - } - - var colorComponentsConfig = config.colorComponents; - var hasColorComponents = (colorComponentsConfig !== false) && (colorComponentsConfig !== null); - if (hasColorPicker && hasColorComponents) { - this.setColorComponentsHeight(GetValue(colorComponentsConfig, 'height', 30)); - - this.setColorComponentsFormatLabelConfig(GetValue(colorComponentsConfig, 'formatLabel')); - - var colorComponentsInputTextConfig = GetValue(colorComponentsConfig, 'inputText'); - if (!colorComponentsInputTextConfig) { - colorComponentsInputTextConfig = GetValue(config, 'inputText'); - } - this.setColorComponentsInputTextConfig(colorComponentsInputTextConfig); - - var colorComponentsSpace = GetValue(colorComponentsConfig, 'space'); - if (colorComponentsSpace === undefined) { - colorComponentsSpace = { item: 8 } - } - this.setColorComponentsSpace(colorComponentsSpace); - } - - - - var swatch = this.childrenMap.swatch; - if (swatch && hasColorPicker) { - this.onClick(swatch, this.openColorPicker, this); - } - } -} - -Object.assign( - ColorInput.prototype, - Methods, -) - -export default ColorInput; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateRoundRectangle.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateRoundRectangle.js deleted file mode 100644 index 29d8923125411199feb9971d4721cd82d7816a67..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateRoundRectangle.js +++ /dev/null @@ -1,12 +0,0 @@ -import MergeStyle from './utils/MergeStyle.js'; -import RoundRectangle from '../../roundrectangle/RoundRectangle.js'; - -var CreateRoundRectangle = function (scene, data, view, styles, customBuilders) { - data = MergeStyle(data, styles); - - var gameObject = new 
RoundRectangle(scene, data); - scene.add.existing(gameObject); - return gameObject; -} - -export default CreateRoundRectangle; \ No newline at end of file diff --git a/spaces/AlanMars/QYL-AI-Space/modules/webui_locale.py b/spaces/AlanMars/QYL-AI-Space/modules/webui_locale.py deleted file mode 100644 index c97df7f416ffbd746efb52f17d8feb0e30cf9cd6..0000000000000000000000000000000000000000 --- a/spaces/AlanMars/QYL-AI-Space/modules/webui_locale.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import locale -import commentjson as json - - -class I18nAuto: - def __init__(self): - if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) - else: - config = {} - lang_config = config.get("language", "auto") - language = os.environ.get("LANGUAGE", lang_config) - if language == "auto": - language = locale.getdefaultlocale()[0] # get the language code of the system (ex. zh_CN) - self.language_map = {} - self.file_is_exists = os.path.isfile(f"./locale/{language}.json") - if self.file_is_exists: - with open(f"./locale/{language}.json", "r", encoding="utf-8") as f: - self.language_map.update(json.load(f)) - - def __call__(self, key): - if self.file_is_exists and key in self.language_map: - return self.language_map[key] - else: - return key diff --git a/spaces/Ali36Ahmad/magic-diffusion/share_btn.py b/spaces/Ali36Ahmad/magic-diffusion/share_btn.py deleted file mode 100644 index 1382fb25a5ef50e843598187e1e660e86ea8dd05..0000000000000000000000000000000000000000 --- a/spaces/Ali36Ahmad/magic-diffusion/share_btn.py +++ /dev/null @@ -1,88 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `magic-prompt-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `magic-prompt-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgEl = gradioEl.querySelector('#input-img img'); - const imgEls = gradioEl.querySelectorAll('#generated-gallery img'); - const promptTxt = gradioEl.querySelector('#translated textarea').value; - let titleTxt = promptTxt; - if(titleTxt.length > 100){ - titleTxt = titleTxt.slice(0, 100) + ' ...'; - } - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!imgEls.length){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const files = await Promise.all( - [...imgEls].map(async (imgEl) => { - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = `sd-perception-${{imgId}}.jpg`; - return 
new File([blob], fileName, { type: 'image/jpeg' }); - }) - ); - const inputFile = await getInputImgFile(inputImgEl); - files.push(inputFile); - const urls = await Promise.all(files.map((f) => uploadFile(f))); - const urlInputImg = urls.pop(); - const htmlImgs = urls.map(url => ``); - const htmlImgsMd = htmlImgs.join(`\n`); - const descriptionMd = `#### Input img: - -#### Caption: -${promptTxt} -#### Generations: -
    -${htmlImgsMd} -
    `; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/huggingface-projects/magic-diffusion/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py b/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py deleted file mode 100644 index c9edc2f1414e35f93abfd3dfe11a61f1f406580e..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py +++ /dev/null @@ -1,23 +0,0 @@ -from easydict import EasyDict as edict - -# configs for test speed - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "synthetic" -config.num_classes = 300 * 10000 -config.num_epoch = 30 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = [] diff --git a/spaces/Alpaca233/SadTalker/src/face3d/models/facerecon_model.py b/spaces/Alpaca233/SadTalker/src/face3d/models/facerecon_model.py deleted file mode 100644 index 7de8ca6eebc50ff1ed52c5ba37d31b43f977b5e1..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/face3d/models/facerecon_model.py +++ /dev/null @@ -1,220 +0,0 @@ -"""This script defines the face reconstruction model for Deep3DFaceRecon_pytorch -""" - -import numpy as np -import torch -from src.face3d.models.base_model import BaseModel -from src.face3d.models import networks -from src.face3d.models.bfm import ParametricFaceModel -from src.face3d.models.losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss -from src.face3d.util import util -from src.face3d.util.nvdiffrast import MeshRenderer -# from src.face3d.util.preprocess import estimate_norm_torch - -import trimesh -from scipy.io import savemat - -class FaceReconModel(BaseModel): - - @staticmethod - def modify_commandline_options(parser, is_train=False): - """ Configures options specific for CUT model - """ - # net structure and parameters - parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure') - parser.add_argument('--init_path', type=str, default='./checkpoints/init_model/resnet50-0676ba61.pth') - parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc') - parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/') - parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model') - - # renderer parameters - parser.add_argument('--focal', type=float, default=1015.) - parser.add_argument('--center', type=float, default=112.) - parser.add_argument('--camera_d', type=float, default=10.) - parser.add_argument('--z_near', type=float, default=5.) - parser.add_argument('--z_far', type=float, default=15.) 
- - if is_train: - # training parameters - parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r43', 'r50'], help='face recog network structure') - parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth') - parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss') - parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face') - - - # augmentation parameters - parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels') - parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor') - parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree') - - # loss weights - parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss') - parser.add_argument('--w_color', type=float, default=1.92, help='weight for loss loss') - parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss') - parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss') - parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss') - parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss') - parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss') - parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss') - parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss') - - opt, _ = parser.parse_known_args() - parser.set_defaults( - focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15. - ) - if is_train: - parser.set_defaults( - use_crop_face=True, use_predef_M=False - ) - return parser - - def __init__(self, opt): - """Initialize this model class. - - Parameters: - opt -- training/test options - - A few things can be done here. 
- - (required) call the initialization function of BaseModel - - define loss function, visualization images, model names, and optimizers - """ - BaseModel.__init__(self, opt) # call the initialization method of BaseModel - - self.visual_names = ['output_vis'] - self.model_names = ['net_recon'] - self.parallel_names = self.model_names + ['renderer'] - - self.facemodel = ParametricFaceModel( - bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center, - is_train=self.isTrain, default_name=opt.bfm_model - ) - - fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi - self.renderer = MeshRenderer( - rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center) - ) - - if self.isTrain: - self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc'] - - self.net_recog = networks.define_net_recog( - net_recog=opt.net_recog, pretrained_path=opt.net_recog_path - ) - # loss func name: (compute_%s_loss) % loss_name - self.compute_feat_loss = perceptual_loss - self.comupte_color_loss = photo_loss - self.compute_lm_loss = landmark_loss - self.compute_reg_loss = reg_loss - self.compute_reflc_loss = reflectance_loss - - self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr) - self.optimizers = [self.optimizer] - self.parallel_names += ['net_recog'] - # Our program will automatically call to define schedulers, load networks, and print networks - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input: a dictionary that contains the data itself and its metadata information. - """ - self.input_img = input['imgs'].to(self.device) - self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None - self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None - self.trans_m = input['M'].to(self.device) if 'M' in input else None - self.image_paths = input['im_paths'] if 'im_paths' in input else None - - def forward(self, output_coeff, device): - self.facemodel.to(device) - self.pred_vertex, self.pred_tex, self.pred_color, self.pred_lm = \ - self.facemodel.compute_for_render(output_coeff) - self.pred_mask, _, self.pred_face = self.renderer( - self.pred_vertex, self.facemodel.face_buf, feat=self.pred_color) - - self.pred_coeffs_dict = self.facemodel.split_coeff(output_coeff) - - - def compute_losses(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - - assert self.net_recog.training == False - trans_m = self.trans_m - if not self.opt.use_predef_M: - trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2]) - - pred_feat = self.net_recog(self.pred_face, trans_m) - gt_feat = self.net_recog(self.input_img, self.trans_m) - self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat) - - face_mask = self.pred_mask - if self.opt.use_crop_face: - face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf) - - face_mask = face_mask.detach() - self.loss_color = self.opt.w_color * self.comupte_color_loss( - self.pred_face, self.input_img, self.atten_mask * face_mask) - - loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt) - self.loss_reg = self.opt.w_reg * loss_reg - self.loss_gamma = self.opt.w_gamma * loss_gamma - - self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm) - - self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, 
self.facemodel.skin_mask) - - self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \ - + self.loss_lm + self.loss_reflc - - - def optimize_parameters(self, isTrain=True): - self.forward() - self.compute_losses() - """Update network weights; it will be called in every training iteration.""" - if isTrain: - self.optimizer.zero_grad() - self.loss_all.backward() - self.optimizer.step() - - def compute_visuals(self): - with torch.no_grad(): - input_img_numpy = 255. * self.input_img.detach().cpu().permute(0, 2, 3, 1).numpy() - output_vis = self.pred_face * self.pred_mask + (1 - self.pred_mask) * self.input_img - output_vis_numpy_raw = 255. * output_vis.detach().cpu().permute(0, 2, 3, 1).numpy() - - if self.gt_lm is not None: - gt_lm_numpy = self.gt_lm.cpu().numpy() - pred_lm_numpy = self.pred_lm.detach().cpu().numpy() - output_vis_numpy = util.draw_landmarks(output_vis_numpy_raw, gt_lm_numpy, 'b') - output_vis_numpy = util.draw_landmarks(output_vis_numpy, pred_lm_numpy, 'r') - - output_vis_numpy = np.concatenate((input_img_numpy, - output_vis_numpy_raw, output_vis_numpy), axis=-2) - else: - output_vis_numpy = np.concatenate((input_img_numpy, - output_vis_numpy_raw), axis=-2) - - self.output_vis = torch.tensor( - output_vis_numpy / 255., dtype=torch.float32 - ).permute(0, 3, 1, 2).to(self.device) - - def save_mesh(self, name): - - recon_shape = self.pred_vertex # get reconstructed shape - recon_shape[..., -1] = 10 - recon_shape[..., -1] # from camera space to world space - recon_shape = recon_shape.cpu().numpy()[0] - recon_color = self.pred_color - recon_color = recon_color.cpu().numpy()[0] - tri = self.facemodel.face_buf.cpu().numpy() - mesh = trimesh.Trimesh(vertices=recon_shape, faces=tri, vertex_colors=np.clip(255. * recon_color, 0, 255).astype(np.uint8)) - mesh.export(name) - - def save_coeff(self,name): - - pred_coeffs = {key:self.pred_coeffs_dict[key].cpu().numpy() for key in self.pred_coeffs_dict} - pred_lm = self.pred_lm.cpu().numpy() - pred_lm = np.stack([pred_lm[:,:,0],self.input_img.shape[2]-1-pred_lm[:,:,1]],axis=2) # transfer to image coordinate - pred_coeffs['lm68'] = pred_lm - savemat(name,pred_coeffs) - - - diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/grid_sample_gradfix.py b/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/grid_sample_gradfix.py deleted file mode 100644 index ca6b3413ea72a734703c34382c023b84523601fd..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/grid_sample_gradfix.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.grid_sample` that -supports arbitrarily high order gradients between the input and output. -Only works on 2D images and assumes -`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" - -import warnings -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. 
- -#---------------------------------------------------------------------------- - -def grid_sample(input, grid): - if _should_use_custom_op(): - return _GridSample2dForward.apply(input, grid) - return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(): - if not enabled: - return False - if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']): - return True - warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().') - return False - -#---------------------------------------------------------------------------- - -class _GridSample2dForward(torch.autograd.Function): - @staticmethod - def forward(ctx, input, grid): - assert input.ndim == 4 - assert grid.ndim == 4 - output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - ctx.save_for_backward(input, grid) - return output - - @staticmethod - def backward(ctx, grad_output): - input, grid = ctx.saved_tensors - grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) - return grad_input, grad_grid - -#---------------------------------------------------------------------------- - -class _GridSample2dBackward(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input, grid): - op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) - ctx.save_for_backward(grid) - return grad_input, grad_grid - - @staticmethod - def backward(ctx, grad2_grad_input, grad2_grad_grid): - _ = grad2_grad_grid # unused - grid, = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - grad2_grid = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) - - assert not ctx.needs_input_grad[2] - return grad2_grad_output, grad2_input, grad2_grid - -#---------------------------------------------------------------------------- diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/dreambooth.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/dreambooth.md deleted file mode 100644 index 83974e50b14e8eb614eb943a721570d55bdf9921..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/dreambooth.md +++ /dev/null @@ -1,475 +0,0 @@ - - -# DreamBooth - -[DreamBooth](https://arxiv.org/abs/2208.12242)는 한 주제에 대한 적은 이미지(3~5개)만으로도 stable diffusion과 같이 text-to-image 모델을 개인화할 수 있는 방법입니다. 이를 통해 모델은 다양한 장면, 포즈 및 장면(뷰)에서 피사체에 대해 맥락화(contextualized)된 이미지를 생성할 수 있습니다. - -![프로젝트 블로그에서의 DreamBooth 예시](https://dreambooth.github.io/DreamBooth_files/teaser_static.jpg) -project's blog. -프로젝트 블로그에서의 Dreambooth 예시 - - -이 가이드는 다양한 GPU, Flax 사양에 대해 [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) 모델로 DreamBooth를 파인튜닝하는 방법을 보여줍니다. 더 깊이 파고들어 작동 방식을 확인하는 데 관심이 있는 경우, 이 가이드에 사용된 DreamBooth의 모든 학습 스크립트를 [여기](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)에서 찾을 수 있습니다. - -스크립트를 실행하기 전에 라이브러리의 학습에 필요한 dependencies를 설치해야 합니다. 또한 `main` GitHub 브랜치에서 🧨 Diffusers를 설치하는 것이 좋습니다. 
- -```bash -pip install git+https://github.com/huggingface/diffusers -pip install -U -r diffusers/examples/dreambooth/requirements.txt -``` - -xFormers는 학습에 필요한 요구 사항은 아니지만, 가능하면 [설치](../optimization/xformers)하는 것이 좋습니다. 학습 속도를 높이고 메모리 사용량을 줄일 수 있기 때문입니다. - -모든 dependencies을 설정한 후 다음을 사용하여 [🤗 Accelerate](https://github.com/huggingface/accelerate/) 환경을 다음과 같이 초기화합니다: - -```bash -accelerate config -``` - -별도 설정 없이 기본 🤗 Accelerate 환경을 설치하려면 다음을 실행합니다: - -```bash -accelerate config default -``` - -또는 현재 환경이 노트북과 같은 대화형 셸을 지원하지 않는 경우 다음을 사용할 수 있습니다: - -```py -from accelerate.utils import write_basic_config - -write_basic_config() -``` - -## 파인튜닝 - - - -DreamBooth 파인튜닝은 하이퍼파라미터에 매우 민감하고 과적합되기 쉽습니다. 적절한 하이퍼파라미터를 선택하는 데 도움이 되도록 다양한 권장 설정이 포함된 [심층 분석](https://huggingface.co/blog/dreambooth)을 살펴보는 것이 좋습니다. - - - - - -[몇 장의 강아지 이미지들](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ)로 DreamBooth를 시도해봅시다. -이를 다운로드해 디렉터리에 저장한 다음 `INSTANCE_DIR` 환경 변수를 해당 경로로 설정합니다: - - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path_to_training_images" -export OUTPUT_DIR="path_to_saved_model" -``` - -그런 다음, 다음 명령을 사용하여 학습 스크립트를 실행할 수 있습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)에서 찾을 수 있습니다): - -```bash -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=400 -``` - - - -TPU에 액세스할 수 있거나 더 빠르게 훈련하고 싶다면 [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_flax.py)를 사용해 볼 수 있습니다. Flax 학습 스크립트는 gradient checkpointing 또는 gradient accumulation을 지원하지 않으므로, 메모리가 30GB 이상인 GPU가 필요합니다. - -스크립트를 실행하기 전에 요구 사항이 설치되어 있는지 확인하십시오. - -```bash -pip install -U -r requirements.txt -``` - -그러면 다음 명령어로 학습 스크립트를 실행시킬 수 있습니다: - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export INSTANCE_DIR="path-to-instance-images" -export OUTPUT_DIR="path-to-save-model" - -python train_dreambooth_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --learning_rate=5e-6 \ - --max_train_steps=400 -``` - - - -### Prior-preserving(사전 보존) loss를 사용한 파인튜닝 - -과적합과 language drift를 방지하기 위해 사전 보존이 사용됩니다(관심이 있는 경우 [논문](https://arxiv.org/abs/2208.12242)을 참조하세요). 사전 보존을 위해 동일한 클래스의 다른 이미지를 학습 프로세스의 일부로 사용합니다. 좋은 점은 Stable Diffusion 모델 자체를 사용하여 이러한 이미지를 생성할 수 있다는 것입니다! 학습 스크립트는 생성된 이미지를 우리가 지정한 로컬 경로에 저장합니다. - -저자들에 따르면 사전 보존을 위해 `num_epochs * num_samples`개의 이미지를 생성하는 것이 좋습니다. 200-300개에서 대부분 잘 작동합니다. 
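To make the role of `--prior_loss_weight` concrete, here is a simplified sketch of how the instance term and the prior (class) term are typically combined. It is an illustration under the stated assumptions, not the exact code in `train_dreambooth.py`.

```python
# Simplified sketch of the prior-preservation loss (illustrative only).
# With --with_prior_preservation, each batch holds instance images and class images;
# the two MSE terms are combined using --prior_loss_weight.
import torch
import torch.nn.functional as F

def dreambooth_loss(model_pred: torch.Tensor, target: torch.Tensor,
                    prior_loss_weight: float = 1.0) -> torch.Tensor:
    # First half of the batch: instance (subject) images; second half: class images.
    instance_pred, prior_pred = model_pred.chunk(2, dim=0)
    instance_target, prior_target = target.chunk(2, dim=0)

    instance_loss = F.mse_loss(instance_pred.float(), instance_target.float())
    prior_loss = F.mse_loss(prior_pred.float(), prior_target.float())
    return instance_loss + prior_loss_weight * prior_loss
```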
- - - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path_to_training_images" -export CLASS_DIR="path_to_class_images" -export OUTPUT_DIR="path_to_saved_model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -python train_dreambooth_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --learning_rate=5e-6 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - - - -## 텍스트 인코더와 and UNet로 파인튜닝하기 - -해당 스크립트를 사용하면 `unet`과 함께 `text_encoder`를 파인튜닝할 수 있습니다. 실험에서(자세한 내용은 [🧨 Diffusers를 사용해 DreamBooth로 Stable Diffusion 학습하기](https://huggingface.co/blog/dreambooth) 게시물을 확인하세요), 특히 얼굴 이미지를 생성할 때 훨씬 더 나은 결과를 얻을 수 있습니다. - - - -텍스트 인코더를 학습시키려면 추가 메모리가 필요해 16GB GPU로는 동작하지 않습니다. 이 옵션을 사용하려면 최소 24GB VRAM이 필요합니다. - - - -`--train_text_encoder` 인수를 학습 스크립트에 전달하여 `text_encoder` 및 `unet`을 파인튜닝할 수 있습니다: - - - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path_to_training_images" -export CLASS_DIR="path_to_class_images" -export OUTPUT_DIR="path_to_saved_model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_text_encoder \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --use_8bit_adam - --gradient_checkpointing \ - --learning_rate=2e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -python train_dreambooth_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_text_encoder \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --learning_rate=2e-6 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - - - -## LoRA로 파인튜닝하기 - -DreamBooth에서 대규모 모델의 학습을 가속화하기 위한 파인튜닝 기술인 LoRA(Low-Rank Adaptation of Large Language Models)를 사용할 수 있습니다. 자세한 내용은 [LoRA 학습](training/lora#dreambooth) 가이드를 참조하세요. - -### 학습 중 체크포인트 저장하기 - -Dreambooth로 훈련하는 동안 과적합하기 쉬우므로, 때때로 학습 중에 정기적인 체크포인트를 저장하는 것이 유용합니다. 
중간 체크포인트 중 하나가 최종 모델보다 더 잘 작동할 수 있습니다! 체크포인트 저장 기능을 활성화하려면 학습 스크립트에 다음 인수를 전달해야 합니다: - -```bash - --checkpointing_steps=500 -``` - -이렇게 하면 `output_dir`의 하위 폴더에 전체 학습 상태가 저장됩니다. 하위 폴더 이름은 접두사 `checkpoint-`로 시작하고 지금까지 수행된 step 수입니다. 예시로 `checkpoint-1500`은 1500 학습 step 후에 저장된 체크포인트입니다. - -#### 저장된 체크포인트에서 훈련 재개하기 - -저장된 체크포인트에서 훈련을 재개하려면, `--resume_from_checkpoint` 인수를 전달한 다음 사용할 체크포인트의 이름을 지정하면 됩니다. 특수 문자열 `"latest"`를 사용하여 저장된 마지막 체크포인트(즉, step 수가 가장 많은 체크포인트)에서 재개할 수도 있습니다. 예를 들어 다음은 1500 step 후에 저장된 체크포인트에서부터 학습을 재개합니다: - -```bash - --resume_from_checkpoint="checkpoint-1500" -``` - -원하는 경우 일부 하이퍼파라미터를 조정할 수 있습니다. - -#### 저장된 체크포인트를 사용하여 추론 수행하기 - -저장된 체크포인트는 훈련 재개에 적합한 형식으로 저장됩니다. 여기에는 모델 가중치뿐만 아니라 옵티마이저, 데이터 로더 및 학습률의 상태도 포함됩니다. - -**`"accelerate>=0.16.0"`**이 설치된 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행합니다. - -```python -from diffusers import DiffusionPipeline, UNet2DConditionModel -from transformers import CLIPTextModel -import torch - -# 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 불러옵니다. -model_id = "CompVis/stable-diffusion-v1-4" - -unet = UNet2DConditionModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/unet") - -# `args.train_text_encoder`로 학습한 경우면 텍스트 인코더를 꼭 불러오세요 -text_encoder = CLIPTextModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/text_encoder") - -pipeline = DiffusionPipeline.from_pretrained(model_id, unet=unet, text_encoder=text_encoder, dtype=torch.float16) -pipeline.to("cuda") - -# 추론을 수행하거나 저장하거나, 허브에 푸시합니다. -pipeline.save_pretrained("dreambooth-pipeline") -``` - -If you have **`"accelerate<0.16.0"`** installed, you need to convert it to an inference pipeline first: - -```python -from accelerate import Accelerator -from diffusers import DiffusionPipeline - -# 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 불러옵니다. -model_id = "CompVis/stable-diffusion-v1-4" -pipeline = DiffusionPipeline.from_pretrained(model_id) - -accelerator = Accelerator() - -# 초기 학습에 `--train_text_encoder`가 사용된 경우 text_encoder를 사용합니다. -unet, text_encoder = accelerator.prepare(pipeline.unet, pipeline.text_encoder) - -# 체크포인트 경로로부터 상태를 복원합니다. 여기서는 절대 경로를 사용해야 합니다. -accelerator.load_state("/sddata/dreambooth/daruma-v2-1/checkpoint-100") - -# unwrapped 모델로 파이프라인을 다시 빌드합니다.(.unet and .text_encoder로의 할당도 작동해야 합니다) -pipeline = DiffusionPipeline.from_pretrained( - model_id, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), -) - -# 추론을 수행하거나 저장하거나, 허브에 푸시합니다. -pipeline.save_pretrained("dreambooth-pipeline") -``` - -## 각 GPU 용량에서의 최적화 - -하드웨어에 따라 16GB에서 8GB까지 GPU에서 DreamBooth를 최적화하는 몇 가지 방법이 있습니다! - -### xFormers - -[xFormers](https://github.com/facebookresearch/xformers)는 Transformers를 최적화하기 위한 toolbox이며, 🧨 Diffusers에서 사용되는[memory-efficient attention](https://facebookresearch.github.io/xformers/components/ops.html#module-xformers.ops) 메커니즘을 포함하고 있습니다. [xFormers를 설치](./optimization/xformers)한 다음 학습 스크립트에 다음 인수를 추가합니다: - -```bash - --enable_xformers_memory_efficient_attention -``` - -xFormers는 Flax에서 사용할 수 없습니다. - -### 그래디언트 없음으로 설정 - -메모리 사용량을 줄일 수 있는 또 다른 방법은 [기울기 설정](https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html)을 0 대신 `None`으로 하는 것입니다. 그러나 이로 인해 특정 동작이 변경될 수 있으므로 문제가 발생하면 이 인수를 제거해 보십시오. 학습 스크립트에 다음 인수를 추가하여 그래디언트를 `None`으로 설정합니다. - -```bash - --set_grads_to_none -``` - -### 16GB GPU - -Gradient checkpointing과 [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)의 8비트 옵티마이저의 도움으로, 16GB GPU에서 dreambooth를 훈련할 수 있습니다. 
bitsandbytes가 설치되어 있는지 확인하세요: - -```bash -pip install bitsandbytes -``` - -그 다음, 학습 스크립트에 `--use_8bit_adam` 옵션을 명시합니다: - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path_to_training_images" -export CLASS_DIR="path_to_class_images" -export OUTPUT_DIR="path_to_saved_model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=2 --gradient_checkpointing \ - --use_8bit_adam \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - -### 12GB GPU - -12GB GPU에서 DreamBooth를 실행하려면 gradient checkpointing, 8비트 옵티마이저, xFormers를 활성화하고 그래디언트를 `None`으로 설정해야 합니다. - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 --gradient_checkpointing \ - --use_8bit_adam \ - --enable_xformers_memory_efficient_attention \ - --set_grads_to_none \ - --learning_rate=2e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - -### 8GB GPU에서 학습하기 - -8GB GPU에 대해서는 [DeepSpeed](https://www.deepspeed.ai/)를 사용해 일부 텐서를 VRAM에서 CPU 또는 NVME로 오프로드하여 더 적은 GPU 메모리로 학습할 수도 있습니다. - -🤗 Accelerate 환경을 구성하려면 다음 명령을 실행하세요: - -```bash -accelerate config -``` - -환경 구성 중에 DeepSpeed를 사용할 것을 확인하세요. -그러면 DeepSpeed stage 2, fp16 혼합 정밀도를 결합하고 모델 매개변수와 옵티마이저 상태를 모두 CPU로 오프로드하면 8GB VRAM 미만에서 학습할 수 있습니다. -단점은 더 많은 시스템 RAM(약 25GB)이 필요하다는 것입니다. 추가 구성 옵션은 [DeepSpeed 문서](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)를 참조하세요. - -또한 기본 Adam 옵티마이저를 DeepSpeed의 최적화된 Adam 버전으로 변경해야 합니다. -이는 상당한 속도 향상을 위한 Adam인 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu)입니다. -`DeepSpeedCPUAdam`을 활성화하려면 시스템의 CUDA toolchain 버전이 PyTorch와 함께 설치된 것과 동일해야 합니다. - -8비트 옵티마이저는 현재 DeepSpeed와 호환되지 않는 것 같습니다. 
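As a concrete illustration of the optimizer swap described above, a hedged sketch follows. The placeholder module and hyperparameters are assumptions for illustration; in practice the training script selects the optimizer when DeepSpeed is configured through 🤗 Accelerate.

```python
# Illustrative sketch only: replacing the default Adam with DeepSpeed's CPU-offload Adam.
# Assumes `deepspeed` is installed; `unet` below is a placeholder standing in for the UNet.
import torch.nn as nn
from deepspeed.ops.adam import DeepSpeedCPUAdam

unet = nn.Linear(4, 4)  # placeholder module (the real script optimizes the diffusion UNet)

optimizer = DeepSpeedCPUAdam(
    unet.parameters(),  # parameters to optimize
    lr=5e-6,            # same learning rate as the command below
    weight_decay=1e-2,  # illustrative value
)
```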
- -다음 명령으로 학습을 시작합니다: - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path_to_training_images" -export CLASS_DIR="path_to_class_images" -export OUTPUT_DIR="path_to_saved_model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --sample_batch_size=1 \ - --gradient_accumulation_steps=1 --gradient_checkpointing \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 \ - --mixed_precision=fp16 -``` - -## 추론 - -모델을 학습한 후에는, 모델이 저장된 경로를 지정해 [`StableDiffusionPipeline`]로 추론을 수행할 수 있습니다. 프롬프트에 학습에 사용된 특수 `식별자`(이전 예시의 `sks`)가 포함되어 있는지 확인하세요. - -**`"accelerate>=0.16.0"`**이 설치되어 있는 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행할 수 있습니다: - -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "path_to_saved_model" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") - -prompt = "A photo of sks dog in a bucket" -image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] - -image.save("dog-bucket.png") -``` - -[저장된 학습 체크포인트](#inference-from-a-saved-checkpoint)에서도 추론을 실행할 수도 있습니다. \ No newline at end of file diff --git a/spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/README.md b/spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/README.md deleted file mode 100644 index f9782d7ef0a9c0d5a554a4f2a08b3e1b368c80dd..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# An Empirical Study of Spatial Attention Mechanisms in Deep Networks - -## Introduction - -[ALGORITHM] - -```latex -@article{zhu2019empirical, - title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks}, - author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng}, - journal={arXiv preprint arXiv:1904.05873}, - year={2019} -} -``` - -## Results and Models - -| Backbone | Attention Component | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -|:---------:|:-------------------:|:----:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:| -| R-50 | 1111 | N | 1x | 8.0 | 13.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130_210344.log.json) | -| R-50 | 0010 | N | 1x | 4.2 | 18.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130_210125.log.json) | -| R-50 | 1111 | Y | 1x | 8.0 | 12.7 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130_204442.log.json) | -| R-50 | 0010 | Y | 1x | 4.2 | 17.1 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130_210410.log.json) | diff --git a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index f438a4792e9aa4bcef35a42349156f1eab044477..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnet101_caffe', - backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/base.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/base.py deleted file mode 100644 index 7ce9c36c1d6f60c8567a72c44cc9eee0323ae2a2..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/base.py +++ /dev/null @@ -1,355 +0,0 @@ -from abc import ABCMeta, abstractmethod -from collections import OrderedDict - -import mmcv -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -from mmcv.runner import auto_fp16 -from mmcv.utils import print_log - -from mmdet.core.visualization import imshow_det_bboxes -from mmdet.utils import get_root_logger - - -class BaseDetector(nn.Module, metaclass=ABCMeta): - """Base class for detectors.""" - - def __init__(self): - super(BaseDetector, self).__init__() - self.fp16_enabled = False - - @property - def with_neck(self): - """bool: whether the detector has a neck""" - return hasattr(self, 'neck') and self.neck is not None - - # TODO: these properties need to be carefully handled - # for both single stage & two stage detectors - @property - def with_shared_head(self): - """bool: whether the detector has a shared head in the RoI Head""" - return hasattr(self, 'roi_head') and self.roi_head.with_shared_head - - @property - def with_bbox(self): - """bool: whether the detector has a bbox head""" - return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) - or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) - - @property - def 
with_mask(self): - """bool: whether the detector has a mask head""" - return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) - or (hasattr(self, 'mask_head') and self.mask_head is not None)) - - @abstractmethod - def extract_feat(self, imgs): - """Extract features from images.""" - pass - - def extract_feats(self, imgs): - """Extract features from multiple images. - - Args: - imgs (list[torch.Tensor]): A list of images. The images are - augmented from the same image but in different ways. - - Returns: - list[torch.Tensor]: Features of different images - """ - assert isinstance(imgs, list) - return [self.extract_feat(img) for img in imgs] - - def forward_train(self, imgs, img_metas, **kwargs): - """ - Args: - img (list[Tensor]): List of tensors of shape (1, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys, see - :class:`mmdet.datasets.pipelines.Collect`. - kwargs (keyword arguments): Specific to concrete implementation. - """ - # NOTE the batched image size information may be useful, e.g. - # in DETR, this is needed for the construction of masks, which is - # then used for the transformer_head. - batch_input_shape = tuple(imgs[0].size()[-2:]) - for img_meta in img_metas: - img_meta['batch_input_shape'] = batch_input_shape - - async def async_simple_test(self, img, img_metas, **kwargs): - raise NotImplementedError - - @abstractmethod - def simple_test(self, img, img_metas, **kwargs): - pass - - @abstractmethod - def aug_test(self, imgs, img_metas, **kwargs): - """Test function with test time augmentation.""" - pass - - def init_weights(self, pretrained=None): - """Initialize the weights in detector. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - if pretrained is not None: - logger = get_root_logger() - print_log(f'load model from: {pretrained}', logger=logger) - - async def aforward_test(self, *, img, img_metas, **kwargs): - for var, name in [(img, 'img'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got {type(var)}') - - num_augs = len(img) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(img)}) ' - f'!= num of image metas ({len(img_metas)})') - # TODO: remove the restriction of samples_per_gpu == 1 when prepared - samples_per_gpu = img[0].size(0) - assert samples_per_gpu == 1 - - if num_augs == 1: - return await self.async_simple_test(img[0], img_metas[0], **kwargs) - else: - raise NotImplementedError - - def forward_test(self, imgs, img_metas, **kwargs): - """ - Args: - imgs (List[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (List[List[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. - """ - for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got {type(var)}') - - num_augs = len(imgs) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(imgs)}) ' - f'!= num of image meta ({len(img_metas)})') - - # NOTE the batched image size information may be useful, e.g. 
- # in DETR, this is needed for the construction of masks, which is - # then used for the transformer_head. - for img, img_meta in zip(imgs, img_metas): - batch_size = len(img_meta) - for img_id in range(batch_size): - img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:]) - - if num_augs == 1: - # proposals (List[List[Tensor]]): the outer list indicates - # test-time augs (multiscale, flip, etc.) and the inner list - # indicates images in a batch. - # The Tensor should have a shape Px4, where P is the number of - # proposals. - if 'proposals' in kwargs: - kwargs['proposals'] = kwargs['proposals'][0] - return self.simple_test(imgs[0], img_metas[0], **kwargs) - else: - assert imgs[0].size(0) == 1, 'aug test does not support ' \ - 'inference with batch size ' \ - f'{imgs[0].size(0)}' - # TODO: support test augmentation for predefined proposals - assert 'proposals' not in kwargs - return self.aug_test(imgs, img_metas, **kwargs) - - @auto_fp16(apply_to=('img', )) - def forward(self, img, img_metas, return_loss=True, **kwargs): - """Calls either :func:`forward_train` or :func:`forward_test` depending - on whether ``return_loss`` is ``True``. - - Note this setting will change the expected inputs. When - ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor - and List[dict]), and when ``resturn_loss=False``, img and img_meta - should be double nested (i.e. List[Tensor], List[List[dict]]), with - the outer list indicating test time augmentations. - """ - if return_loss: - return self.forward_train(img, img_metas, **kwargs) - else: - return self.forward_test(img, img_metas, **kwargs) - - def _parse_losses(self, losses): - """Parse the raw outputs (losses) of the network. - - Args: - losses (dict): Raw output of the network, which usually contain - losses and other necessary infomation. - - Returns: - tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ - which may be a weighted sum of all losses, log_vars contains \ - all the variables to be sent to the logger. - """ - log_vars = OrderedDict() - for loss_name, loss_value in losses.items(): - if isinstance(loss_value, torch.Tensor): - log_vars[loss_name] = loss_value.mean() - elif isinstance(loss_value, list): - log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) - else: - raise TypeError( - f'{loss_name} is not a tensor or list of tensors') - - loss = sum(_value for _key, _value in log_vars.items() - if 'loss' in _key) - - log_vars['loss'] = loss - for loss_name, loss_value in log_vars.items(): - # reduce loss when distributed training - if dist.is_available() and dist.is_initialized(): - loss_value = loss_value.data.clone() - dist.all_reduce(loss_value.div_(dist.get_world_size())) - log_vars[loss_name] = loss_value.item() - - return loss, log_vars - - def train_step(self, data, optimizer): - """The iteration step during training. - - This method defines an iteration step during training, except for the - back propagation and optimizer updating, which are done in an optimizer - hook. Note that in some complicated cases or models, the whole process - including back propagation and optimizer updating is also defined in - this method, such as GAN. - - Args: - data (dict): The output of dataloader. - optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of - runner is passed to ``train_step()``. This argument is unused - and reserved. - - Returns: - dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \ - ``num_samples``. 
- - - ``loss`` is a tensor for back propagation, which can be a \ - weighted sum of multiple losses. - - ``log_vars`` contains all the variables to be sent to the - logger. - - ``num_samples`` indicates the batch size (when the model is \ - DDP, it means the batch size on each GPU), which is used for \ - averaging the logs. - """ - losses = self(**data) - loss, log_vars = self._parse_losses(losses) - - outputs = dict( - loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) - - return outputs - - def val_step(self, data, optimizer): - """The iteration step during validation. - - This method shares the same signature as :func:`train_step`, but used - during val epochs. Note that the evaluation after training epochs is - not implemented with this method, but an evaluation hook. - """ - losses = self(**data) - loss, log_vars = self._parse_losses(losses) - - outputs = dict( - loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) - - return outputs - - def show_result(self, - img, - result, - score_thr=0.3, - bbox_color=(72, 101, 241), - text_color=(72, 101, 241), - mask_color=None, - thickness=2, - font_size=13, - win_name='', - show=False, - wait_time=0, - out_file=None): - """Draw `result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (Tensor or tuple): The results to draw over `img` - bbox_result or (bbox_result, segm_result). - score_thr (float, optional): Minimum score of bboxes to be shown. - Default: 0.3. - bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: 'green' - text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: 'green' - mask_color (None or str or tuple(int) or :obj:`Color`): - Color of masks. The tuple of color should be in BGR order. - Default: None - thickness (int): Thickness of lines. Default: 2 - font_size (int): Font size of texts. Default: 13 - win_name (str): The window name. Default: '' - wait_time (float): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. - out_file (str or None): The filename to write the image. - Default: None. 
- - Returns: - img (Tensor): Only if not `show` or `out_file` - """ - img = mmcv.imread(img) - img = img.copy() - if isinstance(result, tuple): - bbox_result, segm_result = result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] # ms rcnn - else: - bbox_result, segm_result = result, None - bboxes = np.vstack(bbox_result) - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - # draw segmentation masks - segms = None - if segm_result is not None and len(labels) > 0: # non empty - segms = mmcv.concat_list(segm_result) - if isinstance(segms[0], torch.Tensor): - segms = torch.stack(segms, dim=0).detach().cpu().numpy() - else: - segms = np.stack(segms, axis=0) - # if out_file specified, do not show image in window - if out_file is not None: - show = False - # draw bounding boxes - img = imshow_det_bboxes( - img, - bboxes, - labels, - segms, - class_names=self.CLASSES, - score_thr=score_thr, - bbox_color=bbox_color, - text_color=text_color, - mask_color=mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - - # if not (show or out_file): - return img diff --git a/spaces/Andy1621/uniformer_image_detection/tools/deployment/onnx2tensorrt.py b/spaces/Andy1621/uniformer_image_detection/tools/deployment/onnx2tensorrt.py deleted file mode 100644 index 4b926bf4cf16e6ac15591e2da36d6d9a7070de51..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/tools/deployment/onnx2tensorrt.py +++ /dev/null @@ -1,179 +0,0 @@ -import argparse -import os -import os.path as osp - -import numpy as np -import onnx -import onnxruntime as ort -import torch -from mmcv.ops import get_onnxruntime_op_path -from mmcv.tensorrt import (TRTWraper, is_tensorrt_plugin_loaded, onnx2trt, - save_trt_engine) -from mmcv.visualization.image import imshow_det_bboxes - -from mmdet.core import get_classes, preprocess_example_input - - -def get_GiB(x: int): - """return x GiB.""" - return x * (1 << 30) - - -def onnx2tensorrt(onnx_file, - trt_file, - input_config, - verify=False, - show=False, - dataset='coco', - workspace_size=1): - onnx_model = onnx.load(onnx_file) - input_shape = input_config['input_shape'] - # create trt engine and wraper - opt_shape_dict = {'input': [input_shape, input_shape, input_shape]} - max_workspace_size = get_GiB(workspace_size) - trt_engine = onnx2trt( - onnx_model, - opt_shape_dict, - fp16_mode=False, - max_workspace_size=max_workspace_size) - save_dir, _ = osp.split(trt_file) - if save_dir: - os.makedirs(save_dir, exist_ok=True) - save_trt_engine(trt_engine, trt_file) - print(f'Successfully created TensorRT engine: {trt_file}') - - if verify: - one_img, one_meta = preprocess_example_input(input_config) - input_img_cpu = one_img.detach().cpu().numpy() - input_img_cuda = one_img.cuda() - - img = one_meta['show_img'] - - # Get results from TensorRT - trt_model = TRTWraper(trt_file, ['input'], ['boxes', 'labels']) - with torch.no_grad(): - trt_outputs = trt_model({'input': input_img_cuda}) - trt_boxes = trt_outputs['boxes'].detach().cpu().numpy() - trt_labels = trt_outputs['labels'].detach().cpu().numpy() - - # Get results from ONNXRuntime - ort_custom_op_path = get_onnxruntime_op_path() - session_options = ort.SessionOptions() - if osp.exists(ort_custom_op_path): - session_options.register_custom_ops_library(ort_custom_op_path) - sess = ort.InferenceSession(onnx_file, session_options) - onnx_outputs = 
sess.run(None, { - 'input': input_img_cpu, - }) - ort_boxes, ort_labels = onnx_outputs - - # Show detection outputs - if show: - CLASSES = get_classes(dataset) - score_thr = 0.35 - imshow_det_bboxes( - img.copy(), - trt_boxes, - trt_labels, - CLASSES, - score_thr=score_thr, - win_name='TensorRT') - imshow_det_bboxes( - img.copy(), - ort_boxes, - ort_labels, - CLASSES, - score_thr=score_thr, - win_name='ONNXRuntime') - # Compare results - np.testing.assert_allclose( - ort_boxes, trt_boxes, rtol=1e-03, atol=1e-05) - np.testing.assert_allclose(ort_labels, trt_labels) - print('The numerical values are the same ' + - 'between ONNXRuntime and TensorRT') - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert MMDetection models from ONNX to TensorRT') - parser.add_argument('model', help='Filename of input ONNX model') - parser.add_argument( - '--trt-file', - type=str, - default='tmp.trt', - help='Filename of output TensorRT engine') - parser.add_argument( - '--input-img', type=str, default='', help='Image for test') - parser.add_argument( - '--show', action='store_true', help='Whether to show output results') - parser.add_argument( - '--dataset', type=str, default='coco', help='Dataset name') - parser.add_argument( - '--verify', - action='store_true', - help='Verify the outputs of ONNXRuntime and TensorRT') - parser.add_argument( - '--to-rgb', - action='store_false', - help='Feed model with RGB or BGR image. Default is RGB.') - parser.add_argument( - '--shape', - type=int, - nargs='+', - default=[400, 600], - help='Input size of the model') - parser.add_argument( - '--mean', - type=float, - nargs='+', - default=[123.675, 116.28, 103.53], - help='Mean value used for preprocess input data') - parser.add_argument( - '--std', - type=float, - nargs='+', - default=[58.395, 57.12, 57.375], - help='Variance value used for preprocess input data') - parser.add_argument( - '--workspace-size', - type=int, - default=1, - help='Max workspace size in GiB') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - - assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.' 
- args = parse_args() - - if not args.input_img: - args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg') - - if len(args.shape) == 1: - input_shape = (1, 3, args.shape[0], args.shape[0]) - elif len(args.shape) == 2: - input_shape = (1, 3) + tuple(args.shape) - else: - raise ValueError('invalid input shape') - - assert len(args.mean) == 3 - assert len(args.std) == 3 - - normalize_cfg = {'mean': args.mean, 'std': args.std, 'to_rgb': args.to_rgb} - input_config = { - 'input_shape': input_shape, - 'input_path': args.input_img, - 'normalize_cfg': normalize_cfg - } - - # Create TensorRT engine - onnx2tensorrt( - args.model, - args.trt_file, - input_config, - verify=args.verify, - show=args.show, - dataset=args.dataset, - workspace_size=args.workspace_size) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/cityscapes_769x769.py b/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/cityscapes_769x769.py deleted file mode 100644 index 336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/cityscapes_769x769.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index b90b292b03a80aa37b8ca236746cf7cddc4ac27e..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' -model = dict( - pretrained='torchvision://resnet18', - backbone=dict(type='ResNet', depth=18), - decode_head=dict( - c1_in_channels=64, - c1_channels=12, - in_channels=512, - channels=128, - ), - auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/send_pictures/script.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/send_pictures/script.py deleted file mode 100644 index f8e6c969271e1dc3d383cefce74774ad505882cd..0000000000000000000000000000000000000000 --- 
a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/send_pictures/script.py +++ /dev/null @@ -1,58 +0,0 @@ -import base64 -from io import BytesIO - -import gradio as gr -import torch -from transformers import BlipForConditionalGeneration, BlipProcessor - -from modules import chat, shared, ui_chat -from modules.ui import gather_interface_values -from modules.utils import gradio - -input_hijack = { - 'state': False, - 'value': ["", ""] -} - -processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") -model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float32).to("cpu") - - -def chat_input_modifier(text, visible_text, state): - global input_hijack - if input_hijack['state']: - input_hijack['state'] = False - return input_hijack['value'] - else: - return text, visible_text - - -def caption_image(raw_image): - inputs = processor(raw_image.convert('RGB'), return_tensors="pt").to("cpu", torch.float32) - out = model.generate(**inputs, max_new_tokens=100) - return processor.decode(out[0], skip_special_tokens=True) - - -def generate_chat_picture(picture, name1, name2): - text = f'*{name1} sends {name2} a picture that contains the following: “{caption_image(picture)}”*' - # lower the resolution of sent images for the chat, otherwise the log size gets out of control quickly with all the base64 values in visible history - picture.thumbnail((300, 300)) - buffer = BytesIO() - picture.save(buffer, format="JPEG") - img_str = base64.b64encode(buffer.getvalue()).decode('utf-8') - visible_text = f'{text}' - return text, visible_text - - -def ui(): - picture_select = gr.Image(label='Send a picture', type='pil') - - # Prepare the input hijack, update the interface values, call the generation function, and clear the picture - picture_select.upload( - lambda picture, name1, name2: input_hijack.update({ - "state": True, - "value": generate_chat_picture(picture, name1, name2) - }), [picture_select, shared.gradio['name1'], shared.gradio['name2']], None).then( - gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then( - chat.generate_chat_reply_wrapper, gradio(ui_chat.inputs), gradio('display', 'history'), show_progress=False).then( - lambda: None, None, picture_select, show_progress=False) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py deleted file mode 100644 index 0f36a32ba3399efc216b9974254cd1f7eed07a9f..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py +++ /dev/null @@ -1,645 +0,0 @@ -""" - pygments.formatters.img - ~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter for Pixmap output. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import os -import sys - -from pip._vendor.pygments.formatter import Formatter -from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ - get_choice_opt - -import subprocess - -# Import this carefully -try: - from PIL import Image, ImageDraw, ImageFont - pil_available = True -except ImportError: - pil_available = False - -try: - import _winreg -except ImportError: - try: - import winreg as _winreg - except ImportError: - _winreg = None - -__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter', - 'BmpImageFormatter'] - - -# For some unknown reason every font calls it something different -STYLES = { - 'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'], - 'ITALIC': ['Oblique', 'Italic'], - 'BOLD': ['Bold'], - 'BOLDITALIC': ['Bold Oblique', 'Bold Italic'], -} - -# A sane default for modern systems -DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono' -DEFAULT_FONT_NAME_WIN = 'Courier New' -DEFAULT_FONT_NAME_MAC = 'Menlo' - - -class PilNotAvailable(ImportError): - """When Python imaging library is not available""" - - -class FontNotFound(Exception): - """When there are no usable fonts specified""" - - -class FontManager: - """ - Manages a set of fonts: normal, italic, bold, etc... - """ - - def __init__(self, font_name, font_size=14): - self.font_name = font_name - self.font_size = font_size - self.fonts = {} - self.encoding = None - if sys.platform.startswith('win'): - if not font_name: - self.font_name = DEFAULT_FONT_NAME_WIN - self._create_win() - elif sys.platform.startswith('darwin'): - if not font_name: - self.font_name = DEFAULT_FONT_NAME_MAC - self._create_mac() - else: - if not font_name: - self.font_name = DEFAULT_FONT_NAME_NIX - self._create_nix() - - def _get_nix_font_path(self, name, style): - proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'], - stdout=subprocess.PIPE, stderr=None) - stdout, _ = proc.communicate() - if proc.returncode == 0: - lines = stdout.splitlines() - for line in lines: - if line.startswith(b'Fontconfig warning:'): - continue - path = line.decode().strip().strip(':') - if path: - return path - return None - - def _create_nix(self): - for name in STYLES['NORMAL']: - path = self._get_nix_font_path(self.font_name, name) - if path is not None: - self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) - break - else: - raise FontNotFound('No usable fonts named: "%s"' % - self.font_name) - for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): - for stylename in STYLES[style]: - path = self._get_nix_font_path(self.font_name, stylename) - if path is not None: - self.fonts[style] = ImageFont.truetype(path, self.font_size) - break - else: - if style == 'BOLDITALIC': - self.fonts[style] = self.fonts['BOLD'] - else: - self.fonts[style] = self.fonts['NORMAL'] - - def _get_mac_font_path(self, font_map, name, style): - return font_map.get((name + ' ' + style).strip().lower()) - - def _create_mac(self): - font_map = {} - for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'), - '/Library/Fonts/', '/System/Library/Fonts/'): - font_map.update( - (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f)) - for f in os.listdir(font_dir) - if f.lower().endswith(('ttf', 'ttc'))) - - for name in STYLES['NORMAL']: - path = self._get_mac_font_path(font_map, self.font_name, name) - if path is not None: - self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) - break - else: - raise FontNotFound('No usable fonts named: "%s"' % - self.font_name) - for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): 
- for stylename in STYLES[style]: - path = self._get_mac_font_path(font_map, self.font_name, stylename) - if path is not None: - self.fonts[style] = ImageFont.truetype(path, self.font_size) - break - else: - if style == 'BOLDITALIC': - self.fonts[style] = self.fonts['BOLD'] - else: - self.fonts[style] = self.fonts['NORMAL'] - - def _lookup_win(self, key, basename, styles, fail=False): - for suffix in ('', ' (TrueType)'): - for style in styles: - try: - valname = '%s%s%s' % (basename, style and ' '+style, suffix) - val, _ = _winreg.QueryValueEx(key, valname) - return val - except OSError: - continue - else: - if fail: - raise FontNotFound('Font %s (%s) not found in registry' % - (basename, styles[0])) - return None - - def _create_win(self): - lookuperror = None - keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'), - (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'), - (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'), - (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ] - for keyname in keynames: - try: - key = _winreg.OpenKey(*keyname) - try: - path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True) - self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) - for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): - path = self._lookup_win(key, self.font_name, STYLES[style]) - if path: - self.fonts[style] = ImageFont.truetype(path, self.font_size) - else: - if style == 'BOLDITALIC': - self.fonts[style] = self.fonts['BOLD'] - else: - self.fonts[style] = self.fonts['NORMAL'] - return - except FontNotFound as err: - lookuperror = err - finally: - _winreg.CloseKey(key) - except OSError: - pass - else: - # If we get here, we checked all registry keys and had no luck - # We can be in one of two situations now: - # * All key lookups failed. In this case lookuperror is None and we - # will raise a generic error - # * At least one lookup failed with a FontNotFound error. In this - # case, we will raise that as a more specific error - if lookuperror: - raise lookuperror - raise FontNotFound('Can\'t open Windows font registry key') - - def get_char_size(self): - """ - Get the character size. - """ - return self.get_text_size('M') - - def get_text_size(self, text): - """ - Get the text size (width, height). - """ - font = self.fonts['NORMAL'] - if hasattr(font, 'getbbox'): # Pillow >= 9.2.0 - return font.getbbox(text)[2:4] - else: - return font.getsize(text) - - def get_font(self, bold, oblique): - """ - Get the font based on bold and italic flags. - """ - if bold and oblique: - return self.fonts['BOLDITALIC'] - elif bold: - return self.fonts['BOLD'] - elif oblique: - return self.fonts['ITALIC'] - else: - return self.fonts['NORMAL'] - - -class ImageFormatter(Formatter): - """ - Create a PNG image from source code. This uses the Python Imaging Library to - generate a pixmap from the source code. - - .. versionadded:: 0.10 - - Additional options accepted: - - `image_format` - An image format to output to that is recognised by PIL, these include: - - * "PNG" (default) - * "JPEG" - * "BMP" - * "GIF" - - `line_pad` - The extra spacing (in pixels) between each line of text. - - Default: 2 - - `font_name` - The font name to be used as the base font from which others, such as - bold and italic fonts will be generated. This really should be a - monospace font to look sane. 
- - Default: "Courier New" on Windows, "Menlo" on Mac OS, and - "DejaVu Sans Mono" on \\*nix - - `font_size` - The font size in points to be used. - - Default: 14 - - `image_pad` - The padding, in pixels to be used at each edge of the resulting image. - - Default: 10 - - `line_numbers` - Whether line numbers should be shown: True/False - - Default: True - - `line_number_start` - The line number of the first line. - - Default: 1 - - `line_number_step` - The step used when printing line numbers. - - Default: 1 - - `line_number_bg` - The background colour (in "#123456" format) of the line number bar, or - None to use the style background color. - - Default: "#eed" - - `line_number_fg` - The text color of the line numbers (in "#123456"-like format). - - Default: "#886" - - `line_number_chars` - The number of columns of line numbers allowable in the line number - margin. - - Default: 2 - - `line_number_bold` - Whether line numbers will be bold: True/False - - Default: False - - `line_number_italic` - Whether line numbers will be italicized: True/False - - Default: False - - `line_number_separator` - Whether a line will be drawn between the line number area and the - source code area: True/False - - Default: True - - `line_number_pad` - The horizontal padding (in pixels) between the line number margin, and - the source code area. - - Default: 6 - - `hl_lines` - Specify a list of lines to be highlighted. - - .. versionadded:: 1.2 - - Default: empty list - - `hl_color` - Specify the color for highlighting lines. - - .. versionadded:: 1.2 - - Default: highlight color of the selected style - """ - - # Required by the pygments mapper - name = 'img' - aliases = ['img', 'IMG', 'png'] - filenames = ['*.png'] - - unicodeoutput = False - - default_image_format = 'png' - - def __init__(self, **options): - """ - See the class docstring for explanation of options. 
- """ - if not pil_available: - raise PilNotAvailable( - 'Python Imaging Library is required for this formatter') - Formatter.__init__(self, **options) - self.encoding = 'latin1' # let pygments.format() do the right thing - # Read the style - self.styles = dict(self.style) - if self.style.background_color is None: - self.background_color = '#fff' - else: - self.background_color = self.style.background_color - # Image options - self.image_format = get_choice_opt( - options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'], - self.default_image_format, normcase=True) - self.image_pad = get_int_opt(options, 'image_pad', 10) - self.line_pad = get_int_opt(options, 'line_pad', 2) - # The fonts - fontsize = get_int_opt(options, 'font_size', 14) - self.fonts = FontManager(options.get('font_name', ''), fontsize) - self.fontw, self.fonth = self.fonts.get_char_size() - # Line number options - self.line_number_fg = options.get('line_number_fg', '#886') - self.line_number_bg = options.get('line_number_bg', '#eed') - self.line_number_chars = get_int_opt(options, - 'line_number_chars', 2) - self.line_number_bold = get_bool_opt(options, - 'line_number_bold', False) - self.line_number_italic = get_bool_opt(options, - 'line_number_italic', False) - self.line_number_pad = get_int_opt(options, 'line_number_pad', 6) - self.line_numbers = get_bool_opt(options, 'line_numbers', True) - self.line_number_separator = get_bool_opt(options, - 'line_number_separator', True) - self.line_number_step = get_int_opt(options, 'line_number_step', 1) - self.line_number_start = get_int_opt(options, 'line_number_start', 1) - if self.line_numbers: - self.line_number_width = (self.fontw * self.line_number_chars + - self.line_number_pad * 2) - else: - self.line_number_width = 0 - self.hl_lines = [] - hl_lines_str = get_list_opt(options, 'hl_lines', []) - for line in hl_lines_str: - try: - self.hl_lines.append(int(line)) - except ValueError: - pass - self.hl_color = options.get('hl_color', - self.style.highlight_color) or '#f90' - self.drawables = [] - - def get_style_defs(self, arg=''): - raise NotImplementedError('The -S option is meaningless for the image ' - 'formatter. Use -O style= instead.') - - def _get_line_height(self): - """ - Get the height of a line. - """ - return self.fonth + self.line_pad - - def _get_line_y(self, lineno): - """ - Get the Y coordinate of a line number. - """ - return lineno * self._get_line_height() + self.image_pad - - def _get_char_width(self): - """ - Get the width of a character. - """ - return self.fontw - - def _get_char_x(self, linelength): - """ - Get the X coordinate of a character position. - """ - return linelength + self.image_pad + self.line_number_width - - def _get_text_pos(self, linelength, lineno): - """ - Get the actual position for a character and line position. - """ - return self._get_char_x(linelength), self._get_line_y(lineno) - - def _get_linenumber_pos(self, lineno): - """ - Get the actual position for the start of a line number. - """ - return (self.image_pad, self._get_line_y(lineno)) - - def _get_text_color(self, style): - """ - Get the correct color for the token from the style. - """ - if style['color'] is not None: - fill = '#' + style['color'] - else: - fill = '#000' - return fill - - def _get_text_bg_color(self, style): - """ - Get the correct background color for the token from the style. 
- """ - if style['bgcolor'] is not None: - bg_color = '#' + style['bgcolor'] - else: - bg_color = None - return bg_color - - def _get_style_font(self, style): - """ - Get the correct font for the style. - """ - return self.fonts.get_font(style['bold'], style['italic']) - - def _get_image_size(self, maxlinelength, maxlineno): - """ - Get the required image size. - """ - return (self._get_char_x(maxlinelength) + self.image_pad, - self._get_line_y(maxlineno + 0) + self.image_pad) - - def _draw_linenumber(self, posno, lineno): - """ - Remember a line number drawable to paint later. - """ - self._draw_text( - self._get_linenumber_pos(posno), - str(lineno).rjust(self.line_number_chars), - font=self.fonts.get_font(self.line_number_bold, - self.line_number_italic), - text_fg=self.line_number_fg, - text_bg=None, - ) - - def _draw_text(self, pos, text, font, text_fg, text_bg): - """ - Remember a single drawable tuple to paint later. - """ - self.drawables.append((pos, text, font, text_fg, text_bg)) - - def _create_drawables(self, tokensource): - """ - Create drawables for the token content. - """ - lineno = charno = maxcharno = 0 - maxlinelength = linelength = 0 - for ttype, value in tokensource: - while ttype not in self.styles: - ttype = ttype.parent - style = self.styles[ttype] - # TODO: make sure tab expansion happens earlier in the chain. It - # really ought to be done on the input, as to do it right here is - # quite complex. - value = value.expandtabs(4) - lines = value.splitlines(True) - # print lines - for i, line in enumerate(lines): - temp = line.rstrip('\n') - if temp: - self._draw_text( - self._get_text_pos(linelength, lineno), - temp, - font = self._get_style_font(style), - text_fg = self._get_text_color(style), - text_bg = self._get_text_bg_color(style), - ) - temp_width, _ = self.fonts.get_text_size(temp) - linelength += temp_width - maxlinelength = max(maxlinelength, linelength) - charno += len(temp) - maxcharno = max(maxcharno, charno) - if line.endswith('\n'): - # add a line for each extra line in the value - linelength = 0 - charno = 0 - lineno += 1 - self.maxlinelength = maxlinelength - self.maxcharno = maxcharno - self.maxlineno = lineno - - def _draw_line_numbers(self): - """ - Create drawables for the line numbers. - """ - if not self.line_numbers: - return - for p in range(self.maxlineno): - n = p + self.line_number_start - if (n % self.line_number_step) == 0: - self._draw_linenumber(p, n) - - def _paint_line_number_bg(self, im): - """ - Paint the line number background on the image. - """ - if not self.line_numbers: - return - if self.line_number_fg is None: - return - draw = ImageDraw.Draw(im) - recth = im.size[-1] - rectw = self.image_pad + self.line_number_width - self.line_number_pad - draw.rectangle([(0, 0), (rectw, recth)], - fill=self.line_number_bg) - if self.line_number_separator: - draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg) - del draw - - def format(self, tokensource, outfile): - """ - Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` - tuples and write it into ``outfile``. - - This implementation calculates where it should draw each token on the - pixmap, then calculates the required pixmap size and draws the items. 
- """ - self._create_drawables(tokensource) - self._draw_line_numbers() - im = Image.new( - 'RGB', - self._get_image_size(self.maxlinelength, self.maxlineno), - self.background_color - ) - self._paint_line_number_bg(im) - draw = ImageDraw.Draw(im) - # Highlight - if self.hl_lines: - x = self.image_pad + self.line_number_width - self.line_number_pad + 1 - recth = self._get_line_height() - rectw = im.size[0] - x - for linenumber in self.hl_lines: - y = self._get_line_y(linenumber - 1) - draw.rectangle([(x, y), (x + rectw, y + recth)], - fill=self.hl_color) - for pos, value, font, text_fg, text_bg in self.drawables: - if text_bg: - text_size = draw.textsize(text=value, font=font) - draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg) - draw.text(pos, value, font=font, fill=text_fg) - im.save(outfile, self.image_format.upper()) - - -# Add one formatter per format, so that the "-f gif" option gives the correct result -# when used in pygmentize. - -class GifImageFormatter(ImageFormatter): - """ - Create a GIF image from source code. This uses the Python Imaging Library to - generate a pixmap from the source code. - - .. versionadded:: 1.0 - """ - - name = 'img_gif' - aliases = ['gif'] - filenames = ['*.gif'] - default_image_format = 'gif' - - -class JpgImageFormatter(ImageFormatter): - """ - Create a JPEG image from source code. This uses the Python Imaging Library to - generate a pixmap from the source code. - - .. versionadded:: 1.0 - """ - - name = 'img_jpg' - aliases = ['jpg', 'jpeg'] - filenames = ['*.jpg'] - default_image_format = 'jpeg' - - -class BmpImageFormatter(ImageFormatter): - """ - Create a bitmap image from source code. This uses the Python Imaging Library to - generate a pixmap from the source code. - - .. versionadded:: 1.0 - """ - - name = 'img_bmp' - aliases = ['bmp', 'bitmap'] - filenames = ['*.bmp'] - default_image_format = 'bmp' diff --git a/spaces/AutoGeneralAI/voice-assistant/README_zh.md b/spaces/AutoGeneralAI/voice-assistant/README_zh.md deleted file mode 100644 index bc97095f61c94167d7a6454373065164a9009028..0000000000000000000000000000000000000000 --- a/spaces/AutoGeneralAI/voice-assistant/README_zh.md +++ /dev/null @@ -1,14 +0,0 @@ -# voice-assistant - -实现的功能:语音助手。通过调用OpenAI官方API实现。 - -在线demo: https://huggingface.co/spaces/AutoGeneralAI/voice-assistant - -## 使用方法 -将自己的OpenAI API KEY https://platform.openai.com/ 放入key输入框,然后就可以愉快的语音对话了。 - -> 初次使用,浏览器,比如Chrome会询问是否允许打开麦克风,选择允许。 - -> 可以保存对话记录。目前只实现了对话。人类的角色可以语音,AI角色还是以文字输出。文字朗读出来的功能留待下次实现或者欢迎提pr。 - -![image](https://user-images.githubusercontent.com/105260427/234028574-eeb218f9-363a-4fee-88bf-8170b677e2e1.png) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 5e8aaa2d3722e7e73a3d94b2b7dfc4f751d7a240..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ - -Please select an issue template from -https://github.com/facebookresearch/detectron2/issues/new/choose . - -Otherwise your issue will be closed. 
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py deleted file mode 100644 index 290f0f07204e78ef2c4ff918aa500b04330279e6..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch -import json -import numpy as np -from torch.nn import functional as F - -def load_class_freq( - path='datasets/lvis/lvis_v1_train_cat_info.json', - freq_weight=0.5): - cat_info = json.load(open(path, 'r')) - cat_info = torch.tensor( - [c['image_count'] for c in sorted(cat_info, key=lambda x: x['id'])]) - freq_weight = cat_info.float() ** freq_weight - return freq_weight - -def get_fed_loss_inds( - gt_classes, num_sample_cats=50, C=1203, \ - weight=None, fed_cls_inds=-1): - appeared = torch.unique(gt_classes) # C' - prob = appeared.new_ones(C + 1).float() - prob[-1] = 0 - if len(appeared) < num_sample_cats: - if weight is not None: - prob[:C] = weight.float().clone() - prob[appeared] = 0 - if fed_cls_inds > 0: - prob[fed_cls_inds:] = 0 - more_appeared = torch.multinomial( - prob, num_sample_cats - len(appeared), - replacement=False) - appeared = torch.cat([appeared, more_appeared]) - return appeared \ No newline at end of file diff --git a/spaces/B2gan/LLM_Can_See/ai_functions.py b/spaces/B2gan/LLM_Can_See/ai_functions.py deleted file mode 100644 index 18013870794a82586799d7deadb395d75a4e4c3c..0000000000000000000000000000000000000000 --- a/spaces/B2gan/LLM_Can_See/ai_functions.py +++ /dev/null @@ -1,45 +0,0 @@ -import openai - -def is_string(input): - return isinstance(input, str) - -def ai_function(Question, Scene, model = "gpt-4"): - system_message = { - "role": "system", - "content": ( - "You are an AI that assists the blind" - ) - } - user_message = { - "role": "user", - "content": ( - "Model, I need your assistance. I am developing a tool that can help blind people " - "navigate their surroundings safely by offering detailed and relevant descriptions. " - "These descriptions will be ranked using the CLIP model. " - "The initial scene description from the blind person's perspective is: " + Scene + - ". " - "And the blind person asked: " + Question + - ". " - "I need you to generate five sets of short, specific and actionable phrases or sentences that address the blind person's question first, " - "and also accurately reflect the Scene. " - "These descriptions must prioritize safety and accessibility, offering information about potential obstacles or hazards. " - "They should serve as a practical guide for the blind, so they should be as detailed and vivid as possible. " - "All descriptions should adhere to the context provided by the initial Scene and the blind person's question. " - "Please provide these five sets of descriptions directly, all in English, " - "and without any redundant information. Thank you for your assistance." 
- ) - } - - if is_string(model) == False: - return model(user_message["content"]) - - messages = [system_message, user_message] - - response = openai.ChatCompletion.create( - model=model, - messages=messages, - temperature=0.2, - max_tokens=200, - ) - - return response.choices[0].message["content"] \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Canal De Youtube Apk.md b/spaces/Benson/text-generation/Examples/Descargar Canal De Youtube Apk.md deleted file mode 100644 index 1f8ddfb75f9e5d4f37d1dd1c006a63f39c2c50a2..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Canal De Youtube Apk.md +++ /dev/null @@ -1,72 +0,0 @@ -
    How to Download YouTube Channel APK for Android

    YouTube is one of the most popular video-sharing platforms in the world, with billions of users watching and creating content every day. However, if you want to enjoy more features and functions on YouTube, you may want to download YouTube Channel APK for your Android device.

    download youtube channel apk

    Download Zip: https://bltlly.com/2v6Ktf
    



    What is YouTube Channel APK?

    YouTube Channel APK is a modified version of the official YouTube app that gives you access to more options and settings in your YouTube account. With YouTube Channel APK, you can:

    • Download videos and playlists for offline viewing
    • Play videos in the background or in picture-in-picture mode
    • Change the playback speed and resolution of videos
    • Enable dark mode and zoom mode
    • Block ads and skip intros
    • Subscribe to channels without signing in
    • And much more!

    YouTube Channel APK is not available on the Google Play Store, so you have to download it from a third-party source. Before you do that, though, you should know why you would want YouTube Channel APK in the first place.
    

    Why download YouTube Channel APK?

    There are many reasons why you might want to download YouTube Channel APK for your Android device. Here are some of them:

    • Save mobile data and storage space by downloading videos and playlists for offline viewing. You can choose the quality and format of the videos you want to download and watch them anytime, anywhere, without buffering or interruptions.
    • Enjoy multitasking and convenience by playing videos in the background or in picture-in-picture mode. You can listen to music, podcasts, or audiobooks while using other apps or browsing the web, and you can resize and move the video window around the screen as you like.
    • Protect your eyes and your battery by using dark mode and zoom mode. Dark mode lowers the brightness and contrast of the screen, making it easier to watch videos in low-light environments. Zoom mode lets the video fill the entire screen, removing the black bars and maximizing immersion.
    • Avoid annoying ads and intros by blocking them with YouTube Channel APK. You can enjoy uninterrupted, ad-free videos without having to wait or click on anything, and you can skip the long, boring intros some channels use and get straight to the content you want to watch.
    • Explore more content and channels without signing in by subscribing to them with YouTube Channel APK. You can follow any channel you like without creating or signing in to a YouTube account, and you can access videos that are restricted or unavailable in your region.

    As you can see, YouTube Channel APK offers many advantages over the official YouTube app. Before you download it, however, you need to know how to do so safely and easily.
    

    How to download YouTube Channel APK safely and easily?

    Downloading YouTube Channel APK is not difficult, but you have to be careful about where you get it. Many websites claim to offer YouTube Channel APK, but some of them may contain malware or viruses that can damage your device or steal your data. You therefore need to download YouTube Channel APK from a reliable, trustworthy source.

    One of the best-known sources for YouTube Channel APK is [APKPure], a website that provides original, unmodified APK files for a wide range of apps and games. APKPure has a solid reputation and a large user base, so you can be confident that the files you download from there are safe. Here are the steps to download YouTube Channel APK from APKPure:

    1. Go to [APKPure] in your browser and search for "YouTube Channel APK" in the search bar.
    2. Wait for the download to finish and locate the file on your device.

    Congratulations! You have successfully downloaded YouTube Channel APK from APKPure. Before you can use it, however, you need to install it on your device.
    

    How to install YouTube Channel APK on your Android device?

    Installing YouTube Channel APK on your Android device is not difficult, but you need to change a few settings first. Since YouTube Channel APK does not come from the Google Play Store, you have to enable unknown sources and grant it the permissions it asks for. Here are the steps to install YouTube Channel APK on your Android device:

    1. Go to your device settings and tap "Security".
    2. Scroll down, find the "Unknown sources" option, toggle it on, and confirm your choice.
    3. Go back to your file manager and tap the YouTube Channel APK file you downloaded.
    4. Tap "Install" and wait for the installation to complete.
    
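    The steps above describe installation directly on the phone. As an optional aside that is not part of the original article, readers who prefer to sideload from a computer could do the same thing through adb; the minimal Python sketch below assumes adb (Android platform-tools) is on the PATH, USB debugging is enabled on the device, and the APK was saved under the hypothetical name youtube-channel.apk.

    ```python
    import shutil
    import subprocess

    # Hypothetical file name -- adjust to wherever the browser saved the APK.
    APK_PATH = "youtube-channel.apk"

    # Sideloading needs the adb tool (Android platform-tools) on the PATH
    # and a device connected with USB debugging enabled.
    if shutil.which("adb") is None:
        raise SystemExit("adb not found; install Android platform-tools first.")

    # 'adb install -r' installs the APK and replaces an existing installation if present.
    subprocess.run(["adb", "install", "-r", APK_PATH], check=True)
    ```

    Either route ends with the same app installed; the on-device steps above remain the simpler option for most readers.
    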
    Congratulations! You have successfully installed YouTube Channel APK on your Android device. Before you can use it, however, you need to know how to use it.
    

    How to use YouTube Channel APK on your Android device?

    Using YouTube Channel APK on your Android device is not difficult, but you need to familiarize yourself with its features and functions. Here are some of the things you can do with YouTube Channel APK on your Android device:

    • To download videos and playlists for offline viewing, tap the download icon below the video or playlist, choose the quality and format you want, and tap "OK". You can find your downloaded videos and playlists in the "Downloads" section of the app.
    • To change the playback speed and resolution of a video, tap the three-dot icon in the top-right corner of the video, tap "Playback speed" or "Quality", and choose the option you want. You can also change these settings from the app's settings menu.
    • To enable dark mode and zoom mode, tap the profile icon in the top-right corner of the app, tap "Settings" and then "General", and turn on "Dark theme" or "Zoom to fill screen" as you like. You can also change these settings from the app's settings menu.
    • To block ads and skip intros, you don't need to do anything: YouTube Channel APK blocks ads and skips intros automatically, so you can enjoy videos without ads or interruptions.
    • To subscribe to channels without signing in, tap the subscribe button below any channel you like. You can find your subscribed channels in the "Subscriptions" section of the app, and you can unsubscribe from any channel by tapping the same button again.

    As you can see, YouTube Channel APK offers many features and functions that can enhance your YouTube experience. Before we wrap up this article, though, you should read the conclusion and the FAQs.
    

    Conclusion

    In this article, you learned how to download YouTube Channel APK for your Android device: what YouTube Channel APK is, why you might want it, how to download it safely and easily, how to install it on your device, and how to use it. You also learned about some of its features and benefits, such as downloading videos and playlists for offline viewing, playing videos in the background or in picture-in-picture mode, changing the playback speed and resolution of videos, enabling dark mode and zoom mode, blocking ads and skipping intros, and subscribing to channels without signing in.

    We hope you found this article helpful and informative. If you have any questions or comments about YouTube Channel APK, feel free to leave a comment below. We would love to hear from you. Thank you for reading!
    

    Frequently asked questions

    1. What is the difference between YouTube Channel APK and YouTube Vanced?

    YouTube Channel APK and YouTube Vanced are both modified versions of the official YouTube app that offer extra features and functions, but they differ in design, performance, and compatibility. YouTube Channel APK has a simpler, cleaner interface, while YouTube Vanced has a more colorful, customizable one. YouTube Channel APK runs faster and more smoothly, while YouTube Vanced is more stable and reliable. YouTube Channel APK is compatible with most Android devices, while YouTube Vanced requires a specific version of MicroG to work properly.

    2. Is YouTube Channel APK legal and safe?

    YouTube Channel APK is not illegal, but it is not authorized by Google or YouTube either. It is a third-party app that modifies the original YouTube app without their permission, so it may violate some of their terms and policies. It is safe to use as long as you download it from a trusted source such as APKPure; it does not contain malware or viruses that could harm your device or steal your data. Even so, you should always be careful when downloading any APK file from an unknown source, as it can pose risks to your device or data.

    3. How do I update YouTube Channel APK?

    4. How do I uninstall YouTube Channel APK?

    Uninstalling YouTube Channel APK is quick and simple. Go to your device settings and tap "Apps", find YouTube Channel APK in the list of apps and tap it, then tap "Uninstall" and confirm your choice. You can also uninstall YouTube Channel APK by long-pressing its icon on the home screen and dragging it to the trash.

    5. Can I use YouTube Channel APK with my Google account?

    No, you cannot use YouTube Channel APK with your Google account. YouTube Channel APK does not support Google sign-in or sync features, so you cannot access your personal data or preferences through your Google account. You can only use its own subscription feature, which does not require any sign-in or account creation.

    64aa2da5cf
    
    
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/subversion.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/subversion.py deleted file mode 100644 index 16d93a67b7b6feed66f2cc432f6250ca3ad34914..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/subversion.py +++ /dev/null @@ -1,324 +0,0 @@ -import logging -import os -import re -from typing import List, Optional, Tuple - -from pip._internal.utils.misc import ( - HiddenText, - display_path, - is_console_interactive, - is_installable_dir, - split_auth_from_netloc, -) -from pip._internal.utils.subprocess import CommandArgs, make_command -from pip._internal.vcs.versioncontrol import ( - AuthInfo, - RemoteNotFoundError, - RevOptions, - VersionControl, - vcs, -) - -logger = logging.getLogger(__name__) - -_svn_xml_url_re = re.compile('url="([^"]+)"') -_svn_rev_re = re.compile(r'committed-rev="(\d+)"') -_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r"(.*)") - - -class Subversion(VersionControl): - name = "svn" - dirname = ".svn" - repo_name = "checkout" - schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file") - - @classmethod - def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: - return True - - @staticmethod - def get_base_rev_args(rev: str) -> List[str]: - return ["-r", rev] - - @classmethod - def get_revision(cls, location: str) -> str: - """ - Return the maximum revision for all files under a given location - """ - # Note: taken from setuptools.command.egg_info - revision = 0 - - for base, dirs, _ in os.walk(location): - if cls.dirname not in dirs: - dirs[:] = [] - continue # no sense walking uncontrolled subdirs - dirs.remove(cls.dirname) - entries_fn = os.path.join(base, cls.dirname, "entries") - if not os.path.exists(entries_fn): - # FIXME: should we warn? - continue - - dirurl, localrev = cls._get_svn_url_rev(base) - - if base == location: - assert dirurl is not None - base = dirurl + "/" # save the root url - elif not dirurl or not dirurl.startswith(base): - dirs[:] = [] - continue # not part of the same svn tree, skip it - revision = max(revision, localrev) - return str(revision) - - @classmethod - def get_netloc_and_auth( - cls, netloc: str, scheme: str - ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: - """ - This override allows the auth information to be passed to svn via the - --username and --password options instead of via the URL. - """ - if scheme == "ssh": - # The --username and --password options can't be used for - # svn+ssh URLs, so keep the auth information in the URL. 
- return super().get_netloc_and_auth(netloc, scheme) - - return split_auth_from_netloc(netloc) - - @classmethod - def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: - # hotfix the URL scheme after removing svn+ from svn+ssh:// re-add it - url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith("ssh://"): - url = "svn+" + url - return url, rev, user_pass - - @staticmethod - def make_rev_args( - username: Optional[str], password: Optional[HiddenText] - ) -> CommandArgs: - extra_args: CommandArgs = [] - if username: - extra_args += ["--username", username] - if password: - extra_args += ["--password", password] - - return extra_args - - @classmethod - def get_remote_url(cls, location: str) -> str: - # In cases where the source is in a subdirectory, we have to look up in - # the location until we find a valid project root. - orig_location = location - while not is_installable_dir(location): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding a Python project. - logger.warning( - "Could not find Python project for directory %s (tried all " - "parent directories)", - orig_location, - ) - raise RemoteNotFoundError - - url, _rev = cls._get_svn_url_rev(location) - if url is None: - raise RemoteNotFoundError - - return url - - @classmethod - def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]: - from pip._internal.exceptions import InstallationError - - entries_path = os.path.join(location, cls.dirname, "entries") - if os.path.exists(entries_path): - with open(entries_path) as f: - data = f.read() - else: # subversion >= 1.7 does not have the 'entries' file - data = "" - - url = None - if data.startswith("8") or data.startswith("9") or data.startswith("10"): - entries = list(map(str.splitlines, data.split("\n\x0c\n"))) - del entries[0][0] # get rid of the '8' - url = entries[0][3] - revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0] - elif data.startswith("= 1.7 - # Note that using get_remote_call_options is not necessary here - # because `svn info` is being run against a local directory. - # We don't need to worry about making sure interactive mode - # is being used to prompt for passwords, because passwords - # are only potentially needed for remote server requests. - xml = cls.run_command( - ["info", "--xml", location], - show_stdout=False, - stdout_only=True, - ) - match = _svn_info_xml_url_re.search(xml) - assert match is not None - url = match.group(1) - revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)] - except InstallationError: - url, revs = None, [] - - if revs: - rev = max(revs) - else: - rev = 0 - - return url, rev - - @classmethod - def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: - """Always assume the versions don't match""" - return False - - def __init__(self, use_interactive: Optional[bool] = None) -> None: - if use_interactive is None: - use_interactive = is_console_interactive() - self.use_interactive = use_interactive - - # This member is used to cache the fetched version of the current - # ``svn`` client. - # Special value definitions: - # None: Not evaluated yet. - # Empty tuple: Could not parse version. - self._vcs_version: Optional[Tuple[int, ...]] = None - - super().__init__() - - def call_vcs_version(self) -> Tuple[int, ...]: - """Query the version of the currently installed Subversion client. 
- - :return: A tuple containing the parts of the version information or - ``()`` if the version returned from ``svn`` could not be parsed. - :raises: BadCommand: If ``svn`` is not installed. - """ - # Example versions: - # svn, version 1.10.3 (r1842928) - # compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0 - # svn, version 1.7.14 (r1542130) - # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu - # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0) - # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2 - version_prefix = "svn, version " - version = self.run_command(["--version"], show_stdout=False, stdout_only=True) - if not version.startswith(version_prefix): - return () - - version = version[len(version_prefix) :].split()[0] - version_list = version.partition("-")[0].split(".") - try: - parsed_version = tuple(map(int, version_list)) - except ValueError: - return () - - return parsed_version - - def get_vcs_version(self) -> Tuple[int, ...]: - """Return the version of the currently installed Subversion client. - - If the version of the Subversion client has already been queried, - a cached value will be used. - - :return: A tuple containing the parts of the version information or - ``()`` if the version returned from ``svn`` could not be parsed. - :raises: BadCommand: If ``svn`` is not installed. - """ - if self._vcs_version is not None: - # Use cached version, if available. - # If parsing the version failed previously (empty tuple), - # do not attempt to parse it again. - return self._vcs_version - - vcs_version = self.call_vcs_version() - self._vcs_version = vcs_version - return vcs_version - - def get_remote_call_options(self) -> CommandArgs: - """Return options to be used on calls to Subversion that contact the server. - - These options are applicable for the following ``svn`` subcommands used - in this class. - - - checkout - - switch - - update - - :return: A list of command line arguments to pass to ``svn``. - """ - if not self.use_interactive: - # --non-interactive switch is available since Subversion 0.14.4. - # Subversion < 1.8 runs in interactive mode by default. - return ["--non-interactive"] - - svn_version = self.get_vcs_version() - # By default, Subversion >= 1.8 runs in non-interactive mode if - # stdin is not a TTY. Since that is how pip invokes SVN, in - # call_subprocess(), pip must pass --force-interactive to ensure - # the user can be prompted for a password, if required. - # SVN added the --force-interactive option in SVN 1.8. Since - # e.g. RHEL/CentOS 7, which is supported until 2024, ships with - # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip - # can't safely add the option if the SVN version is < 1.8 (or unknown). 
- if svn_version >= (1, 8): - return ["--force-interactive"] - - return [] - - def fetch_new( - self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int - ) -> None: - rev_display = rev_options.to_display() - logger.info( - "Checking out %s%s to %s", - url, - rev_display, - display_path(dest), - ) - if verbosity <= 0: - flag = "--quiet" - else: - flag = "" - cmd_args = make_command( - "checkout", - flag, - self.get_remote_call_options(), - rev_options.to_args(), - url, - dest, - ) - self.run_command(cmd_args) - - def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - cmd_args = make_command( - "switch", - self.get_remote_call_options(), - rev_options.to_args(), - url, - dest, - ) - self.run_command(cmd_args) - - def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - cmd_args = make_command( - "update", - self.get_remote_call_options(), - rev_options.to_args(), - dest, - ) - self.run_command(cmd_args) - - -vcs.register(Subversion) diff --git a/spaces/Borda90/Titanic_Esp/app.py b/spaces/Borda90/Titanic_Esp/app.py deleted file mode 100644 index a9bbf2698feae1751170737586c22fd6c25d18f1..0000000000000000000000000000000000000000 --- a/spaces/Borda90/Titanic_Esp/app.py +++ /dev/null @@ -1,67 +0,0 @@ -# This is a small and fast sklearn model, so the run-gradio script trains a model and deploys it - -import pandas as pd -import numpy as np -import sklearn -import gradio as gr -from sklearn import preprocessing -from sklearn.model_selection import train_test_split -from sklearn.ensemble import RandomForestClassifier -from sklearn.metrics import accuracy_score - -data = pd.read_csv('https://raw.githubusercontent.com/gradio-app/titanic/master/train.csv') -data.head() - -def encode_ages(df): # Binning ages - df.Age = df.Age.fillna(-0.5) - bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120) - categories = pd.cut(df.Age, bins, labels=False) - df.Age = categories - return df - -def encode_fares(df): # Binning fares - df.Fare = df.Fare.fillna(-0.5) - bins = (-1, 0, 8, 15, 31, 1000) - categories = pd.cut(df.Fare, bins, labels=False) - df.Fare = categories - return df - -def encode_sex(df): - mapping = {"male": 0, "female": 1} - return df.replace({'Sex': mapping}) - -def transform_features(df): - df = encode_ages(df) - df = encode_fares(df) - df = encode_sex(df) - return df - -train = data[['PassengerId', 'Fare', 'Age', 'Sex', 'Survived']] -train = transform_features(train) -train.head() - - -X_all = train.drop(['Survived', 'PassengerId'], axis=1) -y_all = train['Survived'] - -num_test = 0.20 -X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=num_test, random_state=23) - -clf = RandomForestClassifier() -clf.fit(X_train, y_train) -predictions = clf.predict(X_test) - -def predict_survival(sex, age, fare): - df = pd.DataFrame.from_dict({'Sex': [sex], 'Age': [age], 'Fare': [fare]}) - df = encode_sex(df) - df = encode_fares(df) - df = encode_ages(df) - pred = clf.predict_proba(df)[0] - return {'Muere': float(pred[0]), 'Sobrevive': float(pred[1])} - -sex = gr.inputs.Radio(['female', 'male'], label="Sexo") -age = gr.inputs.Slider(minimum=0, maximum=120, default=22, label="Edad") -fare = gr.inputs.Slider(minimum=0, maximum=200, default=100, label="Clase") - -gr.Interface(predict_survival, [sex, age, fare], "label", live=True, thumbnail="https://raw.githubusercontent.com/gradio-app/hub-titanic/master/thumbnail.png", analytics_enabled=False, - title="Sobrevivientes del Titanic", description="Analicemos los sobreviventes de este caso 
y comprobemos").launch(); \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/extractor.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/extractor.py deleted file mode 100644 index b715a4451e096d6d6c086f9bcf60f92d2ae692f8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/extractor.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -from typing import Sequence -import torch - -from detectron2.layers.nms import batched_nms -from detectron2.structures.instances import Instances - -from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer -from densepose.vis.densepose import DensePoseResultsVisualizer - -from .base import CompoundVisualizer - -Scores = Sequence[float] - - -def extract_scores_from_instances(instances: Instances, select=None): - if instances.has("scores"): - return instances.scores if select is None else instances.scores[select] - return None - - -def extract_boxes_xywh_from_instances(instances: Instances, select=None): - if instances.has("pred_boxes"): - boxes_xywh = instances.pred_boxes.tensor.clone() - boxes_xywh[:, 2] -= boxes_xywh[:, 0] - boxes_xywh[:, 3] -= boxes_xywh[:, 1] - return boxes_xywh if select is None else boxes_xywh[select] - return None - - -def create_extractor(visualizer: object): - """ - Create an extractor for the provided visualizer - """ - if isinstance(visualizer, CompoundVisualizer): - extractors = [create_extractor(v) for v in visualizer.visualizers] - return CompoundExtractor(extractors) - elif isinstance(visualizer, DensePoseResultsVisualizer): - return DensePoseResultExtractor() - elif isinstance(visualizer, ScoredBoundingBoxVisualizer): - return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances]) - elif isinstance(visualizer, BoundingBoxVisualizer): - return extract_boxes_xywh_from_instances - else: - logger = logging.getLogger(__name__) - logger.error(f"Could not create extractor for {visualizer}") - return None - - -class BoundingBoxExtractor(object): - """ - Extracts bounding boxes from instances - """ - - def __call__(self, instances: Instances): - boxes_xywh = extract_boxes_xywh_from_instances(instances) - return boxes_xywh - - -class ScoredBoundingBoxExtractor(object): - """ - Extracts bounding boxes from instances - """ - - def __call__(self, instances: Instances, select=None): - scores = extract_scores_from_instances(instances) - boxes_xywh = extract_boxes_xywh_from_instances(instances) - if (scores is None) or (boxes_xywh is None): - return (boxes_xywh, scores) - if select is not None: - scores = scores[select] - boxes_xywh = boxes_xywh[select] - return (boxes_xywh, scores) - - -class DensePoseResultExtractor(object): - """ - Extracts DensePose result from instances - """ - - def __call__(self, instances: Instances, select=None): - boxes_xywh = extract_boxes_xywh_from_instances(instances) - if instances.has("pred_densepose") and (boxes_xywh is not None): - dpout = instances.pred_densepose - if select is not None: - dpout = dpout[select] - boxes_xywh = boxes_xywh[select] - return dpout.to_result(boxes_xywh) - else: - return None - - -class CompoundExtractor(object): - """ - Extracts data for CompoundVisualizer - """ - - def __init__(self, extractors): - self.extractors = extractors - - def 
__call__(self, instances: Instances, select=None): - datas = [] - for extractor in self.extractors: - data = extractor(instances, select) - datas.append(data) - return datas - - -class NmsFilteredExtractor(object): - """ - Extracts data in the format accepted by NmsFilteredVisualizer - """ - - def __init__(self, extractor, iou_threshold): - self.extractor = extractor - self.iou_threshold = iou_threshold - - def __call__(self, instances: Instances, select=None): - scores = extract_scores_from_instances(instances) - boxes_xywh = extract_boxes_xywh_from_instances(instances) - if boxes_xywh is None: - return None - select_local_idx = batched_nms( - boxes_xywh, - scores, - torch.zeros(len(scores), dtype=torch.int32), - iou_threshold=self.iou_threshold, - ).squeeze() - select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device) - select_local[select_local_idx] = True - select = select_local if select is None else (select & select_local) - return self.extractor(instances, select=select) - - -class ScoreThresholdedExtractor(object): - """ - Extracts data in the format accepted by ScoreThresholdedVisualizer - """ - - def __init__(self, extractor, min_score): - self.extractor = extractor - self.min_score = min_score - - def __call__(self, instances: Instances, select=None): - scores = extract_scores_from_instances(instances) - if scores is None: - return None - select_local = scores > self.min_score - select = select_local if select is None else (select & select_local) - data = self.extractor(instances, select=select) - return data diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/copy.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/copy.h deleted file mode 100644 index 72debb3b66715b284056aea1648fcaf5589afd70..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/copy.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// The purpose of this header is to #include the async/copy.h header of the -// sequential, host, and device systems. It should be #included in any code -// which uses ADL to dispatch async copy. 
- -#pragma once - -#include - -//#include - -//#define __THRUST_HOST_SYSTEM_ASYNC_COPY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/async/copy.h> -//#include __THRUST_HOST_SYSTEM_ASYNC_COPY_HEADER -//#undef __THRUST_HOST_SYSTEM_ASYNC_COPY_HEADER - -#define __THRUST_DEVICE_SYSTEM_ASYNC_COPY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/async/copy.h> -#include __THRUST_DEVICE_SYSTEM_ASYNC_COPY_HEADER -#undef __THRUST_DEVICE_SYSTEM_ASYNC_COPY_HEADER - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/copy.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/copy.h deleted file mode 100644 index e22535618efd8c896b8e04ba21b636e4832743ea..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/copy.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - - -template -__host__ __device__ - OutputIterator copy(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -template -__host__ __device__ - OutputIterator copy_n(thrust::execution_policy &exec, - InputIterator first, - Size n, - OutputIterator result); - - -} // end generic -} // end detail -} // end system -} // end thrust - -#include - diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/SAA/modelinet.py b/spaces/Caoyunkang/Segment-Any-Anomaly/SAA/modelinet.py deleted file mode 100644 index 5a4a51d3ef8b34f49502d361e885e8b403e45e92..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/SAA/modelinet.py +++ /dev/null @@ -1,173 +0,0 @@ -import timm -from copy import deepcopy -from typing import Tuple - -import numpy as np -import timm -import torch -from torch.nn import functional as F -from torchvision.transforms.functional import resize, to_pil_image # type: ignore - - -class ResizeLongestSide: - """ - Resizes images to longest side 'target_length', as well as provides - methods for resizing coordinates and boxes. Provides methods for - transforming both numpy array and batched torch tensors. - """ - - def __init__(self, target_length: int) -> None: - self.target_length = target_length - - def apply_image(self, image: np.ndarray) -> np.ndarray: - """ - Expects a numpy array with shape HxWxC in uint8 format. - """ - target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) - return np.array(resize(to_pil_image(image), target_size)) - - def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: - """ - Expects a numpy array of length 2 in the final dimension. Requires the - original image size in (H, W) format. 
- """ - old_h, old_w = original_size - new_h, new_w = self.get_preprocess_shape( - original_size[0], original_size[1], self.target_length - ) - coords = deepcopy(coords).astype(float) - coords[..., 0] = coords[..., 0] * (new_w / old_w) - coords[..., 1] = coords[..., 1] * (new_h / old_h) - return coords - - def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: - """ - Expects a numpy array shape Bx4. Requires the original image size - in (H, W) format. - """ - boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) - return boxes.reshape(-1, 4) - - def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor: - """ - Expects batched images with shape BxCxHxW and float format. This - transformation may not exactly match apply_image. apply_image is - the transformation expected by the model. - """ - # Expects an image in BCHW format. May not exactly match apply_image. - target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) - return F.interpolate( - image, target_size, mode="bilinear", align_corners=False, antialias=True - ) - - def apply_coords_torch( - self, coords: torch.Tensor, original_size: Tuple[int, ...] - ) -> torch.Tensor: - """ - Expects a torch tensor with length 2 in the last dimension. Requires the - original image size in (H, W) format. - """ - old_h, old_w = original_size - new_h, new_w = self.get_preprocess_shape( - original_size[0], original_size[1], self.target_length - ) - coords = deepcopy(coords).to(torch.float) - coords[..., 0] = coords[..., 0] * (new_w / old_w) - coords[..., 1] = coords[..., 1] * (new_h / old_h) - return coords - - def apply_boxes_torch( - self, boxes: torch.Tensor, original_size: Tuple[int, ...] - ) -> torch.Tensor: - """ - Expects a torch tensor with shape Bx4. Requires the original image - size in (H, W) format. - """ - boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size) - return boxes.reshape(-1, 4) - - @staticmethod - def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]: - """ - Compute the output size given input size and target long side length. - """ - scale = long_side_length * 1.0 / max(oldh, oldw) - newh, neww = oldh * scale, oldw * scale - neww = int(neww + 0.5) - newh = int(newh + 0.5) - return (newh, neww) - - -class ModelINet(torch.nn.Module): - # hrnet_w32, wide_resnet50_2 - def __init__(self, device, backbone_name='wide_resnet50_2', out_indices=(1, 2, 3), checkpoint_path='', - pool_last=False): - super().__init__() - # Determine if to output features. 
- kwargs = {'features_only': True if out_indices else False} - if out_indices: - kwargs.update({'out_indices': out_indices}) - print(backbone_name) - - self.device = device - self.backbone = timm.create_model(model_name=backbone_name, pretrained=True, checkpoint_path=checkpoint_path, - **kwargs) - self.backbone.eval() - self.backbone = self.backbone.to(self.device) - - self.avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1)) if pool_last else None - - self.pixel_mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).view(-1, 1, 1).to(self.device) - self.pixel_std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).view(-1, 1, 1).to(self.device) - - self.img_size = 1024 - self.transform_size = ResizeLongestSide(self.img_size) - - def set_img_size(self, img_size): - self.img_size = img_size - self.transform_size = ResizeLongestSide(self.img_size) - - def preprocess(self, image: np.ndarray): - """Normalize pixel values and pad to a square input.""" - - input_image = self.transform_size.apply_image(image) - input_image_torch = torch.as_tensor(input_image, device=self.device) - x = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] - - # Normalize colors - x = (x - self.pixel_mean) / self.pixel_std - - # Pad - h, w = x.shape[-2:] - padh = self.img_size - h - padw = self.img_size - w - x = F.pad(x, (0, padw, 0, padh)) - - ratio_h = h / self.img_size - ratio_w = w / self.img_size - return x, ratio_h, ratio_w - - @torch.no_grad() - def forward(self, x): - x, ratio_h, ratio_w = self.preprocess(x) - x = x.to(self.device) - - # Backbone forward pass. - features = self.backbone(x) - - # Adaptive average pool over the last layer. - if self.avg_pool: - fmap = features[-1] - fmap = self.avg_pool(fmap) - fmap = torch.flatten(fmap, 1) - features.append(fmap) - - size_0 = features[0].shape[2:] - - for i in range(1, len(features)): - features[i] = F.interpolate(features[i], size_0) - - features = torch.cat(features, dim=1) - features = F.normalize(features, dim=1) - - return features, ratio_h, ratio_w diff --git a/spaces/Chris4K/llms_compare/Cedie-Ang-Munting-Prinsipe-Tagalog-Version-Full-Movie-Episode-1.md b/spaces/Chris4K/llms_compare/Cedie-Ang-Munting-Prinsipe-Tagalog-Version-Full-Movie-Episode-1.md deleted file mode 100644 index 1694843f818bd0299473ac53d65e7dddbc8d1689..0000000000000000000000000000000000000000 --- a/spaces/Chris4K/llms_compare/Cedie-Ang-Munting-Prinsipe-Tagalog-Version-Full-Movie-Episode-1.md +++ /dev/null @@ -1,60 +0,0 @@ -## cedie ang munting prinsipe tagalog version full movie episode 1 - - - - - - - - - -**Download File ->>> [https://eromdesre.blogspot.com/?d=2txP07](https://eromdesre.blogspot.com/?d=2txP07)** - - - - - - - - - - - - - -# Cedie Ang Munting Prinsipe: A Classic Anime Adaptation of a Beloved Novel - - - -Cedie Ang Munting Prinsipe is a Filipino-dubbed anime series based on the novel Little Lord Fauntleroy by Frances Hodgson Burnett. It tells the story of Cedric Errol, a kind and cheerful boy who lives with his mother in New York. One day, he learns that he is the heir of his grandfather, the Earl of Dorincourt, who lives in England. He travels to meet his grandfather, who is cold and stern, but gradually warms up to Cedie's innocence and goodness. - - - -The anime series was produced by Nippon Animation in 1988 as part of the World Masterpiece Theater collection. It has 43 episodes, each about 25 minutes long. The first episode introduces Cedie's life in New York, his friendship with Dick Tipton, a shoeshine boy, and his encounter with Mr. 
Hobbs, a grocer who becomes his mentor. It also shows how Cedie receives a letter from his grandfather's lawyer, informing him of his inheritance and inviting him to England. - - - -The series was dubbed in Tagalog by ABS-CBN and aired in the Philippines in the early 1990s. It became one of the most popular and beloved anime shows among Filipino children and adults alike. Many viewers were moved by Cedie's kindness, courage, and loyalty, as well as his relationship with his mother and grandfather. The series also featured memorable songs, such as "Ang Munting Prinsipe" (The Little Prince), "Ikaw ang Lahat sa Akin" (You Are Everything to Me), and "Pag-ibig ang Nagbibigay Buhay" (Love Gives Life). - - - -Cedie Ang Munting Prinsipe is a classic anime adaptation of a beloved novel that has touched the hearts of many generations. It is a story of love, family, and friendship that transcends time and culture. You can watch the first episode of the Tagalog version on YouTube[^1^]. Enjoy! - - - -In the following episodes, Cedie arrives in England and meets his grandfather, who is initially cold and distant. He also meets his cousin Wilkins, who is jealous and spiteful of him. Cedie tries to win his grandfather's affection and respect, while also learning about the life and responsibilities of a lord. He makes friends with the servants and the villagers, and helps them with their problems. He also faces various challenges and dangers, such as being kidnapped by his uncle Bevis, who wants to claim the earldom. - - - -Meanwhile, Cedie's mother, who is called Dearest by him, stays in a cottage near the castle. She is not allowed to see her son or her father-in-law, as the Earl disapproves of her marriage to his son. She is kind and gentle, and supports Cedie from afar. She also teaches him about his father, who died when he was a baby. Cedie's father was a brave and noble man, who fought in the American Civil War and married Dearest against his father's wishes. - - - -As the story progresses, Cedie and his grandfather develop a close and loving bond. The Earl becomes more kind and generous, thanks to Cedie's influence. He also learns to appreciate Dearest and reconciles with her. However, their happiness is threatened when a woman named Minna claims that her son Tom is the real heir of the earldom, as she was married to Cedie's father before Dearest. Cedie faces the possibility of losing his title and his grandfather's love, but he remains faithful and hopeful. 
- - dfd1c89656 - - - - - diff --git a/spaces/ChristopherMarais/Andrew_Alpha/app.py b/spaces/ChristopherMarais/Andrew_Alpha/app.py deleted file mode 100644 index 760eb8afaa682b399f8c663d8e412b8dae08b9ce..0000000000000000000000000000000000000000 --- a/spaces/ChristopherMarais/Andrew_Alpha/app.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import dill -import timm -import random -import numpy as np -import gradio as gr -from fastai.tabular.all import * -from fastai.vision.all import * -from fastai.vision.utils import get_image_files -from fastai.learner import load_learner -from Ambrosia import pre_process_image -from huggingface_hub import from_pretrained_fastai, push_to_hub_fastai, hf_hub_download -from torchvision.transforms import GaussianBlur - -# Set the token -os.environ["HUGGINGFACE_TOKEN"] = "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" - -# # Define a custom transform for Gaussian blur -# def gaussian_blur(x, p=0.5, kernel_size_min=3, kernel_size_max=9, sigma_min=0.1, sigma_max=2): -# if x.ndim == 4: -# for i in range(x.shape[0]): -# if random.random() < p: -# kernel_size = random.randrange(kernel_size_min, kernel_size_max + 1, 2) -# sigma = random.uniform(sigma_min, sigma_max) -# x[i] = GaussianBlur(kernel_size=kernel_size, sigma=sigma)(x[i]) -# return x - -# Define a custom transform for Gaussian blur -def gaussian_blur(x, p=0.5, kernel_size_min=3, kernel_size_max=20, sigma_min=0.1, sigma_max=3): - if x.ndim == 4: - for i in range(x.shape[0]): - if random.random() < p: - kernel_size = random.randrange(kernel_size_min, kernel_size_max + 1, 2) - sigma = random.uniform(sigma_min, sigma_max) - x[i] = GaussianBlur(kernel_size=kernel_size, sigma=sigma)(x[i]) - return x - -# this function only describes how much a singular value in al ist stands out. -# if all values in the lsit are high or low this is 1 -# the smaller the proportiopn of number of disimilar vlaues are to other more similar values the lower this number -# the larger the gap between the dissimilar numbers and the simialr number the smaller this number -# only able to interpret probabilities or values between 0 and 1 -# this function outputs an estimate an inverse of the classification confidence based on the probabilities of all the classes. 
-# the wedge threshold splits the data on a threshold with a magnitude of a positive int to force a ledge/peak in the data -def unkown_prob_calc(probs, wedge_threshold, wedge_magnitude=1, wedge='strict'): - if wedge =='strict': - increase_var = (1/(wedge_magnitude)) - decrease_var = (wedge_magnitude) - if wedge =='dynamic': # this allows pointsthat are furhter from the threshold ot be moved less and points clsoer to be moved more - increase_var = (1/(wedge_magnitude*((1-np.abs(probs-wedge_threshold))))) - decrease_var = (wedge_magnitude*((1-np.abs(probs-wedge_threshold)))) - else: - print("Error: use 'strict' (default) or 'dynamic' as options for the wedge parameter!") - probs = np.where(probs>=wedge_threshold , probs**increase_var, probs) - probs = np.where(probs<=wedge_threshold , probs**decrease_var, probs) - diff_matrix = np.abs(probs[:, np.newaxis] - probs) - diff_matrix_sum = np.sum(diff_matrix) - probs_sum = np.sum(probs) - class_val = (diff_matrix_sum/probs_sum) - max_class_val = ((len(probs)-1)*2) - kown_prob = class_val/max_class_val - unknown_prob = 1-kown_prob - return(unknown_prob) - -# load model -learn = from_pretrained_fastai("ChristopherMarais/beetle-model") -# learn = load_learner( -# hf_hub_download('ChristopherMarais/Andrew_Alpha_model', filename="model.pkl") -# ) - -# get class names -labels = np.append(np.array(learn.dls.vocab), "Unknown") - -def predict(img): - # Segment image into smaller images - pre_process = pre_process_image(manual_thresh_buffer=0.15, image = img) # use image_dir if directory of image used - pre_process.segment(cluster_num=2, - image_edge_buffer=50) - # get predictions for all segments - conf_dict_lst = [] - output_lst = [] - img_cnt = len(pre_process.col_image_lst) - for i in range(0,img_cnt): - prob_ar = np.array(learn.predict(pre_process.col_image_lst[i])[2]) - unkown_prob = unkown_prob_calc(probs=prob_ar, wedge_threshold=0.85, wedge_magnitude=5, wedge='dynamic') - prob_ar = np.append(prob_ar, unkown_prob) - prob_ar = np.around(prob_ar*100, decimals=1) - - conf_dict = {labels[i]: float(prob_ar[i]) for i in range(len(prob_ar))} - conf_dict = dict(sorted(conf_dict.items(), key=lambda item: item[1], reverse=True)) - conf_dict_lst.append(str(conf_dict)) - result = list(zip(pre_process.col_image_lst, conf_dict_lst)) - - return(result) - -with gr.Blocks() as demo: - with gr.Column(variant="panel"): - with gr.Row(variant="compact"): - inputs = gr.Image() - btn = gr.Button("Classify").style(full_width=False) - - gallery = gr.Gallery( - label="Show images", show_label=True, elem_id="gallery" - ).style(grid=[8], height="auto") - - btn.click(predict, inputs, gallery) - demo.launch() \ No newline at end of file diff --git a/spaces/CosmoAI/BhagwatGeeta/app.py b/spaces/CosmoAI/BhagwatGeeta/app.py deleted file mode 100644 index a79d0315e0f9e0e9c2947c69137fb8345de97d88..0000000000000000000000000000000000000000 --- a/spaces/CosmoAI/BhagwatGeeta/app.py +++ /dev/null @@ -1,196 +0,0 @@ -import google.generativeai as palm -import streamlit as st -import os - -# Set your API key -palm.configure(api_key = os.environ['PALM_KEY']) - -# Select the PaLM 2 model -model = 'models/text-bison-001' - -# Generate text -if prompt := st.chat_input("Ask your query..."): - enprom = f"""Answer the below provided input in context to Bhagwad Geeta. Use the verses and chapters sentences as references to your answer with suggestions - coming from Bhagwad Geeta. 
Your answer to below input should only be in context to Bhagwad geeta only.\nInput= {prompt}""" - completion = palm.generate_text(model=model, prompt=enprom, temperature=0.5, max_output_tokens=800) - -# response = palm.chat(messages=["Hello."]) -# print(response.last) # 'Hello! What can I help you with?' -# response.reply("Can you tell me a joke?") - -# Print the generated text - with st.chat_message("Assistant"): - st.write(completion.result) - - - - - - - - -# import streamlit as st -# from dotenv import load_dotenv -# from PyPDF2 import PdfReader -# from langchain.text_splitter import CharacterTextSplitter -# from langchain.embeddings import HuggingFaceEmbeddings -# from langchain.vectorstores import FAISS -# # from langchain.chat_models import ChatOpenAI -# from langchain.memory import ConversationBufferMemory -# from langchain.chains import ConversationalRetrievalChain -# from htmlTemplates import css, bot_template, user_template -# from langchain.llms import HuggingFaceHub -# import os -# # from transformers import T5Tokenizer, T5ForConditionalGeneration -# # from langchain.callbacks import get_openai_callback - -# hub_token = os.environ["HUGGINGFACE_HUB_TOKEN"] - -# def get_pdf_text(pdf_docs): -# text = "" -# for pdf in pdf_docs: -# pdf_reader = PdfReader(pdf) -# for page in pdf_reader.pages: -# text += page.extract_text() -# return text - - -# def get_text_chunks(text): -# text_splitter = CharacterTextSplitter( -# separator="\n", -# chunk_size=200, -# chunk_overlap=20, -# length_function=len -# ) -# chunks = text_splitter.split_text(text) -# return chunks - - -# def get_vectorstore(text_chunks): -# # embeddings = OpenAIEmbeddings() -# # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl") -# embeddings = HuggingFaceEmbeddings() -# vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings) -# return vectorstore - - -# def get_conversation_chain(vectorstore): -# # llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k") -# # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base") -# # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base") - -# llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-v0.1", huggingfacehub_api_token=hub_token, model_kwargs={"temperature":0.5, "max_length":20}) - -# memory = ConversationBufferMemory( -# memory_key='chat_history', return_messages=True) -# conversation_chain = ConversationalRetrievalChain.from_llm( -# llm=llm, -# retriever=vectorstore.as_retriever(), -# memory=memory -# ) -# return conversation_chain - - -# def handle_userinput(user_question): -# response = st.session_state.conversation -# reply = response.run(user_question) -# st.write(reply) -# # st.session_state.chat_history = response['chat_history'] - -# # for i, message in enumerate(st.session_state.chat_history): -# # if i % 2 == 0: -# # st.write(user_template.replace( -# # "{{MSG}}", message.content), unsafe_allow_html=True) -# # else: -# # st.write(bot_template.replace( -# # "{{MSG}}", message.content), unsafe_allow_html=True) - - -# def main(): -# load_dotenv() -# st.set_page_config(page_title="Chat with multiple PDFs", -# page_icon=":books:") -# st.write(css, unsafe_allow_html=True) - -# if "conversation" not in st.session_state: -# st.session_state.conversation = None -# if "chat_history" not in st.session_state: -# st.session_state.chat_history = None - -# st.header("Chat with multiple PDFs :books:") -# user_question = st.text_input("Ask a question about your documents:") -# if user_question: -# handle_userinput(user_question) 
- -# with st.sidebar: -# st.subheader("Your documents") -# pdf_docs = st.file_uploader( -# "Upload your PDFs here and click on 'Process'", accept_multiple_files=True) -# if st.button("Process"): -# if(len(pdf_docs) == 0): -# st.error("Please upload at least one PDF") -# else: -# with st.spinner("Processing"): -# # get pdf text -# raw_text = get_pdf_text(pdf_docs) - -# # get the text chunks -# text_chunks = get_text_chunks(raw_text) - -# # create vector store -# vectorstore = get_vectorstore(text_chunks) - -# # create conversation chain -# st.session_state.conversation = get_conversation_chain( -# vectorstore) - -# if __name__ == '__main__': -# main() - - - - - - -# # import os -# # import getpass -# # import streamlit as st -# # from langchain.document_loaders import PyPDFLoader -# # from langchain.text_splitter import RecursiveCharacterTextSplitter -# # from langchain.embeddings import HuggingFaceEmbeddings -# # from langchain.vectorstores import Chroma -# # from langchain import HuggingFaceHub -# # from langchain.chains import RetrievalQA -# # # __import__('pysqlite3') -# # # import sys -# # # sys.modules['sqlite3'] = sys.modules.pop('pysqlite3') - - -# # # load huggingface api key -# # hubtok = os.environ["HUGGINGFACE_HUB_TOKEN"] - -# # # use streamlit file uploader to ask user for file -# # # file = st.file_uploader("Upload PDF") - - -# # path = "Geeta.pdf" -# # loader = PyPDFLoader(path) -# # pages = loader.load() - -# # # st.write(pages) - -# # splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20) -# # docs = splitter.split_documents(pages) - -# # embeddings = HuggingFaceEmbeddings() -# # doc_search = Chroma.from_documents(docs, embeddings) - -# # repo_id = "tiiuae/falcon-7b" -# # llm = HuggingFaceHub(repo_id=repo_id, huggingfacehub_api_token=hubtok, model_kwargs={'temperature': 0.2,'max_length': 1000}) - -# # from langchain.schema import retriever -# # retireval_chain = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=doc_search.as_retriever()) - -# # if query := st.chat_input("Enter a question: "): -# # with st.chat_message("assistant"): -# # st.write(retireval_chain.run(query)) \ No newline at end of file diff --git a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/util.py b/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/util.py deleted file mode 100644 index 45cb050ece6f401a22dde098ce3f1ff663c5eb6a..0000000000000000000000000000000000000000 --- a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/util.py +++ /dev/null @@ -1,197 +0,0 @@ -import importlib - -import torch -from torch import optim -import numpy as np - -from inspect import isfunction -from PIL import Image, ImageDraw, ImageFont - - -def log_txt_as_img(wh, xc, size=10): - # wh a tuple of (width, height) - # xc a list of captions to plot - b = len(xc) - txts = list() - for bi in range(b): - txt = Image.new("RGB", wh, color="white") - draw = ImageDraw.Draw(txt) - font = ImageFont.truetype('font/DejaVuSans.ttf', size=size) - nc = int(40 * (wh[0] / 256)) - lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) - - try: - draw.text((0, 0), lines, fill="black", font=font) - except UnicodeEncodeError: - print("Cant encode string for logging. 
Skipping.") - - txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 - txts.append(txt) - txts = np.stack(txts) - txts = torch.tensor(txts) - return txts - - -def ismap(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] > 3) - - -def isimage(x): - if not isinstance(x,torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def mean_flat(tensor): - """ - https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def count_params(model, verbose=False): - total_params = sum(p.numel() for p in model.parameters()) - if verbose: - print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") - return total_params - - -def instantiate_from_config(config): - if not "target" in config: - if config == '__is_first_stage__': - return None - elif config == "__is_unconditional__": - return None - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", dict())) - - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - - -class AdamWwithEMAandWings(optim.Optimizer): - # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 - def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: check hyperparameters before using - weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code - ema_power=1., param_names=()): - """AdamW that saves EMA versions of the parameters.""" - if not 0.0 <= lr: - raise ValueError("Invalid learning rate: {}".format(lr)) - if not 0.0 <= eps: - raise ValueError("Invalid epsilon value: {}".format(eps)) - if not 0.0 <= betas[0] < 1.0: - raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) - if not 0.0 <= betas[1] < 1.0: - raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) - if not 0.0 <= weight_decay: - raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) - if not 0.0 <= ema_decay <= 1.0: - raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) - defaults = dict(lr=lr, betas=betas, eps=eps, - weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, - ema_power=ema_power, param_names=param_names) - super().__init__(params, defaults) - - def __setstate__(self, state): - super().__setstate__(state) - for group in self.param_groups: - group.setdefault('amsgrad', False) - - @torch.no_grad() - def step(self, closure=None): - """Performs a single optimization step. - Args: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. 
- """ - loss = None - if closure is not None: - with torch.enable_grad(): - loss = closure() - - for group in self.param_groups: - params_with_grad = [] - grads = [] - exp_avgs = [] - exp_avg_sqs = [] - ema_params_with_grad = [] - state_sums = [] - max_exp_avg_sqs = [] - state_steps = [] - amsgrad = group['amsgrad'] - beta1, beta2 = group['betas'] - ema_decay = group['ema_decay'] - ema_power = group['ema_power'] - - for p in group['params']: - if p.grad is None: - continue - params_with_grad.append(p) - if p.grad.is_sparse: - raise RuntimeError('AdamW does not support sparse gradients') - grads.append(p.grad) - - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - # Exponential moving average of gradient values - state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) - # Exponential moving average of squared gradient values - state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) - if amsgrad: - # Maintains max of all exp. moving avg. of sq. grad. values - state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) - # Exponential moving average of parameter values - state['param_exp_avg'] = p.detach().float().clone() - - exp_avgs.append(state['exp_avg']) - exp_avg_sqs.append(state['exp_avg_sq']) - ema_params_with_grad.append(state['param_exp_avg']) - - if amsgrad: - max_exp_avg_sqs.append(state['max_exp_avg_sq']) - - # update the steps for each param group update - state['step'] += 1 - # record the step after step update - state_steps.append(state['step']) - - optim._functional.adamw(params_with_grad, - grads, - exp_avgs, - exp_avg_sqs, - max_exp_avg_sqs, - state_steps, - amsgrad=amsgrad, - beta1=beta1, - beta2=beta2, - lr=group['lr'], - weight_decay=group['weight_decay'], - eps=group['eps'], - maximize=False) - - cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power) - for param, ema_param in zip(params_with_grad, ema_params_with_grad): - ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay) - - return loss \ No newline at end of file diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/app.py b/spaces/DAMO-NLP-SG/Video-LLaMA/app.py deleted file mode 100644 index 4b0940c9305eb8aa78e6145cad7124ce7cd999fa..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/app.py +++ /dev/null @@ -1,259 +0,0 @@ -""" -Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/demo.py -""" -import argparse -import os -import random - -import numpy as np -import torch -import torch.backends.cudnn as cudnn -import gradio as gr - -from video_llama.common.config import Config -from video_llama.common.dist_utils import get_rank -from video_llama.common.registry import registry -from video_llama.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle -import decord -decord.bridge.set_bridge('torch') - - -#%% -# imports modules for registration -from video_llama.datasets.builders import * -from video_llama.models import * -from video_llama.processors import * -from video_llama.runners import * -from video_llama.tasks import * - -#%% -def parse_args(): - parser = argparse.ArgumentParser(description="Demo") - parser.add_argument("--cfg-path", default='eval_configs/video_llama_eval.yaml', help="path to configuration file.") - parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.") - parser.add_argument( - "--options", - nargs="+", - help="override some settings in the used config, 
the key-value pair " - "in xxx=yyy format will be merged into config file (deprecate), " - "change to --cfg-options instead.", - ) - args = parser.parse_args() - return args - - -def setup_seeds(config): - seed = config.run_cfg.seed + get_rank() - - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - - cudnn.benchmark = False - cudnn.deterministic = True - - -# ======================================== -# Model Initialization -# ======================================== - -print('Initializing Chat') -args = parse_args() -cfg = Config(args) - -model_config = cfg.model_cfg -model_config.device_8bit = args.gpu_id -model_cls = registry.get_model_class(model_config.arch) -model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id)) -model.eval() -vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train -vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) -chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id)) -print('Initialization Finished') - -# ======================================== -# Gradio Setting -# ======================================== - -def gradio_reset(chat_state, img_list): - if chat_state is not None: - chat_state.messages = [] - if img_list is not None: - img_list = [] - return None, gr.update(value=None, interactive=True), gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your video first', interactive=False),gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list - -def upload_imgorvideo(gr_video, gr_img, text_input, chat_state,chatbot): - if gr_img is None and gr_video is None: - return None, None, None, gr.update(interactive=True), chat_state, None - elif gr_img is not None and gr_video is None: - print(gr_img) - chatbot = chatbot + [((gr_img,), None)] - chat_state = Conversation( - system= "You are able to understand the visual content that the user provides." - "Follow the instructions carefully and explain your answers in detail.", - roles=("Human", "Assistant"), - messages=[], - offset=0, - sep_style=SeparatorStyle.SINGLE, - sep="###", - ) - img_list = [] - llm_message = chat.upload_img(gr_img, chat_state, img_list) - return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list,chatbot - elif gr_video is not None and gr_img is None: - print(gr_video) - chatbot = chatbot + [((gr_video,), None)] - chat_state = default_conversation.copy() - chat_state = Conversation( - system= "You are able to understand the visual content that the user provides." 
- "Follow the instructions carefully and explain your answers in detail.", - roles=("Human", "Assistant"), - messages=[], - offset=0, - sep_style=SeparatorStyle.SINGLE, - sep="###", - ) - img_list = [] - llm_message = chat.upload_video(gr_video, chat_state, img_list) - return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list,chatbot - else: - # img_list = [] - return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot - -def gradio_ask(user_message, chatbot, chat_state): - if len(user_message) == 0: - return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state - chat.ask(user_message, chat_state) - chatbot = chatbot + [[user_message, None]] - return '', chatbot, chat_state - - -def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature): - llm_message = chat.answer(conv=chat_state, - img_list=img_list, - num_beams=1, - temperature=temperature, - max_new_tokens=240, - max_length=511)[0] - chatbot[-1][1] = llm_message - print(chat_state.get_prompt()) - print(chat_state) - return chatbot, chat_state, img_list - -title = """ -

      Video-LLaMA


      Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding

      Introduction: Video-LLaMA is a multi-modal large language model that achieves video-grounded conversations between humans and computers \ - by connecting the language decoder with off-the-shelf unimodal pre-trained models.
      - - -Thank you for using the Video-LLaMA Demo Page! If you have any questions or feedback, feel free to contact us. - -If you find Video-LLaMA interesting, please give us a star on GitHub. - -Current online demo uses the 7B version of Video-LLaMA due to resource limitations. We have released \ - the 13B version on our GitHub repository. - - -""" - -Note_markdown = (""" -### Note -Video-LLaMA is a prototype model and may have limitations in understanding complex scenes, long videos, or specific domains. -The output results may be influenced by input quality, limitations of the dataset, and the model's susceptibility to illusions. Please interpret the results with caution. - -**Copyright 2023 Alibaba DAMO Academy.** -""") - -cite_markdown = (""" -## Citation -If you find our project useful, hope you can star our repo and cite our paper as follows: -``` -@article{damonlpsg2023videollama, - author = {Zhang, Hang and Li, Xin and Bing, Lidong}, - title = {Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding}, - year = 2023, - journal = {arXiv preprint arXiv:2306.02858} - url = {https://arxiv.org/abs/2306.02858} -} -""") - -case_note_upload = (""" -### We provide some examples at the bottom of the page. Simply click on them to try them out directly. -""") - -#TODO show examples below - -with gr.Blocks() as demo: - gr.Markdown(title) - - with gr.Row(): - with gr.Column(scale=0.5): - video = gr.Video() - image = gr.Image(type="filepath") - gr.Markdown(case_note_upload) - - upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary") - clear = gr.Button("Restart") - - num_beams = gr.Slider( - minimum=1, - maximum=10, - value=1, - step=1, - interactive=True, - label="beam search numbers)", - ) - - temperature = gr.Slider( - minimum=0.1, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="Temperature", - ) - - audio = gr.Checkbox(interactive=True, value=False, label="Audio") - gr.Markdown(Note_markdown) - with gr.Column(): - chat_state = gr.State() - img_list = gr.State() - chatbot = gr.Chatbot(label='Video-LLaMA') - text_input = gr.Textbox(label='User', placeholder='Upload your image/video first, or directly click the examples at the bottom of the page.', interactive=False) - - - with gr.Column(): - gr.Examples(examples=[ - [f"examples/dog.jpg", "Which breed is this dog? "], - [f"examples/jonsnow.jpg", "Who's the man on the right? "], - [f"examples/statue_of_liberty.jpg", "Can you tell me about this building? "], - ], inputs=[image, text_input]) - - gr.Examples(examples=[ - [f"examples/skateboarding_dog.mp4", "What is the dog doing? "], - [f"examples/birthday.mp4", "What is the boy doing? "], - [f"examples/Iron_Man.mp4", "Is the guy in the video Iron Man? 
"], - ], inputs=[video, text_input]) - - gr.Markdown(cite_markdown) - upload_button.click(upload_imgorvideo, [video, image, text_input, chat_state,chatbot], [video, image, text_input, upload_button, chat_state, img_list,chatbot]) - - text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then( - gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list] - ) - clear.click(gradio_reset, [chat_state, img_list], [chatbot, video, image, text_input, upload_button, chat_state, img_list], queue=False) - -demo.launch(share=False, enable_queue=True) - -# %% \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_urlparse.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_urlparse.py deleted file mode 100644 index 69ff0b4b02eaa6ddfb1c1948d553244ac6e3099e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_urlparse.py +++ /dev/null @@ -1,462 +0,0 @@ -""" -An implementation of `urlparse` that provides URL validation and normalization -as described by RFC3986. - -We rely on this implementation rather than the one in Python's stdlib, because: - -* It provides more complete URL validation. -* It properly differentiates between an empty querystring and an absent querystring, - to distinguish URLs with a trailing '?'. -* It handles scheme, hostname, port, and path normalization. -* It supports IDNA hostnames, normalizing them to their encoded form. -* The API supports passing individual components, as well as the complete URL string. - -Previously we relied on the excellent `rfc3986` package to handle URL parsing and -validation, but this module provides a simpler alternative, with less indirection -required. -""" -import ipaddress -import re -import typing - -import idna - -from ._exceptions import InvalidURL - -MAX_URL_LENGTH = 65536 - -# https://datatracker.ietf.org/doc/html/rfc3986.html#section-2.3 -UNRESERVED_CHARACTERS = ( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~" -) -SUB_DELIMS = "!$&'()*+,;=" - -PERCENT_ENCODED_REGEX = re.compile("%[A-Fa-f0-9]{2}") - - -# {scheme}: (optional) -# //{authority} (optional) -# {path} -# ?{query} (optional) -# #{fragment} (optional) -URL_REGEX = re.compile( - ( - r"(?:(?P{scheme}):)?" - r"(?://(?P{authority}))?" - r"(?P{path})" - r"(?:\?(?P{query}))?" - r"(?:#(?P{fragment}))?" - ).format( - scheme="([a-zA-Z][a-zA-Z0-9+.-]*)?", - authority="[^/?#]*", - path="[^?#]*", - query="[^#]*", - fragment=".*", - ) -) - -# {userinfo}@ (optional) -# {host} -# :{port} (optional) -AUTHORITY_REGEX = re.compile( - ( - r"(?:(?P{userinfo})@)?" r"(?P{host})" r":?(?P{port})?" - ).format( - userinfo="[^@]*", # Any character sequence not including '@'. - host="(\\[.*\\]|[^:]*)", # Either any character sequence not including ':', - # or an IPv6 address enclosed within square brackets. - port=".*", # Any character sequence. - ) -) - - -# If we call urlparse with an individual component, then we need to regex -# validate that component individually. -# Note that we're duplicating the same strings as above. Shock! Horror!! 
-COMPONENT_REGEX = { - "scheme": re.compile("([a-zA-Z][a-zA-Z0-9+.-]*)?"), - "authority": re.compile("[^/?#]*"), - "path": re.compile("[^?#]*"), - "query": re.compile("[^#]*"), - "fragment": re.compile(".*"), - "userinfo": re.compile("[^@]*"), - "host": re.compile("(\\[.*\\]|[^:]*)"), - "port": re.compile(".*"), -} - - -# We use these simple regexs as a first pass before handing off to -# the stdlib 'ipaddress' module for IP address validation. -IPv4_STYLE_HOSTNAME = re.compile(r"^[0-9]+.[0-9]+.[0-9]+.[0-9]+$") -IPv6_STYLE_HOSTNAME = re.compile(r"^\[.*\]$") - - -class ParseResult(typing.NamedTuple): - scheme: str - userinfo: str - host: str - port: typing.Optional[int] - path: str - query: typing.Optional[str] - fragment: typing.Optional[str] - - @property - def authority(self) -> str: - return "".join( - [ - f"{self.userinfo}@" if self.userinfo else "", - f"[{self.host}]" if ":" in self.host else self.host, - f":{self.port}" if self.port is not None else "", - ] - ) - - @property - def netloc(self) -> str: - return "".join( - [ - f"[{self.host}]" if ":" in self.host else self.host, - f":{self.port}" if self.port is not None else "", - ] - ) - - def copy_with(self, **kwargs: typing.Optional[str]) -> "ParseResult": - if not kwargs: - return self - - defaults = { - "scheme": self.scheme, - "authority": self.authority, - "path": self.path, - "query": self.query, - "fragment": self.fragment, - } - defaults.update(kwargs) - return urlparse("", **defaults) - - def __str__(self) -> str: - authority = self.authority - return "".join( - [ - f"{self.scheme}:" if self.scheme else "", - f"//{authority}" if authority else "", - self.path, - f"?{self.query}" if self.query is not None else "", - f"#{self.fragment}" if self.fragment is not None else "", - ] - ) - - -def urlparse(url: str = "", **kwargs: typing.Optional[str]) -> ParseResult: - # Initial basic checks on allowable URLs. - # --------------------------------------- - - # Hard limit the maximum allowable URL length. - if len(url) > MAX_URL_LENGTH: - raise InvalidURL("URL too long") - - # If a URL includes any ASCII control characters including \t, \r, \n, - # then treat it as invalid. - if any(char.isascii() and not char.isprintable() for char in url): - raise InvalidURL("Invalid non-printable ASCII character in URL") - - # Some keyword arguments require special handling. - # ------------------------------------------------ - - # Coerce "port" to a string, if it is provided as an integer. - if "port" in kwargs: - port = kwargs["port"] - kwargs["port"] = str(port) if isinstance(port, int) else port - - # Replace "netloc" with "host and "port". - if "netloc" in kwargs: - netloc = kwargs.pop("netloc") or "" - kwargs["host"], _, kwargs["port"] = netloc.partition(":") - - # Replace "username" and/or "password" with "userinfo". - if "username" in kwargs or "password" in kwargs: - username = quote(kwargs.pop("username", "") or "") - password = quote(kwargs.pop("password", "") or "") - kwargs["userinfo"] = f"{username}:{password}" if password else username - - # Replace "raw_path" with "path" and "query". - if "raw_path" in kwargs: - raw_path = kwargs.pop("raw_path") or "" - kwargs["path"], seperator, kwargs["query"] = raw_path.partition("?") - if not seperator: - kwargs["query"] = None - - # Ensure that IPv6 "host" addresses are always escaped with "[...]". 
- if "host" in kwargs: - host = kwargs.get("host") or "" - if ":" in host and not (host.startswith("[") and host.endswith("]")): - kwargs["host"] = f"[{host}]" - - # If any keyword arguments are provided, ensure they are valid. - # ------------------------------------------------------------- - - for key, value in kwargs.items(): - if value is not None: - if len(value) > MAX_URL_LENGTH: - raise InvalidURL(f"URL component '{key}' too long") - - # If a component includes any ASCII control characters including \t, \r, \n, - # then treat it as invalid. - if any(char.isascii() and not char.isprintable() for char in value): - raise InvalidURL( - f"Invalid non-printable ASCII character in URL component '{key}'" - ) - - # Ensure that keyword arguments match as a valid regex. - if not COMPONENT_REGEX[key].fullmatch(value): - raise InvalidURL(f"Invalid URL component '{key}'") - - # The URL_REGEX will always match, but may have empty components. - url_match = URL_REGEX.match(url) - assert url_match is not None - url_dict = url_match.groupdict() - - # * 'scheme', 'authority', and 'path' may be empty strings. - # * 'query' may be 'None', indicating no trailing "?" portion. - # Any string including the empty string, indicates a trailing "?". - # * 'fragment' may be 'None', indicating no trailing "#" portion. - # Any string including the empty string, indicates a trailing "#". - scheme = kwargs.get("scheme", url_dict["scheme"]) or "" - authority = kwargs.get("authority", url_dict["authority"]) or "" - path = kwargs.get("path", url_dict["path"]) or "" - query = kwargs.get("query", url_dict["query"]) - fragment = kwargs.get("fragment", url_dict["fragment"]) - - # The AUTHORITY_REGEX will always match, but may have empty components. - authority_match = AUTHORITY_REGEX.match(authority) - assert authority_match is not None - authority_dict = authority_match.groupdict() - - # * 'userinfo' and 'host' may be empty strings. - # * 'port' may be 'None'. - userinfo = kwargs.get("userinfo", authority_dict["userinfo"]) or "" - host = kwargs.get("host", authority_dict["host"]) or "" - port = kwargs.get("port", authority_dict["port"]) - - # Normalize and validate each component. - # We end up with a parsed representation of the URL, - # with components that are plain ASCII bytestrings. - parsed_scheme: str = scheme.lower() - parsed_userinfo: str = quote(userinfo, safe=SUB_DELIMS + ":") - parsed_host: str = encode_host(host) - parsed_port: typing.Optional[int] = normalize_port(port, scheme) - - has_scheme = parsed_scheme != "" - has_authority = ( - parsed_userinfo != "" or parsed_host != "" or parsed_port is not None - ) - validate_path(path, has_scheme=has_scheme, has_authority=has_authority) - if has_authority: - path = normalize_path(path) - - # The GEN_DELIMS set is... : / ? # [ ] @ - # These do not need to be percent-quoted unless they serve as delimiters for the - # specific component. - - # For 'path' we need to drop ? and # from the GEN_DELIMS set. - parsed_path: str = quote(path, safe=SUB_DELIMS + ":/[]@") - # For 'query' we need to drop '#' from the GEN_DELIMS set. - parsed_query: typing.Optional[str] = ( - None if query is None else quote(query, safe=SUB_DELIMS + ":/?[]@") - ) - # For 'fragment' we can include all of the GEN_DELIMS set. - parsed_fragment: typing.Optional[str] = ( - None if fragment is None else quote(fragment, safe=SUB_DELIMS + ":/?#[]@") - ) - - # The parsed ASCII bytestrings are our canonical form. - # All properties of the URL are derived from these. 
- return ParseResult( - parsed_scheme, - parsed_userinfo, - parsed_host, - parsed_port, - parsed_path, - parsed_query, - parsed_fragment, - ) - - -def encode_host(host: str) -> str: - if not host: - return "" - - elif IPv4_STYLE_HOSTNAME.match(host): - # Validate IPv4 hostnames like #.#.#.# - # - # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2 - # - # IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet - try: - ipaddress.IPv4Address(host) - except ipaddress.AddressValueError: - raise InvalidURL(f"Invalid IPv4 address: {host!r}") - return host - - elif IPv6_STYLE_HOSTNAME.match(host): - # Validate IPv6 hostnames like [...] - # - # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2 - # - # "A host identified by an Internet Protocol literal address, version 6 - # [RFC3513] or later, is distinguished by enclosing the IP literal - # within square brackets ("[" and "]"). This is the only place where - # square bracket characters are allowed in the URI syntax." - try: - ipaddress.IPv6Address(host[1:-1]) - except ipaddress.AddressValueError: - raise InvalidURL(f"Invalid IPv6 address: {host!r}") - return host[1:-1] - - elif host.isascii(): - # Regular ASCII hostnames - # - # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2 - # - # reg-name = *( unreserved / pct-encoded / sub-delims ) - return quote(host.lower(), safe=SUB_DELIMS) - - # IDNA hostnames - try: - return idna.encode(host.lower()).decode("ascii") - except idna.IDNAError: - raise InvalidURL(f"Invalid IDNA hostname: {host!r}") - - -def normalize_port( - port: typing.Optional[typing.Union[str, int]], scheme: str -) -> typing.Optional[int]: - # From https://tools.ietf.org/html/rfc3986#section-3.2.3 - # - # "A scheme may define a default port. For example, the "http" scheme - # defines a default port of "80", corresponding to its reserved TCP - # port number. The type of port designated by the port number (e.g., - # TCP, UDP, SCTP) is defined by the URI scheme. URI producers and - # normalizers should omit the port component and its ":" delimiter if - # port is empty or if its value would be the same as that of the - # scheme's default." - if port is None or port == "": - return None - - try: - port_as_int = int(port) - except ValueError: - raise InvalidURL(f"Invalid port: {port!r}") - - # See https://url.spec.whatwg.org/#url-miscellaneous - default_port = {"ftp": 21, "http": 80, "https": 443, "ws": 80, "wss": 443}.get( - scheme - ) - if port_as_int == default_port: - return None - return port_as_int - - -def validate_path(path: str, has_scheme: bool, has_authority: bool) -> None: - """ - Path validation rules that depend on if the URL contains a scheme or authority component. - - See https://datatracker.ietf.org/doc/html/rfc3986.html#section-3.3 - """ - if has_authority: - # > If a URI contains an authority component, then the path component - # > must either be empty or begin with a slash ("/") character." - if path and not path.startswith("/"): - raise InvalidURL("For absolute URLs, path must be empty or begin with '/'") - else: - # > If a URI does not contain an authority component, then the path cannot begin - # > with two slash characters ("//"). - if path.startswith("//"): - raise InvalidURL( - "URLs with no authority component cannot have a path starting with '//'" - ) - # > In addition, a URI reference (Section 4.1) may be a relative-path reference, in which - # > case the first path segment cannot contain a colon (":") character. 
- if path.startswith(":") and not has_scheme: - raise InvalidURL( - "URLs with no scheme component cannot have a path starting with ':'" - ) - - -def normalize_path(path: str) -> str: - """ - Drop "." and ".." segments from a URL path. - - For example: - - normalize_path("/path/./to/somewhere/..") == "/path/to" - """ - # https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4 - components = path.split("/") - output: typing.List[str] = [] - for component in components: - if component == ".": - pass - elif component == "..": - if output and output != [""]: - output.pop() - else: - output.append(component) - return "/".join(output) - - -def percent_encode(char: str) -> str: - """ - Replace a single character with the percent-encoded representation. - - Characters outside the ASCII range are represented with their a percent-encoded - representation of their UTF-8 byte sequence. - - For example: - - percent_encode(" ") == "%20" - """ - return "".join([f"%{byte:02x}" for byte in char.encode("utf-8")]).upper() - - -def is_safe(string: str, safe: str = "/") -> bool: - """ - Determine if a given string is already quote-safe. - """ - NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe + "%" - - # All characters must already be non-escaping or '%' - for char in string: - if char not in NON_ESCAPED_CHARS: - return False - - # Any '%' characters must be valid '%xx' escape sequences. - return string.count("%") == len(PERCENT_ENCODED_REGEX.findall(string)) - - -def quote(string: str, safe: str = "/") -> str: - """ - Use percent-encoding to quote a string if required. - """ - if is_safe(string, safe=safe): - return string - - NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe - return "".join( - [char if char in NON_ESCAPED_CHARS else percent_encode(char) for char in string] - ) - - -def urlencode(items: typing.List[typing.Tuple[str, str]]) -> str: - # We can use a much simpler version of the stdlib urlencode here because - # we don't need to handle a bunch of different typing cases, such as bytes vs str. - # - # https://github.com/python/cpython/blob/b2f7b2ef0b5421e01efb8c7bee2ef95d3bab77eb/Lib/urllib/parse.py#L926 - # - # Note that we use '%20' encoding for spaces, and treat '/' as a safe - # character. This means our query params have the same escaping as other - # characters in the URL path. This is slightly different to `requests`, - # but is the behaviour that browsers use. - # - # See https://github.com/encode/httpx/issues/2536 and - # https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlencode - return "&".join([quote(k) + "=" + quote(v) for k, v in items]) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_login.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_login.py deleted file mode 100644 index 4bbb1c9235a2c4d9e145f5c35603dca55e07d2a8..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_login.py +++ /dev/null @@ -1,360 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains methods to login to the Hub.""" -import os -import subprocess -from functools import partial -from getpass import getpass -from typing import Optional - -from .commands._cli_utils import ANSI -from .commands.delete_cache import _ask_for_confirmation_no_tui -from .hf_api import get_token_permission -from .utils import ( - HfFolder, - capture_output, - is_google_colab, - is_notebook, - list_credential_helpers, - logging, - run_subprocess, - set_git_credential, - unset_git_credential, -) - - -logger = logging.get_logger(__name__) - - -def login( - token: Optional[str] = None, - add_to_git_credential: bool = False, - new_session: bool = True, - write_permission: bool = False, -) -> None: - """Login the machine to access the Hub. - - The `token` is persisted in cache and set as a git credential. Once done, the machine - is logged in and the access token will be available across all `huggingface_hub` - components. If `token` is not provided, it will be prompted to the user either with - a widget (in a notebook) or via the terminal. - - To login from outside of a script, one can also use `huggingface-cli login` which is - a cli command that wraps [`login`]. - - - [`login`] is a drop-in replacement method for [`notebook_login`] as it wraps and - extends its capabilities. - - - - When the token is not passed, [`login`] will automatically detect if the script runs - in a notebook or not. However, this detection might not be accurate due to the - variety of notebooks that exists nowadays. If that is the case, you can always force - the UI by using [`notebook_login`] or [`interpreter_login`]. - - - Args: - token (`str`, *optional*): - User access token to generate from https://huggingface.co/settings/token. - add_to_git_credential (`bool`, defaults to `False`): - If `True`, token will be set as git credential. If no git credential helper - is configured, a warning will be displayed to the user. If `token` is `None`, - the value of `add_to_git_credential` is ignored and will be prompted again - to the end user. - new_session (`bool`, defaults to `True`): - If `True`, will request a token even if one is already saved on the machine. - write_permission (`bool`, defaults to `False`): - If `True`, requires a token with write permission. - Raises: - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If an organization token is passed. Only personal account tokens are valid - to login. - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If token is invalid. - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError) - If running in a notebook but `ipywidgets` is not installed. - """ - if token is not None: - if not add_to_git_credential: - print( - "Token will not been saved to git credential helper. Pass" - " `add_to_git_credential=True` if you want to set the git" - " credential as well." - ) - _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) - elif is_notebook(): - notebook_login(new_session=new_session, write_permission=write_permission) - else: - interpreter_login(new_session=new_session, write_permission=write_permission) - - -def logout() -> None: - """Logout the machine from the Hub. - - Token is deleted from the machine and removed from git credential. 
- """ - token = HfFolder.get_token() - if token is None: - print("Not logged in!") - return - HfFolder.delete_token() - unset_git_credential() - print("Successfully logged out.") - - -### -# Interpreter-based login (text) -### - - -def interpreter_login(new_session: bool = True, write_permission: bool = False) -> None: - """ - Displays a prompt to login to the HF website and store the token. - - This is equivalent to [`login`] without passing a token when not run in a notebook. - [`interpreter_login`] is useful if you want to force the use of the terminal prompt - instead of a notebook widget. - - For more details, see [`login`]. - - Args: - new_session (`bool`, defaults to `True`): - If `True`, will request a token even if one is already saved on the machine. - write_permission (`bool`, defaults to `False`): - If `True`, requires a token with write permission. - - """ - if not new_session and _current_token_okay(write_permission=write_permission): - print("User is already logged in.") - return - - print(""" - _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_| - _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| - _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_| - _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| - _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_| - """) - if HfFolder.get_token() is not None: - print( - " A token is already saved on your machine. Run `huggingface-cli" - " whoami` to get more information or `huggingface-cli logout` if you want" - " to log out." - ) - print(" Setting a new token will erase the existing one.") - - print(" To login, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens .") - if os.name == "nt": - print("Token can be pasted using 'Right-Click'.") - token = getpass("Token: ") - add_to_git_credential = _ask_for_confirmation_no_tui("Add token as git credential?") - - _login(token=token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) - - -### -# Notebook-based login (widget) -### - -NOTEBOOK_LOGIN_PASSWORD_HTML = """

      Immediately click login after typing your password or -it might be stored in plain text in this notebook file.
      """ - - -NOTEBOOK_LOGIN_TOKEN_HTML_START = """

      Copy a token from your Hugging Face -tokens page and paste it below.
      Immediately click login after copying -your token or it might be stored in plain text in this notebook file.
      """ - - -NOTEBOOK_LOGIN_TOKEN_HTML_END = """ -Pro Tip: If you don't already have one, you can create a dedicated -'notebooks' token with 'write' access, that you can then easily reuse for all -notebooks. """ - - -def notebook_login(new_session: bool = True, write_permission: bool = False) -> None: - """ - Displays a widget to login to the HF website and store the token. - - This is equivalent to [`login`] without passing a token when run in a notebook. - [`notebook_login`] is useful if you want to force the use of the notebook widget - instead of a prompt in the terminal. - - For more details, see [`login`]. - - Args: - new_session (`bool`, defaults to `True`): - If `True`, will request a token even if one is already saved on the machine. - write_permission (`bool`, defaults to `False`): - If `True`, requires a token with write permission. - """ - try: - import ipywidgets.widgets as widgets # type: ignore - from IPython.display import display # type: ignore - except ImportError: - raise ImportError( - "The `notebook_login` function can only be used in a notebook (Jupyter or" - " Colab) and you need the `ipywidgets` module: `pip install ipywidgets`." - ) - if not new_session and _current_token_okay(write_permission=write_permission): - print("User is already logged in.") - return - - box_layout = widgets.Layout(display="flex", flex_flow="column", align_items="center", width="50%") - - token_widget = widgets.Password(description="Token:") - git_checkbox_widget = widgets.Checkbox(value=True, description="Add token as git credential?") - token_finish_button = widgets.Button(description="Login") - - login_token_widget = widgets.VBox( - [ - widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START), - token_widget, - git_checkbox_widget, - token_finish_button, - widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END), - ], - layout=box_layout, - ) - display(login_token_widget) - - # On click events - def login_token_event(t, write_permission: bool = False): - """ - Event handler for the login button. - - Args: - write_permission (`bool`, defaults to `False`): - If `True`, requires a token with write permission. - """ - token = token_widget.value - add_to_git_credential = git_checkbox_widget.value - # Erase token and clear value to make sure it's not saved in the notebook. - token_widget.value = "" - # Hide inputs - login_token_widget.children = [widgets.Label("Connecting...")] - try: - with capture_output() as captured: - _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) - message = captured.getvalue() - except Exception as error: - message = str(error) - # Print result (success message or error) - login_token_widget.children = [widgets.Label(line) for line in message.split("\n") if line.strip()] - - token_finish_button.on_click(partial(login_token_event, write_permission=write_permission)) - - -### -# Login private helpers -### - - -def _login(token: str, add_to_git_credential: bool, write_permission: bool = False) -> None: - if token.startswith("api_org"): - raise ValueError("You must use your personal account token, not an organization token.") - - permission = get_token_permission(token) - if permission is None: - raise ValueError("Invalid token passed!") - elif write_permission and permission != "write": - raise ValueError( - "Token is valid but is 'read-only' and a 'write' token is required.\nPlease provide a new token with" - " correct permission." 
- ) - print(f"Token is valid (permission: {permission}).") - - if add_to_git_credential: - if _is_git_credential_helper_configured(): - set_git_credential(token) - print( - "Your token has been saved in your configured git credential helpers" - + f" ({','.join(list_credential_helpers())})." - ) - else: - print("Token has not been saved to git credential helper.") - - HfFolder.save_token(token) - print(f"Your token has been saved to {HfFolder.path_token}") - print("Login successful") - - -def _current_token_okay(write_permission: bool = False): - """Check if the current token is valid. - - Args: - write_permission (`bool`, defaults to `False`): - If `True`, requires a token with write permission. - - Returns: - `bool`: `True` if the current token is valid, `False` otherwise. - """ - permission = get_token_permission() - if permission is None or (write_permission and permission != "write"): - return False - return True - - -def _is_git_credential_helper_configured() -> bool: - """Check if a git credential helper is configured. - - Warns user if not the case (except for Google Colab where "store" is set by default - by `huggingface_hub`). - """ - helpers = list_credential_helpers() - if len(helpers) > 0: - return True # Do not warn: at least 1 helper is set - - # Only in Google Colab to avoid the warning message - # See https://github.com/huggingface/huggingface_hub/issues/1043#issuecomment-1247010710 - if is_google_colab(): - _set_store_as_git_credential_helper_globally() - return True # Do not warn: "store" is used by default in Google Colab - - # Otherwise, warn user - print( - ANSI.red( - "Cannot authenticate through git-credential as no helper is defined on your" - " machine.\nYou might have to re-authenticate when pushing to the Hugging" - " Face Hub.\nRun the following command in your terminal in case you want to" - " set the 'store' credential helper as default.\n\ngit config --global" - " credential.helper store\n\nRead" - " https://git-scm.com/book/en/v2/Git-Tools-Credential-Storage for more" - " details." - ) - ) - return False - - -def _set_store_as_git_credential_helper_globally() -> None: - """Set globally the credential.helper to `store`. - - To be used only in Google Colab as we assume the user doesn't care about the git - credential config. It is the only particular case where we don't want to display the - warning message in [`notebook_login()`]. 
- - Related: - - https://github.com/huggingface/huggingface_hub/issues/1043 - - https://github.com/huggingface/huggingface_hub/issues/1051 - - https://git-scm.com/docs/git-credential-store - """ - try: - run_subprocess("git config --global credential.helper store") - except subprocess.CalledProcessError as exc: - raise EnvironmentError(exc.stderr) diff --git a/spaces/DragGan/DragGan-Inversion/PTI/evaluation/experiment_setting_creator.py b/spaces/DragGan/DragGan-Inversion/PTI/evaluation/experiment_setting_creator.py deleted file mode 100644 index c8ad234ba845d84ddd435424a7fe9ed238af3ff6..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/evaluation/experiment_setting_creator.py +++ /dev/null @@ -1,43 +0,0 @@ -import glob -import os -from configs import global_config, paths_config, hyperparameters -from scripts.latent_creators.sg2_plus_latent_creator import SG2PlusLatentCreator -from scripts.latent_creators.e4e_latent_creator import E4ELatentCreator -from scripts.run_pti import run_PTI -import pickle -import torch -from utils.models_utils import toogle_grad, load_old_G - - -class ExperimentRunner: - - def __init__(self, run_id=''): - self.images_paths = glob.glob(f'{paths_config.input_data_path}/*') - self.target_paths = glob.glob(f'{paths_config.input_data_path}/*') - self.run_id = run_id - self.sampled_ws = None - - self.old_G = load_old_G() - - toogle_grad(self.old_G, False) - - def run_experiment(self, run_pt, create_other_latents, use_multi_id_training, use_wandb=False): - if run_pt: - self.run_id = run_PTI(self.run_id, use_wandb=use_wandb, use_multi_id_training=use_multi_id_training) - if create_other_latents: - sg2_plus_latent_creator = SG2PlusLatentCreator(use_wandb=use_wandb) - sg2_plus_latent_creator.create_latents() - e4e_latent_creator = E4ELatentCreator(use_wandb=use_wandb) - e4e_latent_creator.create_latents() - - torch.cuda.empty_cache() - - return self.run_id - - -if __name__ == '__main__': - os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' - os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices - - runner = ExperimentRunner() - runner.run_experiment(True, False, False) diff --git a/spaces/DragGan/DragGan/stylegan_human/run_pti.py b/spaces/DragGan/DragGan/stylegan_human/run_pti.py deleted file mode 100644 index c335c5b156554ea1069af0c153ec6edbea70d931..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/run_pti.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
- -from random import choice -from string import ascii_uppercase -from torch.utils.data import DataLoader -from torchvision.transforms import transforms -import os -from pti.pti_configs import global_config, paths_config -import wandb - -from pti.training.coaches.multi_id_coach import MultiIDCoach -from pti.training.coaches.single_id_coach import SingleIDCoach -from utils.ImagesDataset import ImagesDataset - - -def run_PTI(run_name='', use_wandb=False, use_multi_id_training=False): - os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' - os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices - - if run_name == '': - global_config.run_name = ''.join(choice(ascii_uppercase) for i in range(12)) - else: - global_config.run_name = run_name - - if use_wandb: - run = wandb.init(project=paths_config.pti_results_keyword, reinit=True, name=global_config.run_name) - global_config.pivotal_training_steps = 1 - global_config.training_step = 1 - - embedding_dir_path = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{paths_config.pti_results_keyword}' - # print('embedding_dir_path: ', embedding_dir_path) #./embeddings/barcelona/PTI - os.makedirs(embedding_dir_path, exist_ok=True) - - dataset = ImagesDataset(paths_config.input_data_path, transforms.Compose([ - transforms.Resize((1024, 512)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])) - - dataloader = DataLoader(dataset, batch_size=1, shuffle=False) - - if use_multi_id_training: - coach = MultiIDCoach(dataloader, use_wandb) - else: - coach = SingleIDCoach(dataloader, use_wandb) - - coach.train() - - return global_config.run_name - - -if __name__ == '__main__': - run_PTI(run_name='', use_wandb=False, use_multi_id_training=False) diff --git a/spaces/DragGan/DragGan/viz/renderer.py b/spaces/DragGan/DragGan/viz/renderer.py deleted file mode 100644 index a603cfddf89f40ee5a97b6bcda7ae9959b4fb8fe..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/viz/renderer.py +++ /dev/null @@ -1,427 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -from socket import has_dualstack_ipv6 -import sys -import copy -import traceback -import math -import numpy as np -from PIL import Image, ImageDraw, ImageFont -import torch -import torch.fft -import torch.nn as nn -import torch.nn.functional as F -import matplotlib.cm -import dnnlib -from torch_utils.ops import upfirdn2d -import legacy # pylint: disable=import-error - -# ---------------------------------------------------------------------------- - - -class CapturedException(Exception): - def __init__(self, msg=None): - if msg is None: - _type, value, _traceback = sys.exc_info() - assert value is not None - if isinstance(value, CapturedException): - msg = str(value) - else: - msg = traceback.format_exc() - assert isinstance(msg, str) - super().__init__(msg) - -# ---------------------------------------------------------------------------- - - -class CaptureSuccess(Exception): - def __init__(self, out): - super().__init__() - self.out = out - -# ---------------------------------------------------------------------------- - - -def add_watermark_np(input_image_array, watermark_text="AI Generated"): - image = Image.fromarray(np.uint8(input_image_array)).convert("RGBA") - - # Initialize text image - txt = Image.new('RGBA', image.size, (255, 255, 255, 0)) - font = ImageFont.truetype('arial.ttf', round(25/512*image.size[0])) - d = ImageDraw.Draw(txt) - - text_width, text_height = font.getsize(watermark_text) - text_position = (image.size[0] - text_width - - 10, image.size[1] - text_height - 10) - # white color with the alpha channel set to semi-transparent - text_color = (255, 255, 255, 128) - - # Draw the text onto the text canvas - d.text(text_position, watermark_text, font=font, fill=text_color) - - # Combine the image with the watermark - watermarked = Image.alpha_composite(image, txt) - watermarked_array = np.array(watermarked) - return watermarked_array - -# ---------------------------------------------------------------------------- - - -class Renderer: - def __init__(self, disable_timing=False): - self._device = torch.device('cuda' if torch.cuda.is_available( - ) else 'mps' if torch.backends.mps.is_available() else 'cpu') - self._dtype = torch.float32 if self._device.type == 'mps' else torch.float64 - self._pkl_data = dict() # {pkl: dict | CapturedException, ...} - self._networks = dict() # {cache_key: torch.nn.Module, ...} - self._pinned_bufs = dict() # {(shape, dtype): torch.Tensor, ...} - self._cmaps = dict() # {name: torch.Tensor, ...} - self._is_timing = False - if not disable_timing: - self._start_event = torch.cuda.Event(enable_timing=True) - self._end_event = torch.cuda.Event(enable_timing=True) - self._disable_timing = disable_timing - self._net_layers = dict() # {cache_key: [dnnlib.EasyDict, ...], ...} - - def render(self, **args): - if self._disable_timing: - self._is_timing = False - else: - self._start_event.record(torch.cuda.current_stream(self._device)) - self._is_timing = True - res = dnnlib.EasyDict() - try: - init_net = False - if not hasattr(self, 'G'): - init_net = True - if hasattr(self, 'pkl'): - if self.pkl != args['pkl']: - init_net = True - if hasattr(self, 'w_load'): - if self.w_load is not args['w_load']: - init_net = True - if hasattr(self, 'w0_seed'): - if self.w0_seed != args['w0_seed']: - init_net = True - if hasattr(self, 'w_plus'): - if self.w_plus != args['w_plus']: - init_net = True - if args['reset_w']: - init_net = True - res.init_net = init_net - if init_net: - self.init_network(res, **args) - self._render_drag_impl(res, **args) - except: - 
res.error = CapturedException() - if not self._disable_timing: - self._end_event.record(torch.cuda.current_stream(self._device)) - if 'image' in res: - res.image = self.to_cpu(res.image).detach().numpy() - res.image = add_watermark_np(res.image, 'AI Generated') - if 'stats' in res: - res.stats = self.to_cpu(res.stats).detach().numpy() - if 'error' in res: - res.error = str(res.error) - # if 'stop' in res and res.stop: - - if self._is_timing and not self._disable_timing: - self._end_event.synchronize() - res.render_time = self._start_event.elapsed_time( - self._end_event) * 1e-3 - self._is_timing = False - return res - - def get_network(self, pkl, key, **tweak_kwargs): - data = self._pkl_data.get(pkl, None) - if data is None: - print(f'Loading "{pkl}"... ', end='', flush=True) - try: - with dnnlib.util.open_url(pkl, verbose=False) as f: - data = legacy.load_network_pkl(f) - print('Done.') - except: - data = CapturedException() - print('Failed!') - self._pkl_data[pkl] = data - self._ignore_timing() - if isinstance(data, CapturedException): - raise data - - orig_net = data[key] - cache_key = (orig_net, self._device, tuple( - sorted(tweak_kwargs.items()))) - net = self._networks.get(cache_key, None) - if net is None: - try: - if 'stylegan2' in pkl: - from training.networks_stylegan2 import Generator - elif 'stylegan3' in pkl: - from training.networks_stylegan3 import Generator - elif 'stylegan_human' in pkl: - from stylegan_human.training_scripts.sg2.training.networks import Generator - else: - raise NameError('Cannot infer model type from pkl name!') - - print(data[key].init_args) - print(data[key].init_kwargs) - if 'stylegan_human' in pkl: - net = Generator( - *data[key].init_args, **data[key].init_kwargs, square=False, padding=True) - else: - net = Generator(*data[key].init_args, - **data[key].init_kwargs) - net.load_state_dict(data[key].state_dict()) - net.to(self._device) - except: - net = CapturedException() - self._networks[cache_key] = net - self._ignore_timing() - if isinstance(net, CapturedException): - raise net - return net - - def _get_pinned_buf(self, ref): - key = (tuple(ref.shape), ref.dtype) - buf = self._pinned_bufs.get(key, None) - if buf is None: - buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory() - self._pinned_bufs[key] = buf - return buf - - def to_device(self, buf): - return self._get_pinned_buf(buf).copy_(buf).to(self._device) - - def to_cpu(self, buf): - return self._get_pinned_buf(buf).copy_(buf).clone() - - def _ignore_timing(self): - self._is_timing = False - - def _apply_cmap(self, x, name='viridis'): - cmap = self._cmaps.get(name, None) - if cmap is None: - cmap = matplotlib.cm.get_cmap(name) - cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3] - cmap = self.to_device(torch.from_numpy(cmap)) - self._cmaps[name] = cmap - hi = cmap.shape[0] - 1 - x = (x * hi + 0.5).clamp(0, hi).to(torch.int64) - x = torch.nn.functional.embedding(x, cmap) - return x - - def init_network(self, res, - pkl=None, - w0_seed=0, - w_load=None, - w_plus=True, - noise_mode='const', - trunc_psi=0.7, - trunc_cutoff=None, - input_transform=None, - lr=0.001, - **kwargs - ): - # Dig up network details. - self.pkl = pkl - G = self.get_network(pkl, 'G_ema') - self.G = G - res.img_resolution = G.img_resolution - res.num_ws = G.num_ws - res.has_noise = any('noise_const' in name for name, - _buf in G.synthesis.named_buffers()) - res.has_input_transform = ( - hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform')) - res.stop = False - # Set input transform. 
- if res.has_input_transform: - m = np.eye(3) - try: - if input_transform is not None: - m = np.linalg.inv(np.asarray(input_transform)) - except np.linalg.LinAlgError: - res.error = CapturedException() - G.synthesis.input.transform.copy_(torch.from_numpy(m)) - - # Generate random latents. - self.w0_seed = w0_seed - self.w_load = w_load - - if self.w_load is None: - # Generate random latents. - z = torch.from_numpy(np.random.RandomState(w0_seed).randn( - 1, 512)).to(self._device, dtype=self._dtype) - - # Run mapping network. - label = torch.zeros([1, G.c_dim], device=self._device) - w = G.mapping(z, label, truncation_psi=trunc_psi, - truncation_cutoff=trunc_cutoff) - else: - w = self.w_load.clone().to(self._device) - - self.w0 = w.detach().clone() - self.w_plus = w_plus - if w_plus: - self.w = w.detach() - else: - self.w = w[:, 0, :].detach() - self.w.requires_grad = True - self.w_optim = torch.optim.Adam([self.w], lr=lr) - - self.feat_refs = None - self.points0_pt = None - - def update_lr(self, lr): - - del self.w_optim - self.w_optim = torch.optim.Adam([self.w], lr=lr) - print(f'Rebuild optimizer with lr: {lr}') - print(' Remain feat_refs and points0_pt') - - def _render_drag_impl(self, res, - points=[], - targets=[], - mask=None, - lambda_mask=10, - reg=0, - feature_idx=5, - r1=3, - r2=12, - random_seed=0, - noise_mode='const', - trunc_psi=0.7, - force_fp32=False, - layer_name=None, - sel_channels=3, - base_channel=0, - img_scale_db=0, - img_normalize=False, - untransform=False, - is_drag=False, - reset=False, - to_pil=False, - **kwargs - ): - try: - G = self.G - ws = self.w - if ws.dim() == 2: - ws = ws.unsqueeze(1).repeat(1, 6, 1) - ws = torch.cat([ws[:, :6, :], self.w0[:, 6:, :]], dim=1) - if hasattr(self, 'points'): - if len(points) != len(self.points): - reset = True - if reset: - self.feat_refs = None - self.points0_pt = None - self.points = points - - # Run synthesis network. 
- label = torch.zeros([1, G.c_dim], device=self._device) - img, feat = G(ws, label, truncation_psi=trunc_psi, - noise_mode=noise_mode, input_is_w=True, return_feature=True) - - h, w = G.img_resolution, G.img_resolution - - if is_drag: - X = torch.linspace(0, h, h) - Y = torch.linspace(0, w, w) - xx, yy = torch.meshgrid(X, Y) - feat_resize = F.interpolate( - feat[feature_idx], [h, w], mode='bilinear') - if self.feat_refs is None: - self.feat0_resize = F.interpolate( - feat[feature_idx].detach(), [h, w], mode='bilinear') - self.feat_refs = [] - for point in points: - py, px = round(point[0]), round(point[1]) - self.feat_refs.append(self.feat0_resize[:, :, py, px]) - self.points0_pt = torch.Tensor(points).unsqueeze( - 0).to(self._device) # 1, N, 2 - - # Point tracking with feature matching - with torch.no_grad(): - for j, point in enumerate(points): - r = round(r2 / 512 * h) - up = max(point[0] - r, 0) - down = min(point[0] + r + 1, h) - left = max(point[1] - r, 0) - right = min(point[1] + r + 1, w) - feat_patch = feat_resize[:, :, up:down, left:right] - L2 = torch.linalg.norm( - feat_patch - self.feat_refs[j].reshape(1, -1, 1, 1), dim=1) - _, idx = torch.min(L2.view(1, -1), -1) - width = right - left - point = [idx.item() // width + up, idx.item() % - width + left] - points[j] = point - - res.points = [[point[0], point[1]] for point in points] - - # Motion supervision - loss_motion = 0 - res.stop = True - for j, point in enumerate(points): - direction = torch.Tensor( - [targets[j][1] - point[1], targets[j][0] - point[0]]) - if torch.linalg.norm(direction) > max(2 / 512 * h, 2): - res.stop = False - if torch.linalg.norm(direction) > 1: - distance = ( - (xx.to(self._device) - point[0])**2 + (yy.to(self._device) - point[1])**2)**0.5 - relis, reljs = torch.where( - distance < round(r1 / 512 * h)) - direction = direction / \ - (torch.linalg.norm(direction) + 1e-7) - gridh = (relis+direction[1]) / (h-1) * 2 - 1 - gridw = (reljs+direction[0]) / (w-1) * 2 - 1 - grid = torch.stack( - [gridw, gridh], dim=-1).unsqueeze(0).unsqueeze(0) - target = F.grid_sample( - feat_resize.float(), grid, align_corners=True).squeeze(2) - loss_motion += F.l1_loss( - feat_resize[:, :, relis, reljs].detach(), target) - - loss = loss_motion - if mask is not None: - if mask.min() == 0 and mask.max() == 1: - mask_usq = mask.to( - self._device).unsqueeze(0).unsqueeze(0) - loss_fix = F.l1_loss( - feat_resize * mask_usq, self.feat0_resize * mask_usq) - loss += lambda_mask * loss_fix - - # latent code regularization - loss += reg * F.l1_loss(ws, self.w0) - if not res.stop: - self.w_optim.zero_grad() - loss.backward() - self.w_optim.step() - - # Scale and convert to uint8. - img = img[0] - if img_normalize: - img = img / img.norm(float('inf'), - dim=[1, 2], keepdim=True).clip(1e-8, 1e8) - img = img * (10 ** (img_scale_db / 20)) - img = (img * 127.5 + 128).clamp(0, - 255).to(torch.uint8).permute(1, 2, 0) - if to_pil: - from PIL import Image - img = img.cpu().numpy() - img = Image.fromarray(img) - res.image = img - res.w = ws.detach().cpu().numpy() - except Exception as e: - import os - print(f'Renderer error: {e}') - print("Out of memory error occurred. 
Restarting the app...") - os.execv(sys.executable, ['python'] + sys.argv) - -# ---------------------------------------------------------------------------- diff --git a/spaces/EdBianchi/Social_Toximeter/README.md b/spaces/EdBianchi/Social_Toximeter/README.md deleted file mode 100644 index 790d8ea31c1bd45f3025fcae7ca919bcdf581db2..0000000000000000000000000000000000000000 --- a/spaces/EdBianchi/Social_Toximeter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Social Toximeter -emoji: ☢️ -colorFrom: purple -colorTo: pink -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Eddycrack864/Applio-Inference/mdx_processing_script.py b/spaces/Eddycrack864/Applio-Inference/mdx_processing_script.py deleted file mode 100644 index 05616843300aacf46c98ce06f017ba1d0794f313..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/mdx_processing_script.py +++ /dev/null @@ -1,146 +0,0 @@ -import gc -import requests -import subprocess -import logging -import sys -from bs4 import BeautifulSoup -import torch, pdb, os, warnings, librosa -import soundfile as sf -from tqdm import tqdm -import numpy as np -import torch -now_dir = os.getcwd() -sys.path.append(now_dir) -import mdx -branch = "https://github.com/NaJeongMo/Colab-for-MDX_B" - -model_params = "https://raw.githubusercontent.com/TRvlvr/application_data/main/mdx_model_data/model_data.json" -_Models = "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/" -# _models = "https://pastebin.com/raw/jBzYB8vz" -_models = "https://raw.githubusercontent.com/TRvlvr/application_data/main/filelists/download_checks.json" -stem_naming = "https://pastebin.com/raw/mpH4hRcF" - -file_folder = "Colab-for-MDX_B" -model_ids = requests.get(_models).json() -model_ids = model_ids["mdx_download_list"].values() -#print(model_ids) -model_params = requests.get(model_params).json() -stem_naming = requests.get(stem_naming).json() - -os.makedirs("tmp_models", exist_ok=True) - -warnings.filterwarnings("ignore") -cpu = torch.device("cpu") -if torch.cuda.is_available(): - device = torch.device("cuda:0") -elif torch.backends.mps.is_available(): - device = torch.device("mps") -else: - device = torch.device("cpu") - - -def get_model_list(): - return model_ids - -def id_to_ptm(mkey): - if mkey in model_ids: - mpath = f"{now_dir}/tmp_models/{mkey}" - if not os.path.exists(f'{now_dir}/tmp_models/{mkey}'): - print('Downloading model...',end=' ') - subprocess.run( - ["wget", _Models+mkey, "-O", mpath] - ) - print(f'saved to {mpath}') - # get_ipython().system(f'gdown {model_id} -O /content/tmp_models/{mkey}') - return mpath - else: - return mpath - else: - mpath = f'models/{mkey}' - return mpath - -def prepare_mdx(onnx,custom_param=False, dim_f=None, dim_t=None, n_fft=None, stem_name=None, compensation=None): - device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') - if custom_param: - assert not (dim_f is None or dim_t is None or n_fft is None or compensation is None), 'Custom parameter selected, but incomplete parameters are provided.' 
- mdx_model = mdx.MDX_Model( - device, - dim_f = dim_f, - dim_t = dim_t, - n_fft = n_fft, - stem_name=stem_name, - compensation=compensation - ) - else: - model_hash = mdx.MDX.get_hash(onnx) - if model_hash in model_params: - mp = model_params.get(model_hash) - mdx_model = mdx.MDX_Model( - device, - dim_f = mp["mdx_dim_f_set"], - dim_t = 2**mp["mdx_dim_t_set"], - n_fft = mp["mdx_n_fft_scale_set"], - stem_name=mp["primary_stem"], - compensation=compensation if not custom_param and compensation is not None else mp["compensate"] - ) - return mdx_model - -def run_mdx(onnx, mdx_model,filename, output_format='wav',diff=False,suffix=None,diff_suffix=None, denoise=False, m_threads=2): - mdx_sess = mdx.MDX(onnx,mdx_model) - print(f"Processing: {filename}") - if filename.lower().endswith('.wav'): - wave, sr = librosa.load(filename, mono=False, sr=44100) - else: - temp_wav = 'temp_audio.wav' - subprocess.run(['ffmpeg', '-i', filename, '-ar', '44100', '-ac', '2', temp_wav]) # Convert to WAV format - wave, sr = librosa.load(temp_wav, mono=False, sr=44100) - os.remove(temp_wav) - - #wave, sr = librosa.load(filename,mono=False, sr=44100) - # normalizing input wave gives better output - peak = max(np.max(wave), abs(np.min(wave))) - wave /= peak - if denoise: - wave_processed = -(mdx_sess.process_wave(-wave, m_threads)) + (mdx_sess.process_wave(wave, m_threads)) - wave_processed *= 0.5 - else: - wave_processed = mdx_sess.process_wave(wave, m_threads) - # return to previous peak - wave_processed *= peak - - stem_name = mdx_model.stem_name if suffix is None else suffix # use suffix if provided - save_path = os.path.basename(os.path.splitext(filename)[0]) - #vocals_save_path = os.path.join(vocals_folder, f"{save_path}_{stem_name}.{output_format}") - #instrumental_save_path = os.path.join(instrumental_folder, f"{save_path}_{stem_name}.{output_format}") - save_path = f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.{output_format}" - save_path = os.path.join( - 'audios', - save_path - ) - sf.write( - save_path, - wave_processed.T, - sr - ) - - print(f'done, saved to: {save_path}') - - if diff: - diff_stem_name = stem_naming.get(stem_name) if diff_suffix is None else diff_suffix # use suffix if provided - stem_name = f"{stem_name}_diff" if diff_stem_name is None else diff_stem_name - save_path = f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.{output_format}" - save_path = os.path.join( - 'audio-others', - save_path - ) - sf.write( - save_path, - (-wave_processed.T*mdx_model.compensation)+wave.T, - sr - ) - print(f'invert done, saved to: {save_path}') - del mdx_sess, wave_processed, wave - gc.collect() - -if __name__ == "__main__": - print() \ No newline at end of file diff --git a/spaces/Epitech/hand-sign-detection/README.md b/spaces/Epitech/hand-sign-detection/README.md deleted file mode 100644 index 85dbc4235898a293f9862c3d9b3500a4f2582c80..0000000000000000000000000000000000000000 --- a/spaces/Epitech/hand-sign-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hand Sign Detection -emoji: 📊 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.5 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/backups.py b/spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/backups.py deleted file mode 100644 index b814f8184792e80e2324685436053d61487110b1..0000000000000000000000000000000000000000 --- 
a/spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/backups.py +++ /dev/null @@ -1,141 +0,0 @@ -import os -import shutil -import hashlib -import time -import base64 - - - - -LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' -WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' -GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' - -def import_google_drive_backup(): - print("Importing Google Drive backup...") - weights_exist = False - for root, dirs, files in os.walk(GOOGLE_DRIVE_PATH): - for filename in files: - filepath = os.path.join(root, filename) - if os.path.isfile(filepath) and not filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')): - backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - shutil.copy2(filepath, backup_filepath) # copy file with metadata - print(f'Imported file from Google Drive backup: {filename}') - elif filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')) and filename.endswith('.pth'): - weights_exist = True - weights_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, os.path.join(GOOGLE_DRIVE_PATH, 'weights'))) - weights_folderpath = os.path.dirname(weights_filepath) - if not os.path.exists(weights_folderpath): - os.makedirs(weights_folderpath) - print(f'Created weights folder: {weights_folderpath}', flush=True) - shutil.copy2(filepath, weights_filepath) # copy file with metadata - print(f'Imported file from weights: {filename}') - if weights_exist: - print("Copied weights from Google Drive backup to local weights folder.") - else: - print("No weights found in Google Drive backup.") - print("Google Drive backup import completed.") - -def get_md5_hash(file_path): - hash_md5 = hashlib.md5() - with open(file_path, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): - hash_md5.update(chunk) - return hash_md5.hexdigest() - -def copy_weights_folder_to_drive(): - destination_folder = os.path.join(GOOGLE_DRIVE_PATH, 'weights') - try: - if not os.path.exists(destination_folder): - os.makedirs(destination_folder) - - num_copied = 0 - for filename in os.listdir(WEIGHTS_FOLDER): - if filename.endswith('.pth'): - source_file = os.path.join(WEIGHTS_FOLDER, filename) - destination_file = os.path.join(destination_folder, filename) - if not os.path.exists(destination_file): - shutil.copy2(source_file, destination_file) - num_copied += 1 - print(f"Copied {filename} to Google Drive!") - - if num_copied == 0: - print("No new finished models found for copying.") - else: - print(f"Finished copying {num_copied} files to Google Drive!") - - except Exception as e: - print(f"An error occurred while copying weights: {str(e)}") - # You can log the error or take appropriate actions here. 
- -def backup_files(): - print("\nStarting backup loop...") - last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt') - fully_updated = False # boolean to track if all files are up to date - - while True: - try: - updated = False # flag to check if any files were updated - last_backup_timestamps = {} - - try: - with open(last_backup_timestamps_path, 'r') as f: - last_backup_timestamps = dict(line.strip().split(':') for line in f) - except FileNotFoundError: - pass # File does not exist yet, which is fine - - for root, dirs, files in os.walk(LOGS_FOLDER): - for filename in files: - if filename != 'last_backup_timestamps.txt': - filepath = os.path.join(root, filename) - if os.path.isfile(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - # check if file has changed since last backup - last_backup_timestamp = last_backup_timestamps.get(filepath) - current_timestamp = os.path.getmtime(filepath) - if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp: - shutil.copy2(filepath, backup_filepath) # copy file with metadata - last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp - if last_backup_timestamp is None: - print(f'Backed up file: {filename}') - else: - print(f'Updating backed up file: {filename}') - updated = True - fully_updated = False # if a file is updated, all files are not up to date - - # check if any files were deleted in Colab and delete them from the backup drive - for filepath in list(last_backup_timestamps.keys()): - if not os.path.exists(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - if os.path.exists(backup_filepath): - os.remove(backup_filepath) - print(f'Deleted file: {filepath}') - del last_backup_timestamps[filepath] - updated = True - fully_updated = False # if a file is deleted, all files are not up to date - - if not updated and not fully_updated: - print("Files are up to date.") - fully_updated = True # if all files are up to date, set the boolean to True - copy_weights_folder_to_drive() - sleep_time = 15 - else: - sleep_time = 0.1 - - with open(last_backup_timestamps_path, 'w') as f: - for filepath, timestamp in last_backup_timestamps.items(): - f.write(f'{filepath}:{timestamp}\n') - - time.sleep(sleep_time) # wait for 15 seconds before checking again, or 0.1s if not fully up to date to speed up backups - - except Exception as e: - print(f"An error occurred: {str(e)}") - # You can log the error or take appropriate actions here. 
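Note: the deleted backups.py above exposes three entry points — import_google_drive_backup() to restore logs and weights from the RVC_Backup folder on Drive, copy_weights_folder_to_drive() to push finished .pth files back, and backup_files(), a blocking loop that continuously mirrors the logs folder. The sketch below shows one way a Colab cell might have wired them together; it is an illustration only — the drive.mount() call, the sys.path/import path, and the daemon-thread wrapper are assumptions, not part of the deleted file.

# Minimal usage sketch (assumed: Colab runtime with the repo cloned to /content/Applio-RVC-Fork,
# matching the LOGS_FOLDER / WEIGHTS_FOLDER constants hard-coded in backups.py above).
import sys
import threading
from google.colab import drive

# Mount Drive so GOOGLE_DRIVE_PATH ('/content/drive/MyDrive/RVC_Backup') is reachable.
drive.mount('/content/drive')

# Assumed clone location; adjust if the fork lives elsewhere.
sys.path.append('/content/Applio-RVC-Fork')
from utils.backups import import_google_drive_backup, backup_files

# Restore any earlier logs/weights before training starts.
import_google_drive_backup()

# backup_files() never returns, so run it on a background daemon thread alongside training.
threading.Thread(target=backup_files, daemon=True).start()
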
diff --git a/spaces/EronSamez/RVC_HFmeu/i18n/locale_diff.py b/spaces/EronSamez/RVC_HFmeu/i18n/locale_diff.py deleted file mode 100644 index 387ddfe1b16c2f9f32b6b9682b61353837b06bd8..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/i18n/locale_diff.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import os -from collections import OrderedDict - -# Define the standard file name -standard_file = "en_US.json" - -# Find all JSON files in the directory -dir_path = "./" -languages = [ - f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file -] - -# Load the standard file -with open(standard_file, "r", encoding="utf-8") as f: - standard_data = json.load(f, object_pairs_hook=OrderedDict) - -# Loop through each language file -for lang_file in languages: - # Load the language file - with open(lang_file, "r", encoding="utf-8") as f: - lang_data = json.load(f, object_pairs_hook=OrderedDict) - - # Find the difference between the language file and the standard file - diff = set(standard_data.keys()) - set(lang_data.keys()) - - miss = set(lang_data.keys()) - set(standard_data.keys()) - - # Add any missing keys to the language file - for key in diff: - lang_data[key] = key - - # Del any extra keys to the language file - for key in miss: - del lang_data[key] - - # Sort the keys of the language file to match the order of the standard file - lang_data = OrderedDict( - sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) - ) - - # Save the updated language file - with open(lang_file, "w", encoding="utf-8") as f: - json.dump(lang_data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/spaces/EuroPython2022/example-hello/app.py b/spaces/EuroPython2022/example-hello/app.py deleted file mode 100644 index 06524cff894be705bc092234da04e0c7a020f729..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/example-hello/app.py +++ /dev/null @@ -1,21 +0,0 @@ -import gradio as gr - -def update(name): - return f"Welcome to EuroPython 2022, {name}!" - -demo = gr.Blocks() - -with demo: - gr.Markdown( - """ - # Hello World! - Start typing below to see the output. 
- """) - inp = gr.Textbox(placeholder="What is your name?") - out = gr.Textbox() - - inp.change(fn=update, - inputs=inp, - outputs=out) - -demo.launch() \ No newline at end of file diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/schedules/schedule_sgd_160e.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/schedules/schedule_sgd_160e.py deleted file mode 100644 index 985b8f63b3cb34f04ff55b298b44a53568a50ae8..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/schedules/schedule_sgd_160e.py +++ /dev/null @@ -1,13 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[80, 128]) -# running settings -runner = dict(type='EpochBasedRunner', max_epochs=160) -checkpoint_config = dict(interval=10) diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Gravityengine.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Gravityengine.py deleted file mode 100644 index f0cd09daaaae0adaa349f91139dc60c7ac79c028..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Gravityengine.py +++ /dev/null @@ -1,27 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://gpt4.xunika.uk/' -model = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - data = { - 'model': model, - 'temperature': 0.7, - 'presence_penalty': 0, - 'messages': messages, - } - response = requests.post(url + '/api/openai/v1/chat/completions', - json=data, stream=True) - - yield response.json()['choices'][0]['message']['content'] - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/FriendlyJew/GoyimProxy/Dockerfile b/spaces/FriendlyJew/GoyimProxy/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/FriendlyJew/GoyimProxy/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/Gradio-Blocks/Codex_OpenAI/app.py b/spaces/Gradio-Blocks/Codex_OpenAI/app.py deleted file mode 100644 index 23b060d6aa20e5ca20e06e4c2072950c3d986048..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/Codex_OpenAI/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import openai -import numpy as np -import os -import json -import gradio as gr - -openai.api_key = os.environ["api_key"] -engine = "code-davinci-002" - - -def happytt(temperature,max_tokens,text,stop): - try: - s = json.loads(stop) - response = openai.Completion.create( - engine=engine, - prompt=text, - temperature=temperature, - max_tokens=max_tokens, - top_p=1, - frequency_penalty=0, - 
presence_penalty=0, - stop=s - ) - except json.JSONDecodeError: - response = openai.Completion.create( - engine=engine, - prompt=text, - temperature=temperature, - max_tokens=max_tokens, - top_p=1, - frequency_penalty=0, - presence_penalty=0 - ) - - return response.choices[0].text - - -title = "OpenAI Codex" -description = '''OpenAI Codex is an artificial intelligence model developed by OpenAI. -It parses natural language and generates code in response. -It is used to power GitHub Copilot, a programming autocompletion -tool developed for Code generation. - -Try following prompts and tweak temperatures in following links - - -https://www.pragnakalp.com/experimenting-with-openai-codex/ - -https://betterprogramming.pub/i-beta-tested-openais-codex-and-the-results-are-spooky-good-e282a1874c79 - -https://beta.openai.com/examples?category=code - -Built by [mohammed arsalan](https://www.linkedin.com/in/sallu-mandya/)''' - - -iface = gr.Interface( happytt,[ gr.Slider(0, 1, step=0.1),gr.Slider(150, 4000, step=1), - gr.Textbox(type='str', - label="input prompt"), - gr.Textbox(type='str', - label="list of tokens, when to finish generating", - placeholder='["", "import"]')],"text", title = title, description = description ) -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/scripts/calc_losses_on_images.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/scripts/calc_losses_on_images.py deleted file mode 100644 index 32b6bcee854da7ae357daf82bd986f30db9fb72c..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/scripts/calc_losses_on_images.py +++ /dev/null @@ -1,87 +0,0 @@ -from argparse import ArgumentParser -import os -import json -import sys -from tqdm import tqdm -import numpy as np -import torch -from torch.utils.data import DataLoader -import torchvision.transforms as transforms - -sys.path.append(".") -sys.path.append("..") - -from criteria.lpips.lpips import LPIPS -from datasets.gt_res_dataset import GTResDataset - - -def parse_args(): - parser = ArgumentParser(add_help=False) - parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2']) - parser.add_argument('--data_path', type=str, default='results') - parser.add_argument('--gt_path', type=str, default='gt_images') - parser.add_argument('--workers', type=int, default=4) - parser.add_argument('--batch_size', type=int, default=4) - parser.add_argument('--is_cars', action='store_true') - args = parser.parse_args() - return args - - -def run(args): - resize_dims = (256, 256) - if args.is_cars: - resize_dims = (192, 256) - transform = transforms.Compose([transforms.Resize(resize_dims), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) - - print('Loading dataset') - dataset = GTResDataset(root_path=args.data_path, - gt_dir=args.gt_path, - transform=transform) - - dataloader = DataLoader(dataset, - batch_size=args.batch_size, - shuffle=False, - num_workers=int(args.workers), - drop_last=True) - - if args.mode == 'lpips': - loss_func = LPIPS(net_type='alex') - elif args.mode == 'l2': - loss_func = torch.nn.MSELoss() - else: - raise Exception('Not a valid mode!') - loss_func.cuda() - - global_i = 0 - scores_dict = {} - all_scores = [] - for result_batch, gt_batch in tqdm(dataloader): - for i in range(args.batch_size): - loss = float(loss_func(result_batch[i:i + 1].cuda(), gt_batch[i:i + 1].cuda())) - all_scores.append(loss) - im_path = dataset.pairs[global_i][0] - scores_dict[os.path.basename(im_path)] = loss - 
global_i += 1 - - all_scores = list(scores_dict.values()) - mean = np.mean(all_scores) - std = np.std(all_scores) - result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std) - print('Finished with ', args.data_path) - print(result_str) - - out_path = os.path.join(os.path.dirname(args.data_path), 'inference_metrics') - if not os.path.exists(out_path): - os.makedirs(out_path) - - with open(os.path.join(out_path, 'stat_{}.txt'.format(args.mode)), 'w') as f: - f.write(result_str) - with open(os.path.join(out_path, 'scores_{}.json'.format(args.mode)), 'w') as f: - json.dump(scores_dict, f) - - -if __name__ == '__main__': - args = parse_args() - run(args) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py deleted file mode 100644 index d069f8c9fdbaa55cbc44065740187c242cfa2903..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index c5dbf20b0fcc7bc1dd077bd8b7077772251d4c1a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './emanet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_base_16khz.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_base_16khz.py deleted file mode 100644 index 190cc1d0a1e316347e8ebbdfc8de7e2942c1b3d7..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_base_16khz.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from ..musicgen._explorers import LMExplorer -from ...environment import AudioCraftEnvironment - - -@LMExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=64, partition=partitions) - launcher.bind_(solver='audiogen/audiogen_base_16khz') - # replace this by the desired environmental sound dataset - launcher.bind_(dset='internal/sounds_16khz') - - fsdp = {'autocast': False, 'fsdp.use': True} - medium = {'model/lm/model_scale': 'medium'} - - launcher.bind_(fsdp) - launcher(medium) diff --git a/spaces/GuXiaoBei/wechat-chatbot/channel/wechat/wechaty_channel.py b/spaces/GuXiaoBei/wechat-chatbot/channel/wechat/wechaty_channel.py deleted file mode 100644 index 8f27f6dc81422741ddfbbc2e700f8b8b62011cc3..0000000000000000000000000000000000000000 --- a/spaces/GuXiaoBei/wechat-chatbot/channel/wechat/wechaty_channel.py +++ /dev/null @@ -1,201 +0,0 @@ -# encoding:utf-8 - -""" -wechaty channel -Python Wechaty - https://github.com/wechaty/python-wechaty -""" -import io -import os -import json -import time -import asyncio -import requests -from typing import Optional, Union -from wechaty_puppet import MessageType, FileBox, ScanStatus # type: ignore -from wechaty import Wechaty, Contact -from wechaty.user import Message, Room, MiniProgram, UrlLink -from channel.channel import Channel -from common.log import logger -from config import conf - - -class WechatyChannel(Channel): - - def __init__(self): - pass - - def startup(self): - asyncio.run(self.main()) - - async def main(self): - config = conf() - # 使用PadLocal协议 比较稳定(免费web协议 os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:8080') - token = config.get('wechaty_puppet_service_token') - os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = token - global bot - bot = Wechaty() - - bot.on('scan', self.on_scan) - bot.on('login', self.on_login) - bot.on('message', self.on_message) - await bot.start() - - async def on_login(self, contact: Contact): - logger.info('[WX] login user={}'.format(contact)) - - async def on_scan(self, status: ScanStatus, qr_code: Optional[str] = None, - data: Optional[str] = None): - contact = self.Contact.load(self.contact_id) - logger.info('[WX] scan user={}, scan status={}, scan qr_code={}'.format(contact, status.name, qr_code)) - # print(f'user <{contact}> scan status: {status.name} , 'f'qr_code: {qr_code}') - - async def on_message(self, msg: Message): - """ - listen for message event - """ - from_contact = msg.talker() # 获取消息的发送者 - to_contact = msg.to() # 接收人 - room = msg.room() # 获取消息来自的群聊. 
如果消息不是来自群聊, 则返回None - from_user_id = from_contact.contact_id - to_user_id = to_contact.contact_id # 接收人id - # other_user_id = msg['User']['UserName'] # 对手方id - content = msg.text() - mention_content = await msg.mention_text() # 返回过滤掉@name后的消息 - match_prefix = self.check_prefix(content, conf().get('single_chat_prefix')) - conversation: Union[Room, Contact] = from_contact if room is None else room - - if room is None and msg.type() == MessageType.MESSAGE_TYPE_TEXT: - if not msg.is_self() and match_prefix is not None: - # 好友向自己发送消息 - if match_prefix != '': - str_list = content.split(match_prefix, 1) - if len(str_list) == 2: - content = str_list[1].strip() - - img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix')) - if img_match_prefix: - content = content.split(img_match_prefix, 1)[1].strip() - await self._do_send_img(content, from_user_id) - else: - await self._do_send(content, from_user_id) - elif msg.is_self() and match_prefix: - # 自己给好友发送消息 - str_list = content.split(match_prefix, 1) - if len(str_list) == 2: - content = str_list[1].strip() - img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix')) - if img_match_prefix: - content = content.split(img_match_prefix, 1)[1].strip() - await self._do_send_img(content, to_user_id) - else: - await self._do_send(content, to_user_id) - elif room and msg.type() == MessageType.MESSAGE_TYPE_TEXT: - # 群组&文本消息 - room_id = room.room_id - room_name = await room.topic() - from_user_id = from_contact.contact_id - from_user_name = from_contact.name - is_at = await msg.mention_self() - content = mention_content - config = conf() - match_prefix = (is_at and not config.get("group_at_off", False)) \ - or self.check_prefix(content, config.get('group_chat_prefix')) \ - or self.check_contain(content, config.get('group_chat_keyword')) - if ('ALL_GROUP' in config.get('group_name_white_list') or room_name in config.get( - 'group_name_white_list') or self.check_contain(room_name, config.get( - 'group_name_keyword_white_list'))) and match_prefix: - img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix')) - if img_match_prefix: - content = content.split(img_match_prefix, 1)[1].strip() - await self._do_send_group_img(content, room_id) - else: - await self._do_send_group(content, room_id, from_user_id, from_user_name) - - async def send(self, message: Union[str, Message, FileBox, Contact, UrlLink, MiniProgram], receiver): - logger.info('[WX] sendMsg={}, receiver={}'.format(message, receiver)) - if receiver: - contact = await bot.Contact.find(receiver) - await contact.say(message) - - async def send_group(self, message: Union[str, Message, FileBox, Contact, UrlLink, MiniProgram], receiver): - logger.info('[WX] sendMsg={}, receiver={}'.format(message, receiver)) - if receiver: - room = await bot.Room.find(receiver) - await room.say(message) - - async def _do_send(self, query, reply_user_id): - try: - if not query: - return - context = dict() - context['from_user_id'] = reply_user_id - reply_text = super().build_reply_content(query, context) - if reply_text: - await self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id) - except Exception as e: - logger.exception(e) - - async def _do_send_img(self, query, reply_user_id): - try: - if not query: - return - context = dict() - context['type'] = 'IMAGE_CREATE' - img_url = super().build_reply_content(query, context) - if not img_url: - return - # 图片下载 - # pic_res = requests.get(img_url, stream=True) - # image_storage = io.BytesIO() - # 
for block in pic_res.iter_content(1024): - # image_storage.write(block) - # image_storage.seek(0) - - # 图片发送 - logger.info('[WX] sendImage, receiver={}'.format(reply_user_id)) - t = int(time.time()) - file_box = FileBox.from_url(url=img_url, name=str(t) + '.png') - await self.send(file_box, reply_user_id) - except Exception as e: - logger.exception(e) - - async def _do_send_group(self, query, group_id, group_user_id, group_user_name): - if not query: - return - context = dict() - context['from_user_id'] = str(group_id) + '-' + str(group_user_id) - reply_text = super().build_reply_content(query, context) - if reply_text: - reply_text = '@' + group_user_name + ' ' + reply_text.strip() - await self.send_group(conf().get("group_chat_reply_prefix", "") + reply_text, group_id) - - async def _do_send_group_img(self, query, reply_room_id): - try: - if not query: - return - context = dict() - context['type'] = 'IMAGE_CREATE' - img_url = super().build_reply_content(query, context) - if not img_url: - return - # 图片发送 - logger.info('[WX] sendImage, receiver={}'.format(reply_room_id)) - t = int(time.time()) - file_box = FileBox.from_url(url=img_url, name=str(t) + '.png') - await self.send_group(file_box, reply_room_id) - except Exception as e: - logger.exception(e) - - def check_prefix(self, content, prefix_list): - for prefix in prefix_list: - if content.startswith(prefix): - return prefix - return None - - def check_contain(self, content, keyword_list): - if not keyword_list: - return None - for ky in keyword_list: - if content.find(ky) != -1: - return True - return None diff --git a/spaces/HReynaud/EchoDiffusionDemo/app.py b/spaces/HReynaud/EchoDiffusionDemo/app.py deleted file mode 100644 index 18764c18212bf498b362f5169b99959118e197be..0000000000000000000000000000000000000000 --- a/spaces/HReynaud/EchoDiffusionDemo/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import gradio as gr -import os -from omegaconf import OmegaConf -from imagen_pytorch import Unet3D, ElucidatedImagen, ImagenTrainer, ElucidatedImagenConfig, NullUnet, Imagen -import torch -import numpy as np -import cv2 -from PIL import Image -import torchvision.transforms as T - - -device = "cuda" if torch.cuda.is_available() else "cpu" -exp_path = "model" - -class BetterCenterCrop(T.CenterCrop): - def __call__(self, img): - h = img.shape[-2] - w = img.shape[-1] - dim = min(h, w) - - return T.functional.center_crop(img, dim) - -class ImageLoader: - def __init__(self, path) -> None: - self.path = path - self.all_files = os.listdir(path) - self.transform = T.Compose([ - T.ToTensor(), - BetterCenterCrop((112, 112)), - T.Resize((112, 112)), - ]) - - def get_image(self): - idx = np.random.randint(0, len(self.all_files)) - img = Image.open(os.path.join(self.path, self.all_files[idx])) - return img - -class Context: - def __init__(self, path, device): - self.path = path - self.config_path = os.path.join(path, "config.yaml") - self.weight_path = os.path.join(path, "merged.pt") - - self.config = OmegaConf.load(self.config_path) - - self.config.dataset.num_frames = int(self.config.dataset.fps * self.config.dataset.duration) - - self.im_load = ImageLoader("echo_images") - - unets = [] - for i, (k, v) in enumerate(self.config.unets.items()): - unets.append(Unet3D(**v, lowres_cond=(i>0))) # type: ignore - - imagen_klass = ElucidatedImagen if self.config.imagen.elucidated == True else Imagen - del self.config.imagen.elucidated - imagen = imagen_klass( - unets = unets, - **OmegaConf.to_container(self.config.imagen), # type: ignore - ) - - self.trainer = 
ImagenTrainer( - imagen = imagen, - **self.config.trainer - ).to(device) - - print("Loading weights from", self.weight_path) - additional_data = self.trainer.load(self.weight_path) - print("Loaded weights from", self.weight_path) - - def reshape_image(self, image): - try: - image = self.im_load.transform(image).multiply(255).byte().permute(1,2,0).numpy() - return image - except: - return None - - def load_random_image(self): - print("Loading random image") - image = self.im_load.get_image() - return image - - def generate_video(self, image, lvef, cond_scale): - print("Generating video") - print(f"lvef: {lvef}, cond_scale: {cond_scale}") - - image = self.im_load.transform(image).unsqueeze(0) - - sample_kwargs = {} - sample_kwargs = { - "text_embeds": torch.tensor([[[lvef/100.0]]]), - "cond_scale": cond_scale, - "cond_images": image, - } - - self.trainer.eval() - with torch.no_grad(): - video = self.trainer.sample( - batch_size=1, - video_frames=self.config.dataset.num_frames, - **sample_kwargs, - use_tqdm = True, - ).detach().cpu() # C x F x H x W - if video.shape[-3:] != (64, 112, 112): - video = torch.nn.functional.interpolate(video, size=(64, 112, 112), mode='trilinear', align_corners=False) - video = video.repeat((1,1,5,1,1)) # make the video loop 5 times - easier to see - uid = np.random.randint(0, 10) # prevent overwriting if multiple users are using the app - path = f"tmp/{uid}.mp4" - video = video.multiply(255).byte().squeeze(0).permute(1, 2, 3, 0).numpy() - out = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), 32, (112, 112)) - for i in video: - out.write(i) - out.release() - return path - -context = Context(exp_path, device) - -with gr.Blocks(css="style.css") as demo: - - with gr.Row(): - gr.Label("Feature-Conditioned Cascaded Video Diffusion Models for Precise Echocardiogram Synthesis") - - with gr.Row(): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=3, variant="panel"): - text = gr.Markdown(value="This is a live demo of our work on cardiac ultrasound video generation. The model is trained on 4-chamber cardiac ultrasound videos and can generate realistic 4-chamber videos given a target Left Ventricle Ejection Fraction. Please, start by sampling a random frame from the pool of 100 images taken from the EchoNet-Dynamic dataset, which will act as the conditional image, representing the anatomy of the video. Then, set the target LVEF, and click the button to generate a video. The process takes 30s to 60s. The model running here corresponds to the 1SCM from the paper. 
**Click on the video to play it.** [Code is available here](https://github.com/HReynaud/EchoDiffusion) ") - with gr.Column(scale=1, min_width="226"): - image = gr.Image(interactive=True) - with gr.Column(scale=1, min_width="226"): - video = gr.Video(interactive=False) - - slider_ef = gr.Slider(minimum=10, maximum=90, step=1, label="Target LVEF", value=60, interactive=True) - slider_cond = gr.Slider(minimum=0, maximum=20, step=1, label="Conditional scale (if set to more than 1, generation time is 60s)", value=1, interactive=True) - - with gr.Row(): - img_btn = gr.Button(value="❶ Get a random cardiac ultrasound image (4Ch)") - run_btn = gr.Button(value="❷ Generate a video (~30s) 🚀") - - image.change(context.reshape_image, inputs=[image], outputs=[image]) - img_btn.click(context.load_random_image, inputs=[], outputs=[image]) - run_btn.click(context.generate_video, inputs=[image, slider_ef, slider_cond], outputs=[video]) - -if __name__ == "__main__": - demo.queue() - demo.launch() \ No newline at end of file diff --git a/spaces/HaMerL/ChaosinChat/modules/models/models.py b/spaces/HaMerL/ChaosinChat/modules/models/models.py deleted file mode 100644 index 5625ec98dcbb34b0f1120f827831c07aa9d69345..0000000000000000000000000000000000000000 --- a/spaces/HaMerL/ChaosinChat/modules/models/models.py +++ /dev/null @@ -1,631 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import platform -import base64 -from io import BytesIO -from PIL import Image - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from enum import Enum -import uuid - -from ..presets import * -from ..llama_func import * -from ..utils import * -from .. 
import shared -from ..config import retrieve_proxy -from modules import config -from .base_model import BaseLLMModel, ModelType - - -class OpenAIClient(BaseLLMModel): - def __init__( - self, - model_name, - api_key, - system_prompt=INITIAL_SYSTEM_PROMPT, - temperature=1.0, - top_p=1.0, - ) -> None: - super().__init__( - model_name=model_name, - temperature=temperature, - top_p=top_p, - system_prompt=system_prompt, - ) - self.api_key = api_key - self.need_api_key = True - self._refresh_header() - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG - - def get_answer_at_once(self): - response = self._get_response() - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - total_token_count = response["usage"]["total_tokens"] - return content, total_token_count - - def count_token(self, user_input): - input_token_count = count_token(construct_user(user_input)) - if self.system_prompt is not None and len(self.all_token_counts) == 0: - system_prompt_token_count = count_token( - construct_system(self.system_prompt) - ) - return input_token_count + system_prompt_token_count - return input_token_count - - def billing_info(self): - try: - curr_time = datetime.datetime.now() - last_day_of_month = get_last_day_of_month( - curr_time).strftime("%Y-%m-%d") - first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") - usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" - try: - usage_data = self._get_billing_data(usage_url) - except Exception as e: - logging.error(f"获取API使用情况失败:" + str(e)) - return i18n("**获取API使用情况失败**") - rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) - return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" - except requests.exceptions.ConnectTimeout: - status_text = ( - STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - ) - return status_text - except requests.exceptions.ReadTimeout: - status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - return status_text - except Exception as e: - import traceback - traceback.print_exc() - logging.error(i18n("获取API使用情况失败:") + str(e)) - return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG - - def set_token_upper_limit(self, new_upper_limit): - pass - - @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 - def _get_response(self, stream=False): - openai_api_key = self.api_key - system_prompt = self.system_prompt - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - if system_prompt is not None: - history = [construct_system(system_prompt), *history] - - payload = { - "model": self.model_name, - "messages": history, - "temperature": self.temperature, - "top_p": self.top_p, - "n": self.n_choices, - "stream": stream, - "presence_penalty": self.presence_penalty, - "frequency_penalty": self.frequency_penalty, - } - - if self.max_generation_token is not None: - payload["max_tokens"] = self.max_generation_token - if self.stop_sequence is not None: - payload["stop"] = self.stop_sequence - if self.logit_bias is not None: - payload["logit_bias"] = self.logit_bias - if self.user_identifier is not None: - payload["user"] = 
self.user_identifier - - if stream: - timeout = TIMEOUT_STREAMING - else: - timeout = TIMEOUT_ALL - - # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 - if shared.state.completion_url != COMPLETION_URL: - logging.info(f"使用自定义API URL: {shared.state.completion_url}") - - with retrieve_proxy(): - try: - response = requests.post( - shared.state.completion_url, - headers=headers, - json=payload, - stream=stream, - timeout=timeout, - ) - except: - return None - return response - - def _refresh_header(self): - self.headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.api_key}", - } - - def _get_billing_data(self, billing_url): - with retrieve_proxy(): - response = requests.get( - billing_url, - headers=self.headers, - timeout=TIMEOUT_ALL, - ) - - if response.status_code == 200: - data = response.json() - return data - else: - raise Exception( - f"API request failed with status code {response.status_code}: {response.text}" - ) - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if chunk["choices"][0]["finish_reason"] == "stop": - break - try: - yield chunk["choices"][0]["delta"]["content"] - except Exception as e: - # logging.error(f"Error: {e}") - continue - if error_msg: - raise Exception(error_msg) - - def set_key(self, new_access_key): - ret = super().set_key(new_access_key) - self._refresh_header() - return ret - - -class ChatGLM_Client(BaseLLMModel): - def __init__(self, model_name) -> None: - super().__init__(model_name=model_name) - from transformers import AutoTokenizer, AutoModel - import torch - global CHATGLM_TOKENIZER, CHATGLM_MODEL - if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None: - system_name = platform.system() - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"THUDM/{model_name}" - CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained( - model_source, trust_remote_code=True - ) - quantified = False - if "int4" in model_name: - quantified = True - model = AutoModel.from_pretrained( - model_source, trust_remote_code=True - ) - if torch.cuda.is_available(): - # run on CUDA - logging.info("CUDA is available, using CUDA") - model = model.half().cuda() - # mps加速还存在一些问题,暂时不使用 - elif system_name == "Darwin" and model_path is not None and not quantified: - logging.info("Running on macOS, using MPS") - # running on macOS and model already downloaded - model = model.half().to("mps") - else: - logging.info("GPU is not available, using CPU") - model = model.float() - model = model.eval() - CHATGLM_MODEL = model - - def _get_glm_style_input(self): - history = [x["content"] for x in self.history] - query = history.pop() - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - assert ( - len(history) % 2 == 0 - ), f"History should be even length. 
current history is: {history}" - history = [[history[i], history[i + 1]] - for i in range(0, len(history), 2)] - return history, query - - def get_answer_at_once(self): - history, query = self._get_glm_style_input() - response, _ = CHATGLM_MODEL.chat( - CHATGLM_TOKENIZER, query, history=history) - return response, len(response) - - def get_answer_stream_iter(self): - history, query = self._get_glm_style_input() - for response, history in CHATGLM_MODEL.stream_chat( - CHATGLM_TOKENIZER, - query, - history, - max_length=self.token_upper_limit, - top_p=self.top_p, - temperature=self.temperature, - ): - yield response - - -class LLaMA_Client(BaseLLMModel): - def __init__( - self, - model_name, - lora_path=None, - ) -> None: - super().__init__(model_name=model_name) - from lmflow.datasets.dataset import Dataset - from lmflow.pipeline.auto_pipeline import AutoPipeline - from lmflow.models.auto_model import AutoModel - from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments - - self.max_generation_token = 1000 - self.end_string = "\n\n" - # We don't need input data - data_args = DatasetArguments(dataset_path=None) - self.dataset = Dataset(data_args) - self.system_prompt = "" - - global LLAMA_MODEL, LLAMA_INFERENCER - if LLAMA_MODEL is None or LLAMA_INFERENCER is None: - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"decapoda-research/{model_name}" - # raise Exception(f"models目录下没有这个模型: {model_name}") - if lora_path is not None: - lora_path = f"lora/{lora_path}" - model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None, - use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True) - pipeline_args = InferencerArguments( - local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16') - - with open(pipeline_args.deepspeed, "r") as f: - ds_config = json.load(f) - LLAMA_MODEL = AutoModel.get_model( - model_args, - tune_strategy="none", - ds_config=ds_config, - ) - LLAMA_INFERENCER = AutoPipeline.get_pipeline( - pipeline_name="inferencer", - model_args=model_args, - data_args=data_args, - pipeline_args=pipeline_args, - ) - - def _get_llama_style_input(self): - history = [] - instruction = "" - if self.system_prompt: - instruction = (f"Instruction: {self.system_prompt}\n") - for x in self.history: - if x["role"] == "user": - history.append(f"{instruction}Input: {x['content']}") - else: - history.append(f"Output: {x['content']}") - context = "\n\n".join(history) - context += "\n\nOutput: " - return context - - def get_answer_at_once(self): - context = self._get_llama_style_input() - - input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [{"text": context}]} - ) - - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=self.max_generation_token, - temperature=self.temperature, - ) - - response = output_dataset.to_dict()["instances"][0]["text"] - return response, len(response) - - def get_answer_stream_iter(self): - context = self._get_llama_style_input() - partial_text = "" - step = 1 - for _ in range(0, self.max_generation_token, step): - 
input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [ - {"text": context + partial_text}]} - ) - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=step, - temperature=self.temperature, - ) - response = output_dataset.to_dict()["instances"][0]["text"] - if response == "" or response == self.end_string: - break - partial_text += response - yield partial_text - - -class XMChat(BaseLLMModel): - def __init__(self, api_key): - super().__init__(model_name="xmchat") - self.api_key = api_key - self.session_id = None - self.reset() - self.image_bytes = None - self.image_path = None - self.xm_history = [] - self.url = "https://xmbot.net/web" - self.last_conv_id = None - - def reset(self): - self.session_id = str(uuid.uuid4()) - self.last_conv_id = None - return [], "已重置" - - def image_to_base64(self, image_path): - # 打开并加载图片 - img = Image.open(image_path) - - # 获取图片的宽度和高度 - width, height = img.size - - # 计算压缩比例,以确保最长边小于4096像素 - max_dimension = 2048 - scale_ratio = min(max_dimension / width, max_dimension / height) - - if scale_ratio < 1: - # 按压缩比例调整图片大小 - new_width = int(width * scale_ratio) - new_height = int(height * scale_ratio) - img = img.resize((new_width, new_height), Image.ANTIALIAS) - - # 将图片转换为jpg格式的二进制数据 - buffer = BytesIO() - if img.mode == "RGBA": - img = img.convert("RGB") - img.save(buffer, format='JPEG') - binary_image = buffer.getvalue() - - # 对二进制数据进行Base64编码 - base64_image = base64.b64encode(binary_image).decode('utf-8') - - return base64_image - - def try_read_image(self, filepath): - def is_image_file(filepath): - # 判断文件是否为图片 - valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"] - file_extension = os.path.splitext(filepath)[1].lower() - return file_extension in valid_image_extensions - - if is_image_file(filepath): - logging.info(f"读取图片文件: {filepath}") - self.image_bytes = self.image_to_base64(filepath) - self.image_path = filepath - else: - self.image_bytes = None - self.image_path = None - - def like(self): - if self.last_conv_id is None: - return "点赞失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "good" - } - requests.post(self.url, json=data) - return "👍点赞成功,感谢反馈~" - - def dislike(self): - if self.last_conv_id is None: - return "点踩失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "bad" - } - requests.post(self.url, json=data) - return "👎点踩成功,感谢反馈~" - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = real_inputs - display_append = "" - limited_context = False - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def handle_file_upload(self, files, chatbot): - """if the model accepts multi modal input, implement this function""" - if files: - for file in files: - if file.name: - logging.info(f"尝试读取图像: {file.name}") - self.try_read_image(file.name) - if self.image_path is not None: - chatbot = chatbot + [((self.image_path,), None)] - if self.image_bytes is not None: - logging.info("使用图片作为输入") - # XMChat的一轮对话中实际上只能处理一张图片 - self.reset() - conv_id = str(uuid.uuid4()) - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "imgbase64", - "data": self.image_bytes - } - response = requests.post(self.url, json=data) - response = json.loads(response.text) - logging.info(f"图片回复: {response['data']}") - return None, chatbot, None - - def get_answer_at_once(self): - question = self.history[-1]["content"] - conv_id = 
str(uuid.uuid4()) - self.last_conv_id = conv_id - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "text", - "data": question - } - response = requests.post(self.url, json=data) - try: - response = json.loads(response.text) - return response["data"], len(response["data"]) - except Exception as e: - return response.text, len(response.text) - - - - -def get_model( - model_name, - lora_model_path=None, - access_key=None, - temperature=None, - top_p=None, - system_prompt=None, -) -> BaseLLMModel: - msg = i18n("模型设置为了:") + f" {model_name}" - model_type = ModelType.get_type(model_name) - lora_selector_visibility = False - lora_choices = [] - dont_change_lora_selector = False - if model_type != ModelType.OpenAI: - config.local_embedding = True - # del current_model.model - model = None - try: - if model_type == ModelType.OpenAI: - logging.info(f"正在加载OpenAI模型: {model_name}") - model = OpenAIClient( - model_name=model_name, - api_key=access_key, - system_prompt=system_prompt, - temperature=temperature, - top_p=top_p, - ) - elif model_type == ModelType.ChatGLM: - logging.info(f"正在加载ChatGLM模型: {model_name}") - model = ChatGLM_Client(model_name) - elif model_type == ModelType.LLaMA and lora_model_path == "": - msg = f"现在请为 {model_name} 选择LoRA模型" - logging.info(msg) - lora_selector_visibility = True - if os.path.isdir("lora"): - lora_choices = get_file_names( - "lora", plain=True, filetypes=[""]) - lora_choices = ["No LoRA"] + lora_choices - elif model_type == ModelType.LLaMA and lora_model_path != "": - logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}") - dont_change_lora_selector = True - if lora_model_path == "No LoRA": - lora_model_path = None - msg += " + No LoRA" - else: - msg += f" + {lora_model_path}" - model = LLaMA_Client(model_name, lora_model_path) - elif model_type == ModelType.XMChat: - if os.environ.get("XMCHAT_API_KEY") != "": - access_key = os.environ.get("XMCHAT_API_KEY") - model = XMChat(api_key=access_key) - elif model_type == ModelType.StableLM: - from .StableLM import StableLM_Client - model = StableLM_Client(model_name) - elif model_type == ModelType.MOSS: - from .MOSS import MOSS_Client - model = MOSS_Client(model_name) - elif model_type == ModelType.Unknown: - raise ValueError(f"未知模型: {model_name}") - logging.info(msg) - except Exception as e: - logging.error(e) - msg = f"{STANDARD_ERROR_MSG}: {e}" - if dont_change_lora_selector: - return model, msg - else: - return model, msg, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility) - - -if __name__ == "__main__": - with open("config.json", "r") as f: - openai_api_key = cjson.load(f)["openai_api_key"] - # set logging level to debug - logging.basicConfig(level=logging.DEBUG) - # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key) - client = get_model(model_name="chatglm-6b-int4") - chatbot = [] - stream = False - # 测试账单功能 - logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET) - logging.info(client.billing_info()) - # 测试问答 - logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET) - question = "巴黎是中国的首都吗?" - for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试问答后history : {client.history}") - # 测试记忆力 - logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET) - question = "我刚刚问了你什么问题?" 
- for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试记忆力后history : {client.history}") - # 测试重试功能 - logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET) - for i in client.retry(chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"重试后history : {client.history}") - # # 测试总结功能 - # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET) - # chatbot, msg = client.reduce_token_size(chatbot=chatbot) - # print(chatbot, msg) - # print(f"总结后history: {client.history}") diff --git a/spaces/Hallucinate/demo/taming/data/sflckr.py b/spaces/Hallucinate/demo/taming/data/sflckr.py deleted file mode 100644 index 91101be5953b113f1e58376af637e43f366b3dee..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/taming/data/sflckr.py +++ /dev/null @@ -1,91 +0,0 @@ -import os -import numpy as np -import cv2 -import albumentations -from PIL import Image -from torch.utils.data import Dataset - - -class SegmentationBase(Dataset): - def __init__(self, - data_csv, data_root, segmentation_root, - size=None, random_crop=False, interpolation="bicubic", - n_labels=182, shift_segmentation=False, - ): - self.n_labels = n_labels - self.shift_segmentation = shift_segmentation - self.data_csv = data_csv - self.data_root = data_root - self.segmentation_root = segmentation_root - with open(self.data_csv, "r") as f: - self.image_paths = f.read().splitlines() - self._length = len(self.image_paths) - self.labels = { - "relative_file_path_": [l for l in self.image_paths], - "file_path_": [os.path.join(self.data_root, l) - for l in self.image_paths], - "segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png")) - for l in self.image_paths] - } - - size = None if size is not None and size<=0 else size - self.size = size - if self.size is not None: - self.interpolation = interpolation - self.interpolation = { - "nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, - "bicubic": cv2.INTER_CUBIC, - "area": cv2.INTER_AREA, - "lanczos": cv2.INTER_LANCZOS4}[self.interpolation] - self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, - interpolation=self.interpolation) - self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, - interpolation=cv2.INTER_NEAREST) - self.center_crop = not random_crop - if self.center_crop: - self.cropper = albumentations.CenterCrop(height=self.size, width=self.size) - else: - self.cropper = albumentations.RandomCrop(height=self.size, width=self.size) - self.preprocessor = self.cropper - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = dict((k, self.labels[k][i]) for k in self.labels) - image = Image.open(example["file_path_"]) - if not image.mode == "RGB": - image = image.convert("RGB") - image = np.array(image).astype(np.uint8) - if self.size is not None: - image = self.image_rescaler(image=image)["image"] - segmentation = Image.open(example["segmentation_path_"]) - assert segmentation.mode == "L", segmentation.mode - segmentation = np.array(segmentation).astype(np.uint8) - if self.shift_segmentation: - # used to support segmentations containing unlabeled==255 label - segmentation = segmentation+1 - if self.size is not None: - segmentation = self.segmentation_rescaler(image=segmentation)["image"] - if self.size is not None: - processed = self.preprocessor(image=image, - mask=segmentation - ) - else: - processed = {"image": image, - "mask": segmentation - } - example["image"] = 
(processed["image"]/127.5 - 1.0).astype(np.float32) - segmentation = processed["mask"] - onehot = np.eye(self.n_labels)[segmentation] - example["segmentation"] = onehot - return example - - -class Examples(SegmentationBase): - def __init__(self, size=None, random_crop=False, interpolation="bicubic"): - super().__init__(data_csv="data/sflckr_examples.txt", - data_root="data/sflckr_images", - segmentation_root="data/sflckr_segmentations", - size=size, random_crop=random_crop, interpolation=interpolation) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_iopath.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_iopath.py deleted file mode 100644 index 908261a6619806f7ef9b5dd1beb5d6817b249a6e..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_iopath.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unittest -from unittest import mock - - -class TestIOPath(unittest.TestCase): - - def test_no_iopath(self): - from .test_reproducibility import TestReproducibility - - with mock.patch.dict("sys.modules", {"iopath": None}): - # reuse reproducibility tests, which are e2e tests that should cover - # most checkpoint related functionality - TestReproducibility._test_reproducibility(self, "test_reproducibility") - - def test_no_supports_rename(self): - from .test_reproducibility import TestReproducibility - - with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn: - mock_fn.return_value = False - TestReproducibility._test_reproducibility(self, "test_reproducibility") - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/scripts/inference/api.sh b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/scripts/inference/api.sh deleted file mode 100644 index 4f6ce2a2147f69e5b3da851c8222bef830056338..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/scripts/inference/api.sh +++ /dev/null @@ -1,8 +0,0 @@ -gender='male' -glowdir='../../checkpoints/glow/'$gender'/' -hifidir='../../checkpoints/hifi/'$gender'/' -device='cpu' -lang='en' - - -python ../../utils/inference/api.py -a $glowdir -v $hifidir -d $device -L $lang diff --git a/spaces/Heber/google-flan-t5-xl/README.md b/spaces/Heber/google-flan-t5-xl/README.md deleted file mode 100644 index 8aea3a3bc32d966fb2b00d0da6b6175b45643e7f..0000000000000000000000000000000000000000 --- a/spaces/Heber/google-flan-t5-xl/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Google Flan T5 Xl -emoji: 🏃 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ICML2022/OFA/fairseq/examples/bart/summarize.py b/spaces/ICML2022/OFA/fairseq/examples/bart/summarize.py deleted file mode 100644 index 04435f80e39c2d9d894696dae7cba5b381e13da9..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/bart/summarize.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -from fairseq.models.bart import BARTModel -import argparse - -XSUM_KWARGS = dict(beam=6, lenpen=1.0, max_len_b=60, min_len=10, no_repeat_ngram_size=3) -CNN_KWARGS = dict(beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3) - - -@torch.no_grad() -def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs): - count = 1 - - # if n_obs is not None: bsz = min(bsz, n_obs) - - with open(infile) as source, open(outfile, "w") as fout: - sline = source.readline().strip() - slines = [sline] - for sline in source: - if n_obs is not None and count > n_obs: - break - if count % bsz == 0: - hypotheses_batch = bart.sample(slines, **eval_kwargs) - for hypothesis in hypotheses_batch: - fout.write(hypothesis + "\n") - fout.flush() - slines = [] - - slines.append(sline.strip()) - count += 1 - - if slines != []: - hypotheses_batch = bart.sample(slines, **eval_kwargs) - for hypothesis in hypotheses_batch: - fout.write(hypothesis + "\n") - fout.flush() - - -def main(): - """ - Usage:: - - python examples/bart/summarize.py \ - --model-dir $HOME/bart.large.cnn \ - --model-file model.pt \ - --src $HOME/data-bin/cnn_dm/test.source - """ - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-dir", - required=True, - type=str, - default="bart.large.cnn/", - help="path containing model file and src_dict.txt", - ) - parser.add_argument( - "--model-file", - default="checkpoint_best.pt", - help="where in model_dir are weights saved", - ) - parser.add_argument( - "--src", default="test.source", help="text to summarize", type=str - ) - parser.add_argument( - "--out", default="test.hypo", help="where to save summaries", type=str - ) - parser.add_argument("--bsz", default=32, help="where to save summaries", type=int) - parser.add_argument( - "--n", default=None, help="how many examples to summarize", type=int - ) - parser.add_argument( - "--xsum-kwargs", - action="store_true", - default=False, - help="if true use XSUM_KWARGS else CNN_KWARGS", - ) - args = parser.parse_args() - eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS - if args.model_dir == "pytorch/fairseq": - bart = torch.hub.load("pytorch/fairseq", args.model_file) - else: - bart = BARTModel.from_pretrained( - args.model_dir, - checkpoint_file=args.model_file, - data_name_or_path=args.model_dir, - ) - bart = bart.eval() - if torch.cuda.is_available(): - bart = bart.cuda().half() - generate( - bart, args.src, bsz=args.bsz, n_obs=args.n, outfile=args.out, **eval_kwargs - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/IDKiro/DehazeFormer_Demo/app.py b/spaces/IDKiro/DehazeFormer_Demo/app.py deleted file mode 100644 index adb48b20b25638138be33ed88576a70b36d2ee75..0000000000000000000000000000000000000000 --- a/spaces/IDKiro/DehazeFormer_Demo/app.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -import numpy as np -import gradio as gr - -from PIL import Image -from models import dehazeformer - - -def infer(raw_image): - network = dehazeformer() - network.load_state_dict(torch.load('./saved_models/dehazeformer.pth', map_location=torch.device('cpu'))['state_dict']) - # torch.save({'state_dict': network.state_dict()}, './saved_models/dehazeformer.pth') - - network.eval() - - image = np.array(raw_image, np.float32) / 255. 
* 2 - 1 - image = torch.from_numpy(image) - image = image.permute((2, 0, 1)).unsqueeze(0) - - with torch.no_grad(): - output = network(image).clamp_(-1, 1)[0] * 0.5 + 0.5 - output = output.permute((1, 2, 0)) - output = np.array(output, np.float32) - output = np.round(output * 255.0) - - output = Image.fromarray(output.astype(np.uint8)) - - return output - - -title = "DehazeFormer" -description = f"We use a mixed dataset to train the model, allowing the trained model to work better on real hazy images. To allow the model to process high-resolution images more efficiently and effectively, we extend it to the [MCT](https://github.com/IDKiro/MCT) variant." -examples = [ - ["examples/1.jpg"], - ["examples/2.jpg"], - ["examples/3.jpg"], - ["examples/4.jpg"], - ["examples/5.jpg"], - ["examples/6.jpg"] -] - -iface = gr.Interface( - infer, - inputs="image", outputs="image", - title=title, - description=description, - allow_flagging='never', - examples=examples, -) -iface.launch() \ No newline at end of file diff --git a/spaces/INDONESIA-AI/Anapnoe/README.md b/spaces/INDONESIA-AI/Anapnoe/README.md deleted file mode 100644 index b39374216fcda9ceb71430dbe3360cd1e562335b..0000000000000000000000000000000000000000 --- a/spaces/INDONESIA-AI/Anapnoe/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Anapnoe -emoji: 📊 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JUNGU/VToonify/vtoonify/model/encoder/readme.md b/spaces/JUNGU/VToonify/vtoonify/model/encoder/readme.md deleted file mode 100644 index 5421bfe3e67b7b6cbd7baf96b741b539d65bb0fd..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/VToonify/vtoonify/model/encoder/readme.md +++ /dev/null @@ -1,9 +0,0 @@ -# Encoding in Style: a StyleGAN Encoder for Image-to-Image Translation - -## Description -Official Implementation of pSp paper for both training and evaluation. The pSp method extends the StyleGAN model to -allow solving different image-to-image translation problems using its encoder. - -Fork from [https://github.com/eladrich/pixel2style2pixel](https://github.com/eladrich/pixel2style2pixel). - -In VToonify, we modify pSp to accept z+ latent code. diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_ddpm.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_ddpm.py deleted file mode 100644 index 369db8b29e7d2e9abb9707bbf877ba7707f664eb..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_ddpm.py +++ /dev/null @@ -1,373 +0,0 @@ -# Copyright 2022 UC Berkeley Team and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim - -import math -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, FrozenDict, register_to_config -from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput, deprecate -from .scheduling_utils import SchedulerMixin - - -@dataclass -class DDPMSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. - `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. - - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class DDPMScheduler(SchedulerMixin, ConfigMixin): - """ - Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and - Langevin dynamics sampling. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - For more details, see the original paper: https://arxiv.org/abs/2006.11239 - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - variance_type (`str`): - options to clip the variance used when adding noise to the denoised sample. 
Choose from `fixed_small`, - `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. - clip_sample (`bool`, default `True`): - option to clip predicted sample between -1 and 1 for numerical stability. - prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - """ - - _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy() - _deprecated_kwargs = ["predict_epsilon"] - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - variance_type: str = "fixed_small", - clip_sample: bool = True, - prediction_type: str = "epsilon", - **kwargs, - ): - message = ( - "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler =" - " DDPMScheduler.from_pretrained(, prediction_type='epsilon')`." - ) - predict_epsilon = deprecate("predict_epsilon", "0.11.0", message, take_from=kwargs) - if predict_epsilon is not None: - self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample") - - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - elif beta_schedule == "sigmoid": - # GeoDiff sigmoid schedule - betas = torch.linspace(-6, 6, num_train_timesteps) - self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.one = torch.tensor(1.0) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = 1.0 - - # setable values - self.num_inference_steps = None - self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) - - self.variance_type = variance_type - - def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`int`, optional): current timestep - - Returns: - `torch.FloatTensor`: scaled input sample - """ - return sample - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. 
- """ - num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) - self.num_inference_steps = num_inference_steps - timesteps = np.arange( - 0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps - )[::-1].copy() - self.timesteps = torch.from_numpy(timesteps).to(device) - - def _get_variance(self, t, predicted_variance=None, variance_type=None): - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one - - # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) - # and sample from it to get previous sample - # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample - variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] - - if variance_type is None: - variance_type = self.config.variance_type - - # hacks - were probably added for training stability - if variance_type == "fixed_small": - variance = torch.clamp(variance, min=1e-20) - # for rl-diffuser https://arxiv.org/abs/2205.09991 - elif variance_type == "fixed_small_log": - variance = torch.log(torch.clamp(variance, min=1e-20)) - variance = torch.exp(0.5 * variance) - elif variance_type == "fixed_large": - variance = self.betas[t] - elif variance_type == "fixed_large_log": - # Glide max_log - variance = torch.log(self.betas[t]) - elif variance_type == "learned": - return predicted_variance - elif variance_type == "learned_range": - min_log = variance - max_log = self.betas[t] - frac = (predicted_variance + 1) / 2 - variance = frac * max_log + (1 - frac) * min_log - - return variance - - def step( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - generator=None, - return_dict: bool = True, - **kwargs, - ) -> Union[DDPMSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - - """ - message = ( - "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler =" - " DDPMScheduler.from_pretrained(, prediction_type='epsilon')`." - ) - predict_epsilon = deprecate("predict_epsilon", "0.11.0", message, take_from=kwargs) - if predict_epsilon is not None: - new_config = dict(self.config) - new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample" - self._internal_dict = FrozenDict(new_config) - - t = timestep - - if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: - model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) - else: - predicted_variance = None - - # 1. 
compute alphas, betas - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - # 2. compute predicted original sample from predicted noise also called - # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - if self.config.prediction_type == "epsilon": - pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - elif self.config.prediction_type == "sample": - pred_original_sample = model_output - elif self.config.prediction_type == "v_prediction": - pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" - " `v_prediction` for the DDPMScheduler." - ) - - # 3. Clip "predicted x_0" - if self.config.clip_sample: - pred_original_sample = torch.clamp(pred_original_sample, -1, 1) - - # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t - # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf - pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t - current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t - - # 5. Compute predicted previous sample µ_t - # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf - pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample - - # 6. Add noise - variance = 0 - if t > 0: - device = model_output.device - if device.type == "mps": - # randn does not work reproducibly on mps - variance_noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator) - variance_noise = variance_noise.to(device) - else: - variance_noise = torch.randn( - model_output.shape, generator=generator, device=device, dtype=model_output.dtype - ) - if self.variance_type == "fixed_small_log": - variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise - else: - variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise - - pred_prev_sample = pred_prev_sample + variance - - if not return_dict: - return (pred_prev_sample,) - - return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.IntTensor, - ) -> torch.FloatTensor: - # Make sure alphas_cumprod and timestep have same device and dtype as original_samples - self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) - timesteps = timesteps.to(original_samples.device) - - sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 - sqrt_alpha_prod = sqrt_alpha_prod.flatten() - while len(sqrt_alpha_prod.shape) < len(original_samples.shape): - sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) - - sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() - while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) - - noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise - return noisy_samples - - def get_velocity( - self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: 
torch.IntTensor - ) -> torch.FloatTensor: - # Make sure alphas_cumprod and timestep have same device and dtype as sample - self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype) - timesteps = timesteps.to(sample.device) - - sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 - sqrt_alpha_prod = sqrt_alpha_prod.flatten() - while len(sqrt_alpha_prod.shape) < len(sample.shape): - sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) - - sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() - while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) - - velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample - return velocity - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/JosueElias/borrs/dataset_with_embeddings.py b/spaces/JosueElias/borrs/dataset_with_embeddings.py deleted file mode 100644 index 4538d041604484e0101f2a982c2cb72a3fc2da3b..0000000000000000000000000000000000000000 --- a/spaces/JosueElias/borrs/dataset_with_embeddings.py +++ /dev/null @@ -1,23 +0,0 @@ - -from datasets import load_from_disk, Dataset -from huggingface_hub import hf_hub_download -from datasets import load_dataset -import faiss - -# load faiss file and get route of file https://huggingface.co/docs/huggingface_hub/guides/download#from-latest-version -path2 = hf_hub_download(repo_id="JosueElias/pipeline_faiss", filename="faiss.index", repo_type="dataset") - -# load wikipedia dataset https://huggingface.co/docs/datasets/loading#hugging-face-hub -datasetx = load_dataset("JosueElias/pipeline_dataset2") - -# save wikipedia dataset locally https://huggingface.co/docs/datasets/process#save -datasetx.save_to_disk("./directory") - -# delete variable to have more memory space -del datasetx - -# load dataset again in arrow format -datasetx = load_from_disk("./directory/train") - -# load faiss to dataset -datasetx.load_faiss_index('embeddings', path2) \ No newline at end of file diff --git a/spaces/Juno360219/albert-base-v2/README.md b/spaces/Juno360219/albert-base-v2/README.md deleted file mode 100644 index fa956f3644d10f33d9593b8bd4b7c1188047f7e4..0000000000000000000000000000000000000000 --- a/spaces/Juno360219/albert-base-v2/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Albert Base V2 -emoji: 🐠 -colorFrom: gray -colorTo: pink -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/modules/vc/utils.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/modules/vc/utils.py deleted file mode 100644 index a1cb0ff84097d1c7eb82373ccf19db061f595096..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/infer/modules/vc/utils.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import re -from fairseq import checkpoint_utils - - -def get_index_path_from_model(sid): - sid0strip = re.sub(r'\.pth|\.onnx$', '', sid) - sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory - - # Check if the sid0strip has the specific ending format _eXXX_sXXX - if re.match(r'.+_e\d+_s\d+$', sid0name): - base_model_name = sid0name.rsplit('_', 2)[0] - else: - base_model_name = sid0name - - return next( - ( - f - for f in [ - os.path.join(root, name) - for root, _, files in os.walk(os.getenv("index_root"), topdown=False) - for name in 
files - if name.endswith(".index") and "trained" not in name - ] - if base_model_name in f - ), - "", - ) - - -def load_hubert(config): - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["assets/hubert/hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - return hubert_model.eval() diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/Kevin676/AutoGPT/autogpt/commands/times.py b/spaces/Kevin676/AutoGPT/autogpt/commands/times.py deleted file mode 100644 index 3c9b8a4fc67a251c9e81a8c4a725cd1e25fcbebe..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/commands/times.py +++ /dev/null @@ -1,10 +0,0 @@ -from datetime import datetime - - -def get_datetime() -> str: - """Return the current date and time - - Returns: - str: The current date and time - """ - return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") diff --git a/spaces/Kr1n3/Fashion-Items-Classification/README.md b/spaces/Kr1n3/Fashion-Items-Classification/README.md deleted file mode 100644 index b89cace17e5b7df622b6f127a463877a91e50c28..0000000000000000000000000000000000000000 --- a/spaces/Kr1n3/Fashion-Items-Classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fashion Items Classification -emoji: 📈 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kuachi/ai-voice/commons.py b/spaces/Kuachi/ai-voice/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/Kuachi/ai-voice/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. 
* logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, 
norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/KunalSinha2024/cledgeEssayIdeationTool/app.py b/spaces/KunalSinha2024/cledgeEssayIdeationTool/app.py deleted file mode 100644 index 5ce16aa12a41cac4e836d0846d98587df631013f..0000000000000000000000000000000000000000 --- a/spaces/KunalSinha2024/cledgeEssayIdeationTool/app.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -from dotenv import load_dotenv -import openai -import random -import requests -from ast import literal_eval -import json -from enum import Enum -import gradio as gr - -load_dotenv() -openai.api_key = os.getenv("OPENAI_API_KEY") -CHAT_ENDPOINT="https://api.openai.com/v1/chat/completions" -CHAT_MODEL = "gpt-3.5-turbo" -CHAT_AUTH = {"Authorization": "Bearer " + openai.api_key} -MAX_TOKENS = 250 - -#TODO: handle max length limits - -class ChatRoles(): - SYSTEM = "system" - ASSISTANT = "assistant" - USER = "user" - -def get_assistant_response(gpt_history): - params = { - "model": CHAT_MODEL, - "messages": gpt_history, - "max_tokens": MAX_TOKENS - } - response = requests.post(url=CHAT_ENDPOINT, json=params, headers=CHAT_AUTH) - print(literal_eval(response.content.decode("utf-8"))) - response_message = literal_eval(response.content.decode("utf-8"))["choices"][0]["message"]["content"] - gpt_history.append({"role": ChatRoles.ASSISTANT, "content": response_message}) - print("\n" + response_message) - return response_message - -hardcoded = { - 1: "Hi, I'm an AI powered college counselor from Cledge! What prompt do you want help with?", - 2: "Pick as many questions to answer as you'd like. Write the number of the question and then your response." -} - -instructions = { - 2: "Based on these responses, generate 5 questions to help them brainstorm.", - 3: "Based on these responses, ask follow up questions that help them narrow down the focus of the essay", - 4: "Based on these responses, ask follow up questions that help them identify key themes in the essay", - 5: "Based on these responses, think of 5 ideas for personal statement essays. 
Write a synopsis of each idea.", -} - -def grad_demo(): - with gr.Blocks() as demo: - gpt_history = [] - def user(user_message, history): - gpt_history.append({"role": ChatRoles.USER, "content": user_message}) - print(f"Length of gpt_history: {gpt_history}") - return "", history + [[user_message, None]] - def bot(history): - step = len(history) - print(f"STEP: {step}") - bot_message = "" - if step in instructions: - gpt_history.append({"role": ChatRoles.SYSTEM, "content": instructions[step]}) - bot_message = get_assistant_response(gpt_history) - if step in hardcoded: - bot_message = f"{bot_message}\n\n {hardcoded[step]}" - history[-1][1] = bot_message - gpt_history.append({"role": ChatRoles.ASSISTANT, "content": bot_message}) - print(f"Length of gpt_history: {gpt_history}") - return history - def initialize(): - gpt_history.clear() - history = bot([[None, None]]) - return history - chatbot = gr.Chatbot(value = initialize) - msg = gr.Textbox() - clear = gr.Button("Clear") - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - bot, chatbot, chatbot - ) - clear.click(lambda: None, None, chatbot, queue=False) - demo.launch() - -if __name__ == "__main__": - grad_demo() - #main() \ No newline at end of file diff --git a/spaces/KyanChen/FunSR/models/edsr.py b/spaces/KyanChen/FunSR/models/edsr.py deleted file mode 100644 index e5fac715d86c0270336bd0df175e1d44fc1319b2..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/edsr.py +++ /dev/null @@ -1,201 +0,0 @@ -# modified from: https://github.com/thstkdgus35/EDSR-PyTorch - -import math -from argparse import Namespace - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from models import register - - -def default_conv(in_channels, out_channels, kernel_size, bias=True): - return nn.Conv2d( - in_channels, out_channels, kernel_size, - padding=(kernel_size//2), bias=bias) - -class MeanShift(nn.Conv2d): - def __init__( - self, rgb_range, - rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1): - - super(MeanShift, self).__init__(3, 3, kernel_size=1) - std = torch.Tensor(rgb_std) - self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1) - self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std - for p in self.parameters(): - p.requires_grad = False - -class ResBlock(nn.Module): - def __init__( - self, conv, n_feats, kernel_size, - bias=True, bn=False, act=nn.ReLU(True), res_scale=1): - - super(ResBlock, self).__init__() - m = [] - for i in range(2): - m.append(conv(n_feats, n_feats, kernel_size, bias=bias)) - if bn: - m.append(nn.BatchNorm2d(n_feats)) - if i == 0: - m.append(act) - - self.body = nn.Sequential(*m) - self.res_scale = res_scale - - def forward(self, x): - res = self.body(x).mul(self.res_scale) - res += x - - return res - -class Upsampler(nn.Sequential): - def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True): - - m = [] - if (scale & (scale - 1)) == 0: # Is scale = 2^n? 
- for _ in range(int(math.log(scale, 2))): - m.append(conv(n_feats, 4 * n_feats, 3, bias)) - m.append(nn.PixelShuffle(2)) - if bn: - m.append(nn.BatchNorm2d(n_feats)) - if act == 'relu': - m.append(nn.ReLU(True)) - elif act == 'prelu': - m.append(nn.PReLU(n_feats)) - - elif scale == 3: - m.append(conv(n_feats, 9 * n_feats, 3, bias)) - m.append(nn.PixelShuffle(3)) - if bn: - m.append(nn.BatchNorm2d(n_feats)) - if act == 'relu': - m.append(nn.ReLU(True)) - elif act == 'prelu': - m.append(nn.PReLU(n_feats)) - else: - raise NotImplementedError - - super(Upsampler, self).__init__(*m) - - -url = { - 'r16f64x2': 'https://cv.snu.ac.kr/research/EDSR/models/edsr_baseline_x2-1bc95232.pt', - 'r16f64x3': 'https://cv.snu.ac.kr/research/EDSR/models/edsr_baseline_x3-abf2a44e.pt', - 'r16f64x4': 'https://cv.snu.ac.kr/research/EDSR/models/edsr_baseline_x4-6b446fab.pt', - 'r32f256x2': 'https://cv.snu.ac.kr/research/EDSR/models/edsr_x2-0edfb8a3.pt', - 'r32f256x3': 'https://cv.snu.ac.kr/research/EDSR/models/edsr_x3-ea3ef2c6.pt', - 'r32f256x4': 'https://cv.snu.ac.kr/research/EDSR/models/edsr_x4-4f62e9ef.pt' -} - -class EDSR(nn.Module): - def __init__(self, args, conv=default_conv): - super(EDSR, self).__init__() - self.args = args - n_resblocks = args.n_resblocks - n_feats = args.n_feats - kernel_size = 3 - scale = args.scale[0] - act = nn.ReLU(True) - url_name = 'r{}f{}x{}'.format(n_resblocks, n_feats, scale) - if url_name in url: - self.url = url[url_name] - else: - self.url = None - self.sub_mean = MeanShift(args.rgb_range) - self.add_mean = MeanShift(args.rgb_range, sign=1) - - # define head module - m_head = [conv(args.n_colors, n_feats, kernel_size)] - - # define body module - m_body = [ - ResBlock( - conv, n_feats, kernel_size, act=act, res_scale=args.res_scale - ) for _ in range(n_resblocks) - ] - m_body.append(conv(n_feats, n_feats, kernel_size)) - - self.head = nn.Sequential(*m_head) - self.body = nn.Sequential(*m_body) - - if args.no_upsampling: - self.out_dim = n_feats - else: - self.out_dim = args.n_colors - # define tail module - m_tail = [ - Upsampler(conv, scale, n_feats, act=False), - conv(n_feats, args.n_colors, kernel_size) - ] - self.tail = nn.Sequential(*m_tail) - - self.load_state_dict('pretrained/'+self.url.split('/')[-1]) - - def forward(self, x): - #x = self.sub_mean(x) - x = self.head(x) - - res = self.body(x) - res += x - - if self.args.no_upsampling: - x = res - else: - x = self.tail(res) - #x = self.add_mean(x) - return x - - def load_state_dict(self, state_dict, strict=True): - state_dict = torch.load(state_dict, map_location='cpu') - own_state = self.state_dict() - print('loading pretrain model') - for name, param in state_dict.items(): - if name in own_state: - if isinstance(param, nn.Parameter): - param = param.data - try: - own_state[name].copy_(param) - except Exception: - if name.find('tail') == -1: - raise RuntimeError('While copying the parameter named {}, ' - 'whose dimensions in the model are {} and ' - 'whose dimensions in the checkpoint are {}.' 
- .format(name, own_state[name].size(), param.size())) - elif strict: - if name.find('tail') == -1: - raise KeyError('unexpected key "{}" in state_dict' - .format(name)) - - -@register('edsr-baseline') -def make_edsr_baseline(n_resblocks=16, n_feats=64, res_scale=1, - scale=2, no_upsampling=False, rgb_range=1): - args = Namespace() - args.n_resblocks = n_resblocks - args.n_feats = n_feats - args.res_scale = res_scale - - args.scale = [scale] - args.no_upsampling = no_upsampling - - args.rgb_range = rgb_range - args.n_colors = 3 - return EDSR(args) - - -@register('edsr') -def make_edsr(n_resblocks=32, n_feats=256, res_scale=0.1, - scale=2, no_upsampling=False, rgb_range=1): - args = Namespace() - args.n_resblocks = n_resblocks - args.n_feats = n_feats - args.res_scale = res_scale - - args.scale = [scale] - args.no_upsampling = no_upsampling - - args.rgb_range = rgb_range - args.n_colors = 3 - return EDSR(args) diff --git a/spaces/KyanChen/RSPrompter/mmpl/structures/__init__.py b/spaces/KyanChen/RSPrompter/mmpl/structures/__init__.py deleted file mode 100644 index 3021d0a7d0b7fb1b342295ad0a4e99c675b4e52c..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/structures/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .cls_data_sample import ClsDataSample -from .multi_task_data_sample import MultiTaskDataSample -from .utils import (batch_label_to_onehot, cat_batch_labels, - stack_batch_scores, tensor_split) - -__all__ = [ - 'ClsDataSample', 'batch_label_to_onehot', 'cat_batch_labels', - 'stack_batch_scores', 'tensor_split', 'MultiTaskDataSample' -] diff --git a/spaces/Lamai/LAMAIGPT/tests/test_config.py b/spaces/Lamai/LAMAIGPT/tests/test_config.py deleted file mode 100644 index b472a24c78edd1f931a76c68e08ed544bbe61d98..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/tests/test_config.py +++ /dev/null @@ -1,84 +0,0 @@ -from unittest import TestCase - -from autogpt.config import Config - - -class TestConfig(TestCase): - """ - Test cases for the Config class, which handles the configuration settings - for the AI and ensures it behaves as a singleton. - """ - - def setUp(self): - """ - Set up the test environment by creating an instance of the Config class. - """ - self.config = Config() - - def test_singleton(self): - """ - Test if the Config class behaves as a singleton by ensuring that two instances are the same. - """ - config2 = Config() - self.assertIs(self.config, config2) - - def test_initial_values(self): - """ - Test if the initial values of the Config class attributes are set correctly. - """ - self.assertFalse(self.config.debug_mode) - self.assertFalse(self.config.continuous_mode) - self.assertFalse(self.config.speak_mode) - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo") - self.assertEqual(self.config.smart_llm_model, "gpt-4") - self.assertEqual(self.config.fast_token_limit, 4000) - self.assertEqual(self.config.smart_token_limit, 8000) - - def test_set_continuous_mode(self): - """ - Test if the set_continuous_mode() method updates the continuous_mode attribute. - """ - self.config.set_continuous_mode(True) - self.assertTrue(self.config.continuous_mode) - - def test_set_speak_mode(self): - """ - Test if the set_speak_mode() method updates the speak_mode attribute. - """ - self.config.set_speak_mode(True) - self.assertTrue(self.config.speak_mode) - - def test_set_fast_llm_model(self): - """ - Test if the set_fast_llm_model() method updates the fast_llm_model attribute. 
- """ - self.config.set_fast_llm_model("gpt-3.5-turbo-test") - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test") - - def test_set_smart_llm_model(self): - """ - Test if the set_smart_llm_model() method updates the smart_llm_model attribute. - """ - self.config.set_smart_llm_model("gpt-4-test") - self.assertEqual(self.config.smart_llm_model, "gpt-4-test") - - def test_set_fast_token_limit(self): - """ - Test if the set_fast_token_limit() method updates the fast_token_limit attribute. - """ - self.config.set_fast_token_limit(5000) - self.assertEqual(self.config.fast_token_limit, 5000) - - def test_set_smart_token_limit(self): - """ - Test if the set_smart_token_limit() method updates the smart_token_limit attribute. - """ - self.config.set_smart_token_limit(9000) - self.assertEqual(self.config.smart_token_limit, 9000) - - def test_set_debug_mode(self): - """ - Test if the set_debug_mode() method updates the debug_mode attribute. - """ - self.config.set_debug_mode(True) - self.assertTrue(self.config.debug_mode) diff --git a/spaces/LaynzKunz/Model-RCV/README.md b/spaces/LaynzKunz/Model-RCV/README.md deleted file mode 100644 index 184dad7986ad859b643ccef136dbbb7eb62f5553..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Model-RCV/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Hololive Rvc Models V2 -emoji: ▶️🐻💿 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: true -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Marne/MockingBird/mockingbirdforuse/synthesizer/utils/text.py b/spaces/Marne/MockingBird/mockingbirdforuse/synthesizer/utils/text.py deleted file mode 100644 index 7bd0e2238ca32fcdc7d08795832d49fb9be0c419..0000000000000000000000000000000000000000 --- a/spaces/Marne/MockingBird/mockingbirdforuse/synthesizer/utils/text.py +++ /dev/null @@ -1,74 +0,0 @@ -from .symbols import symbols -from . import cleaners -import re - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - -# Regular expression matching text enclosed in curly braces: -_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)") - - -def text_to_sequence(text, cleaner_names): - """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - - The text can optionally have ARPAbet sequences enclosed in curly braces embedded - in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." 
- - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - - Returns: - List of integers corresponding to the symbols in the text - """ - sequence = [] - - # Check for curly braces and treat their contents as ARPAbet: - while len(text): - m = _curly_re.match(text) - if not m: - sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) - break - sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) - sequence += _arpabet_to_sequence(m.group(2)) - text = m.group(3) - - # Append EOS token - sequence.append(_symbol_to_id["~"]) - return sequence - - -def sequence_to_text(sequence): - """Converts a sequence of IDs back to a string""" - result = "" - for symbol_id in sequence: - if symbol_id in _id_to_symbol: - s = _id_to_symbol[symbol_id] - # Enclose ARPAbet back in curly braces: - if len(s) > 1 and s[0] == "@": - s = "{%s}" % s[1:] - result += s - return result.replace("}{", " ") - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception("Unknown cleaner: %s" % name) - text = cleaner(text) - return text - - -def _symbols_to_sequence(symbols): - return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)] - - -def _arpabet_to_sequence(text): - return _symbols_to_sequence(["@" + s for s in text.split()]) - - -def _should_keep_symbol(s): - return s in _symbol_to_id and s not in ("_", "~") diff --git a/spaces/MathysL/AutoGPT4/autogpt/commands/analyze_code.py b/spaces/MathysL/AutoGPT4/autogpt/commands/analyze_code.py deleted file mode 100644 index e02ea4c5b4ba53530e559d1cab7a07b8e3c7c638..0000000000000000000000000000000000000000 --- a/spaces/MathysL/AutoGPT4/autogpt/commands/analyze_code.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code evaluation module.""" -from __future__ import annotations - -from autogpt.llm_utils import call_ai_function - - -def analyze_code(code: str) -> list[str]: - """ - A function that takes in a string and returns a response from create chat - completion api call. - - Parameters: - code (str): Code to be evaluated. - Returns: - A result string from create chat completion. A list of suggestions to - improve the code. - """ - - function_string = "def analyze_code(code: str) -> List[str]:" - args = [code] - description_string = ( - "Analyzes the given code and returns a list of suggestions" " for improvements." 
- ) - - return call_ai_function(function_string, args, description_string) diff --git a/spaces/MoonQiu/LongerCrafter/scripts/run_text2video_freenoise_mp_512.sh b/spaces/MoonQiu/LongerCrafter/scripts/run_text2video_freenoise_mp_512.sh deleted file mode 100644 index 4fe3fe1f471e85f3dfa8563489e2d3d5ca5fb84d..0000000000000000000000000000000000000000 --- a/spaces/MoonQiu/LongerCrafter/scripts/run_text2video_freenoise_mp_512.sh +++ /dev/null @@ -1,24 +0,0 @@ -name="base_512_test" - -ckpt='checkpoints/base_512_v1/model.ckpt' -config='configs/inference_t2v_tconv512_v1.0_freenoise.yaml' - -prompt_file="prompts/mp_prompts.txt" -res_dir="results_freenoise_mp_512" - -python3 scripts/evaluation/inference_freenoise_mp.py \ ---seed 123 \ ---mode 'base' \ ---ckpt_path $ckpt \ ---config $config \ ---savedir $res_dir/$name \ ---n_samples 3 \ ---bs 1 --height 320 --width 512 \ ---unconditional_guidance_scale 12.0 \ ---ddim_steps 50 \ ---ddim_eta 0.0 \ ---prompt_file $prompt_file \ ---fps 8 \ ---frames 64 \ ---window_size 16 \ ---window_stride 4 diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/kie/postprocessors/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/kie/postprocessors/__init__.py deleted file mode 100644 index 645904bc1beb0b8e1b4f169a8b5344de55e41f8f..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/kie/postprocessors/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .sdmgr_postprocessor import SDMGRPostProcessor - -__all__ = ['SDMGRPostProcessor'] diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/structures/textspotting_data_sample.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/structures/textspotting_data_sample.py deleted file mode 100644 index 28478f516f96651d2e49c180cea4a97336fc5c97..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/structures/textspotting_data_sample.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmocr.structures import TextDetDataSample - - -class TextSpottingDataSample(TextDetDataSample): - """A data structure interface of MMOCR. They are used as interfaces between - different components. - - The attributes in ``TextSpottingDataSample`` are divided into two parts: - - - ``gt_instances``(InstanceData): Ground truth of instance annotations. - - ``pred_instances``(InstanceData): Instances of model predictions. - - Examples: - >>> import torch - >>> import numpy as np - >>> from mmengine.structures import InstanceData - >>> from mmocr.data import TextSpottingDataSample - >>> # gt_instances - >>> data_sample = TextSpottingDataSample() - >>> img_meta = dict(img_shape=(800, 1196, 3), - ... pad_shape=(800, 1216, 3)) - >>> gt_instances = InstanceData(metainfo=img_meta) - >>> gt_instances.bboxes = torch.rand((5, 4)) - >>> gt_instances.labels = torch.rand((5,)) - >>> data_sample.gt_instances = gt_instances - >>> assert 'img_shape' in data_sample.gt_instances.metainfo_keys() - >>> len(data_sample.gt_instances) - 5 - >>> print(data_sample) - - ) at 0x7f21fb1b9880> - >>> # pred_instances - >>> pred_instances = InstanceData(metainfo=img_meta) - >>> pred_instances.bboxes = torch.rand((5, 4)) - >>> pred_instances.scores = torch.rand((5,)) - >>> data_sample = TextSpottingDataSample( - ... pred_instances=pred_instances) - >>> assert 'pred_instances' in data_sample - >>> data_sample = TextSpottingDataSample() - >>> gt_instances_data = dict( - ... bboxes=torch.rand(2, 4), - ... 
labels=torch.rand(2), - ... masks=np.random.rand(2, 2, 2)) - >>> gt_instances = InstanceData(**gt_instances_data) - >>> data_sample.gt_instances = gt_instances - >>> assert 'gt_instances' in data_sample - >>> assert 'masks' in data_sample.gt_instances - """ diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/scripts/clip_prepro_feats.py b/spaces/NAACL2022/CLIP-Caption-Reward/scripts/clip_prepro_feats.py deleted file mode 100644 index b7a45c829fa5c19e36509170135835c6d6bc8d67..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/scripts/clip_prepro_feats.py +++ /dev/null @@ -1,170 +0,0 @@ -""" -Preprocess a raw json dataset into features files for use in data_loader.py - -Input: json file that has the form -[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...] -example element in this list would look like -{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895} - -This script reads this json, does some basic preprocessing on the captions -(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays - -Output: two folders of features -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import json -import argparse -from random import shuffle, seed -import string -# non-standard dependencies: -import h5py -from six.moves import cPickle -import numpy as np -import torch -import torchvision.models as models -import skimage.io - -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from PIL import Image -from torch import nn - -preprocess = Compose([ - Resize((448, 448), interpolation=Image.BICUBIC), - CenterCrop((448, 448)), - ToTensor() -]) - - -from clip.clip import load -from timm.models.vision_transformer import resize_pos_embed -import timm - -from captioning.utils.resnet_utils import myResnet -import captioning.utils.resnet as resnet - -from tqdm import tqdm - - -def main(params): - if params["model_type"] != 'vit_base_patch32_224_in21k': - model, transform = load(params["model_type"], jit=False) - else: - model = timm.create_model(params["model_type"], pretrained=True) - model = model.cuda() - - if params["model_type"] != 'vit_base_patch32_224_in21k': - save_model_type = params["model_type"].split("-")[0] - mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to("cuda").reshape(3, 1, 1) - std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to("cuda").reshape(3, 1, 1) - - if "RN" in params["model_type"]: - num_patches = 196 #600 * 1000 // 32 // 32 - pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, model.visual.attnpool.positional_embedding.shape[-1], device='cuda'),) - pos_embed.weight = resize_pos_embed(model.visual.attnpool.positional_embedding.unsqueeze(0), pos_embed) - model.visual.attnpool.positional_embedding = pos_embed - - else: - save_model_type = 'vit_base' - mean = torch.Tensor([0.5, 0.5, 0.5]).to("cuda").reshape(3, 1, 1) - std = torch.Tensor([0.5, 0.5, 0.5]).to("cuda").reshape(3, 1, 1) - - num_patches = 196 #600 * 1000 // 32 // 32 - pos_embed = 
nn.Parameter(torch.zeros(1, num_patches + 1, 768, device='cuda'),) - pos_embed.weight = resize_pos_embed(model.pos_embed, pos_embed) - model.pos_embed = pos_embed - - if params["model_type"] == "ViT-B/32": - num_patches = 196 #600 * 1000 // 32 // 32 - pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),) - pos_embed.weight = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0)) - model.visual.positional_embedding = pos_embed - imgs = json.load(open(params['input_json'], 'r')) - - imgs = imgs['images'] - - if args.n_jobs > 1: - print('Total imgs:', len(imgs)) - print('Using {} jobs'.format(args.n_jobs)) - print('job id:', args.job_id) - imgs = imgs[args.job_id::args.n_jobs] - - N = len(imgs) - - seed(123) # make reproducible - - dir_fc = params['output_dir']+'_clip_'+save_model_type+'_fc' - dir_att = params['output_dir']+'_clip_'+save_model_type+'_att' - if not os.path.isdir(dir_fc): - os.mkdir(dir_fc) - if not os.path.isdir(dir_att): - os.mkdir(dir_att) - - for i,img in enumerate(tqdm(imgs)): - # load the image - with torch.no_grad(): - - image = preprocess(Image.open(os.path.join(params['images_root'], img['filepath'], img['filename']) ).convert("RGB")) - image = torch.tensor(np.stack([image])).cuda() - image -= mean - image /= std - if "RN" in params["model_type"]: - tmp_att, tmp_fc = model.encode_image(image) - tmp_att = tmp_att[0].permute(1, 2, 0) - tmp_fc = tmp_fc[0] - elif params["model_type"] == 'vit_base_patch32_224_in21k': - x = model(image) - tmp_fc = x[0, 0, :] - tmp_att = x[0, 1:, :].reshape( 14, 14, 768 ) - else: - x = model.visual.conv1(image.half()) # shape = [*, width, grid, grid] - x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - x = torch.cat([model.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] - x = x + model.visual.positional_embedding.to(x.dtype)[:x.shape[1], :] - x = model.visual.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - - for layer_idx, layer in enumerate(model.visual.transformer.resblocks): - x = layer(x) - - x = x.permute(1, 0, 2) - tmp_fc = x[0, 0, :] - tmp_att = x[0, 1:, :].reshape( 14, 14, 768 ) - - np.save(os.path.join(dir_fc, str(img['cocoid'])), tmp_fc.data.cpu().float().numpy()) - np.savez_compressed(os.path.join(dir_att, str(img['cocoid'])), feat=tmp_att.data.cpu().float().numpy()) - - - # if i % 1000 == 0: - # print('processing %d/%d (%.2f%% done)' % (i, N, i*100.0/N)) - print('wrote ', dir_fc, dir_att) - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - - # input json - parser.add_argument('--input_json', required=True, help='input json file to process into hdf5') - parser.add_argument('--output_dir', default='data', help='output h5 file') - - # options - parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json') - parser.add_argument('--att_size', default=14, type=int, help='14x14 or 7x7') - parser.add_argument('--model_type', default='RN50', type=str, help='RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k') - - parser.add_argument('--n_jobs', default=-1, type=int, help='number of jobs to run in parallel') - parser.add_argument('--job_id', default=0, type=int, help='job id') - parser.add_argument('--batch_size', default=1, type=int, help='batch size') - - - args = parser.parse_args() - 
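    # Example invocation (a sketch; the paths below are illustrative and not taken from this repo, -    # only the flags defined by the parser above are real): -    #   python scripts/clip_prepro_feats.py --input_json data/dataset_coco.json --output_dir data/cocotalk --images_root data/coco --model_type RN50 -   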
params = vars(args) # convert to ordinary dict - print('parsed input parameters:') - print(json.dumps(params, indent = 2)) - main(params) diff --git a/spaces/NATSpeech/PortaSpeech/modules/commons/conformer/espnet_transformer_attn.py b/spaces/NATSpeech/PortaSpeech/modules/commons/conformer/espnet_transformer_attn.py deleted file mode 100644 index a479a27ea6fd4202359da435234408ba074f7577..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/modules/commons/conformer/espnet_transformer_attn.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2019 Shigeki Karita -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Multi-Head Attention layer definition.""" - -import math - -import numpy -import torch -from torch import nn - - -class MultiHeadedAttention(nn.Module): - """Multi-Head Attention layer. - Args: - n_head (int): The number of heads. - n_feat (int): The number of features. - dropout_rate (float): Dropout rate. - """ - - def __init__(self, n_head, n_feat, dropout_rate): - """Construct an MultiHeadedAttention object.""" - super(MultiHeadedAttention, self).__init__() - assert n_feat % n_head == 0 - # We assume d_v always equals d_k - self.d_k = n_feat // n_head - self.h = n_head - self.linear_q = nn.Linear(n_feat, n_feat) - self.linear_k = nn.Linear(n_feat, n_feat) - self.linear_v = nn.Linear(n_feat, n_feat) - self.linear_out = nn.Linear(n_feat, n_feat) - self.attn = None - self.dropout = nn.Dropout(p=dropout_rate) - - def forward_qkv(self, query, key, value): - """Transform query, key and value. - Args: - query (torch.Tensor): Query tensor (#batch, time1, size). - key (torch.Tensor): Key tensor (#batch, time2, size). - value (torch.Tensor): Value tensor (#batch, time2, size). - Returns: - torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k). - torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k). - torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k). - """ - n_batch = query.size(0) - q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) - k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) - v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) - q = q.transpose(1, 2) # (batch, head, time1, d_k) - k = k.transpose(1, 2) # (batch, head, time2, d_k) - v = v.transpose(1, 2) # (batch, head, time2, d_k) - - return q, k, v - - def forward_attention(self, value, scores, mask): - """Compute attention context vector. - Args: - value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k). - scores (torch.Tensor): Attention score (#batch, n_head, time1, time2). - mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2). - Returns: - torch.Tensor: Transformed value (#batch, time1, d_model) - weighted by the attention score (#batch, time1, time2). 
- """ - n_batch = value.size(0) - if mask is not None: - mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2) - min_value = float( - numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min - ) - scores = scores.masked_fill(mask, min_value) - self.attn = torch.softmax(scores, dim=-1).masked_fill( - mask, 0.0 - ) # (batch, head, time1, time2) - else: - self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) - - p_attn = self.dropout(self.attn) - x = torch.matmul(p_attn, value) # (batch, head, time1, d_k) - x = ( - x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) - ) # (batch, time1, d_model) - - return self.linear_out(x) # (batch, time1, d_model) - - def forward(self, query, key, value, mask): - """Compute scaled dot product attention. - Args: - query (torch.Tensor): Query tensor (#batch, time1, size). - key (torch.Tensor): Key tensor (#batch, time2, size). - value (torch.Tensor): Value tensor (#batch, time2, size). - mask (torch.Tensor): Mask tensor (#batch, 1, time2) or - (#batch, time1, time2). - Returns: - torch.Tensor: Output tensor (#batch, time1, d_model). - """ - q, k, v = self.forward_qkv(query, key, value) - scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) - return self.forward_attention(v, scores, mask) - - -class RelPositionMultiHeadedAttention(MultiHeadedAttention): - """Multi-Head Attention layer with relative position encoding. - Paper: https://arxiv.org/abs/1901.02860 - Args: - n_head (int): The number of heads. - n_feat (int): The number of features. - dropout_rate (float): Dropout rate. - """ - - def __init__(self, n_head, n_feat, dropout_rate): - """Construct an RelPositionMultiHeadedAttention object.""" - super().__init__(n_head, n_feat, dropout_rate) - # linear transformation for positional ecoding - self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) - # these two learnable bias are used in matrix c and matrix d - # as described in https://arxiv.org/abs/1901.02860 Section 3.3 - self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) - self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) - torch.nn.init.xavier_uniform_(self.pos_bias_u) - torch.nn.init.xavier_uniform_(self.pos_bias_v) - - def rel_shift(self, x, zero_triu=False): - """Compute relative positinal encoding. - Args: - x (torch.Tensor): Input tensor (batch, time, size). - zero_triu (bool): If true, return the lower triangular part of the matrix. - Returns: - torch.Tensor: Output tensor. - """ - zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype) - x_padded = torch.cat([zero_pad, x], dim=-1) - - x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2)) - x = x_padded[:, :, 1:].view_as(x) - - if zero_triu: - ones = torch.ones((x.size(2), x.size(3))) - x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] - - return x - - def forward(self, query, key, value, pos_emb, mask): - """Compute 'Scaled Dot Product Attention' with rel. positional encoding. - Args: - query (torch.Tensor): Query tensor (#batch, time1, size). - key (torch.Tensor): Key tensor (#batch, time2, size). - value (torch.Tensor): Value tensor (#batch, time2, size). - pos_emb (torch.Tensor): Positional embedding tensor (#batch, time2, size). - mask (torch.Tensor): Mask tensor (#batch, 1, time2) or - (#batch, time1, time2). - Returns: - torch.Tensor: Output tensor (#batch, time1, d_model). 
- """ - q, k, v = self.forward_qkv(query, key, value) - q = q.transpose(1, 2) # (batch, time1, head, d_k) - - n_batch_pos = pos_emb.size(0) - p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) - p = p.transpose(1, 2) # (batch, head, time1, d_k) - - # (batch, head, time1, d_k) - q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) - # (batch, head, time1, d_k) - q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) - - # compute attention score - # first compute matrix a and matrix c - # as described in https://arxiv.org/abs/1901.02860 Section 3.3 - # (batch, head, time1, time2) - matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) - - # compute matrix b and matrix d - # (batch, head, time1, time2) - matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) - matrix_bd = self.rel_shift(matrix_bd) - - scores = (matrix_ac + matrix_bd) / math.sqrt( - self.d_k - ) # (batch, head, time1, time2) - - return self.forward_attention(v, scores, mask) diff --git a/spaces/NSect/VALL-E-X/examples.py b/spaces/NSect/VALL-E-X/examples.py deleted file mode 100644 index 205210e0d03f1203648c8fc327da713f9db5eb4e..0000000000000000000000000000000000000000 --- a/spaces/NSect/VALL-E-X/examples.py +++ /dev/null @@ -1,24 +0,0 @@ -infer_from_audio_examples = [ - ["This is how this machine has taken my voice.", 'English', 'no-accent', "prompts/en-2.wav", None, "Wow, look at that! That's no ordinary Teddy bear!"], - ["我喜欢抽电子烟,尤其是锐刻五代。", '中文', 'no-accent', "prompts/zh-1.wav", None, "今天我很荣幸,"], - ["私の声を真似するのはそんなに面白いですか?", '日本語', 'no-accent', "prompts/ja-2.ogg", None, "初めまして、朝武よしのです。"], - ["你可以听得出来我有多困。", '中文', 'no-accent', "prompts/en-1.wav", None, ""], - ["この文は、クロスリンガル合成の例です。", '日本語', 'no-accent', "prompts/zh-2.wav", None, ""], - ["Actually, I can't speak English, but this machine helped me do it.", 'English', 'no-accent', "prompts/ja-1.wav", None, ""], -] - -make_npz_prompt_examples = [ - ["Gem-trader", "prompts/en-2.wav", None, "Wow, look at that! 
That's no ordinary Teddy bear!"], - ["Ding Zhen", "prompts/zh-1.wav", None, "今天我很荣幸,"], - ["Yoshino", "prompts/ja-2.ogg", None, "初めまして、朝武よしのです。"], - ["Sleepy-woman", "prompts/en-1.wav", None, ""], - ["Yae", "prompts/zh-2.wav", None, ""], - ["Cafe", "prompts/ja-1.wav", None, ""], -] - -infer_from_prompt_examples = [ - ["A prompt contains voice, prosody and emotion information of a certain speaker.", "English", "no-accent", "vctk_1", None], - ["This prompt is made with an audio of three seconds.", "English", "no-accent", "librispeech_1", None], - ["This prompt is made with Chinese speech", "English", "no-accent", "seel", None], -] - diff --git a/spaces/NeuML/txtsql/README.md b/spaces/NeuML/txtsql/README.md deleted file mode 100644 index d1aeee6f368624ac9c4a0849ba66ec77250607a0..0000000000000000000000000000000000000000 --- a/spaces/NeuML/txtsql/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: txtsql -emoji: 📃 -colorFrom: blue -colorTo: gray -sdk: streamlit -sdk_version: 1.24.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/denoising_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/denoising_dataset.py deleted file mode 100644 index bdb62c8d5db9c8755c72db4d0d8083c936f18dc8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/denoising_dataset.py +++ /dev/null @@ -1,436 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import numpy as np -import torch - -from . 
import FairseqDataset, data_utils - - -def collate( - samples, - pad_idx, - eos_idx, - vocab, - left_pad_source=False, - left_pad_target=False, - input_feeding=True, - pad_to_length=None, -): - assert input_feeding - if len(samples) == 0: - return {} - - def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): - return data_utils.collate_tokens( - [s[key] for s in samples], - pad_idx, - eos_idx=None, # use eos_idx of each sample instead of vocab.eos() - left_pad=left_pad, - move_eos_to_beginning=move_eos_to_beginning, - pad_to_length=pad_to_length, - ) - - id = torch.LongTensor([s["id"] for s in samples]) - src_tokens = merge( - "source", - left_pad=left_pad_source, - pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, - ) - # sort by descending source length - src_lengths = torch.LongTensor([s["source"].numel() for s in samples]) - src_lengths, sort_order = src_lengths.sort(descending=True) - id = id.index_select(0, sort_order) - src_tokens = src_tokens.index_select(0, sort_order) - - prev_output_tokens = None - target = None - if samples[0].get("target", None) is not None: - target = merge( - "target", - left_pad=left_pad_target, - pad_to_length=pad_to_length["target"] - if pad_to_length is not None - else None, - ) - target = target.index_select(0, sort_order) - ntokens = sum(len(s["target"]) for s in samples) - - if input_feeding: - # we create a shifted version of targets for feeding the - # previous output token(s) into the next decoder step - prev_output_tokens = merge( - "target", - left_pad=left_pad_target, - move_eos_to_beginning=True, - pad_to_length=pad_to_length["target"] - if pad_to_length is not None - else None, - ) - prev_output_tokens = prev_output_tokens.index_select(0, sort_order) - else: - ntokens = sum(len(s["source"]) for s in samples) - - batch = { - "id": id, - "ntokens": ntokens, - "net_input": { - "src_tokens": src_tokens, - "src_lengths": src_lengths, - }, - "target": target, - "nsentences": samples[0]["source"].size(0), - "sort_order": sort_order, - } - if prev_output_tokens is not None: - batch["net_input"]["prev_output_tokens"] = prev_output_tokens - - return batch - - -class DenoisingDataset(FairseqDataset): - """ - A wrapper around TokenBlockDataset for BART dataset. - - Args: - dataset (TokenBlockDataset): dataset to wrap - sizes (List[int]): sentence lengths - vocab (~fairseq.data.Dictionary): vocabulary - mask_idx (int): dictionary index used for masked token - mask_whole_words: only mask whole words. This should be a byte mask - over vocab indices, indicating whether it is the beginning of a - word. We will extend any mask to encompass the whole word. - shuffle (bool, optional): shuffle the elements before batching. - Default: ``True`` - seed: Seed for random number generator for reproducibility. - args: argparse arguments. 
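-            (only the ``mask``, ``mask_random``, ``insert``, ``rotate``, ``permute_sentences``, ``replace_length``, ``mask_length``, ``poisson_lambda`` and ``bpe`` fields of ``args`` are read).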
- """ - - def __init__( - self, - dataset, - sizes, - vocab, - mask_idx, - mask_whole_words, - shuffle, - seed, - args, - eos=None, - item_transform_func=None, - ): - self.dataset = dataset - - self.sizes = sizes - - self.vocab = vocab - self.shuffle = shuffle - self.seed = seed - self.mask_idx = mask_idx - self.mask_whole_word = mask_whole_words - self.mask_ratio = args.mask - self.random_ratio = args.mask_random - self.insert_ratio = args.insert - self.rotate_ratio = args.rotate - self.permute_sentence_ratio = args.permute_sentences - self.eos = eos if eos is not None else vocab.eos() - self.item_transform_func = item_transform_func - - if args.bpe != "gpt2": - self.full_stop_index = self.vocab.eos() - else: - assert args.bpe == "gpt2" - self.full_stop_index = self.vocab.index("13") - - self.replace_length = args.replace_length - if self.replace_length not in [-1, 0, 1]: - raise ValueError(f"invalid arg: replace_length={self.replace_length}") - if args.mask_length not in ["subword", "word", "span-poisson"]: - raise ValueError(f"invalid arg: mask-length={args.mask_length}") - if args.mask_length == "subword" and args.replace_length not in [0, 1]: - raise ValueError(f"if using subwords, use replace-length=1 or 0") - - self.mask_span_distribution = None - if args.mask_length == "span-poisson": - _lambda = args.poisson_lambda - - lambda_to_the_k = 1 - e_to_the_minus_lambda = math.exp(-_lambda) - k_factorial = 1 - ps = [] - for k in range(0, 128): - ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial) - lambda_to_the_k *= _lambda - k_factorial *= k + 1 - if ps[-1] < 0.0000001: - break - ps = torch.FloatTensor(ps) - self.mask_span_distribution = torch.distributions.Categorical(ps) - - self.epoch = 0 - - @property - def can_reuse_epoch_itr_across_epochs(self): - return True # only the noise changes, not item sizes - - def set_epoch(self, epoch, **unused): - self.epoch = epoch - - def __getitem__(self, index): - with data_utils.numpy_seed(self.seed, self.epoch, index): - tokens = self.dataset[index] - assert tokens[-1] == self.eos - source, target = tokens, tokens.clone() - - if self.permute_sentence_ratio > 0.0: - source = self.permute_sentences(source, self.permute_sentence_ratio) - - if self.mask_ratio > 0: - source = self.add_whole_word_mask(source, self.mask_ratio) - - if self.insert_ratio > 0: - source = self.add_insertion_noise(source, self.insert_ratio) - - if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio: - source = self.add_rolling_noise(source) - # there can additional changes to make: - if self.item_transform_func is not None: - source, target = self.item_transform_func(source, target) - - assert (source >= 0).all() - assert (source[1:-1] >= 1).all() - assert (source <= len(self.vocab)).all() - assert source[0] == self.vocab.bos() - assert source[-1] == self.eos - return { - "id": index, - "source": source, - "target": target, - } - - def __len__(self): - return len(self.dataset) - - def permute_sentences(self, source, p=1.0): - full_stops = source == self.full_stop_index - # Pretend it ends with a full stop so last span is a sentence - full_stops[-2] = 1 - - # Tokens that are full stops, where the previous token is not - sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 - result = source.clone() - - num_sentences = sentence_ends.size(0) - num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0) - substitutions = torch.randperm(num_sentences)[:num_to_permute] - ordering = torch.arange(0, num_sentences) - 
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)] - - # Ignore at start - index = 1 - for i in ordering: - sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]] - result[index : index + sentence.size(0)] = sentence - index += sentence.size(0) - return result - - def word_starts(self, source): - if self.mask_whole_word is not None: - is_word_start = self.mask_whole_word.gather(0, source) - else: - is_word_start = torch.ones(source.size()) - is_word_start[0] = 0 - is_word_start[-1] = 0 - return is_word_start - - def add_whole_word_mask(self, source, p): - is_word_start = self.word_starts(source) - num_to_mask = int(math.ceil(is_word_start.float().sum() * p)) - num_inserts = 0 - if num_to_mask == 0: - return source - - if self.mask_span_distribution is not None: - lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,)) - - # Make sure we have enough to mask - cum_length = torch.cumsum(lengths, 0) - while cum_length[-1] < num_to_mask: - lengths = torch.cat( - [ - lengths, - self.mask_span_distribution.sample(sample_shape=(num_to_mask,)), - ], - dim=0, - ) - cum_length = torch.cumsum(lengths, 0) - - # Trim to masking budget - i = 0 - while cum_length[i] < num_to_mask: - i += 1 - lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1]) - num_to_mask = i + 1 - lengths = lengths[:num_to_mask] - - # Handle 0-length mask (inserts) separately - lengths = lengths[lengths > 0] - num_inserts = num_to_mask - lengths.size(0) - num_to_mask -= num_inserts - if num_to_mask == 0: - return self.add_insertion_noise(source, num_inserts / source.size(0)) - - assert (lengths > 0).all() - else: - lengths = torch.ones((num_to_mask,)).long() - assert is_word_start[-1] == 0 - word_starts = is_word_start.nonzero(as_tuple=False) - indices = word_starts[ - torch.randperm(word_starts.size(0))[:num_to_mask] - ].squeeze(1) - mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio - - source_length = source.size(0) - assert source_length - 1 not in indices - to_keep = torch.ones(source_length, dtype=torch.bool) - is_word_start[ - -1 - ] = 255 # acts as a long length, so spans don't go over the end of doc - if self.replace_length == 0: - to_keep[indices] = 0 - else: - # keep index, but replace it with [MASK] - source[indices] = self.mask_idx - source[indices[mask_random]] = torch.randint( - 1, len(self.vocab), size=(mask_random.sum(),) - ) - - if self.mask_span_distribution is not None: - assert len(lengths.size()) == 1 - assert lengths.size() == indices.size() - lengths -= 1 - while indices.size(0) > 0: - assert lengths.size() == indices.size() - lengths -= is_word_start[indices + 1].long() - uncompleted = lengths >= 0 - indices = indices[uncompleted] + 1 - mask_random = mask_random[uncompleted] - lengths = lengths[uncompleted] - if self.replace_length != -1: - # delete token - to_keep[indices] = 0 - else: - # keep index, but replace it with [MASK] - source[indices] = self.mask_idx - source[indices[mask_random]] = torch.randint( - 1, len(self.vocab), size=(mask_random.sum(),) - ) - else: - # A bit faster when all lengths are 1 - while indices.size(0) > 0: - uncompleted = is_word_start[indices + 1] == 0 - indices = indices[uncompleted] + 1 - mask_random = mask_random[uncompleted] - if self.replace_length != -1: - # delete token - to_keep[indices] = 0 - else: - # keep index, but replace it with [MASK] - source[indices] = self.mask_idx - source[indices[mask_random]] = torch.randint( - 1, len(self.vocab), size=(mask_random.sum(),) - ) - - 
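        # every mask span has been consumed at this point; source[to_keep] below drops the positions marked for deletion while keeping any [MASK]/random replacements written above -       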
assert source_length - 1 not in indices - - source = source[to_keep] - - if num_inserts > 0: - source = self.add_insertion_noise(source, num_inserts / source.size(0)) - - return source - - def add_permuted_noise(self, tokens, p): - num_words = len(tokens) - num_to_permute = math.ceil(((num_words * 2) * p) / 2.0) - substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1 - tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]] - return tokens - - def add_rolling_noise(self, tokens): - offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1) - tokens = torch.cat( - (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]), - dim=0, - ) - return tokens - - def add_insertion_noise(self, tokens, p): - if p == 0.0: - return tokens - - num_tokens = len(tokens) - n = int(math.ceil(num_tokens * p)) - - noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1 - noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool) - noise_mask[noise_indices] = 1 - result = torch.LongTensor(n + len(tokens)).fill_(-1) - - num_random = int(math.ceil(n * self.random_ratio)) - result[noise_indices[num_random:]] = self.mask_idx - result[noise_indices[:num_random]] = torch.randint( - low=1, high=len(self.vocab), size=(num_random,) - ) - - result[~noise_mask] = tokens - - assert (result >= 0).all() - return result - - def collater(self, samples, pad_to_length=None): - """Merge a list of samples to form a mini-batch. - Args: - samples (List[dict]): samples to collate - Returns: - dict: a mini-batch of data - """ - return collate( - samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length - ) - - def num_tokens(self, index): - """Return the number of tokens in a sample. This value is used to - enforce ``--max-tokens`` during batching.""" - return self.sizes[index] - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return self.sizes[index] - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)) - else: - indices = np.arange(len(self)) - return indices[np.argsort(self.sizes[indices], kind="mergesort")] - - def prefetch(self, indices): - self.src.prefetch(indices) - self.tgt.prefetch(indices) - - @property - def supports_prefetch(self): - return ( - hasattr(self.src, "supports_prefetch") - and self.src.supports_prefetch - and hasattr(self.tgt, "supports_prefetch") - and self.tgt.supports_prefetch - ) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_constraints.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_constraints.py deleted file mode 100644 index 1c37f7e1fb26d8ea5349fedd3a60f566d09cf598..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_constraints.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
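-# Exercises the constrained-decoding helpers from fairseq.token_generation_constraints: -# pack_constraints, ConstraintNode, UnorderedConstraintState and OrderedConstraintState.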
- -import sys -import unittest - -import torch -from fairseq.token_generation_constraints import * - - -def tensorize(constraints: List[List[int]]) -> torch.Tensor: - return [torch.tensor(x) for x in constraints] - - -class TestHelperRoutines(unittest.TestCase): - def setUp(self): - self.examples = [ - ([[]], torch.tensor([[0]])), - ([[], []], torch.tensor([[0], [0]])), - ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])), - ( - [ - [ - torch.tensor([3, 1, 2]), - torch.tensor([3]), - torch.tensor([4, 5, 6, 7]), - ], - [], - [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])], - ], - torch.tensor( - [ - [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0], - ] - ), - ), - ] - - def test_packing(self): - """Ensures the list of lists of tensors gets packed correctly.""" - for batch_constraints, expected_tensor in self.examples: - packed = pack_constraints(batch_constraints) - assert torch.equal(packed, expected_tensor) - - -class TestUnorderedConstraintState(unittest.TestCase): - def setUp(self): - # Tuples of (contraint set, expected printed graph, token counts per node) - self.examples = [ - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", - {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}, - ), - ([], "[None].False#0", {}), - (tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}), - ( - tensorize([[100000, 1, 2, 3, 4, 5]]), - "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", - {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, - ), - ( - tensorize([[1, 2], [1, 2]]), - "([None].False#2 ([1].False#2 [2].True#2))", - {1: 2, 2: 2}, - ), - ( - tensorize([[1, 2], [3, 4]]), - "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", - {1: 1, 2: 1, 3: 1, 4: 1}, - ), - ] - - self.sequences = [ - ( - self.examples[0][0], - [], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - [1, 2], - {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, - ), - ( - self.examples[0][0], - [1, 2, 94], - {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - [1, 3, 999, 1, 4], - {"bank": 4, "num_completed": 2, "finished": False, "is_root": False}, - ), - ( - self.examples[0][0], - [1, 3, 999, 1, 4, 999], - {"bank": 4, "num_completed": 2, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - [4, 5, 6, 8], - {"bank": 2, "num_completed": 1, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] - # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], - [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, - ), - ( - self.examples[0][0], - [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": True}, - ), - ( - tensorize([[1], [2, 3]]), - # Should not be able to get credit for entering 1 a second time - [1, 1], - {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, - ), - ( - self.examples[4][0], - [1, 2, 1, 2], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ( - self.examples[4][0], - [1, 2, 1, 2, 1], - {"bank": 4, 
"num_completed": 2, "finished": True, "is_root": True}, - ), - ( - self.examples[5][0], - [1, 2, 3, 4, 5], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, - ), - ] - - def test_graphs(self): - """ - Test whether unordered graph systems are created correctly. - """ - for example in self.examples: - constraints, expected, gold_counts = example - c = ConstraintNode.create(constraints) - assert ( - ConstraintNode.print_graph(c) == expected - ), f"got {ConstraintNode.print_graph(c)}, expected {expected}" - assert ( - c.token_counts() == gold_counts - ), f"{c} got {c.token_counts()} wanted {gold_counts}" - - def test_next_tokens(self): - """ - Tests that the set of next tokens is correct. - """ - for example in self.examples: - constraints, expected, gold_counts = example - root = ConstraintNode.create(constraints) - - root_tokens = set(root.children.keys()) - for sequence in constraints: - state = UnorderedConstraintState(root) - for token in sequence: - all_tokens = root_tokens.union(state.node.children.keys()) - assert ( - all_tokens == state.next_tokens() - ), f"ALL {all_tokens} NEXT {state.next_tokens()}" - state = state.advance(token) - - def test_sequences(self): - for constraints, tokens, expected in self.sequences: - state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) - for token in tokens: - state = state.advance(token) - result = {} - for attr in expected.keys(): - result[attr] = getattr(state, attr) - - assert ( - result == expected - ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" - - -class TestOrderedConstraintState(unittest.TestCase): - def setUp(self): - self.sequences = [ - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2], - {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 94], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 3, 999, 1, 4], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 3, 999, 999], - {"bank": 3, "num_completed": 1, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 3, 77, 1, 3, 1], - {"bank": 6, "num_completed": 2, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, - ), - ( - tensorize([[1], [2, 3]]), - [1, 1], - {"bank": 1, "num_completed": 1, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2], [1, 2]]), - [1, 2, 1, 2], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ( - tensorize([[1, 2], [1, 2]]), - [1, 2, 1, 2, 1], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ( - tensorize([[1, 2], [3, 4]]), - [1, 2, 3, 4, 5], - {"bank": 4, "num_completed": 2, "finished": True, 
"is_root": False}, - ), - ] - - def test_sequences(self): - for i, (constraints, tokens, expected) in enumerate(self.sequences): - state = OrderedConstraintState.create(pack_constraints([constraints])[0]) - for token in tokens: - state = state.advance(token) - result = {} - for attr in expected.keys(): - result[attr] = getattr(state, attr) - assert ( - result == expected - ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_iterators.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_iterators.py deleted file mode 100644 index 7b3dd4848553357e5e8326ed3a31cf5d68ceea94..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_iterators.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unittest - -from fairseq.data import iterators - - -class TestIterators(unittest.TestCase): - def test_counting_iterator_index(self, ref=None, itr=None): - # Test the indexing functionality of CountingIterator - if ref is None: - assert itr is None - ref = list(range(10)) - itr = iterators.CountingIterator(ref) - else: - assert len(ref) == 10 - assert itr is not None - - self.assertTrue(itr.has_next()) - self.assertEqual(itr.n, 0) - self.assertEqual(next(itr), ref[0]) - self.assertEqual(itr.n, 1) - self.assertEqual(next(itr), ref[1]) - self.assertEqual(itr.n, 2) - itr.skip(3) - self.assertEqual(itr.n, 5) - self.assertEqual(next(itr), ref[5]) - itr.skip(2) - self.assertEqual(itr.n, 8) - self.assertEqual(list(itr), [ref[8], ref[9]]) - self.assertFalse(itr.has_next()) - - def test_counting_iterator_length_mismatch(self): - ref = list(range(10)) - # When the underlying iterable is longer than the CountingIterator, - # the remaining items in the iterable should be ignored - itr = iterators.CountingIterator(ref, total=8) - self.assertEqual(list(itr), ref[:8]) - # When the underlying iterable is shorter than the CountingIterator, - # raise an IndexError when the underlying iterable is exhausted - itr = iterators.CountingIterator(ref, total=12) - self.assertRaises(IndexError, list, itr) - - def test_counting_iterator_take(self): - # Test the "take" method of CountingIterator - ref = list(range(10)) - itr = iterators.CountingIterator(ref) - itr.take(5) - self.assertEqual(len(itr), len(list(iter(itr)))) - self.assertEqual(len(itr), 5) - - itr = iterators.CountingIterator(ref) - itr.take(5) - self.assertEqual(next(itr), ref[0]) - self.assertEqual(next(itr), ref[1]) - itr.skip(2) - self.assertEqual(next(itr), ref[4]) - self.assertFalse(itr.has_next()) - - def test_grouped_iterator(self): - # test correctness - x = list(range(10)) - itr = iterators.GroupedIterator(x, 1) - self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]) - itr = iterators.GroupedIterator(x, 4) - self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]) - itr = iterators.GroupedIterator(x, 5) - self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) - - # test the GroupIterator also works correctly as a CountingIterator - x = list(range(30)) - ref = list(iterators.GroupedIterator(x, 3)) - itr = iterators.GroupedIterator(x, 3) - self.test_counting_iterator_index(ref, itr) - - def test_sharded_iterator(self): - # test correctness - x = list(range(10)) - itr = 
iterators.ShardedIterator(x, num_shards=1, shard_id=0) - self.assertEqual(list(itr), x) - itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0) - self.assertEqual(list(itr), [0, 2, 4, 6, 8]) - itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1) - self.assertEqual(list(itr), [1, 3, 5, 7, 9]) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) - self.assertEqual(list(itr), [0, 3, 6, 9]) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1) - self.assertEqual(list(itr), [1, 4, 7, None]) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2) - self.assertEqual(list(itr), [2, 5, 8, None]) - - # test CountingIterator functionality - x = list(range(30)) - ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0)) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) - self.test_counting_iterator_index(ref, itr) - - def test_counting_iterator_buffered_iterator_take(self): - ref = list(range(10)) - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr) - itr.take(5) - self.assertEqual(len(itr), len(list(iter(itr)))) - self.assertEqual(len(itr), 5) - - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr) - itr.take(5) - self.assertEqual(len(buffered_itr), 5) - self.assertEqual(len(list(iter(buffered_itr))), 5) - - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr) - itr.take(5) - self.assertEqual(next(itr), ref[0]) - self.assertEqual(next(itr), ref[1]) - itr.skip(2) - self.assertEqual(next(itr), ref[4]) - self.assertFalse(itr.has_next()) - self.assertRaises(StopIteration, next, buffered_itr) - - ref = list(range(4, 10)) - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr, start=4) - itr.take(5) - self.assertEqual(len(itr), 5) - self.assertEqual(len(buffered_itr), 1) - self.assertEqual(next(itr), ref[0]) - self.assertFalse(itr.has_next()) - self.assertRaises(StopIteration, next, buffered_itr) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py deleted file mode 100644 index 5f0d70fdad92ba4f554d971710b60f2f9e8d9298..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py +++ /dev/null @@ -1,18 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Defines the set of symbols used in text input to the model. - -The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' -from . import cmudict - -_pad = '_' -_punctuation = '!\'(),.:;? 
' -_special = '-' -_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' - -# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): -_arpabet = ['@' + s for s in cmudict.valid_symbols] - -# Export all symbols: -symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py deleted file mode 100644 index e21144a88e0038c2f35711333a40315613004256..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from typing import Optional - -import torch - -from . import FairseqDataset - - -class TransformEosLangPairDataset(FairseqDataset): - """A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on - collated samples of language pair dataset. - - Note that the transformation is applied in :func:`collater`. - - Args: - dataset (~fairseq.data.FairseqDataset): dataset that collates sample into - LanguagePairDataset schema - src_eos (int): original source end-of-sentence symbol index to be replaced - new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol - tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced - new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the - beginning of 'prev_output_tokens' - """ - - def __init__( - self, - dataset: FairseqDataset, - src_eos: int, - new_src_eos: Optional[int] = None, - tgt_bos: Optional[int] = None, - new_tgt_bos: Optional[int] = None, - ): - self.dataset = dataset - self.src_eos = src_eos - self.new_src_eos = new_src_eos - self.tgt_bos = tgt_bos - self.new_tgt_bos = new_tgt_bos - - def __getitem__(self, index): - return self.dataset[index] - - def __len__(self): - return len(self.dataset) - - def collater(self, samples, **extra_args): - samples = self.dataset.collater(samples, **extra_args) - if len(samples) == 0: - return samples - - if 'net_input' not in samples: - return samples - - if self.new_src_eos is not None: - if self.dataset.left_pad_source: - assert ( - samples["net_input"]["src_tokens"][:, -1] != self.src_eos - ).sum() == 0 - samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos - else: - eos_idx = samples["net_input"]["src_lengths"] - 1 - assert ( - samples["net_input"]["src_tokens"][ - torch.arange(eos_idx.size(0)), eos_idx - ] - != self.src_eos - ).sum() == 0 - eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1) - samples["net_input"]["src_tokens"].scatter_( - 1, eos_idx, self.new_src_eos - ) - - if ( - self.new_tgt_bos is not None - and "prev_output_tokens" in samples["net_input"] - ): - if self.dataset.left_pad_target: - # TODO: support different padding direction on target side - raise NotImplementedError( - "TransformEosLangPairDataset does not implement --left-pad-target True option" - ) - else: - assert ( - samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos - ).sum() == 0 - samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos - - return samples - - def num_tokens(self, index): - return self.dataset.num_tokens(index) 
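-    # size/sizes/ordered_indices/prefetch below, like num_tokens above, simply delegate to the -    # wrapped dataset; only collater() changes behaviour by rewriting the eos/bos symbols.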
- - def size(self, index): - return self.dataset.size(index) - - @property - def sizes(self): - # dataset.sizes can be a dynamically computed sizes: - return self.dataset.sizes - - def ordered_indices(self): - return self.dataset.ordered_indices() - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - return self.dataset.prefetch(indices) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/transformer_lm.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/transformer_lm.py deleted file mode 100644 index eedd5151ba5b1a7050b37639023cf8a158fae8d4..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/transformer_lm.py +++ /dev/null @@ -1,545 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from dataclasses import dataclass, field -from typing import Optional - -from fairseq import options, utils -from fairseq.dataclass import ChoiceEnum, FairseqDataclass -from fairseq.models import ( - FairseqLanguageModel, - register_model, - register_model_architecture, -) -from fairseq.models.transformer import ( - DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder -) -from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder -from fairseq.utils import safe_getattr, safe_hasattr -from omegaconf import II - - -DEFAULT_MAX_TARGET_POSITIONS = 1024 - - -@dataclass -class TransformerLanguageModelConfig(FairseqDataclass): - activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( - default="relu", metadata={"help": "activation function to use"} - ) - dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) - attention_dropout: float = field( - default=0.0, metadata={"help": "dropout probability for attention weights"} - ) - activation_dropout: float = field( - default=0.0, metadata={"help": "dropout probability after activation in FFN."} - ) - relu_dropout: float = field( - default=0.0, metadata={"help": "dropout probability after activation in FFN."} - ) - decoder_embed_dim: int = field( - default=512, metadata={"help": "decoder embedding dimension"} - ) - decoder_output_dim: int = field( - default=512, metadata={"help": "decoder output dimension"} - ) - decoder_input_dim: int = field( - default=512, metadata={"help": "decoder input dimension"} - ) - decoder_ffn_embed_dim: int = field( - default=2048, metadata={"help": "decoder embedding dimension for FFN"} - ) - decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) - decoder_attention_heads: int = field( - default=8, metadata={"help": "num decoder attention heads"} - ) - decoder_normalize_before: bool = field( - default=False, metadata={"help": "apply layernorm before each decoder block"} - ) - no_decoder_final_norm: bool = field( - default=False, - metadata={"help": "don't add an extra layernorm after the last decoder block"}, - ) - adaptive_softmax_cutoff: Optional[str] = field( - default=None, - metadata={ - "help": "comma separated list of adaptive softmax cutoff points. 
" - "Must be used with adaptive_loss criterion" - }, - ) - adaptive_softmax_dropout: float = field( - default=0, - metadata={"help": "sets adaptive softmax dropout for the tail projections"}, - ) - adaptive_softmax_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - no_token_positional_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, disables positional embeddings (outside self attention)" - }, - ) - share_decoder_input_output_embed: bool = field( - default=False, metadata={"help": "share decoder input and output embeddings"} - ) - character_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, uses character embedding convolutions to produce token embeddings" - }, - ) - character_filters: str = field( - default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", - metadata={"help": "size of character embeddings"}, - ) - character_embedding_dim: int = field( - default=4, metadata={"help": "size of character embeddings"} - ) - char_embedder_highway_layers: int = field( - default=2, - metadata={"help": "number of highway layers for character token embeddder"}, - ) - adaptive_input: bool = field( - default=False, metadata={"help": "if set, uses adaptive input"} - ) - adaptive_input_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - adaptive_input_cutoff: Optional[str] = field( - default=None, - metadata={"help": "comma separated list of adaptive input cutoff points."}, - ) - tie_adaptive_weights: bool = field( - default=False, - metadata={ - "help": "if set, ties the weights of adaptive softmax and adaptive input" - }, - ) - tie_adaptive_proj: bool = field( - default=False, - metadata={ - "help": "if set, ties the projection weights of adaptive softmax and adaptive input" - }, - ) - decoder_learned_pos: bool = field( - default=False, - metadata={"help": "use learned positional embeddings in the decoder"}, - ) - layernorm_embedding: bool = field( - default=False, metadata={"help": "add layernorm to embedding"} - ) - no_scale_embedding: bool = field( - default=False, metadata={"help": "if True, dont scale embeddings"} - ) - checkpoint_activations: bool = field( - default=False, metadata={"help": "checkpoint activations at each layer"} - ) - offload_activations: bool = field( - default=False, - metadata={"help": "move checkpointed activations to CPU after they are used."}, - ) - # config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) - decoder_layerdrop: float = field( - default=0.0, metadata={"help": "LayerDrop probability for decoder"} - ) - decoder_layers_to_keep: Optional[str] = field( - default=None, - metadata={ - "help": "which layers to *keep* when pruning as a comma-separated list" - }, - ) - # config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) - quant_noise_pq: float = field( - default=0.0, - metadata={"help": "iterative PQ quantization noise at training time"}, - ) - quant_noise_pq_block_size: int = field( - default=8, - metadata={"help": "block size of quantization noise at training time"}, - ) - quant_noise_scalar: float = field( - default=0.0, - metadata={ - "help": "scalar quantization noise and scalar quantization at training time" - }, - ) - # config for Fully Sharded Data Parallel (FSDP) training - min_params_to_wrap: int = field( - default=DEFAULT_MIN_PARAMS_TO_WRAP, - metadata={ - "help": ( - "minimum number of params for a layer to be wrapped with 
FSDP() when " - "training with --ddp-backend=fully_sharded. Smaller values will " - "improve memory efficiency, but may make torch.distributed " - "communication less efficient due to smaller input sizes. This option " - "is set to 0 (i.e., always wrap) when --checkpoint-activations or " - "--offload-activations are passed." - ) - } - ) - # config for "BASE Layers: Simplifying Training of Large, Sparse Models" - base_layers: Optional[int] = field( - default=0, metadata={"help": "number of BASE layers in total"} - ) - base_sublayers: Optional[int] = field( - default=1, metadata={"help": "number of sublayers in each BASE layer"} - ) - base_shuffle: Optional[int] = field( - default=1, metadata={"help": "shuffle tokens between workers before computing assignment"} - ) - # options from other parts of the config - add_bos_token: bool = II("task.add_bos_token") - tokens_per_sample: int = II("task.tokens_per_sample") - max_target_positions: Optional[int] = II("task.max_target_positions") - tpu: bool = II("common.tpu") - - -@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig) -class TransformerLanguageModel(FairseqLanguageModel): - @classmethod - def hub_models(cls): - def moses_fastbpe(path): - return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"} - - def spm(path): - return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"} - - return { - "transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2", - "transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2", - "transformer_lm.wmt19.en": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2" - ), - "transformer_lm.wmt19.de": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2" - ), - "transformer_lm.wmt19.ru": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2" - ), - "transformer_lm.wmt20.en": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz" - ), - "transformer_lm.wmt20.ta": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz" - ), - "transformer_lm.wmt20.iu.news": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz" - ), - "transformer_lm.wmt20.iu.nh": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz" - ), - } - - def __init__(self, decoder): - super().__init__(decoder) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - if args.decoder_layers_to_keep: - args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) - - if safe_getattr(args, "max_target_positions", None) is None: - args.max_target_positions = safe_getattr( - args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS - ) - - if args.character_embeddings: - embed_tokens = CharacterTokenEmbedder( - task.source_dictionary, - eval(args.character_filters), - args.character_embedding_dim, - args.decoder_embed_dim, - args.char_embedder_highway_layers, - ) - elif args.adaptive_input: - embed_tokens = AdaptiveInput( - len(task.source_dictionary), - task.source_dictionary.pad(), - args.decoder_input_dim, - args.adaptive_input_factor, - args.decoder_embed_dim, - options.eval_str_list(args.adaptive_input_cutoff, type=int), - args.quant_noise_pq, - args.quant_noise_pq_block_size, - ) - else: - embed_tokens = cls.build_embedding( - args, task.source_dictionary, args.decoder_input_dim - ) - - if 
args.tie_adaptive_weights: - assert args.adaptive_input - assert args.adaptive_input_factor == args.adaptive_softmax_factor - assert ( - args.adaptive_softmax_cutoff == args.adaptive_input_cutoff - ), "{} != {}".format( - args.adaptive_softmax_cutoff, args.adaptive_input_cutoff - ) - assert args.decoder_input_dim == args.decoder_output_dim - - decoder = TransformerDecoder( - args, task.target_dictionary, embed_tokens, no_encoder_attn=True - ) - return cls(decoder) - - @classmethod - def build_embedding(cls, args, dictionary, embed_dim, path=None): - embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad()) - return embed_tokens - - -def base_lm_architecture(args): - # backward compatibility for older model checkpoints - if safe_hasattr(args, "no_tie_adaptive_proj"): - # previous models defined --no-tie-adaptive-proj, so use the existence of - # that option to determine if this is an "old" model checkpoint - args.no_decoder_final_norm = True # old models always set this to True - if args.no_tie_adaptive_proj is False: - args.tie_adaptive_proj = True - if safe_hasattr(args, "decoder_final_norm"): - args.no_decoder_final_norm = not args.decoder_final_norm - - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) - - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) - args.decoder_layers = safe_getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) - args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) - args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) - args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) - args.activation_fn = safe_getattr(args, "activation_fn", "relu") - - args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) - args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) - args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) - args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) - args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) - - args.base_layers = safe_getattr(args, "base_layers", 0) - args.base_sublayers = safe_getattr(args, "base_sublayers", 1) - args.base_shuffle = safe_getattr(args, "base_shuffle", False) - - args.add_bos_token = safe_getattr(args, "add_bos_token", False) - args.no_token_positional_embeddings = safe_getattr( - args, "no_token_positional_embeddings", False - ) - args.share_decoder_input_output_embed = safe_getattr( - args, "share_decoder_input_output_embed", False - ) - args.character_embeddings = safe_getattr(args, "character_embeddings", False) - - args.decoder_output_dim = safe_getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # Model training is not stable without this - args.decoder_normalize_before = True - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) - - args.adaptive_input = safe_getattr(args, "adaptive_input", False) - args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) - args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) - - 
args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) - args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) - - args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) - args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) - args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) - args.offload_activations = safe_getattr(args, "offload_activations", False) - if args.offload_activations: - args.checkpoint_activations = True - - -@register_model_architecture("transformer_lm", "transformer_lm_big") -def transformer_lm_big(args): - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_wiki103") -@register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103") -def transformer_lm_baevski_wiki103(args): - args.decoder_layers = safe_getattr(args, "decoder_layers", 16) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) - args.dropout = safe_getattr(args, "dropout", 0.3) - args.adaptive_input = safe_getattr(args, "adaptive_input", True) - args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) - args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000") - args.adaptive_softmax_cutoff = safe_getattr( - args, "adaptive_softmax_cutoff", "20000,60000" - ) - args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) - args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) - transformer_lm_big(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gbw") -@register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw") -def transformer_lm_baevski_gbw(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) - transformer_lm_big(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt") -def transformer_lm_gpt(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_small") -def transformer_lm_gpt2_small(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_layers = 
safe_getattr(args, "decoder_layers", 24) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny") -def transformer_lm_gpt2_tiny(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) - args.decoder_layers = safe_getattr(args, "decoder_layers", 2) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium") -def transformer_lm_gpt2_medium(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) - args.decoder_layers = safe_getattr(args, "decoder_layers", 36) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_big") -def transformer_lm_gpt2_big(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) - args.decoder_layers = safe_getattr(args, "decoder_layers", 48) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -def base_gpt3_architecture(args): - args.decoder_input_dim = args.decoder_embed_dim - args.decoder_output_dim = args.decoder_embed_dim - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) - # GPT-3 used learned positional embeddings, rather than sinusoidal - args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) - args.dropout = safe_getattr(args, "dropout", 0.0) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - args.share_decoder_input_output_embed = True - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_small") -def transformer_lm_gpt3_small(args): - # 125M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium") -def transformer_lm_gpt3_medium(args): - # 350M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, 
"decoder_embed_dim", 1024) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_large") -def transformer_lm_gpt3_large(args): - # 760M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl") -def transformer_lm_gpt3_xl(args): - # 1.3B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7") -def transformer_lm_gpt3_2_7(args): - # 2.7B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 32) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7") -def transformer_lm_gpt3_6_7(args): - # 6.7B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 32) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_13") -def transformer_lm_gpt3_13(args): - # 13B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 40) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_175") -def transformer_lm_gpt3_175(args): - # 175B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 96) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) - base_gpt3_architecture(args) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/speech_recognition/test_collaters.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/speech_recognition/test_collaters.py deleted file mode 100644 index 6a5029a48faea2426d7a0277655a2c7c08c1d16c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/speech_recognition/test_collaters.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import numpy as np -import torch -from examples.speech_recognition.data.collaters import Seq2SeqCollater - - -class TestSeq2SeqCollator(unittest.TestCase): - def test_collate(self): - - eos_idx = 1 - pad_idx = 0 - collater = Seq2SeqCollater( - feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx - ) - - # 2 frames in the first sample and 3 frames in the second one - frames1 = np.array([[7, 8], [9, 10]]) - frames2 = np.array([[1, 2], [3, 4], [5, 6]]) - target1 = np.array([4, 2, 3, eos_idx]) - target2 = np.array([3, 2, eos_idx]) - sample1 = {"id": 0, "data": [frames1, target1]} - sample2 = {"id": 1, "data": [frames2, target2]} - batch = collater.collate([sample1, sample2]) - - # collate sort inputs by frame's length before creating the batch - self.assertTensorEqual(batch["id"], torch.tensor([1, 0])) - self.assertEqual(batch["ntokens"], 7) - self.assertTensorEqual( - batch["net_input"]["src_tokens"], - torch.tensor( - [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]] - ), - ) - self.assertTensorEqual( - batch["net_input"]["prev_output_tokens"], - torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]), - ) - self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2])) - self.assertTensorEqual( - batch["target"], - torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]), - ) - self.assertEqual(batch["nsentences"], 2) - - def assertTensorEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertEqual(t1.ne(t2).long().sum(), 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py deleted file mode 100644 index 70d0016663b7d0b90033f4eb301b527f2c92a3f8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
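-# Dumps MFCC features (with first- and second-order deltas appended) for one shard of a
-# {split}.tsv manifest; in the HuBERT simple_kmeans pipeline these features feed the
-# first round of k-means clustering.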
- -import logging -import os -import sys - -import soundfile as sf -import torch -import torchaudio - -from feature_utils import get_path_iterator, dump_feature - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_mfcc_feature") - - -class MfccFeatureReader(object): - def __init__(self, sample_rate): - self.sample_rate = sample_rate - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - assert sr == self.sample_rate, sr - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - if ref_len is not None and abs(ref_len - len(wav)) > 160: - logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, path, ref_len=None): - x = self.read_audio(path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float() - x = x.view(1, -1) - - mfccs = torchaudio.compliance.kaldi.mfcc( - waveform=x, - sample_frequency=self.sample_rate, - use_energy=False, - ) # (time, freq) - mfccs = mfccs.transpose(0, 1) # (freq, time) - deltas = torchaudio.functional.compute_deltas(mfccs) - ddeltas = torchaudio.functional.compute_deltas(deltas) - concat = torch.cat([mfccs, deltas, ddeltas], dim=0) - concat = concat.transpose(0, 1).contiguous() # (freq, time) - return concat - - -def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate): - reader = MfccFeatureReader(sample_rate) - generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) - dump_feature(reader, generator, num, split, nshard, rank, feat_dir) - - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("tsv_dir") - parser.add_argument("split") - parser.add_argument("nshard", type=int) - parser.add_argument("rank", type=int) - parser.add_argument("feat_dir") - parser.add_argument("--sample_rate", type=int, default=16000) - args = parser.parse_args() - logger.info(args) - - main(**vars(args)) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/fast_noisy_channel/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/fast_noisy_channel/README.md deleted file mode 100644 index f2631a8c34d11bdf7d351c6807b6fe415f5715e1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/fast_noisy_channel/README.md +++ /dev/null @@ -1,345 +0,0 @@ -# Language Models not just for Pre-training: Fast Online Neural Noisy Channel Modeling - -## Introduction -- [Yee et al. (2019)](https://www.aclweb.org/anthology/D19-1571.pdf) introduce a simple and effective noisy channel modeling approach for neural machine translation. However, the noisy channel online decoding approach introduced in this paper is too slow to be practical. -- To address this, [Bhosale et al. (2020)](http://www.statmt.org/wmt20/pdf/2020.wmt-1.68.pdf) introduces 3 simple approximations to make this approach very fast and practical without much loss in accuracy. -- This README provides intructions on how to run online decoding or generation with the noisy channel modeling approach, including ways to make it very fast without much loss in accuracy. - -## Noisy Channel Modeling - -[Yee et al. (2019)](https://www.aclweb.org/anthology/D19-1571.pdf) applies the Bayes Rule to predict `P(y|x)`, the probability of the target `y` given the source `x`. 
-```P(y|x) = P(x|y) * P(y) / P(x)``` -- `P(x|y)` predicts the source `x` given the target `y` and is referred to as the **channel model** -- `P(y)` is a **language model** over the target `y` -- `P(x)` is generally not modeled since it is constant for all `y`. - -We use Transformer models to parameterize the direct model `P(y|x)`, the channel model `P(x|y)` and the language model `P(y)`. - -During online decoding with beam search, we generate the top `K2` candidates per beam and score them with the following linear combination of the channel model, the language model as well as the direct model scores. - -```(1 / t) * log(P(y|x) + (1 / s) * ( λ1 * log(P(x|y)) + λ2 * log(P(y) ) )``` -- `t` - Target Prefix Length -- `s` - Source Length -- `λ1` - Channel Model Weight -- `λ2` - Language Model Weight - -The top `beam_size` candidates based on the above combined scores are chosen to continue the beams in beam search. In beam search with a direct model alone, the scores from the direct model `P(y|x)` are used to choose the top candidates in beam search. - -This framework provides a great way to utlize strong target language models trained on large amounts of unlabeled data. Language models can prefer targets unrelated to the source, so we also need a channel model whose role is to ensure that the target preferred by the language model also translates back to the source. - -### Training Translation Models and Language Models - -For training Transformer models in fairseq for machine translation, refer to instructions [here](https://github.com/pytorch/fairseq/tree/main/examples/translation) - -For training Transformer models in fairseq for language modeling, refer to instructions [here](https://github.com/pytorch/fairseq/tree/main/examples/language_model) - -### Generation with Language Model for German-English translation with fairseq - -Here are instructions to generate using a direct model and a target-side language model. - -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt - -k2=10 -lenpen=0.16 -lm_wt=0.14 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --k2 ${k2} \ - --combine-method lm_only \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --gen-subset valid \ - --remove-bpe \ - --fp16 \ - --batch-size 10 -``` -### Noisy Channel Generation for German-English translation with fairseq - -Here are instructions for noisy channel generation with a direct model, channel model and language model as explained in section [Noisy Channel Modeling](#noisy-channel-modeling). 
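-Before running the commands below, it can help to see the candidate score from the [Noisy Channel Modeling](#noisy-channel-modeling) section written out in code. The following is only an illustrative sketch with hypothetical inputs, not the implementation in `examples/fast_noisy_channel`, and it assumes the reading of the formula in which `1/t` scales the direct-model term while `1/s` scales the weighted channel and language-model terms.
-
-```python
-def combined_score(direct_lprob, channel_lprob, lm_lprob,
-                   tgt_prefix_len, src_len, ch_weight, lm_weight):
-    """Score used to rank the top-k2 candidate continuations of a beam.
-
-    direct_lprob  : log P(y|x) from the direct model
-    channel_lprob : log P(x|y) from the channel model
-    lm_lprob      : log P(y) from the target-side language model
-    ch_weight     : λ1 (passed as --ch-wt)
-    lm_weight     : λ2 (passed as --lm-wt)
-    """
-    direct_term = direct_lprob / tgt_prefix_len                    # (1/t) * log P(y|x)
-    fused_term = ch_weight * channel_lprob + lm_weight * lm_lprob  # λ1*log P(x|y) + λ2*log P(y)
-    return direct_term + fused_term / src_len                      # + (1/s) * (...)
-
-
-# Example: score one candidate continuation with λ1 = 0.30 and λ2 = 0.50
-print(combined_score(-2.1, -15.4, -12.8, tgt_prefix_len=4, src_len=20,
-                     ch_weight=0.30, lm_weight=0.50))
-```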
- -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -ch_model=en_de.big.seed4.pt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed4.pt -O ${ch_model} - -k2=10 -lenpen=0.21 -lm_wt=0.50 -bw_wt=0.30 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --channel-model ${ch_model} \ - --k2 ${k2} \ - --combine-method noisy_channel \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --ch-wt ${bw_wt} \ - --gen-subset test \ - --remove-bpe \ - --fp16 \ - --batch-size 1 -``` -## Fast Noisy Channel Modeling - -[Bhosale et al. (2020)](http://www.statmt.org/wmt20/pdf/2020.wmt-1.68.pdf) introduces 3 approximations that speed up online noisy channel decoding - -- Smaller channel models (`Tranformer Base` with 1 encoder and decoder layer each vs. `Transformer Big`) - - This involves training a channel model that is possibly smaller and less accurate in terms of BLEU than a channel model of the same size as the direct model. - - Since the role of the channel model is mainly to assign low scores to generations from the language model if they don't translate back to the source, we may not need the most accurate channel model for this purpose. -- Smaller output vocabulary size for the channel model (~30,000 -> ~1000) - - The channel model doesn't need to score the full output vocabulary, it just needs to score the source tokens, which are completely known. - - This is specified using the arguments `--channel-scoring-type src_vocab --top-k-vocab 500` - - This means that the output vocabulary for the channel model will be the source tokens for all examples in the batch and the top-K most frequent tokens in the vocabulary - - This reduces the memory consumption needed to store channel model scores significantly -- Smaller number of candidates (`k2`) scored per beam - - This is specified by reducing the argument `--k2` - - -### Fast Noisy Channel Generation for German-English translation with fairseq - -Here are instructions for **fast** noisy channel generation with a direct model, channel model and language model as explained in section [Fast Noisy Channel Modeling](#fast-noisy-channel-modeling). The main differences are that we use a smaller channel model, reduce `--k2`, set `--channel-scoring-type src_vocab --top-k-vocab 500` and increase the `--batch-size`. 
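-To make the `--channel-scoring-type src_vocab --top-k-vocab 500` approximation concrete, the toy sketch below shows the idea of keeping channel-model scores only for the batch's source tokens plus the top-K most frequent vocabulary types. It is a schematic illustration with made-up tensor names, not the code in `examples/fast_noisy_channel`, and it assumes a frequency-sorted dictionary in which the lowest ids are the most frequent types.
-
-```python
-import torch
-
-
-def restrict_channel_vocab(channel_logits, src_tokens, top_k_vocab=500):
-    """Keep channel-model scores only for source tokens + the K most frequent types.
-
-    channel_logits : (batch, tgt_len, vocab) raw channel-model logits
-    src_tokens     : (batch, src_len) ids the channel model actually has to score
-    """
-    frequent_ids = torch.arange(top_k_vocab, device=channel_logits.device)
-    keep_ids = torch.unique(torch.cat([src_tokens.flatten(), frequent_ids]))
-    # Storing (and normalizing over) only this reduced slice is what cuts the
-    # memory needed for channel-model scores.
-    reduced = channel_logits[..., keep_ids]
-    return torch.log_softmax(reduced, dim=-1), keep_ids
-
-
-# Example with dummy shapes: a 32k vocabulary reduced to roughly 520 entries
-logits = torch.randn(2, 7, 32000)
-src = torch.randint(500, 32000, (2, 11))
-reduced_scores, kept = restrict_channel_vocab(logits, src)
-print(reduced_scores.shape, kept.numel())
-```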
- -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -small_ch_model=en_de.base_1_1.seed4.pt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed4.pt -O ${small_ch_model} - -k2=3 -lenpen=0.23 -lm_wt=0.58 -bw_wt=0.26 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --channel-model ${small_ch_model} \ - --k2 ${k2} \ - --combine-method noisy_channel \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --ch-wt ${bw_wt} \ - --gen-subset test \ - --remove-bpe \ - --fp16 \ - --batch-size 50 \ - --channel-scoring-type src_vocab --top-k-vocab 500 -``` - -## Test Data Preprocessing - -For preprocessing and binarizing the test sets for Romanian-English and German-English translation, we use the following script - - -```sh -FAIRSEQ=/path/to/fairseq -cd $FAIRSEQ -SCRIPTS=$FAIRSEQ/mosesdecoder/scripts -if [ ! -d "${SCRIPTS}" ]; then - echo 'Cloning Moses github repository (for tokenization scripts)...' - git clone https://github.com/moses-smt/mosesdecoder.git -fi -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -NORMALIZE=$SCRIPTS/tokenizer/normalize-punctuation.perl - -s=de -t=en -test=wmt18 - -mkdir -p data_dir - -# Tokenization -if [ $s == "ro" ] ; then - # Note: Get normalise-romanian.py and remove-diacritics.py from - # https://github.com/rsennrich/wmt16-scripts/tree/master/preprocess - sacrebleu -t $test -l $s-$t --echo src | \ - $NORMALIZE -l $s | \ - python normalise-romanian.py | \ - python remove-diacritics.py | \ - $TOKENIZER -l $s -a -q > data_dir/$test.$s-$t.$s -else - sacrebleu -t $test -l $s-$t --echo src | perl $NORMALIZE -l $s | perl $TOKENIZER -threads 8 -a -l $s > data_dir/$test.$s-$t.$s -fi - -sacrebleu -t $test -l $s-$t --echo ref | perl $NORMALIZE -l $t | perl $TOKENIZER -threads 8 -a -l $t > data_dir/$test.$s-$t.$t - - -# Applying BPE -src_bpe_code=/path/to/source/language/bpe/code -tgt_bpe_code=/path/to/target/language/bpe/code -src_dict=/path/to/source/language/dict -tgt_dict=/path/to/target/language/dict - -FASTBPE=$FAIRSEQ/fastBPE -if [ ! 
-d "${FASTBPE}" ] ; then - git clone https://github.com/glample/fastBPE.git - # Follow compilation instructions at https://github.com/glample/fastBPE - g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast -fi - -${FASTBPE}/fast applybpe data_dir/bpe.$test.$s-$t.$s data_dir/$test.$s-$t.$s ${src_bpe_code} -${FASTBPE}/fast applybpe data_dir/bpe.$test.$s-$t.$s data_dir/$test.$s-$t.$s ${tgt_bpe_code} - -fairseq-preprocess -s $s -t $t \ - --testpref data_dir/bpe.$test.$s-$t \ - --destdir data_dir/binarized \ - --srcdict ${src_dict} \ - --tgtdict ${tgt_dict} -``` - -## Calculating BLEU - -```sh -DETOKENIZER=$SCRIPTS/tokenizer/detokenizer.perl -cat ${generation_output} | grep -P "^H" | sort -V | cut -f 3- | $DETOKENIZER -l $t -q -a | sacrebleu -t $test -l $s-$t -``` - - -## Romanian-English Translation - -The direct and channel models are trained using bitext data (WMT16) combined with backtranslated data (The monolingual data used for backtranslation comes from http://data.statmt.org/rsennrich/wmt16_backtranslations/ (Sennrich et al., 2016c)) - -The backtranslated data is generated using an ensemble of 3 English-Romanian models trained on bitext training data (WMT16) with unrestricted sampling. - -### BPE Codes and Dictionary - -We learn a joint BPE vocabulary of 18K types on the bitext training data which is used for both the source and target. -||Path| -|----------|------| -| BPE Code | [joint_bpe_18k](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/bpe_18k) | -| Dictionary | [dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/dict) | - -### Direct Models -For Ro-En with backtranslation, the direct and channel models use a Transformer-Big architecture. - -| Seed | Model | -|----|----| -| 2 | [ro_en_seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed2.pt) -| 4 | [ro_en_seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed4.pt) -| 6 | [ro_en_seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed6.pt) - -### Channel Models -For channel models, we follow the same steps as for the direct models. But backtranslated data is generated in the opposite direction using [this Romanian monolingual data](http://data.statmt.org/rsennrich/wmt16_backtranslations/). -The best lenpen, LM weight and CH weight are obtained by sweeping over the validation set (wmt16/dev) using beam 5. -| Model Size | Lenpen | LM Weight | CH Weight | Seed 2 | Seed 4 | Seed 6 | -|----|----|----|----|----|----|----| -| `big` | 0.84 | 0.64 | 0.56 | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | -| `base_1_1` | 0.63 | 0.40 | 0.37 | [base_1_1.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed2.pt) | [base_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed4.pt) | [base_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed6.pt) | - -### Language Model -The model is trained on de-duplicated English Newscrawl data from 2007-2018 comprising 186 million sentences or 4.5B words after normalization and tokenization. 
-| | Path | -|----|----| -| `--lm-model` | [transformer_en_lm](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/lm_model/transformer_lm.pt) | -| `--lm-data` | [lm_data](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/lm_model/lm_dict) - -## German-English Translation - -### BPE Codes and Dictionaries - -| | Path| -|----------|------| -| Source BPE Code | [de_bpe_code_24K](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/de_bpe_code_24K) | -| Target BPE Code | [en_bpe_code_24K](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/en_bpe_code_24K) -| Source Dictionary | [de_dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/de_dict) | -| Target Dictionary | [en_dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/en_dict) | - -### Direct Models -We train on WMT’19 training data. Following [Ng et al., 2019](http://statmt.org/wmt19/pdf/53/WMT33.pdf), we apply language identification filtering and remove sentences longer than 250 tokens as well as sentence pairs with a source/target length ratio exceeding 1.5. This results in 26.8M sentence pairs. -We use the Transformer-Big architecture for the direct model. - -| Seed | Model | -|:----:|----| -| 4 | [de_en_seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt) -| 5 | [de_en_seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed5.pt) -| 6 | [de_en_seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed6.pt) - -### Channel Models - -We train on WMT’19 training data. Following [Ng et al., 2019](http://statmt.org/wmt19/pdf/53/WMT33.pdf), we apply language identification filtering and remove sentences longer than 250 tokens as well as sentence pairs with a source/target length ratio exceeding 1.5. This results in 26.8M sentence pairs. 
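-Checkpoints for all channel model sizes are listed in the table below. As a rough stand-alone sketch of the length filtering described above (language identification is a separate step and is not shown), the rule amounts to:
-
-```python
-def keep_pair(src_sentence, tgt_sentence, max_len=250, max_ratio=1.5):
-    """Length and ratio filter in the spirit of the preprocessing described above."""
-    src_len = len(src_sentence.split())
-    tgt_len = len(tgt_sentence.split())
-    if src_len == 0 or tgt_len == 0:
-        return False
-    if src_len > max_len or tgt_len > max_len:
-        return False
-    # drop pairs whose source/target length ratio exceeds 1.5 in either direction
-    return max(src_len / tgt_len, tgt_len / src_len) <= max_ratio
-```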
- -| Model Size | Seed 4 | Seed 5 | Seed 6 | -|----|----|----|----| -| `big` | [big.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed4.pt) | [big.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed5.pt) | [big.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed6.pt) | -| `big_1_1` | [big_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed4.pt) | [big_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed5.pt) | [big_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed6.pt) | -| `base` | [base.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed4.pt) | [base.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed5.pt) | [base.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed6.pt) | -| `base_1_1` | [base_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed4.pt) | [base_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed5.pt) | [base_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed6.pt) | -| `half` | [half.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed4.pt) | [half.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed5.pt) | [half.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed6.pt) | -| `half_1_1` | [half_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed4.pt) | [half_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed5.pt) | [half_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed6.pt) | -| `quarter` | [quarter.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed4.pt) | [quarter.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed5.pt) | [quarter.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed6.pt) | -| `quarter_1_1` | [quarter_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed4.pt) | [quarter_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed5.pt) | [quarter_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed6.pt) | -| `8th` | [8th.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed4.pt) | [8th.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed5.pt) | [8th.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed6.pt) | -| `8th_1_1` | [8th_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed4.pt) | [8th_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed5.pt) | [8th_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed6.pt) | -| `16th` | 
[16th.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed4.pt) | [16th.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed5.pt) | [16th.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed6.pt) | -| `16th_1_1` | [16th_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed4.pt) | [16th_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed5.pt) | [16th_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed6.pt) | - -### Language Model -The model is trained on de-duplicated English Newscrawl data from 2007-2018 comprising 186 million sentences or 4.5B words after normalization and tokenization. -| | Path | -|----|----| -| `--lm-model` | [transformer_en_lm](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt) | -| `--lm-data` | [lm_data](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/) - - -## Citation - -```bibtex -@inproceedings{bhosale2020language, - title={Language Models not just for Pre-training: Fast Online Neural Noisy Channel Modeling}, - author={Shruti Bhosale and Kyra Yee and Sergey Edunov and Michael Auli}, - booktitle={Proceedings of the Fifth Conference on Machine Translation (WMT)}, - year={2020}, -} - -@inproceedings{yee2019simple, - title={Simple and Effective Noisy Channel Modeling for Neural Machine Translation}, - author={Yee, Kyra and Dauphin, Yann and Auli, Michael}, - booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, - pages={5700--5705}, - year={2019} -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/tasks/mm_tasks/refcoco.py b/spaces/OFA-Sys/OFA-vqa/tasks/mm_tasks/refcoco.py deleted file mode 100644 index 31f19d1cd882e08104d930407ae0fedb4b847976..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/tasks/mm_tasks/refcoco.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
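-# OFA refcoco task: the model generates quantized bounding-box coordinates, and the
-# validation metric ("score") is the fraction of predictions whose IoU with the
-# ground-truth region is at least 0.5.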
- -from dataclasses import dataclass, field -import json -import logging -from typing import Optional -from argparse import Namespace - -import torch -from fairseq import metrics -from fairseq.tasks import register_task - -from tasks.ofa_task import OFATask, OFAConfig -from data.mm_data.refcoco_dataset import RefcocoDataset -from data.file_dataset import FileDataset - -logger = logging.getLogger(__name__) - - -@dataclass -class RefcocoConfig(OFAConfig): - # options for reporting BLEU during validation - eval_acc: bool = field( - default=False, metadata={"help": "evaluation with BLEU scores"} - ) - eval_args: Optional[str] = field( - default='{}', - metadata={ - "help": 'generation args for BLUE or CIDEr scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string' - }, - ) - eval_print_samples: bool = field( - default=False, metadata={"help": "print sample generations during validation"} - ) - - max_image_size: int = field( - default=512, metadata={"help": "max image size for normalization"} - ) - scst: bool = field( - default=False, metadata={"help": "Self-critical sequence training"} - ) - scst_args: str = field( - default='{}', - metadata={ - "help": 'generation args for Self-critical sequence training, as JSON string' - }, - ) - - -@register_task("refcoco", dataclass=RefcocoConfig) -class RefcocoTask(OFATask): - def __init__(self, cfg: RefcocoConfig, src_dict, tgt_dict): - super().__init__(cfg, src_dict, tgt_dict) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - paths = self.cfg.data.split(',') - assert len(paths) > 0 - - if split == 'train': - file_path = paths[(epoch - 1) % (len(paths) - 1)] - else: - file_path = paths[-1] - dataset = FileDataset(file_path, self.cfg.selected_cols) - - self.datasets[split] = RefcocoDataset( - split, - dataset, - self.bpe, - self.src_dict, - self.tgt_dict, - max_src_length=self.cfg.max_src_length, - max_tgt_length=self.cfg.max_tgt_length, - patch_image_size=self.cfg.patch_image_size, - imagenet_default_mean_and_std=self.cfg.imagenet_default_mean_and_std, - num_bins=self.cfg.num_bins, - max_image_size=self.cfg.max_image_size - ) - - def build_model(self, cfg): - model = super().build_model(cfg) - if self.cfg.eval_acc: - gen_args = json.loads(self.cfg.eval_args) - self.sequence_generator = self.build_generator( - [model], Namespace(**gen_args) - ) - if self.cfg.scst: - scst_args = json.loads(self.cfg.scst_args) - self.scst_generator = self.build_generator( - [model], Namespace(**scst_args) - ) - - return model - - def _calculate_ap_score(self, hyps, refs, thresh=0.5): - interacts = torch.cat( - [torch.where(hyps[:, :2] < refs[:, :2], refs[:, :2], hyps[:, :2]), - torch.where(hyps[:, 2:] < refs[:, 2:], hyps[:, 2:], refs[:, 2:])], - dim=1 - ) - area_predictions = (hyps[:, 2] - hyps[:, 0]) * (hyps[:, 3] - hyps[:, 1]) - area_targets = (refs[:, 2] - refs[:, 0]) * (refs[:, 3] - refs[:, 1]) - interacts_w = interacts[:, 2] - interacts[:, 0] - interacts_h = interacts[:, 3] - interacts[:, 1] - area_interacts = interacts_w * interacts_h - ious = area_interacts / (area_predictions + area_targets - area_interacts + 1e-6) - return ((ious >= thresh) & (interacts_w > 0) & (interacts_h > 0)).float() - - def valid_step(self, sample, model, criterion): - loss, sample_size, logging_output = criterion(model, sample) - - model.eval() - if self.cfg.eval_acc: - hyps, refs = self._inference(self.sequence_generator, sample, model) - hyps = hyps / (self.cfg.num_bins - 1) * self.cfg.max_image_size - refs = refs / (self.cfg.num_bins - 1) * 
self.cfg.max_image_size - hyps[:, ::2] /= sample['w_resize_ratios'].unsqueeze(1) - hyps[:, 1::2] /= sample['h_resize_ratios'].unsqueeze(1) - refs[:, ::2] /= sample['w_resize_ratios'].unsqueeze(1) - refs[:, 1::2] /= sample['h_resize_ratios'].unsqueeze(1) - - # scores = self._calculate_ap_score(hyps, refs) - scores = self._calculate_ap_score(hyps, sample['region_coords'].float()) - logging_output["_score_sum"] = scores.sum().item() - logging_output["_score_cnt"] = scores.size(0) - - return loss, sample_size, logging_output - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - - def sum_logs(key): - import torch - result = sum(log.get(key, 0) for log in logging_outputs) - if torch.is_tensor(result): - result = result.cpu() - return result - - def compute_score(meters): - score = meters["_score_sum"].sum / meters["_score_cnt"].sum - score = score if isinstance(score, float) else score.item() - return round(score, 4) - - if sum_logs("_score_cnt") > 0: - metrics.log_scalar("_score_sum", sum_logs("_score_sum")) - metrics.log_scalar("_score_cnt", sum_logs("_score_cnt")) - metrics.log_derived("score", compute_score) - - def _inference(self, generator, sample, model): - gen_out = self.inference_step(generator, [model], sample) - hyps, refs = [], [] - for i in range(len(gen_out)): - hyps.append(gen_out[i][0]["tokens"][:-1] - len(self.src_dict) + self.cfg.num_bins) - refs.append(sample["target"][i][:-1] - len(self.src_dict) + self.cfg.num_bins) - if self.cfg.eval_print_samples: - logger.info("example hypothesis: ", hyps[0]) - logger.info("example reference: ", refs[0]) - - return torch.stack(hyps, dim=0), torch.stack(refs, dim=0) diff --git a/spaces/OpenGVLab/DragGAN/stylegan2/lpips/pretrained_networks.py b/spaces/OpenGVLab/DragGAN/stylegan2/lpips/pretrained_networks.py deleted file mode 100644 index 077a24419364fdb5ae2f697f73e28615adae75a7..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/DragGAN/stylegan2/lpips/pretrained_networks.py +++ /dev/null @@ -1,181 +0,0 @@ -from collections import namedtuple -import torch -from torchvision import models as tv -from IPython import embed - -class squeezenet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(squeezenet, self).__init__() - pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.slice6 = torch.nn.Sequential() - self.slice7 = torch.nn.Sequential() - self.N_slices = 7 - for x in range(2): - self.slice1.add_module(str(x), pretrained_features[x]) - for x in range(2,5): - self.slice2.add_module(str(x), pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), pretrained_features[x]) - for x in range(10, 11): - self.slice5.add_module(str(x), pretrained_features[x]) - for x in range(11, 12): - self.slice6.add_module(str(x), pretrained_features[x]) - for x in range(12, 13): - self.slice7.add_module(str(x), pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - h = self.slice6(h) - 
h_relu6 = h - h = self.slice7(h) - h_relu7 = h - vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7']) - out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7) - - return out - - -class alexnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(alexnet, self).__init__() - alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(2): - self.slice1.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(10, 12): - self.slice5.add_module(str(x), alexnet_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) - out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) - - return out - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(vgg16, self).__init__() - vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - - return out - - - -class resnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True, num=18): - super(resnet, self).__init__() - if(num==18): - self.net = tv.resnet18(pretrained=pretrained) - elif(num==34): - self.net = tv.resnet34(pretrained=pretrained) - elif(num==50): - self.net = tv.resnet50(pretrained=pretrained) - elif(num==101): - self.net = tv.resnet101(pretrained=pretrained) - elif(num==152): - self.net = tv.resnet152(pretrained=pretrained) - self.N_slices = 5 - - self.conv1 = self.net.conv1 - self.bn1 = self.net.bn1 - self.relu = self.net.relu - self.maxpool = self.net.maxpool - self.layer1 = self.net.layer1 - self.layer2 = 
self.net.layer2 - self.layer3 = self.net.layer3 - self.layer4 = self.net.layer4 - - def forward(self, X): - h = self.conv1(X) - h = self.bn1(h) - h = self.relu(h) - h_relu1 = h - h = self.maxpool(h) - h = self.layer1(h) - h_conv2 = h - h = self.layer2(h) - h_conv3 = h - h = self.layer3(h) - h_conv4 = h - h = self.layer4(h) - h_conv5 = h - - outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5']) - out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) - - return out diff --git a/spaces/OptimalScale/Robin-7b/lmflow/models/text_regression_model.py b/spaces/OptimalScale/Robin-7b/lmflow/models/text_regression_model.py deleted file mode 100644 index 285cbc4f4c515adf742ec4f5bc17a43f8e5e3057..0000000000000000000000000000000000000000 --- a/spaces/OptimalScale/Robin-7b/lmflow/models/text_regression_model.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -""" -A model maps "text_only" data to float. -""" - -from lmflow.models.regression_model import RegressionModel -from lmflow.datasets.dataset import Dataset - - -class TextRegressionModel(RegressionModel): - r""" - Initializes a TextRegressionModel instance. - - Parameters - ------------ - - model_args : - Model arguments such as model name, path, revision, etc. - - args : Optional. - Positional arguments. - - kwargs : Optional. - Keyword arguments. - """ - - def __init__( - self, - model_args, - *args, - **kwargs - ): - """ - Initializes a TextRegressionModel instance. - :param model_args: dictionary with model arguments such as model name, path, revision, etc. - """ - self.inference_func = None - - - def register_inference_function(self, inference_func): - """ - Registers a regression function. - """ - self.inference_func = inference_func - - - def inference(self, inputs: Dataset): - """ - Gets regression results of a given dataset. - - :inputs: Dataset object, only accept type "text_only". - """ - if self.inference_func is not None: - return self.inference_func(inputs) - else: - pass diff --git a/spaces/Osborn-bh/ChatGLM3-6B-Osborn/resources/WECHAT.md b/spaces/Osborn-bh/ChatGLM3-6B-Osborn/resources/WECHAT.md deleted file mode 100644 index c9ee867ead5d818a0b4e2ba46103a6454537d143..0000000000000000000000000000000000000000 --- a/spaces/Osborn-bh/ChatGLM3-6B-Osborn/resources/WECHAT.md +++ /dev/null @@ -1,7 +0,0 @@ -
-[QR code image]
-
-扫码关注公众号,加入「ChatGLM交流群」
-
-Scan the QR code to follow the official account and join the "ChatGLM Discussion Group"
      - diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/readme.md b/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/readme.md deleted file mode 100644 index c0f2bce780fe2d7a9239c944b165eee7bcdeb9cb..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/readme.md +++ /dev/null @@ -1,7 +0,0 @@ -# StyleGAN 2 in PyTorch - -Implementation of Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958) in PyTorch - -Fork from [https://github.com/rosinality/stylegan2-pytorch](https://github.com/rosinality/stylegan2-pytorch) - -In VToonify, we modify it to accept z+ latent codes. diff --git a/spaces/PKaushik/humandetect/yolov6/data/data_augment.py b/spaces/PKaushik/humandetect/yolov6/data/data_augment.py deleted file mode 100644 index 0bef2d8777cb0f7c2936718ceaeb41bf19d4a7db..0000000000000000000000000000000000000000 --- a/spaces/PKaushik/humandetect/yolov6/data/data_augment.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# This code is based on -# https://github.com/ultralytics/yolov5/blob/master/utils/dataloaders.py - -import math -import random - -import cv2 -import numpy as np - - -def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - # HSV color-space augmentation - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - dtype = im.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed - - -def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better val mAP) - r = min(r, 1.0) - - # Compute padding - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return im, r, (dw, dh) - - -def mixup(im, labels, im2, labels2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - return im, labels - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - 
box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def random_affine(img, labels=(), degrees=10, translate=.1, scale=.1, shear=10, - new_shape=(640, 640)): - - n = len(labels) - height, width = new_shape - - M, s = get_transform_matrix(img.shape[:2], (height, width), degrees, scale, shear, translate) - if (M != np.eye(3)).any(): # image changed - img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Transform label coordinates - if n: - new = np.zeros((n, 4)) - - xy = np.ones((n * 4, 3)) - xy[:, :2] = labels[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = xy[:, :2].reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=labels[:, 1:5].T * s, box2=new.T, area_thr=0.1) - labels = labels[i] - labels[:, 1:5] = new[i] - - return img, labels - - -def get_transform_matrix(img_shape, new_shape, degrees, scale, shear, translate): - new_height, new_width = new_shape - # Center - C = np.eye(3) - C[0, 2] = -img_shape[1] / 2 # x translation (pixels) - C[1, 2] = -img_shape[0] / 2 # y translation (pixels) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * new_width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * new_height # y transla ion (pixels) - - # Combined rotation matrix - M = T @ S @ R @ C # order of operations (right to left) is IMPORTANT - return M, s - - -def mosaic_augmentation(img_size, imgs, hs, ws, labels, hyp): - - assert len(imgs) == 4, "Mosaic augmentation of current version only supports 4 images." 
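-    # Tile the four images around a randomly sampled mosaic center (xc, yc) on a
-    # 2s x 2s canvas, shift and clip their labels to the canvas, then apply a
-    # random affine to the merged image.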
- - labels4 = [] - s = img_size - yc, xc = (int(random.uniform(s//2, 3*s//2)) for _ in range(2)) # mosaic center x, y - for i in range(len(imgs)): - # Load image - img, h, w = imgs[i], hs[i], ws[i] - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels_per_img = labels[i].copy() - if labels_per_img.size: - boxes = np.copy(labels_per_img[:, 1:]) - boxes[:, 0] = w * (labels_per_img[:, 1] - labels_per_img[:, 3] / 2) + padw # top left x - boxes[:, 1] = h * (labels_per_img[:, 2] - labels_per_img[:, 4] / 2) + padh # top left y - boxes[:, 2] = w * (labels_per_img[:, 1] + labels_per_img[:, 3] / 2) + padw # bottom right x - boxes[:, 3] = h * (labels_per_img[:, 2] + labels_per_img[:, 4] / 2) + padh # bottom right y - labels_per_img[:, 1:] = boxes - - labels4.append(labels_per_img) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:]): - np.clip(x, 0, 2 * s, out=x) - - # Augment - img4, labels4 = random_affine(img4, labels4, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear']) - - return img4, labels4 diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/__init__.py deleted file mode 100644 index 3cf93f8bec9cf0cef0a3bd76ca3ca92eb188f535..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .backbones import * # noqa: F401,F403 -from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, - build_head, build_loss, build_segmentor) -from .decode_heads import * # noqa: F401,F403 -from .losses import * # noqa: F401,F403 -from .necks import * # noqa: F401,F403 -from .segmentors import * # noqa: F401,F403 - -__all__ = [ - 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', - 'build_head', 'build_loss', 'build_segmentor' -] diff --git a/spaces/PikeAndVine/resize_color/README.md b/spaces/PikeAndVine/resize_color/README.md deleted file mode 100644 index 37733172b5813e9c2e1775518121ba0223eab76f..0000000000000000000000000000000000000000 --- a/spaces/PikeAndVine/resize_color/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Resize Old -emoji: 🐢 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -duplicated_from: 155elkhorn/resize_color ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Pranjal2041/SemSup-XC/sample.py 
b/spaces/Pranjal2041/SemSup-XC/sample.py deleted file mode 100644 index 29d1bf7bd80c1b893738db128c8f7a58b56ea3e0..0000000000000000000000000000000000000000 --- a/spaces/Pranjal2041/SemSup-XC/sample.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!" - -demo = gr.Interface(fn=greet, inputs="text", outputs="text") - -demo.launch(share = True) \ No newline at end of file diff --git a/spaces/ReFenter/DeepDanbooru_string/README.md b/spaces/ReFenter/DeepDanbooru_string/README.md deleted file mode 100644 index 4330b6f969246dc764a34ea254d2e807159f1c55..0000000000000000000000000000000000000000 --- a/spaces/ReFenter/DeepDanbooru_string/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: DeepDanbooru String -emoji: 💬 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -duplicated_from: NoCrypt/DeepDanbooru_string ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/dataset.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/dataset.py deleted file mode 100644 index 1881446fd69aedb520ae669100cd2a3c2d143a18..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/dataset.py +++ /dev/null @@ -1,232 +0,0 @@ -import io -import cv2 -import numpy as np -import h5py -import torch -from numpy.linalg import inv -import re - - -try: - # for internel use only - from .client import MEGADEPTH_CLIENT, SCANNET_CLIENT -except Exception: - MEGADEPTH_CLIENT = SCANNET_CLIENT = None - -# --- DATA IO --- - - -def load_array_from_s3( - path, - client, - cv_type, - use_h5py=False, -): - byte_str = client.Get(path) - try: - if not use_h5py: - raw_array = np.fromstring(byte_str, np.uint8) - data = cv2.imdecode(raw_array, cv_type) - else: - f = io.BytesIO(byte_str) - data = np.array(h5py.File(f, "r")["/depth"]) - except Exception as ex: - print(f"==> Data loading failure: {path}") - raise ex - - assert data is not None - return data - - -def imread_gray(path, augment_fn=None, client=SCANNET_CLIENT): - cv_type = cv2.IMREAD_GRAYSCALE if augment_fn is None else cv2.IMREAD_COLOR - if str(path).startswith("s3://"): - image = load_array_from_s3(str(path), client, cv_type) - else: - image = cv2.imread(str(path), cv_type) - - if augment_fn is not None: - image = cv2.imread(str(path), cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - image = augment_fn(image) - image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) - return image # (h, w) - - -def get_resized_wh(w, h, resize=None): - if resize is not None: # resize the longer edge - scale = resize / max(h, w) - w_new, h_new = int(round(w * scale)), 
int(round(h * scale)) - else: - w_new, h_new = w, h - return w_new, h_new - - -def get_divisible_wh(w, h, df=None): - if df is not None: - w_new, h_new = map(lambda x: int(x // df * df), [w, h]) - else: - w_new, h_new = w, h - return w_new, h_new - - -def pad_bottom_right(inp, pad_size, ret_mask=False): - assert isinstance(pad_size, int) and pad_size >= max( - inp.shape[-2:] - ), f"{pad_size} < {max(inp.shape[-2:])}" - mask = None - if inp.ndim == 2: - padded = np.zeros((pad_size, pad_size), dtype=inp.dtype) - padded[: inp.shape[0], : inp.shape[1]] = inp - if ret_mask: - mask = np.zeros((pad_size, pad_size), dtype=bool) - mask[: inp.shape[0], : inp.shape[1]] = True - elif inp.ndim == 3: - padded = np.zeros((inp.shape[0], pad_size, pad_size), dtype=inp.dtype) - padded[:, : inp.shape[1], : inp.shape[2]] = inp - if ret_mask: - mask = np.zeros((inp.shape[0], pad_size, pad_size), dtype=bool) - mask[:, : inp.shape[1], : inp.shape[2]] = True - else: - raise NotImplementedError() - return padded, mask - - -# --- MEGADEPTH --- - - -def read_megadepth_gray(path, resize=None, df=None, padding=False, augment_fn=None): - """ - Args: - resize (int, optional): the longer edge of resized images. None for no resize. - padding (bool): If set to 'True', zero-pad resized images to squared size. - augment_fn (callable, optional): augments images with pre-defined visual effects - Returns: - image (torch.tensor): (1, h, w) - mask (torch.tensor): (h, w) - scale (torch.tensor): [w/w_new, h/h_new] - """ - # read image - image = imread_gray(path, augment_fn, client=MEGADEPTH_CLIENT) - - # resize image - w, h = image.shape[1], image.shape[0] - w_new, h_new = get_resized_wh(w, h, resize) - w_new, h_new = get_divisible_wh(w_new, h_new, df) - - image = cv2.resize(image, (w_new, h_new)) - scale = torch.tensor([w / w_new, h / h_new], dtype=torch.float) - - if padding: # padding - pad_to = max(h_new, w_new) - image, mask = pad_bottom_right(image, pad_to, ret_mask=True) - else: - mask = None - - image = ( - torch.from_numpy(image).float()[None] / 255 - ) # (h, w) -> (1, h, w) and normalized - if mask is not None: - mask = torch.from_numpy(mask) - - return image, mask, scale - - -def read_megadepth_depth(path, pad_to=None): - if str(path).startswith("s3://"): - depth = load_array_from_s3(path, MEGADEPTH_CLIENT, None, use_h5py=True) - else: - depth = np.array(h5py.File(path, "r")["depth"]) - if pad_to is not None: - depth, _ = pad_bottom_right(depth, pad_to, ret_mask=False) - depth = torch.from_numpy(depth).float() # (h, w) - return depth - - -# --- ScanNet --- - - -def read_scannet_gray(path, resize=(640, 480), augment_fn=None): - """ - Args: - resize (tuple): align image to depthmap, in (w, h). - augment_fn (callable, optional): augments images with pre-defined visual effects - Returns: - image (torch.tensor): (1, h, w) - mask (torch.tensor): (h, w) - scale (torch.tensor): [w/w_new, h/h_new] - """ - # read and resize image - image = imread_gray(path, augment_fn) - image = cv2.resize(image, resize) - - # (h, w) -> (1, h, w) and normalized - image = torch.from_numpy(image).float()[None] / 255 - return image - - -def read_scannet_depth(path): - if str(path).startswith("s3://"): - depth = load_array_from_s3(str(path), SCANNET_CLIENT, cv2.IMREAD_UNCHANGED) - else: - depth = cv2.imread(str(path), cv2.IMREAD_UNCHANGED) - depth = depth / 1000 - depth = torch.from_numpy(depth).float() # (h, w) - return depth - - -def read_scannet_pose(path): - """Read ScanNet's Camera2World pose and transform it to World2Camera. 
- - Returns: - pose_w2c (np.ndarray): (4, 4) - """ - cam2world = np.loadtxt(path, delimiter=" ") - world2cam = inv(cam2world) - return world2cam - - -def read_scannet_intrinsic(path): - """Read ScanNet's intrinsic matrix and return the 3x3 matrix.""" - intrinsic = np.loadtxt(path, delimiter=" ") - return intrinsic[:-1, :-1] - - -def read_gl3d_gray(path, resize): - img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), (int(resize), int(resize))) - img = ( - torch.from_numpy(img).float()[None] / 255 - ) # (h, w) -> (1, h, w) and normalized - return img - - -def read_gl3d_depth(file_path): - with open(file_path, "rb") as fin: - color = None - width = None - height = None - scale = None - data_type = None - header = str(fin.readline().decode("UTF-8")).rstrip() - if header == "PF": - color = True - elif header == "Pf": - color = False - else: - raise Exception("Not a PFM file.") - dim_match = re.match(r"^(\d+)\s(\d+)\s$", fin.readline().decode("UTF-8")) - if dim_match: - width, height = map(int, dim_match.groups()) - else: - raise Exception("Malformed PFM header.") - scale = float((fin.readline().decode("UTF-8")).rstrip()) - if scale < 0: # little-endian - data_type = " a * b + c - return _FusedMultiplyAdd.apply(a, b, c) - -#---------------------------------------------------------------------------- - -class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c - @staticmethod - def forward(ctx, a, b, c): # pylint: disable=arguments-differ - out = torch.addcmul(c, a, b) - ctx.save_for_backward(a, b) - ctx.c_shape = c.shape - return out - - @staticmethod - def backward(ctx, dout): # pylint: disable=arguments-differ - a, b = ctx.saved_tensors - c_shape = ctx.c_shape - da = None - db = None - dc = None - - if ctx.needs_input_grad[0]: - da = _unbroadcast(dout * b, a.shape) - - if ctx.needs_input_grad[1]: - db = _unbroadcast(dout * a, b.shape) - - if ctx.needs_input_grad[2]: - dc = _unbroadcast(dout, c_shape) - - return da, db, dc - -#---------------------------------------------------------------------------- - -def _unbroadcast(x, shape): - extra_dims = x.ndim - len(shape) - assert extra_dims >= 0 - dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] - if len(dim): - x = x.sum(dim=dim, keepdim=True) - if extra_dims: - x = x.reshape(-1, *x.shape[extra_dims+1:]) - assert x.shape == shape - return x - -#---------------------------------------------------------------------------- diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/apis/test.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/apis/test.py deleted file mode 100644 index e574eb7da04f09a59cf99ff953c36468ae87a326..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/apis/test.py +++ /dev/null @@ -1,238 +0,0 @@ -import os.path as osp -import pickle -import shutil -import tempfile - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -import torch.distributed as dist -from annotator.uniformer.mmcv.image import tensor2imgs -from annotator.uniformer.mmcv.runner import get_dist_info - - -def np2tmp(array, temp_file_name=None): - """Save ndarray to local numpy file. - - Args: - array (ndarray): Ndarray to save. - temp_file_name (str): Numpy file name. If 'temp_file_name=None', this - function will generate a file name with tempfile.NamedTemporaryFile - to save ndarray. Default: None. - - Returns: - str: The numpy file name. 
- """ - - if temp_file_name is None: - temp_file_name = tempfile.NamedTemporaryFile( - suffix='.npy', delete=False).name - np.save(temp_file_name, array) - return temp_file_name - - -def single_gpu_test(model, - data_loader, - show=False, - out_dir=None, - efficient_test=False, - opacity=0.5): - """Test with single GPU. - - Args: - model (nn.Module): Model to be tested. - data_loader (utils.data.Dataloader): Pytorch data loader. - show (bool): Whether show results during inference. Default: False. - out_dir (str, optional): If specified, the results will be dumped into - the directory to save output results. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - Returns: - list: The prediction results. - """ - - model.eval() - results = [] - dataset = data_loader.dataset - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, **data) - - if show or out_dir: - img_tensor = data['img'][0] - img_metas = data['img_metas'][0].data[0] - imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) - assert len(imgs) == len(img_metas) - - for img, img_meta in zip(imgs, img_metas): - h, w, _ = img_meta['img_shape'] - img_show = img[:h, :w, :] - - ori_h, ori_w = img_meta['ori_shape'][:-1] - img_show = mmcv.imresize(img_show, (ori_w, ori_h)) - - if out_dir: - out_file = osp.join(out_dir, img_meta['ori_filename']) - else: - out_file = None - - model.module.show_result( - img_show, - result, - palette=dataset.PALETTE, - show=show, - out_file=out_file, - opacity=opacity) - - if isinstance(result, list): - if efficient_test: - result = [np2tmp(_) for _ in result] - results.extend(result) - else: - if efficient_test: - result = np2tmp(result) - results.append(result) - - batch_size = len(result) - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, - data_loader, - tmpdir=None, - gpu_collect=False, - efficient_test=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' - it encodes results to gpu tensors and use gpu communication for results - collection. On cpu mode it saves the results on different gpus to 'tmpdir' - and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (utils.data.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - - Returns: - list: The prediction results. 
- """ - - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - - if isinstance(result, list): - if efficient_test: - result = [np2tmp(_) for _ in result] - results.extend(result) - else: - if efficient_test: - result = np2tmp(result) - results.append(result) - - if rank == 0: - batch_size = data['img'][0].size(0) - for _ in range(batch_size * world_size): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - """Collect results with CPU.""" - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - tmpdir = tempfile.mkdtemp() - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) - part_list.append(mmcv.load(part_file)) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - """Collect results with GPU.""" - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, shape_list): - part_list.append( - pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/spaces/Rongjiehuang/ProDiff/tasks/tts/tts.py b/spaces/Rongjiehuang/ProDiff/tasks/tts/tts.py deleted file mode 100644 index f803c1e738137cb1eca19a1943196abd2884c0a5..0000000000000000000000000000000000000000 --- 
a/spaces/Rongjiehuang/ProDiff/tasks/tts/tts.py +++ /dev/null @@ -1,131 +0,0 @@ -from multiprocessing.pool import Pool - -import matplotlib - -from utils.pl_utils import data_loader -from utils.training_utils import RSQRTSchedule -from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder -from modules.fastspeech.pe import PitchExtractor - -matplotlib.use('Agg') -import os -import numpy as np -from tqdm import tqdm -import torch.distributed as dist - -from tasks.base_task import BaseTask -from utils.hparams import hparams -from utils.text_encoder import TokenTextEncoder -import json - -import torch -import torch.optim -import torch.utils.data -import utils - - - -class TtsTask(BaseTask): - def __init__(self, *args, **kwargs): - self.vocoder = None - self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir']) - self.padding_idx = self.phone_encoder.pad() - self.eos_idx = self.phone_encoder.eos() - self.seg_idx = self.phone_encoder.seg() - self.saving_result_pool = None - self.saving_results_futures = None - self.stats = {} - super().__init__(*args, **kwargs) - - def build_scheduler(self, optimizer): - return RSQRTSchedule(optimizer) - - def build_optimizer(self, model): - self.optimizer = optimizer = torch.optim.AdamW( - model.parameters(), - lr=hparams['lr']) - return optimizer - - def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None, - required_batch_size_multiple=-1, endless=False, batch_by_size=True): - devices_cnt = torch.cuda.device_count() - if devices_cnt == 0: - devices_cnt = 1 - if required_batch_size_multiple == -1: - required_batch_size_multiple = devices_cnt - - def shuffle_batches(batches): - np.random.shuffle(batches) - return batches - - if max_tokens is not None: - max_tokens *= devices_cnt - if max_sentences is not None: - max_sentences *= devices_cnt - indices = dataset.ordered_indices() - if batch_by_size: - batch_sampler = utils.batch_by_size( - indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - ) - else: - batch_sampler = [] - for i in range(0, len(indices), max_sentences): - batch_sampler.append(indices[i:i + max_sentences]) - - if shuffle: - batches = shuffle_batches(list(batch_sampler)) - if endless: - batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))] - else: - batches = batch_sampler - if endless: - batches = [b for _ in range(1000) for b in batches] - num_workers = dataset.num_workers - if self.trainer.use_ddp: - num_replicas = dist.get_world_size() - rank = dist.get_rank() - batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0] - return torch.utils.data.DataLoader(dataset, - collate_fn=dataset.collater, - batch_sampler=batches, - num_workers=num_workers, - pin_memory=False) - - def build_phone_encoder(self, data_dir): - phone_list_file = os.path.join(data_dir, 'phone_set.json') - - phone_list = json.load(open(phone_list_file)) - return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') - - def build_optimizer(self, model): - self.optimizer = optimizer = torch.optim.AdamW( - model.parameters(), - lr=hparams['lr']) - return optimizer - - def test_start(self): - self.saving_result_pool = Pool(8) - self.saving_results_futures = [] - self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - self.pe = PitchExtractor().cuda() - utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) - 
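# the PitchExtractor checkpoint from hparams['pe_ckpt'] has just been restored; the module is switched to eval mode on the next line for test-time inference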
self.pe.eval() - def test_end(self, outputs): - self.saving_result_pool.close() - [f.get() for f in tqdm(self.saving_results_futures)] - self.saving_result_pool.join() - return {} - - ########## - # utils - ########## - def weights_nonzero_speech(self, target): - # target : B x T x mel - # Assign weight 1.0 to all labels except for padding (id=0). - dim = target.size(-1) - return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim) - -if __name__ == '__main__': - TtsTask.start() diff --git a/spaces/SHIBATAATSUSHI/aioccupationaltherapist2/README.md b/spaces/SHIBATAATSUSHI/aioccupationaltherapist2/README.md deleted file mode 100644 index 24483605f740f043d828c78162018453d27800f6..0000000000000000000000000000000000000000 --- a/spaces/SHIBATAATSUSHI/aioccupationaltherapist2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AI作業療法士(β) -emoji: 🌖 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -duplicated_from: SHIBATAATSUSHI/aioccupationaltherapist ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/SRDdev/Image-Caption/README.md b/spaces/SRDdev/Image-Caption/README.md deleted file mode 100644 index cf2cb291cdc8d0f725166cf87de1fc910af827e4..0000000000000000000000000000000000000000 --- a/spaces/SRDdev/Image-Caption/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image Caption -emoji: 🏅 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.0.5 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference \ No newline at end of file diff --git a/spaces/Salesforce/EDICT/my_diffusers/models/embeddings.py b/spaces/Salesforce/EDICT/my_diffusers/models/embeddings.py deleted file mode 100644 index 734be6068b7817efd51a508b0e42bc1c8f99d289..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/models/embeddings.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import math - -import numpy as np -import torch -from torch import nn - - -def get_timestep_embedding( - timesteps: torch.Tensor, - embedding_dim: int, - flip_sin_to_cos: bool = False, - downscale_freq_shift: float = 1, - scale: float = 1, - max_period: int = 10000, -): - # print(timesteps) - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. - - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the - embeddings. :return: an [N x dim] Tensor of positional embeddings. 
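For example, with embedding_dim=4 and the default arguments, a timestep t maps to [sin(t), sin(t/10000), cos(t), cos(t/10000)]; flip_sin_to_cos swaps the sine and cosine halves, and scale multiplies the arguments.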
- """ - assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" - - half_dim = embedding_dim // 2 - exponent = -math.log(max_period) * torch.arange(start=0, end=half_dim, dtype=torch.float64) - exponent = exponent / (half_dim - downscale_freq_shift) - - emb = torch.exp(exponent).to(device=timesteps.device) - emb = timesteps[:, None].double() * emb[None, :] - - # scale embeddings - emb = scale * emb - - # concat sine and cosine embeddings - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) - - # flip sine and cosine embeddings - if flip_sin_to_cos: - emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) - - # zero pad - if embedding_dim % 2 == 1: - emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) - return emb - - -class TimestepEmbedding(nn.Module): - def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"): - super().__init__() - - self.linear_1 = nn.Linear(channel, time_embed_dim) - self.act = None - if act_fn == "silu": - self.act = nn.SiLU() - self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim) - - def forward(self, sample): - sample = self.linear_1(sample) - - if self.act is not None: - sample = self.act(sample) - - sample = self.linear_2(sample) - return sample - - -class Timesteps(nn.Module): - def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float): - super().__init__() - self.num_channels = num_channels - self.flip_sin_to_cos = flip_sin_to_cos - self.downscale_freq_shift = downscale_freq_shift - - def forward(self, timesteps): - t_emb = get_timestep_embedding( - timesteps, - self.num_channels, - flip_sin_to_cos=self.flip_sin_to_cos, - downscale_freq_shift=self.downscale_freq_shift, - ) - return t_emb - - -class GaussianFourierProjection(nn.Module): - """Gaussian Fourier embeddings for noise levels.""" - - def __init__(self, embedding_size: int = 256, scale: float = 1.0): - super().__init__() - self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) - - # to delete later - self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) - - self.weight = self.W - - def forward(self, x): - x = torch.log(x) - x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi - out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) - return out diff --git a/spaces/Salesforce/EDICT/my_half_diffusers/pipelines/latent_diffusion/__init__.py b/spaces/Salesforce/EDICT/my_half_diffusers/pipelines/latent_diffusion/__init__.py deleted file mode 100644 index c481b38cf5e0a1c4e24f7e0edf944efb68e1f979..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_half_diffusers/pipelines/latent_diffusion/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# flake8: noqa -from ...utils import is_transformers_available - - -if is_transformers_available(): - from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline diff --git a/spaces/SamerKharboush/chatGPT-Sam-Turbo/README.md b/spaces/SamerKharboush/chatGPT-Sam-Turbo/README.md deleted file mode 100644 index bd7984b24ea879f2cdf5cb438c9aa68c7676ddc4..0000000000000000000000000000000000000000 --- a/spaces/SamerKharboush/chatGPT-Sam-Turbo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ChatGPT -emoji: 😁 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: ppsantiago/chatGPT ---- diff --git a/spaces/Sandiago21/automatic-speech-recognition-greek/app.py b/spaces/Sandiago21/automatic-speech-recognition-greek/app.py deleted 
file mode 100644 index 1488c952cb64af93b7faec0a2151fdcbaff09b18..0000000000000000000000000000000000000000 --- a/spaces/Sandiago21/automatic-speech-recognition-greek/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import torch -import gradio as gr -from transformers import pipeline - -model_id = "Sandiago21/whisper-large-v2-greek" # update with your model id -pipe = pipeline("automatic-speech-recognition", model=model_id) - - -title = "Automatic Speech Recognition (ASR)" -description = """ -Demo for automatic speech recognition in Greek. Demo uses [Sandiago21/whisper-large-v2-greek](https://huggingface.co/Sandiago21/whisper-large-v2-greek) checkpoint, which is based on OpenAI's -[Whisper](https://huggingface.co/openai/whisper-large-v2) model and is fine-tuned in Greek Audio dataset -![Automatic Speech Recognition (ASR)"](https://datasets-server.huggingface.co/assets/huggingface-course/audio-course-images/--/huggingface-course--audio-course-images/train/2/image/image.png "Diagram of Automatic Speech Recognition (ASR)") -""" - -def transcribe_speech(filepath): - output = pipe( - filepath, - max_new_tokens=256, - generate_kwargs={ - "task": "transcribe", - "language": "greek", - }, # update with the language you've fine-tuned on - chunk_length_s=30, - batch_size=8, - ) - return output["text"] - -demo = gr.Blocks() - -mic_transcribe = gr.Interface( - fn=transcribe_speech, - inputs=gr.Audio(source="microphone", type="filepath"), - outputs=gr.outputs.Textbox(), - tilte=title, - description=description, -) - -file_transcribe = gr.Interface( - fn=transcribe_speech, - inputs=gr.Audio(source="upload", type="filepath"), - outputs=gr.outputs.Textbox(), - examples=[["./example.wav"]], - tilte=title, - description=description, -) - -with demo: - gr.TabbedInterface( - [mic_transcribe, file_transcribe], - ["Transcribe Microphone", "Transcribe Audio File"], - ), - -demo.launch() diff --git a/spaces/SimianLuo/Latent_Consistency_Model/app.py b/spaces/SimianLuo/Latent_Consistency_Model/app.py deleted file mode 100644 index 8fdb77309267ce30434fe7825982accf8f75d6b6..0000000000000000000000000000000000000000 --- a/spaces/SimianLuo/Latent_Consistency_Model/app.py +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/env python -from __future__ import annotations - -import os -import random -import time - -import gradio as gr -import numpy as np -import PIL.Image -import torch - -from diffusers import DiffusionPipeline -import torch - -import os -import torch -from tqdm import tqdm -from safetensors.torch import load_file -import gradio_user_history as gr_user_history - -from concurrent.futures import ThreadPoolExecutor -import uuid -import cv2 - -DESCRIPTION = '''# Latent Consistency Model -Distilled from [Dreamshaper v7](https://huggingface.co/Lykon/dreamshaper-7) fine-tune of [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) with only 4,000 training iterations (~32 A100 GPU Hours). [Project page](https://latent-consistency-models.github.io) -''' -if not torch.cuda.is_available(): - DESCRIPTION += "\n

Running on CPU 🥶 This demo does not work on CPU.
      " - -MAX_SEED = np.iinfo(np.int32).max -CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1" -MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) -USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1" -DTYPE = torch.float32 # torch.float16 works as well, but pictures seem to be a bit worse - -pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") -# pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_txt2img", custom_revision="main") -pipe.to(torch_device="cuda", torch_dtype=DTYPE) - - -def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: - if randomize_seed: - seed = random.randint(0, MAX_SEED) - return seed - -def save_image(img, profile: gr.OAuthProfile | None, metadata: dict): - unique_name = str(uuid.uuid4()) + '.png' - img.save(unique_name) - gr_user_history.save_image(label=metadata["prompt"], image=img, profile=profile, metadata=metadata) - return unique_name - -def save_images(image_array, profile: gr.OAuthProfile | None, metadata: dict): - paths = [] - with ThreadPoolExecutor() as executor: - paths = list(executor.map(save_image, image_array, [profile]*len(image_array), [metadata]*len(image_array))) - return paths - -def generate( - prompt: str, - seed: int = 0, - width: int = 512, - height: int = 512, - guidance_scale: float = 8.0, - num_inference_steps: int = 4, - num_images: int = 4, - randomize_seed: bool = False, - progress = gr.Progress(track_tqdm=True), - profile: gr.OAuthProfile | None = None, -) -> PIL.Image.Image: - seed = randomize_seed_fn(seed, randomize_seed) - torch.manual_seed(seed) - start_time = time.time() - result = pipe( - prompt=prompt, - width=width, - height=height, - guidance_scale=guidance_scale, - num_inference_steps=num_inference_steps, - num_images_per_prompt=num_images, - lcm_origin_steps=50, - output_type="pil", - ).images - paths = save_images(result, profile, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps}) - print(time.time() - start_time) - return paths, seed - -examples = [ - "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography", - "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k", - "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", - "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece", -] - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION) - gr.DuplicateButton( - value="Duplicate Space for private use", - elem_id="duplicate-button", - visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1", - ) - with gr.Group(): - with gr.Row(): - prompt = gr.Text( - label="Prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - container=False, - ) - run_button = gr.Button("Run", scale=0) - result = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery", grid=[2] - ) - with gr.Accordion("Advanced options", open=False): - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=MAX_SEED, - step=1, - value=0, - randomize=True - ) - randomize_seed = gr.Checkbox(label="Randomize seed across runs", value=True) - with gr.Row(): - width = gr.Slider( - label="Width", - minimum=256, - maximum=MAX_IMAGE_SIZE, - step=32, - 
value=512, - ) - height = gr.Slider( - label="Height", - minimum=256, - maximum=MAX_IMAGE_SIZE, - step=32, - value=512, - ) - with gr.Row(): - guidance_scale = gr.Slider( - label="Guidance scale for base", - minimum=2, - maximum=14, - step=0.1, - value=8.0, - ) - num_inference_steps = gr.Slider( - label="Number of inference steps for base", - minimum=1, - maximum=8, - step=1, - value=4, - ) - with gr.Row(): - num_images = gr.Slider( - label="Number of images", - minimum=1, - maximum=8, - step=1, - value=4, - visible=False, - ) - - with gr.Accordion("Past generations", open=False): - gr_user_history.render() - - gr.Examples( - examples=examples, - inputs=prompt, - outputs=result, - fn=generate, - cache_examples=CACHE_EXAMPLES, - ) - - gr.on( - triggers=[ - prompt.submit, - run_button.click, - ], - fn=generate, - inputs=[ - prompt, - seed, - width, - height, - guidance_scale, - num_inference_steps, - num_images, - randomize_seed - ], - outputs=[result, seed], - api_name="run", - ) - -if __name__ == "__main__": - demo.queue(api_open=False) - # demo.queue(max_size=20).launch() - demo.launch() diff --git a/spaces/SuCicada/Lain-vits/README.md b/spaces/SuCicada/Lain-vits/README.md deleted file mode 100644 index 190a443aed11472cc8b4e3450365dae23c5debab..0000000000000000000000000000000000000000 --- a/spaces/SuCicada/Lain-vits/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Lain-vits -emoji: 🚀 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/SujanMidatani/resume_details_to_questions/README.md b/spaces/SujanMidatani/resume_details_to_questions/README.md deleted file mode 100644 index 6529b2cae062ac4561a10597e14678945b8ef8d2..0000000000000000000000000000000000000000 --- a/spaces/SujanMidatani/resume_details_to_questions/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Resume Details To Questions -emoji: 😻 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_shimmodule.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_shimmodule.py deleted file mode 100644 index 6ea2629b42d9bcdc26bc0d0cb04624e2a60c0822..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_shimmodule.py +++ /dev/null @@ -1,12 +0,0 @@ -from IPython.utils.shimmodule import ShimModule -import IPython - - -def test_shimmodule_repr_does_not_fail_on_import_error(): - shim_module = ShimModule("shim_module", mirror="mirrored_module_does_not_exist") - repr(shim_module) - - -def test_shimmodule_repr_forwards_to_module(): - shim_module = ShimModule("shim_module", mirror="IPython") - assert repr(shim_module) == repr(IPython) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/_backends/_asyncio.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/_backends/_asyncio.py deleted file mode 100644 index c654988385773f363c3a93286d8107fab5f69c9b..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/_backends/_asyncio.py +++ /dev/null @@ -1,2117 +0,0 @@ -from __future__ import annotations - -import array 
-import asyncio -import concurrent.futures -import math -import socket -import sys -from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined] -from collections import OrderedDict, deque -from concurrent.futures import Future -from contextvars import Context, copy_context -from dataclasses import dataclass -from functools import partial, wraps -from inspect import ( - CORO_RUNNING, - CORO_SUSPENDED, - GEN_RUNNING, - GEN_SUSPENDED, - getcoroutinestate, - getgeneratorstate, -) -from io import IOBase -from os import PathLike -from queue import Queue -from socket import AddressFamily, SocketKind -from threading import Thread -from types import TracebackType -from typing import ( - IO, - Any, - AsyncGenerator, - Awaitable, - Callable, - Collection, - Coroutine, - Generator, - Iterable, - Mapping, - Optional, - Sequence, - Tuple, - TypeVar, - Union, - cast, -) -from weakref import WeakKeyDictionary - -import sniffio - -from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc -from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable -from .._core._eventloop import claim_worker_thread, threadlocals -from .._core._exceptions import ( - BrokenResourceError, - BusyResourceError, - ClosedResourceError, - EndOfStream, - WouldBlock, -) -from .._core._exceptions import ExceptionGroup as BaseExceptionGroup -from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr -from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter -from .._core._synchronization import Event as BaseEvent -from .._core._synchronization import ResourceGuard -from .._core._tasks import CancelScope as BaseCancelScope -from ..abc import IPSockAddrType, UDPPacketType -from ..lowlevel import RunVar - -if sys.version_info >= (3, 8): - - def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]: - return task.get_coro() - -else: - - def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]: - return task._coro - - -from asyncio import all_tasks, create_task, current_task, get_running_loop -from asyncio import run as native_run - - -def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]: - return [cb for cb, context in task._callbacks] - - -T_Retval = TypeVar("T_Retval") -T_contra = TypeVar("T_contra", contravariant=True) - -# Check whether there is native support for task names in asyncio (3.8+) -_native_task_names = hasattr(asyncio.Task, "get_name") - - -_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task") - - -def find_root_task() -> asyncio.Task: - root_task = _root_task.get(None) - if root_task is not None and not root_task.done(): - return root_task - - # Look for a task that has been started via run_until_complete() - for task in all_tasks(): - if task._callbacks and not task.done(): - for cb in _get_task_callbacks(task): - if ( - cb is _run_until_complete_cb - or getattr(cb, "__module__", None) == "uvloop.loop" - ): - _root_task.set(task) - return task - - # Look up the topmost task in the AnyIO task tree, if possible - task = cast(asyncio.Task, current_task()) - state = _task_states.get(task) - if state: - cancel_scope = state.cancel_scope - while cancel_scope and cancel_scope._parent_scope is not None: - cancel_scope = cancel_scope._parent_scope - - if cancel_scope is not None: - return cast(asyncio.Task, cancel_scope._host_task) - - return task - - -def get_callable_name(func: Callable) -> str: - module = getattr(func, "__module__", None) - qualname = getattr(func, "__qualname__", None) - return 
".".join([x for x in (module, qualname) if x]) - - -# -# Event loop -# - -_run_vars = ( - WeakKeyDictionary() -) # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] - -current_token = get_running_loop - - -def _task_started(task: asyncio.Task) -> bool: - """Return ``True`` if the task has been started and has not finished.""" - coro = cast(Coroutine[Any, Any, Any], get_coro(task)) - try: - return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED) - except AttributeError: - try: - return getgeneratorstate(cast(Generator, coro)) in ( - GEN_RUNNING, - GEN_SUSPENDED, - ) - except AttributeError: - # task coro is async_genenerator_asend https://bugs.python.org/issue37771 - raise Exception(f"Cannot determine if task {task} has started or not") - - -def _maybe_set_event_loop_policy( - policy: asyncio.AbstractEventLoopPolicy | None, use_uvloop: bool -) -> None: - # On CPython, use uvloop when possible if no other policy has been given and if not - # explicitly disabled - if policy is None and use_uvloop and sys.implementation.name == "cpython": - try: - import uvloop - except ImportError: - pass - else: - # Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier) - if not hasattr( - asyncio.AbstractEventLoop, "shutdown_default_executor" - ) or hasattr(uvloop.loop.Loop, "shutdown_default_executor"): - policy = uvloop.EventLoopPolicy() - - if policy is not None: - asyncio.set_event_loop_policy(policy) - - -def run( - func: Callable[..., Awaitable[T_Retval]], - *args: object, - debug: bool = False, - use_uvloop: bool = False, - policy: asyncio.AbstractEventLoopPolicy | None = None, -) -> T_Retval: - @wraps(func) - async def wrapper() -> T_Retval: - task = cast(asyncio.Task, current_task()) - task_state = TaskState(None, get_callable_name(func), None) - _task_states[task] = task_state - if _native_task_names: - task.set_name(task_state.name) - - try: - return await func(*args) - finally: - del _task_states[task] - - _maybe_set_event_loop_policy(policy, use_uvloop) - return native_run(wrapper(), debug=debug) - - -# -# Miscellaneous -# - -sleep = asyncio.sleep - - -# -# Timeouts and cancellation -# - -CancelledError = asyncio.CancelledError - - -class CancelScope(BaseCancelScope): - def __new__( - cls, *, deadline: float = math.inf, shield: bool = False - ) -> CancelScope: - return object.__new__(cls) - - def __init__(self, deadline: float = math.inf, shield: bool = False): - self._deadline = deadline - self._shield = shield - self._parent_scope: CancelScope | None = None - self._cancel_called = False - self._active = False - self._timeout_handle: asyncio.TimerHandle | None = None - self._cancel_handle: asyncio.Handle | None = None - self._tasks: set[asyncio.Task] = set() - self._host_task: asyncio.Task | None = None - self._timeout_expired = False - self._cancel_calls: int = 0 - - def __enter__(self) -> CancelScope: - if self._active: - raise RuntimeError( - "Each CancelScope may only be used for a single 'with' block" - ) - - self._host_task = host_task = cast(asyncio.Task, current_task()) - self._tasks.add(host_task) - try: - task_state = _task_states[host_task] - except KeyError: - task_name = host_task.get_name() if _native_task_names else None - task_state = TaskState(None, task_name, self) - _task_states[host_task] = task_state - else: - self._parent_scope = task_state.cancel_scope - task_state.cancel_scope = self - - self._timeout() - self._active = True - - # Start cancelling the host task if the scope was cancelled before entering - if self._cancel_called: - 
self._deliver_cancellation() - - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - if not self._active: - raise RuntimeError("This cancel scope is not active") - if current_task() is not self._host_task: - raise RuntimeError( - "Attempted to exit cancel scope in a different task than it was " - "entered in" - ) - - assert self._host_task is not None - host_task_state = _task_states.get(self._host_task) - if host_task_state is None or host_task_state.cancel_scope is not self: - raise RuntimeError( - "Attempted to exit a cancel scope that isn't the current tasks's " - "current cancel scope" - ) - - self._active = False - if self._timeout_handle: - self._timeout_handle.cancel() - self._timeout_handle = None - - self._tasks.remove(self._host_task) - - host_task_state.cancel_scope = self._parent_scope - - # Restart the cancellation effort in the farthest directly cancelled parent scope if this - # one was shielded - if self._shield: - self._deliver_cancellation_to_parent() - - if exc_val is not None: - exceptions = ( - exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val] - ) - if all(isinstance(exc, CancelledError) for exc in exceptions): - if self._timeout_expired: - return self._uncancel() - elif not self._cancel_called: - # Task was cancelled natively - return None - elif not self._parent_cancelled(): - # This scope was directly cancelled - return self._uncancel() - - return None - - def _uncancel(self) -> bool: - if sys.version_info < (3, 11) or self._host_task is None: - self._cancel_calls = 0 - return True - - # Uncancel all AnyIO cancellations - for i in range(self._cancel_calls): - self._host_task.uncancel() - - self._cancel_calls = 0 - return not self._host_task.cancelling() - - def _timeout(self) -> None: - if self._deadline != math.inf: - loop = get_running_loop() - if loop.time() >= self._deadline: - self._timeout_expired = True - self.cancel() - else: - self._timeout_handle = loop.call_at(self._deadline, self._timeout) - - def _deliver_cancellation(self) -> None: - """ - Deliver cancellation to directly contained tasks and nested cancel scopes. - - Schedule another run at the end if we still have tasks eligible for cancellation. 
- """ - should_retry = False - current = current_task() - for task in self._tasks: - if task._must_cancel: # type: ignore[attr-defined] - continue - - # The task is eligible for cancellation if it has started and is not in a cancel - # scope shielded from this one - cancel_scope = _task_states[task].cancel_scope - while cancel_scope is not self: - if cancel_scope is None or cancel_scope._shield: - break - else: - cancel_scope = cancel_scope._parent_scope - else: - should_retry = True - if task is not current and ( - task is self._host_task or _task_started(task) - ): - self._cancel_calls += 1 - task.cancel() - - # Schedule another callback if there are still tasks left - if should_retry: - self._cancel_handle = get_running_loop().call_soon( - self._deliver_cancellation - ) - else: - self._cancel_handle = None - - def _deliver_cancellation_to_parent(self) -> None: - """Start cancellation effort in the farthest directly cancelled parent scope""" - scope = self._parent_scope - scope_to_cancel: CancelScope | None = None - while scope is not None: - if scope._cancel_called and scope._cancel_handle is None: - scope_to_cancel = scope - - # No point in looking beyond any shielded scope - if scope._shield: - break - - scope = scope._parent_scope - - if scope_to_cancel is not None: - scope_to_cancel._deliver_cancellation() - - def _parent_cancelled(self) -> bool: - # Check whether any parent has been cancelled - cancel_scope = self._parent_scope - while cancel_scope is not None and not cancel_scope._shield: - if cancel_scope._cancel_called: - return True - else: - cancel_scope = cancel_scope._parent_scope - - return False - - def cancel(self) -> DeprecatedAwaitable: - if not self._cancel_called: - if self._timeout_handle: - self._timeout_handle.cancel() - self._timeout_handle = None - - self._cancel_called = True - if self._host_task is not None: - self._deliver_cancellation() - - return DeprecatedAwaitable(self.cancel) - - @property - def deadline(self) -> float: - return self._deadline - - @deadline.setter - def deadline(self, value: float) -> None: - self._deadline = float(value) - if self._timeout_handle is not None: - self._timeout_handle.cancel() - self._timeout_handle = None - - if self._active and not self._cancel_called: - self._timeout() - - @property - def cancel_called(self) -> bool: - return self._cancel_called - - @property - def shield(self) -> bool: - return self._shield - - @shield.setter - def shield(self, value: bool) -> None: - if self._shield != value: - self._shield = value - if not value: - self._deliver_cancellation_to_parent() - - -async def checkpoint() -> None: - await sleep(0) - - -async def checkpoint_if_cancelled() -> None: - task = current_task() - if task is None: - return - - try: - cancel_scope = _task_states[task].cancel_scope - except KeyError: - return - - while cancel_scope: - if cancel_scope.cancel_called: - await sleep(0) - elif cancel_scope.shield: - break - else: - cancel_scope = cancel_scope._parent_scope - - -async def cancel_shielded_checkpoint() -> None: - with CancelScope(shield=True): - await sleep(0) - - -def current_effective_deadline() -> float: - try: - cancel_scope = _task_states[current_task()].cancel_scope # type: ignore[index] - except KeyError: - return math.inf - - deadline = math.inf - while cancel_scope: - deadline = min(deadline, cancel_scope.deadline) - if cancel_scope._cancel_called: - deadline = -math.inf - break - elif cancel_scope.shield: - break - else: - cancel_scope = cancel_scope._parent_scope - - return deadline - - -def 
current_time() -> float: - return get_running_loop().time() - - -# -# Task states -# - - -class TaskState: - """ - Encapsulates auxiliary task information that cannot be added to the Task instance itself - because there are no guarantees about its implementation. - """ - - __slots__ = "parent_id", "name", "cancel_scope" - - def __init__( - self, - parent_id: int | None, - name: str | None, - cancel_scope: CancelScope | None, - ): - self.parent_id = parent_id - self.name = name - self.cancel_scope = cancel_scope - - -_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState] - - -# -# Task groups -# - - -class ExceptionGroup(BaseExceptionGroup): - def __init__(self, exceptions: list[BaseException]): - super().__init__() - self.exceptions = exceptions - - -class _AsyncioTaskStatus(abc.TaskStatus): - def __init__(self, future: asyncio.Future, parent_id: int): - self._future = future - self._parent_id = parent_id - - def started(self, value: T_contra | None = None) -> None: - try: - self._future.set_result(value) - except asyncio.InvalidStateError: - raise RuntimeError( - "called 'started' twice on the same task status" - ) from None - - task = cast(asyncio.Task, current_task()) - _task_states[task].parent_id = self._parent_id - - -class TaskGroup(abc.TaskGroup): - def __init__(self) -> None: - self.cancel_scope: CancelScope = CancelScope() - self._active = False - self._exceptions: list[BaseException] = [] - - async def __aenter__(self) -> TaskGroup: - self.cancel_scope.__enter__() - self._active = True - return self - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb) - if exc_val is not None: - self.cancel_scope.cancel() - self._exceptions.append(exc_val) - - while self.cancel_scope._tasks: - try: - await asyncio.wait(self.cancel_scope._tasks) - except asyncio.CancelledError: - self.cancel_scope.cancel() - - self._active = False - if not self.cancel_scope._parent_cancelled(): - exceptions = self._filter_cancellation_errors(self._exceptions) - else: - exceptions = self._exceptions - - try: - if len(exceptions) > 1: - if all( - isinstance(e, CancelledError) and not e.args for e in exceptions - ): - # Tasks were cancelled natively, without a cancellation message - raise CancelledError - else: - raise ExceptionGroup(exceptions) - elif exceptions and exceptions[0] is not exc_val: - raise exceptions[0] - except BaseException as exc: - # Clear the context here, as it can only be done in-flight. - # If the context is not cleared, it can result in recursive tracebacks (see #145). 
- exc.__context__ = None - raise - - return ignore_exception - - @staticmethod - def _filter_cancellation_errors( - exceptions: Sequence[BaseException], - ) -> list[BaseException]: - filtered_exceptions: list[BaseException] = [] - for exc in exceptions: - if isinstance(exc, ExceptionGroup): - new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions) - if len(new_exceptions) > 1: - filtered_exceptions.append(exc) - elif len(new_exceptions) == 1: - filtered_exceptions.append(new_exceptions[0]) - elif new_exceptions: - new_exc = ExceptionGroup(new_exceptions) - new_exc.__cause__ = exc.__cause__ - new_exc.__context__ = exc.__context__ - new_exc.__traceback__ = exc.__traceback__ - filtered_exceptions.append(new_exc) - elif not isinstance(exc, CancelledError) or exc.args: - filtered_exceptions.append(exc) - - return filtered_exceptions - - async def _run_wrapped_task( - self, coro: Coroutine, task_status_future: asyncio.Future | None - ) -> None: - # This is the code path for Python 3.7 on which asyncio freaks out if a task - # raises a BaseException. - __traceback_hide__ = __tracebackhide__ = True # noqa: F841 - task = cast(asyncio.Task, current_task()) - try: - await coro - except BaseException as exc: - if task_status_future is None or task_status_future.done(): - self._exceptions.append(exc) - self.cancel_scope.cancel() - else: - task_status_future.set_exception(exc) - else: - if task_status_future is not None and not task_status_future.done(): - task_status_future.set_exception( - RuntimeError("Child exited without calling task_status.started()") - ) - finally: - if task in self.cancel_scope._tasks: - self.cancel_scope._tasks.remove(task) - del _task_states[task] - - def _spawn( - self, - func: Callable[..., Awaitable[Any]], - args: tuple, - name: object, - task_status_future: asyncio.Future | None = None, - ) -> asyncio.Task: - def task_done(_task: asyncio.Task) -> None: - # This is the code path for Python 3.8+ - assert _task in self.cancel_scope._tasks - self.cancel_scope._tasks.remove(_task) - del _task_states[_task] - - try: - exc = _task.exception() - except CancelledError as e: - while isinstance(e.__context__, CancelledError): - e = e.__context__ - - exc = e - - if exc is not None: - if task_status_future is None or task_status_future.done(): - self._exceptions.append(exc) - self.cancel_scope.cancel() - else: - task_status_future.set_exception(exc) - elif task_status_future is not None and not task_status_future.done(): - task_status_future.set_exception( - RuntimeError("Child exited without calling task_status.started()") - ) - - if not self._active: - raise RuntimeError( - "This task group is not active; no new tasks can be started." 
- ) - - options: dict[str, Any] = {} - name = get_callable_name(func) if name is None else str(name) - if _native_task_names: - options["name"] = name - - kwargs = {} - if task_status_future: - parent_id = id(current_task()) - kwargs["task_status"] = _AsyncioTaskStatus( - task_status_future, id(self.cancel_scope._host_task) - ) - else: - parent_id = id(self.cancel_scope._host_task) - - coro = func(*args, **kwargs) - if not asyncio.iscoroutine(coro): - raise TypeError( - f"Expected an async function, but {func} appears to be synchronous" - ) - - foreign_coro = not hasattr(coro, "cr_frame") and not hasattr(coro, "gi_frame") - if foreign_coro or sys.version_info < (3, 8): - coro = self._run_wrapped_task(coro, task_status_future) - - task = create_task(coro, **options) - if not foreign_coro and sys.version_info >= (3, 8): - task.add_done_callback(task_done) - - # Make the spawned task inherit the task group's cancel scope - _task_states[task] = TaskState( - parent_id=parent_id, name=name, cancel_scope=self.cancel_scope - ) - self.cancel_scope._tasks.add(task) - return task - - def start_soon( - self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None - ) -> None: - self._spawn(func, args, name) - - async def start( - self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None - ) -> None: - future: asyncio.Future = asyncio.Future() - task = self._spawn(func, args, name, future) - - # If the task raises an exception after sending a start value without a switch point - # between, the task group is cancelled and this method never proceeds to process the - # completed future. That's why we have to have a shielded cancel scope here. - with CancelScope(shield=True): - try: - return await future - except CancelledError: - task.cancel() - raise - - -# -# Threads -# - -_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]] - - -class WorkerThread(Thread): - MAX_IDLE_TIME = 10 # seconds - - def __init__( - self, - root_task: asyncio.Task, - workers: set[WorkerThread], - idle_workers: deque[WorkerThread], - ): - super().__init__(name="AnyIO worker thread") - self.root_task = root_task - self.workers = workers - self.idle_workers = idle_workers - self.loop = root_task._loop - self.queue: Queue[ - tuple[Context, Callable, tuple, asyncio.Future] | None - ] = Queue(2) - self.idle_since = current_time() - self.stopping = False - - def _report_result( - self, future: asyncio.Future, result: Any, exc: BaseException | None - ) -> None: - self.idle_since = current_time() - if not self.stopping: - self.idle_workers.append(self) - - if not future.cancelled(): - if exc is not None: - if isinstance(exc, StopIteration): - new_exc = RuntimeError("coroutine raised StopIteration") - new_exc.__cause__ = exc - exc = new_exc - - future.set_exception(exc) - else: - future.set_result(result) - - def run(self) -> None: - with claim_worker_thread("asyncio"): - threadlocals.loop = self.loop - while True: - item = self.queue.get() - if item is None: - # Shutdown command received - return - - context, func, args, future = item - if not future.cancelled(): - result = None - exception: BaseException | None = None - try: - result = context.run(func, *args) - except BaseException as exc: - exception = exc - - if not self.loop.is_closed(): - self.loop.call_soon_threadsafe( - self._report_result, future, result, exception - ) - - self.queue.task_done() - - def stop(self, f: asyncio.Task | None = None) -> None: - self.stopping = True - self.queue.put_nowait(None) - 
self.workers.discard(self) - try: - self.idle_workers.remove(self) - except ValueError: - pass - - -_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar( - "_threadpool_idle_workers" -) -_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers") - - -async def run_sync_in_worker_thread( - func: Callable[..., T_Retval], - *args: object, - cancellable: bool = False, - limiter: CapacityLimiter | None = None, -) -> T_Retval: - await checkpoint() - - # If this is the first run in this event loop thread, set up the necessary variables - try: - idle_workers = _threadpool_idle_workers.get() - workers = _threadpool_workers.get() - except LookupError: - idle_workers = deque() - workers = set() - _threadpool_idle_workers.set(idle_workers) - _threadpool_workers.set(workers) - - async with (limiter or current_default_thread_limiter()): - with CancelScope(shield=not cancellable): - future: asyncio.Future = asyncio.Future() - root_task = find_root_task() - if not idle_workers: - worker = WorkerThread(root_task, workers, idle_workers) - worker.start() - workers.add(worker) - root_task.add_done_callback(worker.stop) - else: - worker = idle_workers.pop() - - # Prune any other workers that have been idle for MAX_IDLE_TIME seconds or longer - now = current_time() - while idle_workers: - if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME: - break - - expired_worker = idle_workers.popleft() - expired_worker.root_task.remove_done_callback(expired_worker.stop) - expired_worker.stop() - - context = copy_context() - context.run(sniffio.current_async_library_cvar.set, None) - worker.queue.put_nowait((context, func, args, future)) - return await future - - -def run_sync_from_thread( - func: Callable[..., T_Retval], - *args: object, - loop: asyncio.AbstractEventLoop | None = None, -) -> T_Retval: - @wraps(func) - def wrapper() -> None: - try: - f.set_result(func(*args)) - except BaseException as exc: - f.set_exception(exc) - if not isinstance(exc, Exception): - raise - - f: concurrent.futures.Future[T_Retval] = Future() - loop = loop or threadlocals.loop - loop.call_soon_threadsafe(wrapper) - return f.result() - - -def run_async_from_thread( - func: Callable[..., Awaitable[T_Retval]], *args: object -) -> T_Retval: - f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe( - func(*args), threadlocals.loop - ) - return f.result() - - -class BlockingPortal(abc.BlockingPortal): - def __new__(cls) -> BlockingPortal: - return object.__new__(cls) - - def __init__(self) -> None: - super().__init__() - self._loop = get_running_loop() - - def _spawn_task_from_thread( - self, - func: Callable, - args: tuple, - kwargs: dict[str, Any], - name: object, - future: Future, - ) -> None: - run_sync_from_thread( - partial(self._task_group.start_soon, name=name), - self._call_func, - func, - args, - kwargs, - future, - loop=self._loop, - ) - - -# -# Subprocesses -# - - -@dataclass(eq=False) -class StreamReaderWrapper(abc.ByteReceiveStream): - _stream: asyncio.StreamReader - - async def receive(self, max_bytes: int = 65536) -> bytes: - data = await self._stream.read(max_bytes) - if data: - return data - else: - raise EndOfStream - - async def aclose(self) -> None: - self._stream.feed_eof() - - -@dataclass(eq=False) -class StreamWriterWrapper(abc.ByteSendStream): - _stream: asyncio.StreamWriter - - async def send(self, item: bytes) -> None: - self._stream.write(item) - await self._stream.drain() - - async def aclose(self) -> None: - self._stream.close() - - -@dataclass(eq=False) 
-class Process(abc.Process): - _process: asyncio.subprocess.Process - _stdin: StreamWriterWrapper | None - _stdout: StreamReaderWrapper | None - _stderr: StreamReaderWrapper | None - - async def aclose(self) -> None: - if self._stdin: - await self._stdin.aclose() - if self._stdout: - await self._stdout.aclose() - if self._stderr: - await self._stderr.aclose() - - await self.wait() - - async def wait(self) -> int: - return await self._process.wait() - - def terminate(self) -> None: - self._process.terminate() - - def kill(self) -> None: - self._process.kill() - - def send_signal(self, signal: int) -> None: - self._process.send_signal(signal) - - @property - def pid(self) -> int: - return self._process.pid - - @property - def returncode(self) -> int | None: - return self._process.returncode - - @property - def stdin(self) -> abc.ByteSendStream | None: - return self._stdin - - @property - def stdout(self) -> abc.ByteReceiveStream | None: - return self._stdout - - @property - def stderr(self) -> abc.ByteReceiveStream | None: - return self._stderr - - -async def open_process( - command: str | bytes | Sequence[str | bytes], - *, - shell: bool, - stdin: int | IO[Any] | None, - stdout: int | IO[Any] | None, - stderr: int | IO[Any] | None, - cwd: str | bytes | PathLike | None = None, - env: Mapping[str, str] | None = None, - start_new_session: bool = False, -) -> Process: - await checkpoint() - if shell: - process = await asyncio.create_subprocess_shell( - cast(Union[str, bytes], command), - stdin=stdin, - stdout=stdout, - stderr=stderr, - cwd=cwd, - env=env, - start_new_session=start_new_session, - ) - else: - process = await asyncio.create_subprocess_exec( - *command, - stdin=stdin, - stdout=stdout, - stderr=stderr, - cwd=cwd, - env=env, - start_new_session=start_new_session, - ) - - stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None - stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None - stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None - return Process(process, stdin_stream, stdout_stream, stderr_stream) - - -def _forcibly_shutdown_process_pool_on_exit( - workers: set[Process], _task: object -) -> None: - """ - Forcibly shuts down worker processes belonging to this event loop.""" - child_watcher: asyncio.AbstractChildWatcher | None - try: - child_watcher = asyncio.get_event_loop_policy().get_child_watcher() - except NotImplementedError: - child_watcher = None - - # Close as much as possible (w/o async/await) to avoid warnings - for process in workers: - if process.returncode is None: - continue - - process._stdin._stream._transport.close() # type: ignore[union-attr] - process._stdout._stream._transport.close() # type: ignore[union-attr] - process._stderr._stream._transport.close() # type: ignore[union-attr] - process.kill() - if child_watcher: - child_watcher.remove_child_handler(process.pid) - - -async def _shutdown_process_pool_on_exit(workers: set[Process]) -> None: - """ - Shuts down worker processes belonging to this event loop. - - NOTE: this only works when the event loop was started using asyncio.run() or anyio.run(). 
- - """ - process: Process - try: - await sleep(math.inf) - except asyncio.CancelledError: - for process in workers: - if process.returncode is None: - process.kill() - - for process in workers: - await process.aclose() - - -def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None: - kwargs: dict[str, Any] = ( - {"name": "AnyIO process pool shutdown task"} if _native_task_names else {} - ) - create_task(_shutdown_process_pool_on_exit(workers), **kwargs) - find_root_task().add_done_callback( - partial(_forcibly_shutdown_process_pool_on_exit, workers) - ) - - -# -# Sockets and networking -# - - -class StreamProtocol(asyncio.Protocol): - read_queue: deque[bytes] - read_event: asyncio.Event - write_event: asyncio.Event - exception: Exception | None = None - - def connection_made(self, transport: asyncio.BaseTransport) -> None: - self.read_queue = deque() - self.read_event = asyncio.Event() - self.write_event = asyncio.Event() - self.write_event.set() - cast(asyncio.Transport, transport).set_write_buffer_limits(0) - - def connection_lost(self, exc: Exception | None) -> None: - if exc: - self.exception = BrokenResourceError() - self.exception.__cause__ = exc - - self.read_event.set() - self.write_event.set() - - def data_received(self, data: bytes) -> None: - self.read_queue.append(data) - self.read_event.set() - - def eof_received(self) -> bool | None: - self.read_event.set() - return True - - def pause_writing(self) -> None: - self.write_event = asyncio.Event() - - def resume_writing(self) -> None: - self.write_event.set() - - -class DatagramProtocol(asyncio.DatagramProtocol): - read_queue: deque[tuple[bytes, IPSockAddrType]] - read_event: asyncio.Event - write_event: asyncio.Event - exception: Exception | None = None - - def connection_made(self, transport: asyncio.BaseTransport) -> None: - self.read_queue = deque(maxlen=100) # arbitrary value - self.read_event = asyncio.Event() - self.write_event = asyncio.Event() - self.write_event.set() - - def connection_lost(self, exc: Exception | None) -> None: - self.read_event.set() - self.write_event.set() - - def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None: - addr = convert_ipv6_sockaddr(addr) - self.read_queue.append((data, addr)) - self.read_event.set() - - def error_received(self, exc: Exception) -> None: - self.exception = exc - - def pause_writing(self) -> None: - self.write_event.clear() - - def resume_writing(self) -> None: - self.write_event.set() - - -class SocketStream(abc.SocketStream): - def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol): - self._transport = transport - self._protocol = protocol - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - self._closed = False - - @property - def _raw_socket(self) -> socket.socket: - return self._transport.get_extra_info("socket") - - async def receive(self, max_bytes: int = 65536) -> bytes: - with self._receive_guard: - await checkpoint() - - if ( - not self._protocol.read_event.is_set() - and not self._transport.is_closing() - ): - self._transport.resume_reading() - await self._protocol.read_event.wait() - self._transport.pause_reading() - - try: - chunk = self._protocol.read_queue.popleft() - except IndexError: - if self._closed: - raise ClosedResourceError from None - elif self._protocol.exception: - raise self._protocol.exception - else: - raise EndOfStream from None - - if len(chunk) > max_bytes: - # Split the oversized chunk - chunk, leftover = chunk[:max_bytes], 
chunk[max_bytes:] - self._protocol.read_queue.appendleft(leftover) - - # If the read queue is empty, clear the flag so that the next call will block until - # data is available - if not self._protocol.read_queue: - self._protocol.read_event.clear() - - return chunk - - async def send(self, item: bytes) -> None: - with self._send_guard: - await checkpoint() - - if self._closed: - raise ClosedResourceError - elif self._protocol.exception is not None: - raise self._protocol.exception - - try: - self._transport.write(item) - except RuntimeError as exc: - if self._transport.is_closing(): - raise BrokenResourceError from exc - else: - raise - - await self._protocol.write_event.wait() - - async def send_eof(self) -> None: - try: - self._transport.write_eof() - except OSError: - pass - - async def aclose(self) -> None: - if not self._transport.is_closing(): - self._closed = True - try: - self._transport.write_eof() - except OSError: - pass - - self._transport.close() - await sleep(0) - self._transport.abort() - - -class UNIXSocketStream(abc.SocketStream): - _receive_future: asyncio.Future | None = None - _send_future: asyncio.Future | None = None - _closing = False - - def __init__(self, raw_socket: socket.socket): - self.__raw_socket = raw_socket - self._loop = get_running_loop() - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - - @property - def _raw_socket(self) -> socket.socket: - return self.__raw_socket - - def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future: - def callback(f: object) -> None: - del self._receive_future - loop.remove_reader(self.__raw_socket) - - f = self._receive_future = asyncio.Future() - self._loop.add_reader(self.__raw_socket, f.set_result, None) - f.add_done_callback(callback) - return f - - def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future: - def callback(f: object) -> None: - del self._send_future - loop.remove_writer(self.__raw_socket) - - f = self._send_future = asyncio.Future() - self._loop.add_writer(self.__raw_socket, f.set_result, None) - f.add_done_callback(callback) - return f - - async def send_eof(self) -> None: - with self._send_guard: - self._raw_socket.shutdown(socket.SHUT_WR) - - async def receive(self, max_bytes: int = 65536) -> bytes: - loop = get_running_loop() - await checkpoint() - with self._receive_guard: - while True: - try: - data = self.__raw_socket.recv(max_bytes) - except BlockingIOError: - await self._wait_until_readable(loop) - except OSError as exc: - if self._closing: - raise ClosedResourceError from None - else: - raise BrokenResourceError from exc - else: - if not data: - raise EndOfStream - - return data - - async def send(self, item: bytes) -> None: - loop = get_running_loop() - await checkpoint() - with self._send_guard: - view = memoryview(item) - while view: - try: - bytes_sent = self.__raw_socket.send(item) - except BlockingIOError: - await self._wait_until_writable(loop) - except OSError as exc: - if self._closing: - raise ClosedResourceError from None - else: - raise BrokenResourceError from exc - else: - view = view[bytes_sent:] - - async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]: - if not isinstance(msglen, int) or msglen < 0: - raise ValueError("msglen must be a non-negative integer") - if not isinstance(maxfds, int) or maxfds < 1: - raise ValueError("maxfds must be a positive integer") - - loop = get_running_loop() - fds = array.array("i") - await checkpoint() - with 
self._receive_guard: - while True: - try: - message, ancdata, flags, addr = self.__raw_socket.recvmsg( - msglen, socket.CMSG_LEN(maxfds * fds.itemsize) - ) - except BlockingIOError: - await self._wait_until_readable(loop) - except OSError as exc: - if self._closing: - raise ClosedResourceError from None - else: - raise BrokenResourceError from exc - else: - if not message and not ancdata: - raise EndOfStream - - break - - for cmsg_level, cmsg_type, cmsg_data in ancdata: - if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: - raise RuntimeError( - f"Received unexpected ancillary data; message = {message!r}, " - f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" - ) - - fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) - - return message, list(fds) - - async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None: - if not message: - raise ValueError("message must not be empty") - if not fds: - raise ValueError("fds must not be empty") - - loop = get_running_loop() - filenos: list[int] = [] - for fd in fds: - if isinstance(fd, int): - filenos.append(fd) - elif isinstance(fd, IOBase): - filenos.append(fd.fileno()) - - fdarray = array.array("i", filenos) - await checkpoint() - with self._send_guard: - while True: - try: - # The ignore can be removed after mypy picks up - # https://github.com/python/typeshed/pull/5545 - self.__raw_socket.sendmsg( - [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)] - ) - break - except BlockingIOError: - await self._wait_until_writable(loop) - except OSError as exc: - if self._closing: - raise ClosedResourceError from None - else: - raise BrokenResourceError from exc - - async def aclose(self) -> None: - if not self._closing: - self._closing = True - if self.__raw_socket.fileno() != -1: - self.__raw_socket.close() - - if self._receive_future: - self._receive_future.set_result(None) - if self._send_future: - self._send_future.set_result(None) - - -class TCPSocketListener(abc.SocketListener): - _accept_scope: CancelScope | None = None - _closed = False - - def __init__(self, raw_socket: socket.socket): - self.__raw_socket = raw_socket - self._loop = cast(asyncio.BaseEventLoop, get_running_loop()) - self._accept_guard = ResourceGuard("accepting connections from") - - @property - def _raw_socket(self) -> socket.socket: - return self.__raw_socket - - async def accept(self) -> abc.SocketStream: - if self._closed: - raise ClosedResourceError - - with self._accept_guard: - await checkpoint() - with CancelScope() as self._accept_scope: - try: - client_sock, _addr = await self._loop.sock_accept(self._raw_socket) - except asyncio.CancelledError: - # Workaround for https://bugs.python.org/issue41317 - try: - self._loop.remove_reader(self._raw_socket) - except (ValueError, NotImplementedError): - pass - - if self._closed: - raise ClosedResourceError from None - - raise - finally: - self._accept_scope = None - - client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - transport, protocol = await self._loop.connect_accepted_socket( - StreamProtocol, client_sock - ) - return SocketStream(transport, protocol) - - async def aclose(self) -> None: - if self._closed: - return - - self._closed = True - if self._accept_scope: - # Workaround for https://bugs.python.org/issue41317 - try: - self._loop.remove_reader(self._raw_socket) - except (ValueError, NotImplementedError): - pass - - self._accept_scope.cancel() - await sleep(0) - - self._raw_socket.close() - - -class 
UNIXSocketListener(abc.SocketListener): - def __init__(self, raw_socket: socket.socket): - self.__raw_socket = raw_socket - self._loop = get_running_loop() - self._accept_guard = ResourceGuard("accepting connections from") - self._closed = False - - async def accept(self) -> abc.SocketStream: - await checkpoint() - with self._accept_guard: - while True: - try: - client_sock, _ = self.__raw_socket.accept() - client_sock.setblocking(False) - return UNIXSocketStream(client_sock) - except BlockingIOError: - f: asyncio.Future = asyncio.Future() - self._loop.add_reader(self.__raw_socket, f.set_result, None) - f.add_done_callback( - lambda _: self._loop.remove_reader(self.__raw_socket) - ) - await f - except OSError as exc: - if self._closed: - raise ClosedResourceError from None - else: - raise BrokenResourceError from exc - - async def aclose(self) -> None: - self._closed = True - self.__raw_socket.close() - - @property - def _raw_socket(self) -> socket.socket: - return self.__raw_socket - - -class UDPSocket(abc.UDPSocket): - def __init__( - self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol - ): - self._transport = transport - self._protocol = protocol - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - self._closed = False - - @property - def _raw_socket(self) -> socket.socket: - return self._transport.get_extra_info("socket") - - async def aclose(self) -> None: - if not self._transport.is_closing(): - self._closed = True - self._transport.close() - - async def receive(self) -> tuple[bytes, IPSockAddrType]: - with self._receive_guard: - await checkpoint() - - # If the buffer is empty, ask for more data - if not self._protocol.read_queue and not self._transport.is_closing(): - self._protocol.read_event.clear() - await self._protocol.read_event.wait() - - try: - return self._protocol.read_queue.popleft() - except IndexError: - if self._closed: - raise ClosedResourceError from None - else: - raise BrokenResourceError from None - - async def send(self, item: UDPPacketType) -> None: - with self._send_guard: - await checkpoint() - await self._protocol.write_event.wait() - if self._closed: - raise ClosedResourceError - elif self._transport.is_closing(): - raise BrokenResourceError - else: - self._transport.sendto(*item) - - -class ConnectedUDPSocket(abc.ConnectedUDPSocket): - def __init__( - self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol - ): - self._transport = transport - self._protocol = protocol - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - self._closed = False - - @property - def _raw_socket(self) -> socket.socket: - return self._transport.get_extra_info("socket") - - async def aclose(self) -> None: - if not self._transport.is_closing(): - self._closed = True - self._transport.close() - - async def receive(self) -> bytes: - with self._receive_guard: - await checkpoint() - - # If the buffer is empty, ask for more data - if not self._protocol.read_queue and not self._transport.is_closing(): - self._protocol.read_event.clear() - await self._protocol.read_event.wait() - - try: - packet = self._protocol.read_queue.popleft() - except IndexError: - if self._closed: - raise ClosedResourceError from None - else: - raise BrokenResourceError from None - - return packet[0] - - async def send(self, item: bytes) -> None: - with self._send_guard: - await checkpoint() - await self._protocol.write_event.wait() - if self._closed: - raise ClosedResourceError - 
elif self._transport.is_closing(): - raise BrokenResourceError - else: - self._transport.sendto(item) - - -async def connect_tcp( - host: str, port: int, local_addr: tuple[str, int] | None = None -) -> SocketStream: - transport, protocol = cast( - Tuple[asyncio.Transport, StreamProtocol], - await get_running_loop().create_connection( - StreamProtocol, host, port, local_addr=local_addr - ), - ) - transport.pause_reading() - return SocketStream(transport, protocol) - - -async def connect_unix(path: str) -> UNIXSocketStream: - await checkpoint() - loop = get_running_loop() - raw_socket = socket.socket(socket.AF_UNIX) - raw_socket.setblocking(False) - while True: - try: - raw_socket.connect(path) - except BlockingIOError: - f: asyncio.Future = asyncio.Future() - loop.add_writer(raw_socket, f.set_result, None) - f.add_done_callback(lambda _: loop.remove_writer(raw_socket)) - await f - except BaseException: - raw_socket.close() - raise - else: - return UNIXSocketStream(raw_socket) - - -async def create_udp_socket( - family: socket.AddressFamily, - local_address: IPSockAddrType | None, - remote_address: IPSockAddrType | None, - reuse_port: bool, -) -> UDPSocket | ConnectedUDPSocket: - result = await get_running_loop().create_datagram_endpoint( - DatagramProtocol, - local_addr=local_address, - remote_addr=remote_address, - family=family, - reuse_port=reuse_port, - ) - transport = result[0] - protocol = result[1] - if protocol.exception: - transport.close() - raise protocol.exception - - if not remote_address: - return UDPSocket(transport, protocol) - else: - return ConnectedUDPSocket(transport, protocol) - - -async def getaddrinfo( - host: bytes | str, - port: str | int | None, - *, - family: int | AddressFamily = 0, - type: int | SocketKind = 0, - proto: int = 0, - flags: int = 0, -) -> GetAddrInfoReturnType: - # https://github.com/python/typeshed/pull/4304 - result = await get_running_loop().getaddrinfo( - host, port, family=family, type=type, proto=proto, flags=flags - ) - return cast(GetAddrInfoReturnType, result) - - -async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> tuple[str, str]: - return await get_running_loop().getnameinfo(sockaddr, flags) - - -_read_events: RunVar[dict[Any, asyncio.Event]] = RunVar("read_events") -_write_events: RunVar[dict[Any, asyncio.Event]] = RunVar("write_events") - - -async def wait_socket_readable(sock: socket.socket) -> None: - await checkpoint() - try: - read_events = _read_events.get() - except LookupError: - read_events = {} - _read_events.set(read_events) - - if read_events.get(sock): - raise BusyResourceError("reading from") from None - - loop = get_running_loop() - event = read_events[sock] = asyncio.Event() - loop.add_reader(sock, event.set) - try: - await event.wait() - finally: - if read_events.pop(sock, None) is not None: - loop.remove_reader(sock) - readable = True - else: - readable = False - - if not readable: - raise ClosedResourceError - - -async def wait_socket_writable(sock: socket.socket) -> None: - await checkpoint() - try: - write_events = _write_events.get() - except LookupError: - write_events = {} - _write_events.set(write_events) - - if write_events.get(sock): - raise BusyResourceError("writing to") from None - - loop = get_running_loop() - event = write_events[sock] = asyncio.Event() - loop.add_writer(sock.fileno(), event.set) - try: - await event.wait() - finally: - if write_events.pop(sock, None) is not None: - loop.remove_writer(sock) - writable = True - else: - writable = False - - if not writable: - raise 
ClosedResourceError - - -# -# Synchronization -# - - -class Event(BaseEvent): - def __new__(cls) -> Event: - return object.__new__(cls) - - def __init__(self) -> None: - self._event = asyncio.Event() - - def set(self) -> DeprecatedAwaitable: - self._event.set() - return DeprecatedAwaitable(self.set) - - def is_set(self) -> bool: - return self._event.is_set() - - async def wait(self) -> None: - if await self._event.wait(): - await checkpoint() - - def statistics(self) -> EventStatistics: - return EventStatistics(len(self._event._waiters)) # type: ignore[attr-defined] - - -class CapacityLimiter(BaseCapacityLimiter): - _total_tokens: float = 0 - - def __new__(cls, total_tokens: float) -> CapacityLimiter: - return object.__new__(cls) - - def __init__(self, total_tokens: float): - self._borrowers: set[Any] = set() - self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict() - self.total_tokens = total_tokens - - async def __aenter__(self) -> None: - await self.acquire() - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - self.release() - - @property - def total_tokens(self) -> float: - return self._total_tokens - - @total_tokens.setter - def total_tokens(self, value: float) -> None: - if not isinstance(value, int) and not math.isinf(value): - raise TypeError("total_tokens must be an int or math.inf") - if value < 1: - raise ValueError("total_tokens must be >= 1") - - old_value = self._total_tokens - self._total_tokens = value - events = [] - for event in self._wait_queue.values(): - if value <= old_value: - break - - if not event.is_set(): - events.append(event) - old_value += 1 - - for event in events: - event.set() - - @property - def borrowed_tokens(self) -> int: - return len(self._borrowers) - - @property - def available_tokens(self) -> float: - return self._total_tokens - len(self._borrowers) - - def acquire_nowait(self) -> DeprecatedAwaitable: - self.acquire_on_behalf_of_nowait(current_task()) - return DeprecatedAwaitable(self.acquire_nowait) - - def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: - if borrower in self._borrowers: - raise RuntimeError( - "this borrower is already holding one of this CapacityLimiter's " - "tokens" - ) - - if self._wait_queue or len(self._borrowers) >= self._total_tokens: - raise WouldBlock - - self._borrowers.add(borrower) - return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) - - async def acquire(self) -> None: - return await self.acquire_on_behalf_of(current_task()) - - async def acquire_on_behalf_of(self, borrower: object) -> None: - await checkpoint_if_cancelled() - try: - self.acquire_on_behalf_of_nowait(borrower) - except WouldBlock: - event = asyncio.Event() - self._wait_queue[borrower] = event - try: - await event.wait() - except BaseException: - self._wait_queue.pop(borrower, None) - raise - - self._borrowers.add(borrower) - else: - try: - await cancel_shielded_checkpoint() - except BaseException: - self.release() - raise - - def release(self) -> None: - self.release_on_behalf_of(current_task()) - - def release_on_behalf_of(self, borrower: object) -> None: - try: - self._borrowers.remove(borrower) - except KeyError: - raise RuntimeError( - "this borrower isn't holding any of this CapacityLimiter's " "tokens" - ) from None - - # Notify the next task in line if this limiter has free capacity now - if self._wait_queue and len(self._borrowers) < self._total_tokens: - event = 
self._wait_queue.popitem(last=False)[1] - event.set() - - def statistics(self) -> CapacityLimiterStatistics: - return CapacityLimiterStatistics( - self.borrowed_tokens, - self.total_tokens, - tuple(self._borrowers), - len(self._wait_queue), - ) - - -_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter") - - -def current_default_thread_limiter() -> CapacityLimiter: - try: - return _default_thread_limiter.get() - except LookupError: - limiter = CapacityLimiter(40) - _default_thread_limiter.set(limiter) - return limiter - - -# -# Operating system signals -# - - -class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]): - def __init__(self, signals: tuple[int, ...]): - self._signals = signals - self._loop = get_running_loop() - self._signal_queue: deque[int] = deque() - self._future: asyncio.Future = asyncio.Future() - self._handled_signals: set[int] = set() - - def _deliver(self, signum: int) -> None: - self._signal_queue.append(signum) - if not self._future.done(): - self._future.set_result(None) - - def __enter__(self) -> _SignalReceiver: - for sig in set(self._signals): - self._loop.add_signal_handler(sig, self._deliver, sig) - self._handled_signals.add(sig) - - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - for sig in self._handled_signals: - self._loop.remove_signal_handler(sig) - return None - - def __aiter__(self) -> _SignalReceiver: - return self - - async def __anext__(self) -> int: - await checkpoint() - if not self._signal_queue: - self._future = asyncio.Future() - await self._future - - return self._signal_queue.popleft() - - -def open_signal_receiver(*signals: int) -> _SignalReceiver: - return _SignalReceiver(signals) - - -# -# Testing and debugging -# - - -def _create_task_info(task: asyncio.Task) -> TaskInfo: - task_state = _task_states.get(task) - if task_state is None: - name = task.get_name() if _native_task_names else None - parent_id = None - else: - name = task_state.name - parent_id = task_state.parent_id - - return TaskInfo(id(task), parent_id, name, get_coro(task)) - - -def get_current_task() -> TaskInfo: - return _create_task_info(current_task()) # type: ignore[arg-type] - - -def get_running_tasks() -> list[TaskInfo]: - return [_create_task_info(task) for task in all_tasks() if not task.done()] - - -async def wait_all_tasks_blocked() -> None: - await checkpoint() - this_task = current_task() - while True: - for task in all_tasks(): - if task is this_task: - continue - - if task._fut_waiter is None or task._fut_waiter.done(): # type: ignore[attr-defined] - await sleep(0.1) - break - else: - return - - -class TestRunner(abc.TestRunner): - def __init__( - self, - debug: bool = False, - use_uvloop: bool = False, - policy: asyncio.AbstractEventLoopPolicy | None = None, - ): - self._exceptions: list[BaseException] = [] - _maybe_set_event_loop_policy(policy, use_uvloop) - self._loop = asyncio.new_event_loop() - self._loop.set_debug(debug) - self._loop.set_exception_handler(self._exception_handler) - asyncio.set_event_loop(self._loop) - - def _cancel_all_tasks(self) -> None: - to_cancel = all_tasks(self._loop) - if not to_cancel: - return - - for task in to_cancel: - task.cancel() - - self._loop.run_until_complete( - asyncio.gather(*to_cancel, return_exceptions=True) - ) - - for task in to_cancel: - if task.cancelled(): - continue - if task.exception() is not None: - raise cast(BaseException, task.exception()) - - 
def _exception_handler( - self, loop: asyncio.AbstractEventLoop, context: dict[str, Any] - ) -> None: - if isinstance(context.get("exception"), Exception): - self._exceptions.append(context["exception"]) - else: - loop.default_exception_handler(context) - - def _raise_async_exceptions(self) -> None: - # Re-raise any exceptions raised in asynchronous callbacks - if self._exceptions: - exceptions, self._exceptions = self._exceptions, [] - if len(exceptions) == 1: - raise exceptions[0] - elif exceptions: - raise ExceptionGroup(exceptions) - - def close(self) -> None: - try: - self._cancel_all_tasks() - self._loop.run_until_complete(self._loop.shutdown_asyncgens()) - finally: - asyncio.set_event_loop(None) - self._loop.close() - - def run_asyncgen_fixture( - self, - fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], - kwargs: dict[str, Any], - ) -> Iterable[T_Retval]: - async def fixture_runner() -> None: - agen = fixture_func(**kwargs) - try: - retval = await agen.asend(None) - self._raise_async_exceptions() - except BaseException as exc: - f.set_exception(exc) - return - else: - f.set_result(retval) - - await event.wait() - try: - await agen.asend(None) - except StopAsyncIteration: - pass - else: - await agen.aclose() - raise RuntimeError("Async generator fixture did not stop") - - f = self._loop.create_future() - event = asyncio.Event() - fixture_task = self._loop.create_task(fixture_runner()) - self._loop.run_until_complete(f) - yield f.result() - event.set() - self._loop.run_until_complete(fixture_task) - self._raise_async_exceptions() - - def run_fixture( - self, - fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], - kwargs: dict[str, Any], - ) -> T_Retval: - retval = self._loop.run_until_complete(fixture_func(**kwargs)) - self._raise_async_exceptions() - return retval - - def run_test( - self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] - ) -> None: - try: - self._loop.run_until_complete(test_func(**kwargs)) - except Exception as exc: - self._exceptions.append(exc) - - self._raise_async_exceptions() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/backoff/types.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/backoff/types.py deleted file mode 100644 index 25f20a4c43f79a62278b00081c5d7da5dfc12e3e..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/backoff/types.py +++ /dev/null @@ -1,6 +0,0 @@ -# coding:utf-8 -from ._typing import Details - -__all__ = [ - 'Details' -] diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/dateutil/tz/_factories.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/dateutil/tz/_factories.py deleted file mode 100644 index f8a65891a023ebf9eb0c24d391ba67541b7133f1..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/dateutil/tz/_factories.py +++ /dev/null @@ -1,80 +0,0 @@ -from datetime import timedelta -import weakref -from collections import OrderedDict - -from six.moves import _thread - - -class _TzSingleton(type): - def __init__(cls, *args, **kwargs): - cls.__instance = None - super(_TzSingleton, cls).__init__(*args, **kwargs) - - def __call__(cls): - if cls.__instance is None: - cls.__instance = super(_TzSingleton, cls).__call__() - return cls.__instance - - -class _TzFactory(type): - def instance(cls, *args, **kwargs): - """Alternate constructor that returns a fresh instance""" - return type.__call__(cls, *args, **kwargs) - - -class 
_TzOffsetFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - cls._cache_lock = _thread.allocate_lock() - - def __call__(cls, name, offset): - if isinstance(offset, timedelta): - key = (name, offset.total_seconds()) - else: - key = (name, offset) - - instance = cls.__instances.get(key, None) - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(name, offset)) - - # This lock may not be necessary in Python 3. See GH issue #901 - with cls._cache_lock: - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - - -class _TzStrFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - cls.__cache_lock = _thread.allocate_lock() - - def __call__(cls, s, posix_offset=False): - key = (s, posix_offset) - instance = cls.__instances.get(key, None) - - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(s, posix_offset)) - - # This lock may not be necessary in Python 3. See GH issue #901 - with cls.__cache_lock: - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py deleted file mode 100644 index 04604841182c618d36ca9ba69c7da851ac169a43..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py +++ /dev/null @@ -1,758 +0,0 @@ -import json -import os -import sys -import traceback - -from _pydev_bundle import pydev_log -from _pydev_bundle.pydev_log import exception as pydev_log_exception -from _pydevd_bundle import pydevd_traceproperty, pydevd_dont_trace, pydevd_utils -from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info -from _pydevd_bundle.pydevd_breakpoints import get_exception_class -from _pydevd_bundle.pydevd_comm import ( - InternalEvaluateConsoleExpression, InternalConsoleGetCompletions, InternalRunCustomOperation, - internal_get_next_statement_targets, internal_get_smart_step_into_variants) -from _pydevd_bundle.pydevd_constants import NEXT_VALUE_SEPARATOR, IS_WINDOWS, NULL -from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXEC_EXPRESSION, CMD_AUTHENTICATE -from _pydevd_bundle.pydevd_api import PyDevdAPI -from io import StringIO -from _pydevd_bundle.pydevd_net_command import NetCommand -from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id -import pydevd_file_utils - - -class _PyDevCommandProcessor(object): - - def __init__(self): - self.api = PyDevdAPI() - - def process_net_command(self, py_db, cmd_id, seq, text): - '''Processes a command received from the Java side - - @param cmd_id: the id of the command - @param seq: the sequence of the command - @param text: the text received in the 
command - ''' - - # We can only proceed if the client is already authenticated or if it's the - # command to authenticate. - if cmd_id != CMD_AUTHENTICATE and not py_db.authentication.is_authenticated(): - cmd = py_db.cmd_factory.make_error_message(seq, 'Client not authenticated.') - py_db.writer.add_command(cmd) - return - - meaning = ID_TO_MEANING[str(cmd_id)] - - # print('Handling %s (%s)' % (meaning, text)) - - method_name = meaning.lower() - - on_command = getattr(self, method_name.lower(), None) - if on_command is None: - # I have no idea what this is all about - cmd = py_db.cmd_factory.make_error_message(seq, "unexpected command " + str(cmd_id)) - py_db.writer.add_command(cmd) - return - - lock = py_db._main_lock - if method_name == 'cmd_thread_dump_to_stderr': - # We can skip the main debugger locks for cases where we know it's not needed. - lock = NULL - - with lock: - try: - cmd = on_command(py_db, cmd_id, seq, text) - if cmd is not None: - py_db.writer.add_command(cmd) - except: - if traceback is not None and sys is not None and pydev_log_exception is not None: - pydev_log_exception() - - stream = StringIO() - traceback.print_exc(file=stream) - cmd = py_db.cmd_factory.make_error_message( - seq, - "Unexpected exception in process_net_command.\nInitial params: %s. Exception: %s" % ( - ((cmd_id, seq, text), stream.getvalue()) - ) - ) - if cmd is not None: - py_db.writer.add_command(cmd) - - def cmd_authenticate(self, py_db, cmd_id, seq, text): - access_token = text - py_db.authentication.login(access_token) - if py_db.authentication.is_authenticated(): - return NetCommand(cmd_id, seq, py_db.authentication.client_access_token) - - return py_db.cmd_factory.make_error_message(seq, 'Client not authenticated.') - - def cmd_run(self, py_db, cmd_id, seq, text): - return self.api.run(py_db) - - def cmd_list_threads(self, py_db, cmd_id, seq, text): - return self.api.list_threads(py_db, seq) - - def cmd_get_completions(self, py_db, cmd_id, seq, text): - # we received some command to get a variable - # the text is: thread_id\tframe_id\tactivation token - thread_id, frame_id, _scope, act_tok = text.split('\t', 3) - - return self.api.request_completions(py_db, seq, thread_id, frame_id, act_tok) - - def cmd_get_thread_stack(self, py_db, cmd_id, seq, text): - # Receives a thread_id and a given timeout, which is the time we should - # wait to the provide the stack if a given thread is still not suspended. - if '\t' in text: - thread_id, timeout = text.split('\t') - timeout = float(timeout) - else: - thread_id = text - timeout = .5 # Default timeout is .5 seconds - - return self.api.request_stack(py_db, seq, thread_id, fmt={}, timeout=timeout) - - def cmd_set_protocol(self, py_db, cmd_id, seq, text): - return self.api.set_protocol(py_db, seq, text.strip()) - - def cmd_thread_suspend(self, py_db, cmd_id, seq, text): - return self.api.request_suspend_thread(py_db, text.strip()) - - def cmd_version(self, py_db, cmd_id, seq, text): - # Default based on server process (although ideally the IDE should - # provide it). - if IS_WINDOWS: - ide_os = 'WINDOWS' - else: - ide_os = 'UNIX' - - # Breakpoints can be grouped by 'LINE' or by 'ID'. 
- breakpoints_by = 'LINE' - - splitted = text.split('\t') - if len(splitted) == 1: - _local_version = splitted - - elif len(splitted) == 2: - _local_version, ide_os = splitted - - elif len(splitted) == 3: - _local_version, ide_os, breakpoints_by = splitted - - version_msg = self.api.set_ide_os_and_breakpoints_by(py_db, seq, ide_os, breakpoints_by) - - # Enable thread notifications after the version command is completed. - self.api.set_enable_thread_notifications(py_db, True) - - return version_msg - - def cmd_thread_run(self, py_db, cmd_id, seq, text): - return self.api.request_resume_thread(text.strip()) - - def _cmd_step(self, py_db, cmd_id, seq, text): - return self.api.request_step(py_db, text.strip(), cmd_id) - - cmd_step_into = _cmd_step - cmd_step_into_my_code = _cmd_step - cmd_step_over = _cmd_step - cmd_step_over_my_code = _cmd_step - cmd_step_return = _cmd_step - cmd_step_return_my_code = _cmd_step - - def _cmd_set_next(self, py_db, cmd_id, seq, text): - thread_id, line, func_name = text.split('\t', 2) - return self.api.request_set_next(py_db, seq, thread_id, cmd_id, None, line, func_name) - - cmd_run_to_line = _cmd_set_next - cmd_set_next_statement = _cmd_set_next - - def cmd_smart_step_into(self, py_db, cmd_id, seq, text): - thread_id, line_or_bytecode_offset, func_name = text.split('\t', 2) - if line_or_bytecode_offset.startswith('offset='): - # In this case we request the smart step into to stop given the parent frame - # and the location of the parent frame bytecode offset and not just the func_name - # (this implies that `CMD_GET_SMART_STEP_INTO_VARIANTS` was previously used - # to know what are the valid stop points). - - temp = line_or_bytecode_offset[len('offset='):] - if ';' in temp: - offset, child_offset = temp.split(';') - offset = int(offset) - child_offset = int(child_offset) - else: - child_offset = -1 - offset = int(temp) - return self.api.request_smart_step_into(py_db, seq, thread_id, offset, child_offset) - else: - # If the offset wasn't passed, just use the line/func_name to do the stop. 
- return self.api.request_smart_step_into_by_func_name(py_db, seq, thread_id, line_or_bytecode_offset, func_name) - - def cmd_reload_code(self, py_db, cmd_id, seq, text): - text = text.strip() - if '\t' not in text: - module_name = text.strip() - filename = None - else: - module_name, filename = text.split('\t', 1) - self.api.request_reload_code(py_db, seq, module_name, filename) - - def cmd_change_variable(self, py_db, cmd_id, seq, text): - # the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change - thread_id, frame_id, scope, attr_and_value = text.split('\t', 3) - - tab_index = attr_and_value.rindex('\t') - attr = attr_and_value[0:tab_index].replace('\t', '.') - value = attr_and_value[tab_index + 1:] - self.api.request_change_variable(py_db, seq, thread_id, frame_id, scope, attr, value) - - def cmd_get_variable(self, py_db, cmd_id, seq, text): - # we received some command to get a variable - # the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes* - thread_id, frame_id, scopeattrs = text.split('\t', 2) - - if scopeattrs.find('\t') != -1: # there are attributes beyond scope - scope, attrs = scopeattrs.split('\t', 1) - else: - scope, attrs = (scopeattrs, None) - - self.api.request_get_variable(py_db, seq, thread_id, frame_id, scope, attrs) - - def cmd_get_array(self, py_db, cmd_id, seq, text): - # Note: untested and unused in pydev - # we received some command to get an array variable - # the text is: thread_id\tframe_id\tFRAME|GLOBAL\tname\ttemp\troffs\tcoffs\trows\tcols\tformat - roffset, coffset, rows, cols, format, thread_id, frame_id, scopeattrs = text.split('\t', 7) - - if scopeattrs.find('\t') != -1: # there are attributes beyond scope - scope, attrs = scopeattrs.split('\t', 1) - else: - scope, attrs = (scopeattrs, None) - - self.api.request_get_array(py_db, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs) - - def cmd_show_return_values(self, py_db, cmd_id, seq, text): - show_return_values = text.split('\t')[1] - self.api.set_show_return_values(py_db, int(show_return_values) == 1) - - def cmd_load_full_value(self, py_db, cmd_id, seq, text): - # Note: untested and unused in pydev - thread_id, frame_id, scopeattrs = text.split('\t', 2) - vars = scopeattrs.split(NEXT_VALUE_SEPARATOR) - - self.api.request_load_full_value(py_db, seq, thread_id, frame_id, vars) - - def cmd_get_description(self, py_db, cmd_id, seq, text): - # Note: untested and unused in pydev - thread_id, frame_id, expression = text.split('\t', 2) - self.api.request_get_description(py_db, seq, thread_id, frame_id, expression) - - def cmd_get_frame(self, py_db, cmd_id, seq, text): - thread_id, frame_id, scope = text.split('\t', 2) - self.api.request_get_frame(py_db, seq, thread_id, frame_id) - - def cmd_set_break(self, py_db, cmd_id, seq, text): - # func name: 'None': match anything. Empty: match global, specified: only method context. - # command to add some breakpoint. - # text is filename\tline. Add to breakpoints dictionary - suspend_policy = u"NONE" # Can be 'NONE' or 'ALL' - is_logpoint = False - hit_condition = None - if py_db._set_breakpoints_with_id: - try: - try: - breakpoint_id, btype, filename, line, func_name, condition, expression, hit_condition, is_logpoint, suspend_policy = text.split(u'\t', 9) - except ValueError: # not enough values to unpack - # No suspend_policy passed (use default). 
- breakpoint_id, btype, filename, line, func_name, condition, expression, hit_condition, is_logpoint = text.split(u'\t', 8) - is_logpoint = is_logpoint == u'True' - except ValueError: # not enough values to unpack - breakpoint_id, btype, filename, line, func_name, condition, expression = text.split(u'\t', 6) - - breakpoint_id = int(breakpoint_id) - line = int(line) - - # We must restore new lines and tabs as done in - # AbstractDebugTarget.breakpointAdded - condition = condition.replace(u"@_@NEW_LINE_CHAR@_@", u'\n').\ - replace(u"@_@TAB_CHAR@_@", u'\t').strip() - - expression = expression.replace(u"@_@NEW_LINE_CHAR@_@", u'\n').\ - replace(u"@_@TAB_CHAR@_@", u'\t').strip() - else: - # Note: this else should be removed after PyCharm migrates to setting - # breakpoints by id (and ideally also provides func_name). - btype, filename, line, func_name, suspend_policy, condition, expression = text.split(u'\t', 6) - # If we don't have an id given for each breakpoint, consider - # the id to be the line. - breakpoint_id = line = int(line) - - condition = condition.replace(u"@_@NEW_LINE_CHAR@_@", u'\n'). \ - replace(u"@_@TAB_CHAR@_@", u'\t').strip() - - expression = expression.replace(u"@_@NEW_LINE_CHAR@_@", u'\n'). \ - replace(u"@_@TAB_CHAR@_@", u'\t').strip() - - if condition is not None and (len(condition) <= 0 or condition == u"None"): - condition = None - - if expression is not None and (len(expression) <= 0 or expression == u"None"): - expression = None - - if hit_condition is not None and (len(hit_condition) <= 0 or hit_condition == u"None"): - hit_condition = None - - def on_changed_breakpoint_state(breakpoint_id, add_breakpoint_result): - error_code = add_breakpoint_result.error_code - - translated_line = add_breakpoint_result.translated_line - translated_filename = add_breakpoint_result.translated_filename - msg = '' - if error_code: - - if error_code == self.api.ADD_BREAKPOINT_FILE_NOT_FOUND: - msg = 'pydev debugger: Trying to add breakpoint to file that does not exist: %s (will have no effect).\n' % (translated_filename,) - - elif error_code == self.api.ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS: - msg = 'pydev debugger: Trying to add breakpoint to file that is excluded by filters: %s (will have no effect).\n' % (translated_filename,) - - elif error_code == self.api.ADD_BREAKPOINT_LAZY_VALIDATION: - msg = '' # Ignore this here (if/when loaded, it'll call on_changed_breakpoint_state again accordingly). - - elif error_code == self.api.ADD_BREAKPOINT_INVALID_LINE: - msg = 'pydev debugger: Trying to add breakpoint to line (%s) that is not valid in: %s.\n' % (translated_line, translated_filename,) - - else: - # Shouldn't get here. 
- msg = 'pydev debugger: Breakpoint not validated (reason unknown -- please report as error): %s (%s).\n' % (translated_filename, translated_line) - - else: - if add_breakpoint_result.original_line != translated_line: - msg = 'pydev debugger (info): Breakpoint in line: %s moved to line: %s (in %s).\n' % (add_breakpoint_result.original_line, translated_line, translated_filename) - - if msg: - py_db.writer.add_command(py_db.cmd_factory.make_warning_message(msg)) - - result = self.api.add_breakpoint( - py_db, self.api.filename_to_str(filename), btype, breakpoint_id, line, condition, func_name, - expression, suspend_policy, hit_condition, is_logpoint, on_changed_breakpoint_state=on_changed_breakpoint_state) - - on_changed_breakpoint_state(breakpoint_id, result) - - def cmd_remove_break(self, py_db, cmd_id, seq, text): - # command to remove some breakpoint - # text is type\file\tid. Remove from breakpoints dictionary - breakpoint_type, filename, breakpoint_id = text.split('\t', 2) - - filename = self.api.filename_to_str(filename) - - try: - breakpoint_id = int(breakpoint_id) - except ValueError: - pydev_log.critical('Error removing breakpoint. Expected breakpoint_id to be an int. Found: %s', breakpoint_id) - - else: - self.api.remove_breakpoint(py_db, filename, breakpoint_type, breakpoint_id) - - def _cmd_exec_or_evaluate_expression(self, py_db, cmd_id, seq, text): - # command to evaluate the given expression - # text is: thread\tstackframe\tLOCAL\texpression - attr_to_set_result = "" - try: - thread_id, frame_id, scope, expression, trim, attr_to_set_result = text.split('\t', 5) - except ValueError: - thread_id, frame_id, scope, expression, trim = text.split('\t', 4) - is_exec = cmd_id == CMD_EXEC_EXPRESSION - trim_if_too_big = int(trim) == 1 - - self.api.request_exec_or_evaluate( - py_db, seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result) - - cmd_evaluate_expression = _cmd_exec_or_evaluate_expression - cmd_exec_expression = _cmd_exec_or_evaluate_expression - - def cmd_console_exec(self, py_db, cmd_id, seq, text): - # command to exec expression in console, in case expression is only partially valid 'False' is returned - # text is: thread\tstackframe\tLOCAL\texpression - - thread_id, frame_id, scope, expression = text.split('\t', 3) - self.api.request_console_exec(py_db, seq, thread_id, frame_id, expression) - - def cmd_set_path_mapping_json(self, py_db, cmd_id, seq, text): - ''' - :param text: - Json text. 
Something as: - - { - "pathMappings": [ - { - "localRoot": "c:/temp", - "remoteRoot": "/usr/temp" - } - ], - "debug": true, - "force": false - } - ''' - as_json = json.loads(text) - force = as_json.get('force', False) - - path_mappings = [] - for pathMapping in as_json.get('pathMappings', []): - localRoot = pathMapping.get('localRoot', '') - remoteRoot = pathMapping.get('remoteRoot', '') - if (localRoot != '') and (remoteRoot != ''): - path_mappings.append((localRoot, remoteRoot)) - - if bool(path_mappings) or force: - pydevd_file_utils.setup_client_server_paths(path_mappings) - - debug = as_json.get('debug', False) - if debug or force: - pydevd_file_utils.DEBUG_CLIENT_SERVER_TRANSLATION = debug - - def cmd_set_py_exception_json(self, py_db, cmd_id, seq, text): - # This API is optional and works 'in bulk' -- it's possible - # to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK - # which allows setting caught/uncaught per exception, although global settings such as: - # - skip_on_exceptions_thrown_in_same_context - # - ignore_exceptions_thrown_in_lines_with_ignore_exception - # must still be set through this API (before anything else as this clears all existing - # exception breakpoints). - try: - py_db.break_on_uncaught_exceptions = {} - py_db.break_on_caught_exceptions = {} - py_db.break_on_user_uncaught_exceptions = {} - - as_json = json.loads(text) - break_on_uncaught = as_json.get('break_on_uncaught', False) - break_on_caught = as_json.get('break_on_caught', False) - break_on_user_caught = as_json.get('break_on_user_caught', False) - py_db.skip_on_exceptions_thrown_in_same_context = as_json.get('skip_on_exceptions_thrown_in_same_context', False) - py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = as_json.get('ignore_exceptions_thrown_in_lines_with_ignore_exception', False) - ignore_libraries = as_json.get('ignore_libraries', False) - exception_types = as_json.get('exception_types', []) - - for exception_type in exception_types: - if not exception_type: - continue - - py_db.add_break_on_exception( - exception_type, - condition=None, - expression=None, - notify_on_handled_exceptions=break_on_caught, - notify_on_unhandled_exceptions=break_on_uncaught, - notify_on_user_unhandled_exceptions=break_on_user_caught, - notify_on_first_raise_only=True, - ignore_libraries=ignore_libraries, - ) - - py_db.on_breakpoints_changed() - except: - pydev_log.exception("Error when setting exception list. Received: %s", text) - - def cmd_set_py_exception(self, py_db, cmd_id, seq, text): - # DEPRECATED. Use cmd_set_py_exception_json instead. 
- try: - splitted = text.split(';') - py_db.break_on_uncaught_exceptions = {} - py_db.break_on_caught_exceptions = {} - py_db.break_on_user_uncaught_exceptions = {} - if len(splitted) >= 5: - if splitted[0] == 'true': - break_on_uncaught = True - else: - break_on_uncaught = False - - if splitted[1] == 'true': - break_on_caught = True - else: - break_on_caught = False - - if splitted[2] == 'true': - py_db.skip_on_exceptions_thrown_in_same_context = True - else: - py_db.skip_on_exceptions_thrown_in_same_context = False - - if splitted[3] == 'true': - py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = True - else: - py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = False - - if splitted[4] == 'true': - ignore_libraries = True - else: - ignore_libraries = False - - for exception_type in splitted[5:]: - exception_type = exception_type.strip() - if not exception_type: - continue - - py_db.add_break_on_exception( - exception_type, - condition=None, - expression=None, - notify_on_handled_exceptions=break_on_caught, - notify_on_unhandled_exceptions=break_on_uncaught, - notify_on_user_unhandled_exceptions=False, # TODO (not currently supported in this API). - notify_on_first_raise_only=True, - ignore_libraries=ignore_libraries, - ) - else: - pydev_log.exception("Expected to have at least 5 ';' separated items. Received: %s", text) - - except: - pydev_log.exception("Error when setting exception list. Received: %s", text) - - def _load_source(self, py_db, cmd_id, seq, text): - filename = text - filename = self.api.filename_to_str(filename) - self.api.request_load_source(py_db, seq, filename) - - cmd_load_source = _load_source - cmd_get_file_contents = _load_source - - def cmd_load_source_from_frame_id(self, py_db, cmd_id, seq, text): - frame_id = text - self.api.request_load_source_from_frame_id(py_db, seq, frame_id) - - def cmd_set_property_trace(self, py_db, cmd_id, seq, text): - # Command which receives whether to trace property getter/setter/deleter - # text is feature_state(true/false);disable_getter/disable_setter/disable_deleter - if text: - splitted = text.split(';') - if len(splitted) >= 3: - if not py_db.disable_property_trace and splitted[0] == 'true': - # Replacing property by custom property only when the debugger starts - pydevd_traceproperty.replace_builtin_property() - py_db.disable_property_trace = True - # Enable/Disable tracing of the property getter - if splitted[1] == 'true': - py_db.disable_property_getter_trace = True - else: - py_db.disable_property_getter_trace = False - # Enable/Disable tracing of the property setter - if splitted[2] == 'true': - py_db.disable_property_setter_trace = True - else: - py_db.disable_property_setter_trace = False - # Enable/Disable tracing of the property deleter - if splitted[3] == 'true': - py_db.disable_property_deleter_trace = True - else: - py_db.disable_property_deleter_trace = False - - def cmd_add_exception_break(self, py_db, cmd_id, seq, text): - # Note that this message has some idiosyncrasies... - # - # notify_on_handled_exceptions can be 0, 1 or 2 - # 0 means we should not stop on handled exceptions. - # 1 means we should stop on handled exceptions showing it on all frames where the exception passes. - # 2 means we should stop on handled exceptions but we should only notify about it once. 
- # - # To ignore_libraries properly, besides setting ignore_libraries to 1, the IDE_PROJECT_ROOTS environment - # variable must be set (so, we'll ignore anything not below IDE_PROJECT_ROOTS) -- this is not ideal as - # the environment variable may not be properly set if it didn't start from the debugger (we should - # create a custom message for that). - # - # There are 2 global settings which can only be set in CMD_SET_PY_EXCEPTION. Namely: - # - # py_db.skip_on_exceptions_thrown_in_same_context - # - If True, we should only show the exception in a caller, not where it was first raised. - # - # py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception - # - If True exceptions thrown in lines with '@IgnoreException' will not be shown. - - condition = "" - expression = "" - if text.find('\t') != -1: - try: - exception, condition, expression, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text.split('\t', 5) - except: - exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text.split('\t', 3) - else: - exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text, 0, 0, 0 - - condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').replace("@_@TAB_CHAR@_@", '\t').strip() - - if condition is not None and (len(condition) == 0 or condition == "None"): - condition = None - - expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n').replace("@_@TAB_CHAR@_@", '\t').strip() - - if expression is not None and (len(expression) == 0 or expression == "None"): - expression = None - - if exception.find('-') != -1: - breakpoint_type, exception = exception.split('-') - else: - breakpoint_type = 'python' - - if breakpoint_type == 'python': - self.api.add_python_exception_breakpoint( - py_db, exception, condition, expression, - notify_on_handled_exceptions=int(notify_on_handled_exceptions) > 0, - notify_on_unhandled_exceptions=int(notify_on_unhandled_exceptions) == 1, - notify_on_user_unhandled_exceptions=0, # TODO (not currently supported in this API). 
- notify_on_first_raise_only=int(notify_on_handled_exceptions) == 2, - ignore_libraries=int(ignore_libraries) > 0, - ) - else: - self.api.add_plugins_exception_breakpoint(py_db, breakpoint_type, exception) - - def cmd_remove_exception_break(self, py_db, cmd_id, seq, text): - exception = text - if exception.find('-') != -1: - exception_type, exception = exception.split('-') - else: - exception_type = 'python' - - if exception_type == 'python': - self.api.remove_python_exception_breakpoint(py_db, exception) - else: - self.api.remove_plugins_exception_breakpoint(py_db, exception_type, exception) - - def cmd_add_django_exception_break(self, py_db, cmd_id, seq, text): - self.api.add_plugins_exception_breakpoint(py_db, breakpoint_type='django', exception=text) - - def cmd_remove_django_exception_break(self, py_db, cmd_id, seq, text): - self.api.remove_plugins_exception_breakpoint(py_db, exception_type='django', exception=text) - - def cmd_evaluate_console_expression(self, py_db, cmd_id, seq, text): - # Command which takes care for the debug console communication - if text != "": - thread_id, frame_id, console_command = text.split('\t', 2) - console_command, line = console_command.split('\t') - - if console_command == 'EVALUATE': - int_cmd = InternalEvaluateConsoleExpression( - seq, thread_id, frame_id, line, buffer_output=True) - - elif console_command == 'EVALUATE_UNBUFFERED': - int_cmd = InternalEvaluateConsoleExpression( - seq, thread_id, frame_id, line, buffer_output=False) - - elif console_command == 'GET_COMPLETIONS': - int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line) - - else: - raise ValueError('Unrecognized command: %s' % (console_command,)) - - py_db.post_internal_command(int_cmd, thread_id) - - def cmd_run_custom_operation(self, py_db, cmd_id, seq, text): - # Command which runs a custom operation - if text != "": - try: - location, custom = text.split('||', 1) - except: - sys.stderr.write('Custom operation now needs a || separator. Found: %s\n' % (text,)) - raise - - thread_id, frame_id, scopeattrs = location.split('\t', 2) - - if scopeattrs.find('\t') != -1: # there are attributes beyond scope - scope, attrs = scopeattrs.split('\t', 1) - else: - scope, attrs = (scopeattrs, None) - - # : style: EXECFILE or EXEC - # : encoded_code_or_file: file to execute or code - # : fname: name of function to be executed in the resulting namespace - style, encoded_code_or_file, fnname = custom.split('\t', 3) - int_cmd = InternalRunCustomOperation(seq, thread_id, frame_id, scope, attrs, - style, encoded_code_or_file, fnname) - py_db.post_internal_command(int_cmd, thread_id) - - def cmd_ignore_thrown_exception_at(self, py_db, cmd_id, seq, text): - if text: - replace = 'REPLACE:' # Not all 3.x versions support u'REPLACE:', so, doing workaround. 
- if text.startswith(replace): - text = text[8:] - py_db.filename_to_lines_where_exceptions_are_ignored.clear() - - if text: - for line in text.split('||'): # Can be bulk-created (one in each line) - original_filename, line_number = line.split('|') - original_filename = self.api.filename_to_server(original_filename) - - canonical_normalized_filename = pydevd_file_utils.canonical_normalized_path(original_filename) - absolute_filename = pydevd_file_utils.absolute_path(original_filename) - - if os.path.exists(absolute_filename): - lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored.get(canonical_normalized_filename) - if lines_ignored is None: - lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored[canonical_normalized_filename] = {} - lines_ignored[int(line_number)] = 1 - else: - sys.stderr.write('pydev debugger: warning: trying to ignore exception thrown'\ - ' on file that does not exist: %s (will have no effect)\n' % (absolute_filename,)) - - def cmd_enable_dont_trace(self, py_db, cmd_id, seq, text): - if text: - true_str = 'true' # Not all 3.x versions support u'str', so, doing workaround. - mode = text.strip() == true_str - pydevd_dont_trace.trace_filter(mode) - - def cmd_redirect_output(self, py_db, cmd_id, seq, text): - if text: - py_db.enable_output_redirection('STDOUT' in text, 'STDERR' in text) - - def cmd_get_next_statement_targets(self, py_db, cmd_id, seq, text): - thread_id, frame_id = text.split('\t', 1) - - py_db.post_method_as_internal_command( - thread_id, internal_get_next_statement_targets, seq, thread_id, frame_id) - - def cmd_get_smart_step_into_variants(self, py_db, cmd_id, seq, text): - thread_id, frame_id, start_line, end_line = text.split('\t', 3) - - py_db.post_method_as_internal_command( - thread_id, internal_get_smart_step_into_variants, seq, thread_id, frame_id, start_line, end_line, set_additional_thread_info=set_additional_thread_info) - - def cmd_set_project_roots(self, py_db, cmd_id, seq, text): - self.api.set_project_roots(py_db, text.split(u'\t')) - - def cmd_thread_dump_to_stderr(self, py_db, cmd_id, seq, text): - pydevd_utils.dump_threads() - - def cmd_stop_on_start(self, py_db, cmd_id, seq, text): - if text.strip() in ('True', 'true', '1'): - self.api.stop_on_entry() - - def cmd_pydevd_json_config(self, py_db, cmd_id, seq, text): - # Expected to receive a json string as: - # { - # 'skip_suspend_on_breakpoint_exception': [], - # 'skip_print_breakpoint_exception': [], - # 'multi_threads_single_notification': bool, - # } - msg = json.loads(text.strip()) - if 'skip_suspend_on_breakpoint_exception' in msg: - py_db.skip_suspend_on_breakpoint_exception = tuple( - get_exception_class(x) for x in msg['skip_suspend_on_breakpoint_exception']) - - if 'skip_print_breakpoint_exception' in msg: - py_db.skip_print_breakpoint_exception = tuple( - get_exception_class(x) for x in msg['skip_print_breakpoint_exception']) - - if 'multi_threads_single_notification' in msg: - py_db.multi_threads_single_notification = msg['multi_threads_single_notification'] - - def cmd_get_exception_details(self, py_db, cmd_id, seq, text): - thread_id = text - t = pydevd_find_thread_by_id(thread_id) - frame = None - if t is not None and not getattr(t, 'pydev_do_not_trace', None): - additional_info = set_additional_thread_info(t) - frame = additional_info.get_topmost_frame(t) - try: - # Note: provide the return even if the thread is empty. 
- return py_db.cmd_factory.make_get_exception_details_message(py_db, seq, thread_id, frame) - finally: - frame = None - t = None - - -process_net_command = _PyDevCommandProcessor().process_net_command - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/launcher/handlers.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/launcher/handlers.py deleted file mode 100644 index 213a5b9d8dc0e134e64aa9e028606789f5ebf563..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/launcher/handlers.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See LICENSE in the project root -# for license information. - -import os -import sys - -import debugpy -from debugpy import launcher -from debugpy.common import json -from debugpy.launcher import debuggee - - -def launch_request(request): - debug_options = set(request("debugOptions", json.array(str))) - - # Handling of properties that can also be specified as legacy "debugOptions" flags. - # If property is explicitly set to false, but the flag is in "debugOptions", treat - # it as an error. Returns None if the property wasn't explicitly set either way. - def property_or_debug_option(prop_name, flag_name): - assert prop_name[0].islower() and flag_name[0].isupper() - - value = request(prop_name, bool, optional=True) - if value == (): - value = None - - if flag_name in debug_options: - if value is False: - raise request.isnt_valid( - '{0}:false and "debugOptions":[{1}] are mutually exclusive', - json.repr(prop_name), - json.repr(flag_name), - ) - value = True - - return value - - python = request("python", json.array(str, size=(1,))) - cmdline = list(python) - - if not request("noDebug", json.default(False)): - # see https://github.com/microsoft/debugpy/issues/861 - if sys.version_info[:2] >= (3, 11): - cmdline += ["-X", "frozen_modules=off"] - - port = request("port", int) - cmdline += [ - os.path.dirname(debugpy.__file__), - "--connect", - launcher.adapter_host + ":" + str(port), - ] - - if not request("subProcess", True): - cmdline += ["--configure-subProcess", "False"] - - qt_mode = request( - "qt", - json.enum( - "none", "auto", "pyside", "pyside2", "pyqt4", "pyqt5", optional=True - ), - ) - cmdline += ["--configure-qt", qt_mode] - - adapter_access_token = request("adapterAccessToken", str, optional=True) - if adapter_access_token != (): - cmdline += ["--adapter-access-token", adapter_access_token] - - debugpy_args = request("debugpyArgs", json.array(str)) - cmdline += debugpy_args - - # Use the copy of arguments that was propagated via the command line rather than - # "args" in the request itself, to allow for shell expansion. - cmdline += sys.argv[1:] - - process_name = request("processName", sys.executable) - - env = os.environ.copy() - env_changes = request("env", json.object((str, type(None)))) - if sys.platform == "win32": - # Environment variables are case-insensitive on Win32, so we need to normalize - # both dicts to make sure that env vars specified in the debug configuration - # overwrite the global env vars correctly. If debug config has entries that - # differ in case only, that's an error. 
- env = {k.upper(): v for k, v in os.environ.items()} - new_env_changes = {} - for k, v in env_changes.items(): - k_upper = k.upper() - if k_upper in new_env_changes: - if new_env_changes[k_upper] == v: - continue - else: - raise request.isnt_valid( - 'Found duplicate in "env": {0}.'.format(k_upper) - ) - new_env_changes[k_upper] = v - env_changes = new_env_changes - if "DEBUGPY_TEST" in env: - # If we're running as part of a debugpy test, make sure that codecov is not - # applied to the debuggee, since it will conflict with pydevd. - env.pop("COV_CORE_SOURCE", None) - env.update(env_changes) - env = {k: v for k, v in env.items() if v is not None} - - if request("gevent", False): - env["GEVENT_SUPPORT"] = "True" - - console = request( - "console", - json.enum( - "internalConsole", "integratedTerminal", "externalTerminal", optional=True - ), - ) - - redirect_output = property_or_debug_option("redirectOutput", "RedirectOutput") - if redirect_output is None: - # If neither the property nor the option were specified explicitly, choose - # the default depending on console type - "internalConsole" needs it to - # provide any output at all, but it's unnecessary for the terminals. - redirect_output = console == "internalConsole" - if redirect_output: - # sys.stdout buffering must be disabled - otherwise we won't see the output - # at all until the buffer fills up. - env["PYTHONUNBUFFERED"] = "1" - # Force UTF-8 output to minimize data loss due to re-encoding. - env["PYTHONIOENCODING"] = "utf-8" - - if property_or_debug_option("waitOnNormalExit", "WaitOnNormalExit"): - if console == "internalConsole": - raise request.isnt_valid( - '"waitOnNormalExit" is not supported for "console":"internalConsole"' - ) - debuggee.wait_on_exit_predicates.append(lambda code: code == 0) - if property_or_debug_option("waitOnAbnormalExit", "WaitOnAbnormalExit"): - if console == "internalConsole": - raise request.isnt_valid( - '"waitOnAbnormalExit" is not supported for "console":"internalConsole"' - ) - debuggee.wait_on_exit_predicates.append(lambda code: code != 0) - - debuggee.spawn(process_name, cmdline, env, redirect_output) - return {} - - -def terminate_request(request): - del debuggee.wait_on_exit_predicates[:] - request.respond({}) - debuggee.kill() - - -def disconnect(): - del debuggee.wait_on_exit_predicates[:] - debuggee.kill() diff --git a/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/models/pix2pix4depth_model.py b/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/models/pix2pix4depth_model.py deleted file mode 100644 index 89e89652feb96314973a050c5a2477b474630abb..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/models/pix2pix4depth_model.py +++ /dev/null @@ -1,155 +0,0 @@ -import torch -from .base_model import BaseModel -from . import networks - - -class Pix2Pix4DepthModel(BaseModel): - """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data. - - The model training requires '--dataset_mode aligned' dataset. - By default, it uses a '--netG unet256' U-Net generator, - a '--netD basic' discriminator (PatchGAN), - and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper). - - pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf - """ - @staticmethod - def modify_commandline_options(parser, is_train=True): - """Add new dataset-specific options, and rewrite default values for existing options. 
- - Parameters: - parser -- original option parser - is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. - - For pix2pix, we do not use image buffer - The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1 - By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets. - """ - # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/) - parser.set_defaults(input_nc=2,output_nc=1,norm='none', netG='unet_1024', dataset_mode='depthmerge') - if is_train: - parser.set_defaults(pool_size=0, gan_mode='vanilla',) - parser.add_argument('--lambda_L1', type=float, default=1000, help='weight for L1 loss') - return parser - - def __init__(self, opt): - """Initialize the pix2pix class. - - Parameters: - opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - BaseModel.__init__(self, opt) - # specify the training losses you want to print out. The training/test scripts will call - - self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake'] - # self.loss_names = ['G_L1'] - - # specify the images you want to save/display. The training/test scripts will call - if self.isTrain: - self.visual_names = ['outer','inner', 'fake_B', 'real_B'] - else: - self.visual_names = ['fake_B'] - - # specify the models you want to save to the disk. The training/test scripts will call and - if self.isTrain: - self.model_names = ['G','D'] - else: # during test time, only load G - self.model_names = ['G'] - - # define networks (both generator and discriminator) - self.netG = networks.define_G(opt.input_nc, opt.output_nc, 64, 'unet_1024', 'none', - False, 'normal', 0.02, self.gpu_ids) - - if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc - self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, - opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) - - if self.isTrain: - # define loss functions - self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) - self.criterionL1 = torch.nn.L1Loss() - # initialize optimizers; schedulers will be automatically created by function . 
- self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=1e-4, betas=(opt.beta1, 0.999)) - self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=2e-06, betas=(opt.beta1, 0.999)) - self.optimizers.append(self.optimizer_G) - self.optimizers.append(self.optimizer_D) - - def set_input_train(self, input): - self.outer = input['data_outer'].to(self.device) - self.outer = torch.nn.functional.interpolate(self.outer,(1024,1024),mode='bilinear',align_corners=False) - - self.inner = input['data_inner'].to(self.device) - self.inner = torch.nn.functional.interpolate(self.inner,(1024,1024),mode='bilinear',align_corners=False) - - self.image_paths = input['image_path'] - - if self.isTrain: - self.gtfake = input['data_gtfake'].to(self.device) - self.gtfake = torch.nn.functional.interpolate(self.gtfake, (1024, 1024), mode='bilinear', align_corners=False) - self.real_B = self.gtfake - - self.real_A = torch.cat((self.outer, self.inner), 1) - - def set_input(self, outer, inner): - inner = torch.from_numpy(inner).unsqueeze(0).unsqueeze(0) - outer = torch.from_numpy(outer).unsqueeze(0).unsqueeze(0) - - inner = (inner - torch.min(inner))/(torch.max(inner)-torch.min(inner)) - outer = (outer - torch.min(outer))/(torch.max(outer)-torch.min(outer)) - - inner = self.normalize(inner) - outer = self.normalize(outer) - - self.real_A = torch.cat((outer, inner), 1).to(self.device) - - - def normalize(self, input): - input = input * 2 - input = input - 1 - return input - - def forward(self): - """Run forward pass; called by both functions and .""" - self.fake_B = self.netG(self.real_A) # G(A) - - def backward_D(self): - """Calculate GAN loss for the discriminator""" - # Fake; stop backprop to the generator by detaching fake_B - fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator - pred_fake = self.netD(fake_AB.detach()) - self.loss_D_fake = self.criterionGAN(pred_fake, False) - # Real - real_AB = torch.cat((self.real_A, self.real_B), 1) - pred_real = self.netD(real_AB) - self.loss_D_real = self.criterionGAN(pred_real, True) - # combine loss and calculate gradients - self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 - self.loss_D.backward() - - def backward_G(self): - """Calculate GAN and L1 loss for the generator""" - # First, G(A) should fake the discriminator - fake_AB = torch.cat((self.real_A, self.fake_B), 1) - pred_fake = self.netD(fake_AB) - self.loss_G_GAN = self.criterionGAN(pred_fake, True) - # Second, G(A) = B - self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1 - # combine loss and calculate gradients - self.loss_G = self.loss_G_L1 + self.loss_G_GAN - self.loss_G.backward() - - def optimize_parameters(self): - self.forward() # compute fake images: G(A) - # update D - self.set_requires_grad(self.netD, True) # enable backprop for D - self.optimizer_D.zero_grad() # set D's gradients to zero - self.backward_D() # calculate gradients for D - self.optimizer_D.step() # update D's weights - # update G - self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G - self.optimizer_G.zero_grad() # set G's gradients to zero - self.backward_G() # calculate graidents for G - self.optimizer_G.step() # udpate G's weights \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/log_buffer.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/log_buffer.py deleted file mode 100644 index 
d949e2941c5400088c7cd8a1dc893d8b233ae785..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/log_buffer.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import OrderedDict - -import numpy as np - - -class LogBuffer: - - def __init__(self): - self.val_history = OrderedDict() - self.n_history = OrderedDict() - self.output = OrderedDict() - self.ready = False - - def clear(self): - self.val_history.clear() - self.n_history.clear() - self.clear_output() - - def clear_output(self): - self.output.clear() - self.ready = False - - def update(self, vars, count=1): - assert isinstance(vars, dict) - for key, var in vars.items(): - if key not in self.val_history: - self.val_history[key] = [] - self.n_history[key] = [] - self.val_history[key].append(var) - self.n_history[key].append(count) - - def average(self, n=0): - """Average latest n values or all values.""" - assert n >= 0 - for key in self.val_history: - values = np.array(self.val_history[key][-n:]) - nums = np.array(self.n_history[key][-n:]) - avg = np.sum(values * nums) / np.sum(nums) - self.output[key] = avg - self.ready = True diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/parrots_jit.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/parrots_jit.py deleted file mode 100644 index 61873f6dbb9b10ed972c90aa8faa321e3cb3249e..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/parrots_jit.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os - -from .parrots_wrapper import TORCH_VERSION - -parrots_jit_option = os.getenv('PARROTS_JIT_OPTION') - -if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON': - from parrots.jit import pat as jit -else: - - def jit(func=None, - check_input=None, - full_shape=True, - derivate=False, - coderize=False, - optimize=False): - - def wrapper(func): - - def wrapper_inner(*args, **kargs): - return func(*args, **kargs) - - return wrapper_inner - - if func is None: - return wrapper - else: - return func - - -if TORCH_VERSION == 'parrots': - from parrots.utils.tester import skip_no_elena -else: - - def skip_no_elena(func): - - def wrapper(*args, **kargs): - return func(*args, **kargs) - - return wrapper diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py deleted file mode 100644 index 715852e94e81dc46623972748285d2d19237a341..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py +++ /dev/null @@ -1,127 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from ..utils import SelfAttentionBlock as _SelfAttentionBlock -from .cascade_decode_head import BaseCascadeDecodeHead - - -class SpatialGatherModule(nn.Module): - """Aggregate the context features according to the initial predicted - probability distribution. - - Employ the soft-weighted method to aggregate the context. 
- """ - - def __init__(self, scale): - super(SpatialGatherModule, self).__init__() - self.scale = scale - - def forward(self, feats, probs): - """Forward function.""" - batch_size, num_classes, height, width = probs.size() - channels = feats.size(1) - probs = probs.view(batch_size, num_classes, -1) - feats = feats.view(batch_size, channels, -1) - # [batch_size, height*width, num_classes] - feats = feats.permute(0, 2, 1) - # [batch_size, channels, height*width] - probs = F.softmax(self.scale * probs, dim=2) - # [batch_size, channels, num_classes] - ocr_context = torch.matmul(probs, feats) - ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3) - return ocr_context - - -class ObjectAttentionBlock(_SelfAttentionBlock): - """Make a OCR used SelfAttentionBlock.""" - - def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg, - act_cfg): - if scale > 1: - query_downsample = nn.MaxPool2d(kernel_size=scale) - else: - query_downsample = None - super(ObjectAttentionBlock, self).__init__( - key_in_channels=in_channels, - query_in_channels=in_channels, - channels=channels, - out_channels=in_channels, - share_key_query=False, - query_downsample=query_downsample, - key_downsample=None, - key_query_num_convs=2, - key_query_norm=True, - value_out_num_convs=1, - value_out_norm=True, - matmul_norm=True, - with_out=True, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.bottleneck = ConvModule( - in_channels * 2, - in_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, query_feats, key_feats): - """Forward function.""" - context = super(ObjectAttentionBlock, - self).forward(query_feats, key_feats) - output = self.bottleneck(torch.cat([context, query_feats], dim=1)) - if self.query_downsample is not None: - output = resize(query_feats) - - return output - - -@HEADS.register_module() -class OCRHead(BaseCascadeDecodeHead): - """Object-Contextual Representations for Semantic Segmentation. - - This head is the implementation of `OCRNet - `_. - - Args: - ocr_channels (int): The intermediate channels of OCR block. - scale (int): The scale of probability map in SpatialGatherModule in - Default: 1. 
- """ - - def __init__(self, ocr_channels, scale=1, **kwargs): - super(OCRHead, self).__init__(**kwargs) - self.ocr_channels = ocr_channels - self.scale = scale - self.object_context_block = ObjectAttentionBlock( - self.channels, - self.ocr_channels, - self.scale, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.spatial_gather_module = SpatialGatherModule(self.scale) - - self.bottleneck = ConvModule( - self.in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs, prev_output): - """Forward function.""" - x = self._transform_inputs(inputs) - feats = self.bottleneck(x) - context = self.spatial_gather_module(feats, prev_output) - object_context = self.object_context_block(feats, context) - output = self.cls_seg(object_context) - - return output diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/autocompletion.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/autocompletion.py deleted file mode 100644 index 226fe84dc0d0c4eb78f9b3c603df20cef0fdfda4..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/autocompletion.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Logic that powers autocompletion installed by ``pip completion``. -""" - -import optparse -import os -import sys -from itertools import chain -from typing import Any, Iterable, List, Optional - -from pip._internal.cli.main_parser import create_main_parser -from pip._internal.commands import commands_dict, create_command -from pip._internal.metadata import get_default_environment - - -def autocomplete() -> None: - """Entry Point for completion of main and subcommand options.""" - # Don't complete if user hasn't sourced bash_completion file. 
- if "PIP_AUTO_COMPLETE" not in os.environ: - return - cwords = os.environ["COMP_WORDS"].split()[1:] - cword = int(os.environ["COMP_CWORD"]) - try: - current = cwords[cword - 1] - except IndexError: - current = "" - - parser = create_main_parser() - subcommands = list(commands_dict) - options = [] - - # subcommand - subcommand_name: Optional[str] = None - for word in cwords: - if word in subcommands: - subcommand_name = word - break - # subcommand options - if subcommand_name is not None: - # special case: 'help' subcommand has no options - if subcommand_name == "help": - sys.exit(1) - # special case: list locally installed dists for show and uninstall - should_list_installed = not current.startswith("-") and subcommand_name in [ - "show", - "uninstall", - ] - if should_list_installed: - env = get_default_environment() - lc = current.lower() - installed = [ - dist.canonical_name - for dist in env.iter_installed_distributions(local_only=True) - if dist.canonical_name.startswith(lc) - and dist.canonical_name not in cwords[1:] - ] - # if there are no dists installed, fall back to option completion - if installed: - for dist in installed: - print(dist) - sys.exit(1) - - should_list_installables = ( - not current.startswith("-") and subcommand_name == "install" - ) - if should_list_installables: - for path in auto_complete_paths(current, "path"): - print(path) - sys.exit(1) - - subcommand = create_command(subcommand_name) - - for opt in subcommand.parser.option_list_all: - if opt.help != optparse.SUPPRESS_HELP: - for opt_str in opt._long_opts + opt._short_opts: - options.append((opt_str, opt.nargs)) - - # filter out previously specified options from available options - prev_opts = [x.split("=")[0] for x in cwords[1 : cword - 1]] - options = [(x, v) for (x, v) in options if x not in prev_opts] - # filter options by current input - options = [(k, v) for k, v in options if k.startswith(current)] - # get completion type given cwords and available subcommand options - completion_type = get_path_completion_type( - cwords, - cword, - subcommand.parser.option_list_all, - ) - # get completion files and directories if ``completion_type`` is - # ````, ```` or ```` - if completion_type: - paths = auto_complete_paths(current, completion_type) - options = [(path, 0) for path in paths] - for option in options: - opt_label = option[0] - # append '=' to options which require args - if option[1] and option[0][:2] == "--": - opt_label += "=" - print(opt_label) - else: - # show main parser options only when necessary - - opts = [i.option_list for i in parser.option_groups] - opts.append(parser.option_list) - flattened_opts = chain.from_iterable(opts) - if current.startswith("-"): - for opt in flattened_opts: - if opt.help != optparse.SUPPRESS_HELP: - subcommands += opt._long_opts + opt._short_opts - else: - # get completion type given cwords and all available options - completion_type = get_path_completion_type(cwords, cword, flattened_opts) - if completion_type: - subcommands = list(auto_complete_paths(current, completion_type)) - - print(" ".join([x for x in subcommands if x.startswith(current)])) - sys.exit(1) - - -def get_path_completion_type( - cwords: List[str], cword: int, opts: Iterable[Any] -) -> Optional[str]: - """Get the type of path completion (``file``, ``dir``, ``path`` or None) - - :param cwords: same as the environmental variable ``COMP_WORDS`` - :param cword: same as the environmental variable ``COMP_CWORD`` - :param opts: The available options to check - :return: path completion type (``file``, 
``dir``, ``path`` or None) - """ - if cword < 2 or not cwords[cword - 2].startswith("-"): - return None - for opt in opts: - if opt.help == optparse.SUPPRESS_HELP: - continue - for o in str(opt).split("/"): - if cwords[cword - 2].split("=")[0] == o: - if not opt.metavar or any( - x in ("path", "file", "dir") for x in opt.metavar.split("/") - ): - return opt.metavar - return None - - -def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]: - """If ``completion_type`` is ``file`` or ``path``, list all regular files - and directories starting with ``current``; otherwise only list directories - starting with ``current``. - - :param current: The word to be completed - :param completion_type: path completion type(``file``, ``path`` or ``dir``) - :return: A generator of regular files and/or directories - """ - directory, filename = os.path.split(current) - current_path = os.path.abspath(directory) - # Don't complete paths if they can't be accessed - if not os.access(current_path, os.R_OK): - return - filename = os.path.normcase(filename) - # list all files that start with ``filename`` - file_list = ( - x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename) - ) - for f in file_list: - opt = os.path.join(current_path, f) - comp_file = os.path.normcase(os.path.join(directory, f)) - # complete regular files when there is not ```` after option - # complete directories when there is ````, ```` or - # ````after option - if completion_type != "dir" and os.path.isfile(opt): - yield comp_file - elif os.path.isdir(opt): - yield os.path.join(comp_file, "") diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/subversion.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/subversion.py deleted file mode 100644 index 16d93a67b7b6feed66f2cc432f6250ca3ad34914..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/subversion.py +++ /dev/null @@ -1,324 +0,0 @@ -import logging -import os -import re -from typing import List, Optional, Tuple - -from pip._internal.utils.misc import ( - HiddenText, - display_path, - is_console_interactive, - is_installable_dir, - split_auth_from_netloc, -) -from pip._internal.utils.subprocess import CommandArgs, make_command -from pip._internal.vcs.versioncontrol import ( - AuthInfo, - RemoteNotFoundError, - RevOptions, - VersionControl, - vcs, -) - -logger = logging.getLogger(__name__) - -_svn_xml_url_re = re.compile('url="([^"]+)"') -_svn_rev_re = re.compile(r'committed-rev="(\d+)"') -_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r"(.*)") - - -class Subversion(VersionControl): - name = "svn" - dirname = ".svn" - repo_name = "checkout" - schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file") - - @classmethod - def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: - return True - - @staticmethod - def get_base_rev_args(rev: str) -> List[str]: - return ["-r", rev] - - @classmethod - def get_revision(cls, location: str) -> str: - """ - Return the maximum revision for all files under a given location - """ - # Note: taken from setuptools.command.egg_info - revision = 0 - - for base, dirs, _ in os.walk(location): - if cls.dirname not in dirs: - dirs[:] = [] - continue # no sense walking uncontrolled subdirs - dirs.remove(cls.dirname) - entries_fn = os.path.join(base, cls.dirname, "entries") - if not 
os.path.exists(entries_fn): - # FIXME: should we warn? - continue - - dirurl, localrev = cls._get_svn_url_rev(base) - - if base == location: - assert dirurl is not None - base = dirurl + "/" # save the root url - elif not dirurl or not dirurl.startswith(base): - dirs[:] = [] - continue # not part of the same svn tree, skip it - revision = max(revision, localrev) - return str(revision) - - @classmethod - def get_netloc_and_auth( - cls, netloc: str, scheme: str - ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: - """ - This override allows the auth information to be passed to svn via the - --username and --password options instead of via the URL. - """ - if scheme == "ssh": - # The --username and --password options can't be used for - # svn+ssh URLs, so keep the auth information in the URL. - return super().get_netloc_and_auth(netloc, scheme) - - return split_auth_from_netloc(netloc) - - @classmethod - def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: - # hotfix the URL scheme after removing svn+ from svn+ssh:// re-add it - url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith("ssh://"): - url = "svn+" + url - return url, rev, user_pass - - @staticmethod - def make_rev_args( - username: Optional[str], password: Optional[HiddenText] - ) -> CommandArgs: - extra_args: CommandArgs = [] - if username: - extra_args += ["--username", username] - if password: - extra_args += ["--password", password] - - return extra_args - - @classmethod - def get_remote_url(cls, location: str) -> str: - # In cases where the source is in a subdirectory, we have to look up in - # the location until we find a valid project root. - orig_location = location - while not is_installable_dir(location): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding a Python project. - logger.warning( - "Could not find Python project for directory %s (tried all " - "parent directories)", - orig_location, - ) - raise RemoteNotFoundError - - url, _rev = cls._get_svn_url_rev(location) - if url is None: - raise RemoteNotFoundError - - return url - - @classmethod - def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]: - from pip._internal.exceptions import InstallationError - - entries_path = os.path.join(location, cls.dirname, "entries") - if os.path.exists(entries_path): - with open(entries_path) as f: - data = f.read() - else: # subversion >= 1.7 does not have the 'entries' file - data = "" - - url = None - if data.startswith("8") or data.startswith("9") or data.startswith("10"): - entries = list(map(str.splitlines, data.split("\n\x0c\n"))) - del entries[0][0] # get rid of the '8' - url = entries[0][3] - revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0] - elif data.startswith("= 1.7 - # Note that using get_remote_call_options is not necessary here - # because `svn info` is being run against a local directory. - # We don't need to worry about making sure interactive mode - # is being used to prompt for passwords, because passwords - # are only potentially needed for remote server requests. 
- xml = cls.run_command( - ["info", "--xml", location], - show_stdout=False, - stdout_only=True, - ) - match = _svn_info_xml_url_re.search(xml) - assert match is not None - url = match.group(1) - revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)] - except InstallationError: - url, revs = None, [] - - if revs: - rev = max(revs) - else: - rev = 0 - - return url, rev - - @classmethod - def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: - """Always assume the versions don't match""" - return False - - def __init__(self, use_interactive: Optional[bool] = None) -> None: - if use_interactive is None: - use_interactive = is_console_interactive() - self.use_interactive = use_interactive - - # This member is used to cache the fetched version of the current - # ``svn`` client. - # Special value definitions: - # None: Not evaluated yet. - # Empty tuple: Could not parse version. - self._vcs_version: Optional[Tuple[int, ...]] = None - - super().__init__() - - def call_vcs_version(self) -> Tuple[int, ...]: - """Query the version of the currently installed Subversion client. - - :return: A tuple containing the parts of the version information or - ``()`` if the version returned from ``svn`` could not be parsed. - :raises: BadCommand: If ``svn`` is not installed. - """ - # Example versions: - # svn, version 1.10.3 (r1842928) - # compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0 - # svn, version 1.7.14 (r1542130) - # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu - # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0) - # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2 - version_prefix = "svn, version " - version = self.run_command(["--version"], show_stdout=False, stdout_only=True) - if not version.startswith(version_prefix): - return () - - version = version[len(version_prefix) :].split()[0] - version_list = version.partition("-")[0].split(".") - try: - parsed_version = tuple(map(int, version_list)) - except ValueError: - return () - - return parsed_version - - def get_vcs_version(self) -> Tuple[int, ...]: - """Return the version of the currently installed Subversion client. - - If the version of the Subversion client has already been queried, - a cached value will be used. - - :return: A tuple containing the parts of the version information or - ``()`` if the version returned from ``svn`` could not be parsed. - :raises: BadCommand: If ``svn`` is not installed. - """ - if self._vcs_version is not None: - # Use cached version, if available. - # If parsing the version failed previously (empty tuple), - # do not attempt to parse it again. - return self._vcs_version - - vcs_version = self.call_vcs_version() - self._vcs_version = vcs_version - return vcs_version - - def get_remote_call_options(self) -> CommandArgs: - """Return options to be used on calls to Subversion that contact the server. - - These options are applicable for the following ``svn`` subcommands used - in this class. - - - checkout - - switch - - update - - :return: A list of command line arguments to pass to ``svn``. - """ - if not self.use_interactive: - # --non-interactive switch is available since Subversion 0.14.4. - # Subversion < 1.8 runs in interactive mode by default. - return ["--non-interactive"] - - svn_version = self.get_vcs_version() - # By default, Subversion >= 1.8 runs in non-interactive mode if - # stdin is not a TTY. 
Since that is how pip invokes SVN, in - # call_subprocess(), pip must pass --force-interactive to ensure - # the user can be prompted for a password, if required. - # SVN added the --force-interactive option in SVN 1.8. Since - # e.g. RHEL/CentOS 7, which is supported until 2024, ships with - # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip - # can't safely add the option if the SVN version is < 1.8 (or unknown). - if svn_version >= (1, 8): - return ["--force-interactive"] - - return [] - - def fetch_new( - self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int - ) -> None: - rev_display = rev_options.to_display() - logger.info( - "Checking out %s%s to %s", - url, - rev_display, - display_path(dest), - ) - if verbosity <= 0: - flag = "--quiet" - else: - flag = "" - cmd_args = make_command( - "checkout", - flag, - self.get_remote_call_options(), - rev_options.to_args(), - url, - dest, - ) - self.run_command(cmd_args) - - def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - cmd_args = make_command( - "switch", - self.get_remote_call_options(), - rev_options.to_args(), - url, - dest, - ) - self.run_command(cmd_args) - - def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - cmd_args = make_command( - "update", - self.get_remote_call_options(), - rev_options.to_args(), - dest, - ) - self.run_command(cmd_args) - - -vcs.register(Subversion) diff --git a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_condition.py b/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_condition.py deleted file mode 100644 index 1f3ec35a4507d88834cd6f8be0d9a623502a6025..0000000000000000000000000000000000000000 --- a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_condition.py +++ /dev/null @@ -1,251 +0,0 @@ - -from typing import Tuple, Union - -import jax -import jax.numpy as jnp -import flax.linen as nn -from flax.core.frozen_dict import FrozenDict - -from diffusers.configuration_utils import ConfigMixin, flax_register_to_config -from diffusers.models.modeling_flax_utils import FlaxModelMixin -from diffusers.utils import BaseOutput - -from .flax_unet_pseudo3d_blocks import ( - CrossAttnDownBlockPseudo3D, - CrossAttnUpBlockPseudo3D, - DownBlockPseudo3D, - UpBlockPseudo3D, - UNetMidBlockPseudo3DCrossAttn -) -#from flax_embeddings import ( -# TimestepEmbedding, -# Timesteps -#) -from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps -from .flax_resnet_pseudo3d import ConvPseudo3D - - -class UNetPseudo3DConditionOutput(BaseOutput): - sample: jax.Array - - -@flax_register_to_config -class UNetPseudo3DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin): - sample_size: Union[int, Tuple[int, int]] = (64, 64) - in_channels: int = 4 - out_channels: int = 4 - down_block_types: Tuple[str] = ( - "CrossAttnDownBlockPseudo3D", - "CrossAttnDownBlockPseudo3D", - "CrossAttnDownBlockPseudo3D", - "DownBlockPseudo3D" - ) - up_block_types: Tuple[str] = ( - "UpBlockPseudo3D", - "CrossAttnUpBlockPseudo3D", - "CrossAttnUpBlockPseudo3D", - "CrossAttnUpBlockPseudo3D" - ) - block_out_channels: Tuple[int] = ( - 320, - 640, - 1280, - 1280 - ) - layers_per_block: int = 2 - attention_head_dim: Union[int, Tuple[int]] = 8 - cross_attention_dim: int = 768 - flip_sin_to_cos: bool = True - freq_shift: int = 0 - use_memory_efficient_attention: bool = False - dtype: jnp.dtype = jnp.float32 - param_dtype: str = 'float32' - - def init_weights(self, rng: 
jax.random.KeyArray) -> FrozenDict: - if self.param_dtype == 'bfloat16': - param_dtype = jnp.bfloat16 - elif self.param_dtype == 'float16': - param_dtype = jnp.float16 - elif self.param_dtype == 'float32': - param_dtype = jnp.float32 - else: - raise ValueError(f'unknown parameter type: {self.param_dtype}') - sample_size = self.sample_size - if isinstance(sample_size, int): - sample_size = (sample_size, sample_size) - sample_shape = (1, self.in_channels, 1, *sample_size) - sample = jnp.zeros(sample_shape, dtype = param_dtype) - timesteps = jnp.ones((1, ), dtype = jnp.int32) - encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype = param_dtype) - params_rng, dropout_rng = jax.random.split(rng) - rngs = { "params": params_rng, "dropout": dropout_rng } - return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"] - - def setup(self) -> None: - if isinstance(self.attention_head_dim, int): - attention_head_dim = (self.attention_head_dim, ) * len(self.down_block_types) - else: - attention_head_dim = self.attention_head_dim - time_embed_dim = self.block_out_channels[0] * 4 - self.conv_in = ConvPseudo3D( - features = self.block_out_channels[0], - kernel_size = (3, 3), - strides = (1, 1), - padding = ((1, 1), (1, 1)), - dtype = self.dtype - ) - self.time_proj = FlaxTimesteps( - dim = self.block_out_channels[0], - flip_sin_to_cos = self.flip_sin_to_cos, - freq_shift = self.freq_shift - ) - self.time_embedding = FlaxTimestepEmbedding( - time_embed_dim = time_embed_dim, - dtype = self.dtype - ) - down_blocks = [] - output_channels = self.block_out_channels[0] - for i, down_block_type in enumerate(self.down_block_types): - input_channels = output_channels - output_channels = self.block_out_channels[i] - is_final_block = i == len(self.block_out_channels) - 1 - # allows loading 3d models with old layer type names in their configs - # eg. 
2D instead of Pseudo3D, like lxj's timelapse model - if down_block_type in ['CrossAttnDownBlockPseudo3D', 'CrossAttnDownBlock2D']: - down_block = CrossAttnDownBlockPseudo3D( - in_channels = input_channels, - out_channels = output_channels, - num_layers = self.layers_per_block, - attn_num_head_channels = attention_head_dim[i], - add_downsample = not is_final_block, - use_memory_efficient_attention = self.use_memory_efficient_attention, - dtype = self.dtype - ) - elif down_block_type in ['DownBlockPseudo3D', 'DownBlock2D']: - down_block = DownBlockPseudo3D( - in_channels = input_channels, - out_channels = output_channels, - num_layers = self.layers_per_block, - add_downsample = not is_final_block, - dtype = self.dtype - ) - else: - raise NotImplementedError(f'Unimplemented down block type: {down_block_type}') - down_blocks.append(down_block) - self.down_blocks = down_blocks - self.mid_block = UNetMidBlockPseudo3DCrossAttn( - in_channels = self.block_out_channels[-1], - attn_num_head_channels = attention_head_dim[-1], - use_memory_efficient_attention = self.use_memory_efficient_attention, - dtype = self.dtype - ) - up_blocks = [] - reversed_block_out_channels = list(reversed(self.block_out_channels)) - reversed_attention_head_dim = list(reversed(attention_head_dim)) - output_channels = reversed_block_out_channels[0] - for i, up_block_type in enumerate(self.up_block_types): - prev_output_channels = output_channels - output_channels = reversed_block_out_channels[i] - input_channels = reversed_block_out_channels[min(i + 1, len(self.block_out_channels) - 1)] - is_final_block = i == len(self.block_out_channels) - 1 - if up_block_type in ['CrossAttnUpBlockPseudo3D', 'CrossAttnUpBlock2D']: - up_block = CrossAttnUpBlockPseudo3D( - in_channels = input_channels, - out_channels = output_channels, - prev_output_channels = prev_output_channels, - num_layers = self.layers_per_block + 1, - attn_num_head_channels = reversed_attention_head_dim[i], - add_upsample = not is_final_block, - use_memory_efficient_attention = self.use_memory_efficient_attention, - dtype = self.dtype - ) - elif up_block_type in ['UpBlockPseudo3D', 'UpBlock2D']: - up_block = UpBlockPseudo3D( - in_channels = input_channels, - out_channels = output_channels, - prev_output_channels = prev_output_channels, - num_layers = self.layers_per_block + 1, - add_upsample = not is_final_block, - dtype = self.dtype - ) - else: - raise NotImplementedError(f'Unimplemented up block type: {up_block_type}') - up_blocks.append(up_block) - self.up_blocks = up_blocks - self.conv_norm_out = nn.GroupNorm( - num_groups = 32, - epsilon = 1e-5 - ) - self.conv_out = ConvPseudo3D( - features = self.out_channels, - kernel_size = (3, 3), - strides = (1, 1), - padding = ((1, 1), (1, 1)), - dtype = self.dtype - ) - - def __call__(self, - sample: jax.Array, - timesteps: jax.Array, - encoder_hidden_states: jax.Array, - return_dict: bool = True - ) -> Union[UNetPseudo3DConditionOutput, Tuple[jax.Array]]: - if timesteps.dtype != jnp.float32: - timesteps = timesteps.astype(dtype = jnp.float32) - if len(timesteps.shape) == 0: - timesteps = jnp.expand_dims(timesteps, 0) - # b,c,f,h,w -> b,f,h,w,c - sample = sample.transpose((0, 2, 3, 4, 1)) - - t_emb = self.time_proj(timesteps) - t_emb = self.time_embedding(t_emb) - sample = self.conv_in(sample) - down_block_res_samples = (sample, ) - for down_block in self.down_blocks: - if isinstance(down_block, CrossAttnDownBlockPseudo3D): - sample, res_samples = down_block( - hidden_states = sample, - temb = t_emb, - encoder_hidden_states 
= encoder_hidden_states - ) - elif isinstance(down_block, DownBlockPseudo3D): - sample, res_samples = down_block( - hidden_states = sample, - temb = t_emb - ) - else: - raise NotImplementedError(f'Unimplemented down block type: {down_block.__class__.__name__}') - down_block_res_samples += res_samples - sample = self.mid_block( - hidden_states = sample, - temb = t_emb, - encoder_hidden_states = encoder_hidden_states - ) - for up_block in self.up_blocks: - res_samples = down_block_res_samples[-(self.layers_per_block + 1):] - down_block_res_samples = down_block_res_samples[:-(self.layers_per_block + 1)] - if isinstance(up_block, CrossAttnUpBlockPseudo3D): - sample = up_block( - hidden_states = sample, - temb = t_emb, - encoder_hidden_states = encoder_hidden_states, - res_hidden_states_tuple = res_samples - ) - elif isinstance(up_block, UpBlockPseudo3D): - sample = up_block( - hidden_states = sample, - temb = t_emb, - res_hidden_states_tuple = res_samples - ) - else: - raise NotImplementedError(f'Unimplemented up block type: {up_block.__class__.__name__}') - sample = self.conv_norm_out(sample) - sample = nn.silu(sample) - sample = self.conv_out(sample) - - # b,f,h,w,c -> b,c,f,h,w - sample = sample.transpose((0, 4, 1, 2, 3)) - if not return_dict: - return (sample, ) - return UNetPseudo3DConditionOutput(sample = sample) - diff --git a/spaces/ThirdEyeData/Component_Repair_Time_Prediction/app.py b/spaces/ThirdEyeData/Component_Repair_Time_Prediction/app.py deleted file mode 100644 index 5004bf96f95e1d594a5234652af774b5cc06a7b7..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Component_Repair_Time_Prediction/app.py +++ /dev/null @@ -1,157 +0,0 @@ -import tensorflow as tf -from tensorflow import keras -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -from sklearn.model_selection import train_test_split -from sklearn import preprocessing -import seaborn as sns -from sklearn.preprocessing import LabelEncoder -import pickle -import streamlit as st - -st.title('Repair Time Prediction') -#DLoading the ataset -#df = pd.read_csv('repair_time_sample_50k_modified2.csv') - -#new_data = df -#df.drop(['SRU serial number','Date of Manufacture', 'Snag Description'], axis = 1, inplace=True) - - -# DATA from user -def user_report(): - Aircraft_Type = st.sidebar.selectbox('Aircraft Type',("AH-64","UH-60","UH-63","UH-62","UH-61","AH-65")) - if Aircraft_Type=="AH-64": - Aircraft_Type=0 - elif Aircraft_Type=="UH-60": - Aircraft_Type=2 - elif Aircraft_Type=="UH-63": - Aircraft_Type=5 - elif Aircraft_Type=="UH-62": - Aircraft_Type=4 - elif Aircraft_Type=="UH-61": - Aircraft_Type=3 - else: - Aircraft_Type=1 - manufacturer = st.sidebar.selectbox("Manufacturer", - ("JKL Company", "GHI Company","AGS Company","ABC Company","XYZ Company" )) - if manufacturer=='JKL Company': - manufacturer=3 - elif manufacturer=="GHI Company": - manufacturer=2 - elif manufacturer=="AGS Company": - manufacturer=1 - elif manufacturer=="ABC Company": - manufacturer =0 - else: - manufacturer=4 - component_age = st.sidebar.slider('Component Age (in hours)', 500,2000, 600 ) - Issue_category= st.sidebar.selectbox("Issue Category", - ("Display", "Unservicable","Bootup Problem","Engine Failure","Electrical Fault" )) - if Issue_category=='Display': - Issue_category=1 - elif Issue_category=="Unservicable": - Issue_category=4 - elif Issue_category=="Bootup Problem": - Issue_category=0 - elif Issue_category=="Engine Failure": - Issue_category=3 - else: - Issue_category=2 - Snag_Severity = 
st.sidebar.selectbox("Snag Severity", - ("Low", "Medium","High" )) - if Snag_Severity =='Low': - Snag_Severity=1 - elif Snag_Severity=="Medium": - Snag_Severity =2 - else: - Snag_Severity=0 - Customer= st.sidebar.selectbox("Customer", - ("IAF", "ARMY","NAVY" )) - if Customer =='IAF': - Customer=1 - elif Customer=="ARMY": - Customer =0 - else: - Customer=2 - Technician_Skill_level= st.sidebar.selectbox("Technician Skill level", - ("Expert", "Intermediate","Novice" )) - if Technician_Skill_level =='Expert': - Technician_Skill_level=0 - elif Technician_Skill_level=="Intermediate": - Technician_Skill_level =1 - else: - Technician_Skill_level=2 - prior_maintainence = st.sidebar.selectbox('Prior Maintainence',("Regular","Irregular")) - if prior_maintainence =='Regular': - prior_maintainence=1 - else: - prior_maintainence=0 - Logistics_Time = st.sidebar.slider('Logistics Time (hr)', 2,21, 5 ) - total_operating_hours = st.sidebar.slider('Total Operating Hours)', 50,2000, 500 ) - operating_temperature = st.sidebar.slider('Operating Temperature', 10,25, 15 ) - previous_number_of_repairs = st.sidebar.number_input('Enter the Previous Number of Repairs Undergone 0 to 3 )',min_value=0,max_value=3,step=1) - Power_Input_Voltage= st.sidebar.slider('Power Input Voltage (V)',100,133,115) - - - user_report_data = { - 'Aircraft Type':Aircraft_Type, - 'Manufacturer':manufacturer, - 'Component_Age':component_age, - 'Issue_category':Issue_category, - 'Snag Severity': Snag_Severity, - 'Customer':Customer, - 'Technician Skill level':Technician_Skill_level, - 'Prior Maintenance': prior_maintainence, - 'Logistics Time (hr)':Logistics_Time, - 'total_operating_hours':total_operating_hours, - 'operating_temperature':operating_temperature, - 'previous_number_of_repairs':previous_number_of_repairs, - 'Power_Input_Voltage':Power_Input_Voltage - - } - report_data = pd.DataFrame(user_report_data, index=[0]) - return report_data - -#Customer Data -user_data = user_report() -st.header("Component Details") -st.write(user_data) - -def preprocess_dataset(X): - x = X.values #returns a numpy array - min_max_scaler = preprocessing.MinMaxScaler() - x_scaled = min_max_scaler.fit_transform(x) - X_df = pd.DataFrame(x_scaled) - return X_df - -def label_encoding(data): - le = LabelEncoder() - cat = data.select_dtypes(include='O').keys() - categ = list(cat) - data[categ] = data[categ].apply(le.fit_transform) - # X = data.loc[:,data.columns!= "Time required for repair (in hours)"] - # y = data['Time required for repair (in hours)'] - # return X,y - return data - -def prediction(df): - #X = df.loc[:,df.columns!= "Time required for repair (in hours)"] - #y = df['Time required for repair (in hours)'] - #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - #print(X_train.shape) - #print(X_test.shape) - #X_test_encoded = label_encoding(df) - #X_test_df = preprocess_dataset(df) - x_model = pickle.load(open('repair_time_model.pkl','rb')) - pred = x_model.predict(df) - #X_test['Actual_time_to_repair'] = y_test - #X_test['Predicted_time_to_repair'] = pred - #X_test.to_csv(r'/content/drive/MyDrive/Colab Notebooks/HAL/repair_time_prediction_results.csv') - #print(X_test.head()) - return pred - -y_pred = prediction(user_data) - -if st.button("Predict"): - st.subheader(f"Time required to Repair the Component is {y_pred[0]} hours") \ No newline at end of file diff --git a/spaces/Timjo88/toy-board-game-QA/app.py b/spaces/Timjo88/toy-board-game-QA/app.py deleted file mode 100644 index 
384717d07564e8cfb55ba39ea63c2bfb60274ad7..0000000000000000000000000000000000000000 --- a/spaces/Timjo88/toy-board-game-QA/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr -from transformers import pipeline -title = 'Toy Question Answering App v0.1' -context = "Rock Paper scissors can be played between two or more people. To begin, choose your opponent and stand or sit across from them.Both players close one hand into a fist and shake it while counting down. While counting down, players say, 'Rock, Paper, Scissors, Shoot' or 'Three, Two, One, Shoot!' on Shoot, everyone makes one of the three different hand signals/To make a rock, close your hand into a fist. To make paper, hold your hand out flat, with your palm facing downward.To make scissors, hold out your first two fingers in a V-shape. In Rock Paper Scissors, rock beats scissors by crushing them. Scissors beat paper by cutting the paper in two. paper beats rock by covering the paper. Players can play one game, best out of three games or as many games as they want!" -question1 = "How many players are there?" -question2 = "Does Rock beat paper?" -question3 = "Do scissors beat paper?" - -model_name = "deepset/roberta-base-squad2" - -question_answerer = pipeline("question-answering", model=model_name, tokenizer=model_name) - -interface = gr.Interface.from_pipeline(question_answerer, - title = title, - theme = "peach", - examples = [[context, question1],[context, question2],[context, question3]]).launch() \ No newline at end of file diff --git a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/utils.py b/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/utils.py deleted file mode 100644 index 75d597b779e0d3cac6cb4adc8ee77cffb95d3981..0000000000000000000000000000000000000000 --- a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import annotations - -import pathlib - - -def find_exp_dirs() -> list[str]: - repo_dir = pathlib.Path(__file__).parent - exp_root_dir = repo_dir / "experiments" - if not exp_root_dir.exists(): - return [] - exp_dirs = sorted(exp_root_dir.glob("*")) - exp_dirs = [exp_dir for exp_dir in exp_dirs if (exp_dir / "model_index.json").exists()] - return [path.relative_to(repo_dir).as_posix() for path in exp_dirs] - - -def save_model_card( - save_dir: pathlib.Path, - base_model: str, - training_prompt: str, - test_prompt: str = "", - test_image_dir: str = "", -) -> None: - image_str = "" - if test_prompt and test_image_dir: - image_paths = sorted((save_dir / test_image_dir).glob("*.gif")) - if image_paths: - image_path = image_paths[-1] - rel_path = image_path.relative_to(save_dir) - image_str = f"""## Samples -Test prompt: {test_prompt} - -![{image_path.stem}]({rel_path})""" - - model_card = f"""--- -license: creativeml-openrail-m -base_model: {base_model} -training_prompt: {training_prompt} -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -- text-to-video -- tune-a-video -inference: false ---- - -# Tune-A-Video - {save_dir.name} - -## Model description -- Base model: [{base_model}](https://huggingface.co/{base_model}) -- Training prompt: {training_prompt} - -{image_str} - -## Related papers: -- [Tune-A-Video](https://arxiv.org/abs/2212.11565): One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation -- [Stable-Diffusion](https://arxiv.org/abs/2112.10752): High-Resolution Image Synthesis with Latent Diffusion Models -""" - - with open(save_dir / "README.md", "w") as f: - f.write(model_card) diff 
--git a/spaces/Vageesh1/clip_gpt2/neuralnet/model.py b/spaces/Vageesh1/clip_gpt2/neuralnet/model.py deleted file mode 100644 index c39e231c79cdd0b63f98267c7c31abcea524c2db..0000000000000000000000000000000000000000 --- a/spaces/Vageesh1/clip_gpt2/neuralnet/model.py +++ /dev/null @@ -1,71 +0,0 @@ -import torch -import torch.nn as nn -import torchvision.models as models - - -class InceptionEncoder(nn.Module): - def __init__(self, embed_size, train_CNN=False): - super(InceptionEncoder, self).__init__() - self.train_CNN = train_CNN - self.inception = models.inception_v3(pretrained=True, aux_logits=False) - self.inception.fc = nn.Linear(self.inception.fc.in_features, embed_size) - self.relu = nn.ReLU() - self.bn = nn.BatchNorm1d(embed_size, momentum = 0.01) - self.dropout = nn.Dropout(0.5) - - def forward(self, images): - features = self.inception(images) - norm_features = self.bn(features) - return self.dropout(self.relu(norm_features)) - - -class LstmDecoder(nn.Module): - def __init__(self, embed_size, hidden_size, vocab_size, num_layers, device = 'cpu'): - super(LstmDecoder, self).__init__() - self.num_layers = num_layers - self.hidden_size = hidden_size - self.device = device - self.embed = nn.Embedding(vocab_size, embed_size) - self.lstm = nn.LSTM(embed_size, hidden_size, num_layers = self.num_layers) - self.linear = nn.Linear(hidden_size, vocab_size) - self.dropout = nn.Dropout(0.5) - - def forward(self, encoder_out, captions): - h0 = torch.zeros(self.num_layers, encoder_out.shape[0], self.hidden_size).to(self.device).requires_grad_() - c0 = torch.zeros(self.num_layers, encoder_out.shape[0], self.hidden_size).to(self.device).requires_grad_() - embeddings = self.dropout(self.embed(captions)) - embeddings = torch.cat((encoder_out.unsqueeze(0), embeddings), dim=0) - hiddens, (hn, cn) = self.lstm(embeddings, (h0.detach(), c0.detach())) - outputs = self.linear(hiddens) - return outputs - - -class SeqToSeq(nn.Module): - def __init__(self, embed_size, hidden_size, vocab_size, num_layers, device = 'cpu'): - super(SeqToSeq, self).__init__() - self.encoder = InceptionEncoder(embed_size) - self.decoder = LstmDecoder(embed_size, hidden_size, vocab_size, num_layers, device) - - def forward(self, images, captions): - features = self.encoder(images) - outputs = self.decoder(features, captions) - return outputs - - def caption_image(self, image, vocabulary, max_length = 50): - result_caption = [] - - with torch.no_grad(): - x = self.encoder(image).unsqueeze(0) - states = None - - for _ in range(max_length): - hiddens, states = self.decoder.lstm(x, states) - output = self.decoder.linear(hiddens.squeeze(0)) - predicted = output.argmax(1) - result_caption.append(predicted.item()) - x = self.decoder.embed(predicted).unsqueeze(0) - - if vocabulary[str(predicted.item())] == "": - break - - return [vocabulary[str(idx)] for idx in result_caption] diff --git a/spaces/VivianShi/Coconet-Pytorch/app.py b/spaces/VivianShi/Coconet-Pytorch/app.py deleted file mode 100644 index 2377fd3c8d477d579673a8231c2a8a958a28a239..0000000000000000000000000000000000000000 --- a/spaces/VivianShi/Coconet-Pytorch/app.py +++ /dev/null @@ -1,262 +0,0 @@ -import gradio as gr -import numpy as np -import torch -import torch.nn as nn -import torch.utils.data -import matplotlib.pyplot as plt -import mido -import soundfile -import pretty_midi -from pathlib import Path -from midi2audio import FluidSynth - -device = torch.device('cpu') -n_layers = 64 -hidden_size = 128 - -MAX_MIDI_PITCH, MIN_MIDI_PITCH = 86, 30 - -I = 4 # number of voices -T = 
128 # length of samples (128 = eight 4/4 measures) -P = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1 # number of different pitches - -def piano_roll_to_midi(piece): - """ - piece is a an array of shape (T, 4) for some T. - The (i,j)th entry of the array is the midi pitch of the jth voice at time i. It's an integer in range(128). - outputs a mido object mid that you can convert to a midi file by called its .save() method - """ - piece = np.concatenate([piece, [[np.nan, np.nan, np.nan, np.nan]]], axis=0) - - bpm = 50 - microseconds_per_beat = 60 * 1000000 / bpm - - mid = mido.MidiFile() - tracks = {'soprano': mido.MidiTrack(), 'alto': mido.MidiTrack(), - 'tenor': mido.MidiTrack(), 'bass': mido.MidiTrack()} - past_pitches = {'soprano': np.nan, 'alto': np.nan, - 'tenor': np.nan, 'bass': np.nan} - delta_time = {'soprano': 0, 'alto': 0, 'tenor': 0, 'bass': 0} - - # create a track containing tempo data - metatrack = mido.MidiTrack() - metatrack.append(mido.MetaMessage('set_tempo', - tempo=int(microseconds_per_beat), time=0)) - mid.tracks.append(metatrack) - - # create the four voice tracks - for voice in tracks: - mid.tracks.append(tracks[voice]) - tracks[voice].append(mido.Message( - 'program_change', program=52, time=0)) - - # add notes to the four voice tracks - for i in range(len(piece)): - pitches = {'soprano': piece[i, 0], 'alto': piece[i, 1], - 'tenor': piece[i, 2], 'bass': piece[i, 3]} - for voice in tracks: - if np.isnan(past_pitches[voice]): - past_pitches[voice] = None - if np.isnan(pitches[voice]): - pitches[voice] = None - if pitches[voice] != past_pitches[voice]: - if past_pitches[voice]: - tracks[voice].append(mido.Message('note_off', note=int(past_pitches[voice]), - velocity=64, time=delta_time[voice])) - delta_time[voice] = 0 - if pitches[voice]: - tracks[voice].append(mido.Message('note_on', note=int(pitches[voice]), - velocity=64, time=delta_time[voice])) - delta_time[voice] = 0 - past_pitches[voice] = pitches[voice] - # 480 ticks per beat and each line of the array is a 16th note - delta_time[voice] += 120 - - return mid - - -# harmonize a melody -def harmonize(y, C, model, device): - """ - Generate an artificial Bach Chorale starting with y, and keeping the pitches where C==1. - Here C is an array of shape (4, 128) whose entries are 0 and 1. - The pitches outside of C are repeatedly resampled to generate new values. - For example, to harmonize the soprano line, let y be random except y[0] contains the soprano line, let C[1:] be 0 and C[0] be 1. - """ - model.eval() - with torch.no_grad(): - x = y - C2 = C.copy() - num_steps = int(2 * I * T) - alpha_max = .999 - alpha_min = .001 - eta = 3 / 4 - for i in range(num_steps): - p = np.maximum(alpha_min, alpha_max - i * (alpha_max - alpha_min) / (eta * num_steps)) - sampled_binaries = np.random.choice(2, size=C.shape, p=[p, 1 - p]) - C2 += sampled_binaries - C2[C == 1] = 1 - x_cache = x - x = model.pred(x, C2, device) - x[C2 == 1] = x_cache[C2 == 1] - C2 = C.copy() - return x - -def harmonize_melody_and_save_midi(melody, id_number, model, device): - """ - Generate an artificial chorale which has melody in the soprano line and a Bach-like harmonization in the other lines. 
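A minimal usage sketch, mirroring the inference() function defined further down in this file (it assumes the pretrained.pt checkpoint loaded there is available, and that melody is a list of exactly T = 128 MIDI pitches in the range 30 to 86):

model = Net(n_layers, hidden_size).to(device)
model.load_state_dict(torch.load('pretrained.pt', map_location=device))
melody = [67] * T  # placeholder melody: a held G4 across all 128 sixteenth-note steps
midi = harmonize_melody_and_save_midi(melody, 0, model, device)
midi.save('result.mid')  # the returned mido.MidiFile is written to disk by the caller, as in inference()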
- Save the result in a midi file named {id_number}midi.mid - """ - y = np.random.randint(P, size=(I, T)) - y[0] = np.array(melody) - 30 # subtract 30 because 30 is the minimum midi_value - D0 = np.ones((1, T)).astype(int) - D1 = np.zeros((3, T)).astype(int) - D = np.concatenate([D0, D1], axis=0) - prediction = harmonize(y, D, model, device) + 30 # 30 back on before passing to piano_roll_to_midi - prediction = prediction.transpose().tolist() - prediction = np.array(prediction) - midi_output = piano_roll_to_midi(prediction) - - return midi_output - -class Unit(nn.Module): - """ - Two convolution layers each followed by batchnorm and relu, plus a residual connection. - """ - - def __init__(self, hidden_size): - super(Unit, self).__init__() - self.conv1 = nn.Conv2d(hidden_size, hidden_size, 3, padding=1) - self.batchnorm1 = nn.BatchNorm2d(hidden_size) - self.relu1 = nn.ReLU() - self.conv2 = nn.Conv2d(hidden_size, hidden_size, 3, padding=1) - self.batchnorm2 = nn.BatchNorm2d(hidden_size) - self.relu2 = nn.ReLU() - - def forward(self, x): - y = x - y = self.conv1(y) - y = self.batchnorm1(y) - y = self.relu1(y) - y = self.conv2(y) - y = self.batchnorm2(y) - y = y + x - y = self.relu2(y) - return y - -class Net(nn.Module): - """ - A CNN that where you input a starter chorale and a mask and it outputs a prediction for the values - in the starter chorale away from the mask that are most like the training data. - """ - - def __init__(self, n_layers, hidden_size): - super(Net, self).__init__() - self.initial_conv = nn.Conv2d(2 * I, hidden_size, 3, padding=1) - self.initial_batchnorm = nn.BatchNorm2d(hidden_size) - self.initial_relu = nn.ReLU() - self.conv_layers = nn.ModuleList() - # n_layer // 2 because there are 2 convs in each "unit" to handle residual connect - for _i in range(n_layers // 2): - self.conv_layers.append(Unit(hidden_size)) - self.conv_pitch_linear = nn.Conv2d(P, P, 1, padding=0) - self.conv_instrument_downproj = nn.Conv2d(hidden_size, I, 1, padding=0) - - def forward(self, x, C): - # x is a tensor of shape (N, I, T, P) - # C is a tensor of 0s and 1s of shape (N, I, T) - # returns a tensor of shape (N, I, T, P) - - # get the number of batches - N = x.shape[0] - T = x.shape[2] - - # tile the array C out of a tensor of shape (N, I, T, P) - tiled_C = C.view(N, I, T, 1) - tiled_C = tiled_C.repeat(1, 1, 1, P) - - # mask x and combine it with the mask to produce a tensor of shape (N, 2*I, T, P) - y = torch.cat((tiled_C * x, tiled_C), dim=1) - - # apply the convolution and relu layers - y = self.initial_conv(y) - y = self.initial_batchnorm(y) - y = self.initial_relu(y) - for _n in range(len(self.conv_layers)): - y = self.conv_layers[_n](y) - y = torch.permute(y, (0, 3, 1, 2)) - y = self.conv_pitch_linear(y) - y = torch.permute(y, (0, 2, 3, 1)) - y = self.conv_instrument_downproj(y) - return y - - def expand(self, y, C, device): - # y is an array of shape (I, T) with integer entries in [0, P) - # C is an array of shape (I, T) consisting of 0s and 1s - # the entries of y away from the support of C should be considered 'unknown' - - # x is shape (I, T, P) one-hot representation of y - compressed = y.reshape(-1) - x = np.zeros((I * T, P)) - r = np.arange(I * T) - x[r, compressed] = 1 - x = x.reshape(I, T, P) - - # prep x and C for the plugging into the model - x = torch.tensor(x).type(torch.FloatTensor).to(device) - x = x.view(1, I, T, P) - C2 = torch.tensor(C).type(torch.FloatTensor).view(1, I, T).to(device) - return x, C2 - - def pred(self, y, C, device, temperature=1.0, seed=100): - x, C2 = 
self.expand(y, C, device) - # plug x and C2 into the model - rs = np.random.RandomState(seed) - with torch.no_grad(): - out = self.forward(x, C2).view(I, T, P).cpu().numpy() - out = out.transpose(2, 0, 1) # shape (P, I, T) - probs = np.exp(out / temperature) / np.exp(out / temperature).sum(axis=0) # shape (P, I, T) - cum_probs = np.cumsum(probs, axis=0) # shape (P, I, T) - u = rs.rand(I, T) # shape (I, T) - return np.argmax(cum_probs > u, axis=0) - -def inference(input): - print("hello") - # data = np.load(input, encoding='bytes', allow_pickle=True) - # test_sample = data['valid'][0].transpose()[:, :128] - # test_sample_melody = test_sample[0] - # input = pretty_midi.PrettyMIDI('catechor.mid') - # melody = [] - # notes = input.instruments[0].notes - # # print(notes) - # for note in notes: - # dur = note.end - note.start - # if dur > 0.3: - # melody.extend([note.pitch for i in range(int(dur / 0.3))]) - # melody.append(note.pitch) - # # print(melody) - - melody = [67, 67, 67, 67, 69, 69, 69, 69, 70, 70, 70, 70, 70, 70, 70, 70, 72, 72, 72, 72, 70, 70, 70, 70, 69, 69, 69, 69, 69, 69, 69, 69, 70, 70, 70, 70, 72, 72, 72, 72, 74, 74, 74, 74, 70, 70, 70, 70, 75, 75, 75, 75, 74, 74, 74, 74, 72, 72, 72, 72, 72, 72, 72, 70, 70, 70, 70, 70, 70, 70, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 72, 72, 72, 72, 70, 70, 70, 70, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 74, 74, 74, 74, 75, 75, 75, 75, 75, 75, 75, 75, 74, 74, 74, 74, 72, 72, 72, 72, 74, 74, 74, 74, 74, 74, 74, 72, 72, 72, 72, 72, 72, 72, 67, 67, 67, 67, 69, 69, 69, 69, 70, 70, 70, 70, 70, 70, 70, 70, 72, 72, 72, 72, 72, 72, 72, 72, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 75, 75, 75, 75, 74, 74, 74, 74, 72, 72, 72, 72, 70, 70, 70, 70, 69, 69, 69, 69, 69, 69, 69, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67] - melody = melody[0:128] - model = Net(n_layers, hidden_size).to(device) - model.load_state_dict(torch.load('pretrained.pt', map_location=torch.device('cpu'))) - out = harmonize_melody_and_save_midi(melody, 0, model, device) - # out = mido.MidiFile('result.mid') - out.save('result.mid') - - # use fluidsynth to convert MIDI to WAV so the user can hear the output - sound_font = "/usr/share/sounds/sf2/FluidR3_GM.sf2" - FluidSynth(sound_font).midi_to_audio('result.mid', 'result.wav') - return 'result.wav', 'result.mid' - -examples=[['catechor.mid']] - -iface = gr.Interface( - inference, - inputs="file", - outputs=["audio", "file"], - # examples=examples, - cache_examples=True - ) -iface.launch(debug=True) - \ No newline at end of file diff --git a/spaces/VoiceHero69/changer/setup_tools/magicinstaller/requirements/whisper_package.py b/spaces/VoiceHero69/changer/setup_tools/magicinstaller/requirements/whisper_package.py deleted file mode 100644 index 60678f42c35032b44c0b0d7fc47077917d38642a..0000000000000000000000000000000000000000 --- a/spaces/VoiceHero69/changer/setup_tools/magicinstaller/requirements/whisper_package.py +++ /dev/null @@ -1,5 +0,0 @@ -from setup_tools.magicinstaller.requirement import SimpleRequirement - - -class Whisper(SimpleRequirement): - package_name = 'openai-whisper' diff --git a/spaces/WindVChen/INR-Harmon/utils/__init__.py b/spaces/WindVChen/INR-Harmon/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Wootang01/question_answer/app.py b/spaces/Wootang01/question_answer/app.py deleted file mode 100644 index 
5110900b95a44db16696a2fb20c0b74e29c8dc94..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/question_answer/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr -from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline -title = "Question Answer Generator" -description = "Enter a paragraph or sentence. Ask a question based on the paragraph or sentence." -examples = [ - ["However, this year I find it hard to cope with my schoolwork.", "When does the author find it hard to cope with schoolwork?"], - ["What’s more, the only way I can keep myself awake and finish off everything I need to for the day is by consuming energy drinks. I know that some of these drinks have caffeine in them, and sometimes I feel groggy in the morning if I’ve had more than a couple the night before.", "What does 'them' refer to?"], - ["I know that some of these drinks have caffeine in them, and sometimes I feel groggy in the morning if I’ve had more than a couple the night before.", "What word means 'sleepy'?"] -] - -gr.Interface.load("huggingface/deepset/roberta-base-squad2", - inputs=[gr.inputs.Textbox(lines=10, label="Paragraph or sentence", placeholder="Type a sentence or paragraph here."), - gr.inputs.Textbox(lines=2, label="Question", placeholder="Ask a question based on the context.")], - outputs=[gr.outputs.Textbox(label="Answer"), - gr.outputs.Label(label="Probability")], - title=title, description=description, examples=examples).launch(share=False) \ No newline at end of file diff --git a/spaces/Xenova/next-example-app/_next/static/chunks/app/page-43e50fcf65f99793.js b/spaces/Xenova/next-example-app/_next/static/chunks/app/page-43e50fcf65f99793.js deleted file mode 100644 index 6a2743bf88f67eaf7622674381ebf0c73ed076bc..0000000000000000000000000000000000000000 --- a/spaces/Xenova/next-example-app/_next/static/chunks/app/page-43e50fcf65f99793.js +++ /dev/null @@ -1,9 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{1861:function(e,t,r){Promise.resolve().then(r.bind(r,3451))},3451:function(e,t,r){"use strict";r.r(t),r.d(t,{default:function(){return u}});var n=r(7437),s=r(2265);function u(){let[e,t]=(0,s.useState)(null),[u,a]=(0,s.useState)(null),l=(0,s.useRef)(null);(0,s.useEffect)(()=>{l.current||(l.current=new Worker(r.tu(new URL(r.p+r.u(227),r.b)),{type:void 0}));let e=e=>{switch(e.data.status){case"initiate":a(!1);break;case"ready":a(!0);break;case"complete":t(e.data.output[0])}};return l.current.addEventListener("message",e),()=>l.current.removeEventListener("message",e)});let c=(0,s.useCallback)(e=>{l.current&&l.current.postMessage({text:e})},[]);return(0,n.jsxs)("main",{className:"flex min-h-screen flex-col items-center justify-center p-12",children:[(0,n.jsx)("h1",{className:"text-5xl font-bold mb-2 text-center",children:"Transformers.js"}),(0,n.jsx)("h2",{className:"text-2xl mb-4 text-center",children:"Next.js template"}),(0,n.jsx)("input",{type:"text",className:"w-full max-w-xs p-2 border border-gray-300 rounded mb-4",placeholder:"Enter text here",onInput:e=>{c(e.target.value)}}),null!==u&&(0,n.jsx)("pre",{className:"bg-gray-100 p-2 rounded",children:u&&e?JSON.stringify(e,null,2):"Loading..."})]})}},622:function(e,t,r){"use strict";/** - * @license React - * react-jsx-runtime.production.min.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var n=r(2265),s=Symbol.for("react.element"),u=(Symbol.for("react.fragment"),Object.prototype.hasOwnProperty),a=n.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ReactCurrentOwner,l={key:!0,ref:!0,__self:!0,__source:!0};function c(e,t,r){var n,c={},o=null,i=null;for(n in void 0!==r&&(o=""+r),void 0!==t.key&&(o=""+t.key),void 0!==t.ref&&(i=t.ref),t)u.call(t,n)&&!l.hasOwnProperty(n)&&(c[n]=t[n]);if(e&&e.defaultProps)for(n in t=e.defaultProps)void 0===c[n]&&(c[n]=t[n]);return{$$typeof:s,type:e,key:o,ref:i,props:c,_owner:a.current}}t.jsx=c,t.jsxs=c},7437:function(e,t,r){"use strict";e.exports=r(622)}},function(e){e.O(0,[971,596,744],function(){return e(e.s=1861)}),_N_E=e.O()}]); \ No newline at end of file diff --git a/spaces/Xenova/text-to-speech-client/assets/index-77d0c996.js b/spaces/Xenova/text-to-speech-client/assets/index-77d0c996.js deleted file mode 100644 index de71e8a5d0f4acb71d72d8d022f60e12c0c82a4d..0000000000000000000000000000000000000000 --- a/spaces/Xenova/text-to-speech-client/assets/index-77d0c996.js +++ /dev/null @@ -1,40 +0,0 @@ -(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const u of l)if(u.type==="childList")for(const o of u.addedNodes)o.tagName==="LINK"&&o.rel==="modulepreload"&&r(o)}).observe(document,{childList:!0,subtree:!0});function t(l){const u={};return l.integrity&&(u.integrity=l.integrity),l.referrerPolicy&&(u.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?u.credentials="include":l.crossOrigin==="anonymous"?u.credentials="omit":u.credentials="same-origin",u}function r(l){if(l.ep)return;l.ep=!0;const u=t(l);fetch(l.href,u)}})();function rc(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Hi={exports:{}},el={},Wi={exports:{}},L={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var Xt=Symbol.for("react.element"),lc=Symbol.for("react.portal"),uc=Symbol.for("react.fragment"),oc=Symbol.for("react.strict_mode"),ic=Symbol.for("react.profiler"),sc=Symbol.for("react.provider"),ac=Symbol.for("react.context"),cc=Symbol.for("react.forward_ref"),fc=Symbol.for("react.suspense"),dc=Symbol.for("react.memo"),pc=Symbol.for("react.lazy"),Mo=Symbol.iterator;function mc(e){return e===null||typeof e!="object"?null:(e=Mo&&e[Mo]||e["@@iterator"],typeof e=="function"?e:null)}var Qi={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},Ki=Object.assign,Yi={};function ut(e,n,t){this.props=e,this.context=n,this.refs=Yi,this.updater=t||Qi}ut.prototype.isReactComponent={};ut.prototype.setState=function(e,n){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,n,"setState")};ut.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function Xi(){}Xi.prototype=ut.prototype;function Uu(e,n,t){this.props=e,this.context=n,this.refs=Yi,this.updater=t||Qi}var $u=Uu.prototype=new Xi;$u.constructor=Uu;Ki($u,ut.prototype);$u.isPureReactComponent=!0;var Do=Array.isArray,Gi=Object.prototype.hasOwnProperty,Au={current:null},Zi={key:!0,ref:!0,__self:!0,__source:!0};function Ji(e,n,t){var r,l={},u=null,o=null;if(n!=null)for(r in n.ref!==void 0&&(o=n.ref),n.key!==void 0&&(u=""+n.key),n)Gi.call(n,r)&&!Zi.hasOwnProperty(r)&&(l[r]=n[r]);var i=arguments.length-2;if(i===1)l.children=t;else if(1>>1,G=x[W];if(0>>1;Wl(gl,z))gnl(er,gl)?(x[W]=er,x[gn]=z,W=gn):(x[W]=gl,x[yn]=z,W=yn);else if(gnl(er,z))x[W]=er,x[gn]=z,W=gn;else break e}}return P}function l(x,P){var z=x.sortIndex-P.sortIndex;return z!==0?z:x.id-P.id}if(typeof performance=="object"&&typeof performance.now=="function"){var u=performance;e.unstable_now=function(){return u.now()}}else{var o=Date,i=o.now();e.unstable_now=function(){return o.now()-i}}var s=[],c=[],h=1,m=null,p=3,w=!1,S=!1,g=!1,R=typeof setTimeout=="function"?setTimeout:null,f=typeof clearTimeout=="function"?clearTimeout:null,a=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function d(x){for(var P=t(c);P!==null;){if(P.callback===null)r(c);else if(P.startTime<=x)r(c),P.sortIndex=P.expirationTime,n(s,P);else break;P=t(c)}}function v(x){if(g=!1,d(x),!S)if(t(s)!==null)S=!0,vl(E);else{var P=t(c);P!==null&&yl(v,P.startTime-x)}}function E(x,P){S=!1,g&&(g=!1,f(N),N=-1),w=!0;var z=p;try{for(d(P),m=t(s);m!==null&&(!(m.expirationTime>P)||x&&!Pe());){var W=m.callback;if(typeof W=="function"){m.callback=null,p=m.priorityLevel;var G=W(m.expirationTime<=P);P=e.unstable_now(),typeof G=="function"?m.callback=G:m===t(s)&&r(s),d(P)}else r(s);m=t(s)}if(m!==null)var bt=!0;else{var yn=t(c);yn!==null&&yl(v,yn.startTime-P),bt=!1}return bt}finally{m=null,p=z,w=!1}}var _=!1,C=null,N=-1,H=5,T=-1;function Pe(){return!(e.unstable_now()-Tx||125W?(x.sortIndex=z,n(c,x),t(s)===null&&x===t(c)&&(g?(f(N),N=-1):g=!0,yl(v,z-W))):(x.sortIndex=G,n(s,x),S||w||(S=!0,vl(E))),x},e.unstable_shouldYield=Pe,e.unstable_wrapCallback=function(x){var P=p;return function(){var z=p;p=P;try{return x.apply(this,arguments)}finally{p=z}}}})(ns);es.exports=ns;var Nc=es.exports;/** - * @license React - * 
react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var ts=ae,ge=Nc;function y(e){for(var n="https://reactjs.org/docs/error-decoder.html?invariant="+e,t=1;t"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),Kl=Object.prototype.hasOwnProperty,Pc=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Fo={},Uo={};function zc(e){return Kl.call(Uo,e)?!0:Kl.call(Fo,e)?!1:Pc.test(e)?Uo[e]=!0:(Fo[e]=!0,!1)}function Lc(e,n,t,r){if(t!==null&&t.type===0)return!1;switch(typeof n){case"function":case"symbol":return!0;case"boolean":return r?!1:t!==null?!t.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function Tc(e,n,t,r){if(n===null||typeof n>"u"||Lc(e,n,t,r))return!0;if(r)return!1;if(t!==null)switch(t.type){case 3:return!n;case 4:return n===!1;case 5:return isNaN(n);case 6:return isNaN(n)||1>n}return!1}function se(e,n,t,r,l,u,o){this.acceptsBooleans=n===2||n===3||n===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=t,this.propertyName=e,this.type=n,this.sanitizeURL=u,this.removeEmptyString=o}var ee={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ee[e]=new se(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var n=e[0];ee[n]=new se(n,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){ee[e]=new se(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ee[e]=new se(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ee[e]=new se(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){ee[e]=new se(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){ee[e]=new se(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){ee[e]=new se(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){ee[e]=new se(e,5,!1,e.toLowerCase(),null,!1,!1)});var Bu=/[\-:]([a-z])/g;function Hu(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color 
stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var n=e.replace(Bu,Hu);ee[n]=new se(n,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var n=e.replace(Bu,Hu);ee[n]=new se(n,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var n=e.replace(Bu,Hu);ee[n]=new se(n,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){ee[e]=new se(e,1,!1,e.toLowerCase(),null,!1,!1)});ee.xlinkHref=new se("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){ee[e]=new se(e,1,!1,e.toLowerCase(),null,!0,!0)});function Wu(e,n,t,r){var l=ee.hasOwnProperty(n)?ee[n]:null;(l!==null?l.type!==0:r||!(2i||l[o]!==u[i]){var s=` -`+l[o].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=o&&0<=i);break}}}finally{kl=!1,Error.prepareStackTrace=t}return(e=e?e.displayName||e.name:"")?gt(e):""}function Rc(e){switch(e.tag){case 5:return gt(e.type);case 16:return gt("Lazy");case 13:return gt("Suspense");case 19:return gt("SuspenseList");case 0:case 2:case 15:return e=El(e.type,!1),e;case 11:return e=El(e.type.render,!1),e;case 1:return e=El(e.type,!0),e;default:return""}}function Zl(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case Dn:return"Fragment";case Mn:return"Portal";case Yl:return"Profiler";case Qu:return"StrictMode";case Xl:return"Suspense";case Gl:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case us:return(e.displayName||"Context")+".Consumer";case ls:return(e._context.displayName||"Context")+".Provider";case Ku:var n=e.render;return e=e.displayName,e||(e=n.displayName||n.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case Yu:return n=e.displayName||null,n!==null?n:Zl(e.type)||"Memo";case Je:n=e._payload,e=e._init;try{return Zl(e(n))}catch{}}return null}function Oc(e){var n=e.type;switch(e.tag){case 24:return"Cache";case 9:return(n.displayName||"Context")+".Consumer";case 10:return(n._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=n.render,e=e.displayName||e.name||"",n.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return n;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return Zl(n);case 8:return n===Qu?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof n=="function")return n.displayName||n.name||null;if(typeof n=="string")return n}return null}function dn(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function is(e){var n=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(n==="checkbox"||n==="radio")}function jc(e){var 
n=is(e)?"checked":"value",t=Object.getOwnPropertyDescriptor(e.constructor.prototype,n),r=""+e[n];if(!e.hasOwnProperty(n)&&typeof t<"u"&&typeof t.get=="function"&&typeof t.set=="function"){var l=t.get,u=t.set;return Object.defineProperty(e,n,{configurable:!0,get:function(){return l.call(this)},set:function(o){r=""+o,u.call(this,o)}}),Object.defineProperty(e,n,{enumerable:t.enumerable}),{getValue:function(){return r},setValue:function(o){r=""+o},stopTracking:function(){e._valueTracker=null,delete e[n]}}}}function rr(e){e._valueTracker||(e._valueTracker=jc(e))}function ss(e){if(!e)return!1;var n=e._valueTracker;if(!n)return!0;var t=n.getValue(),r="";return e&&(r=is(e)?e.checked?"true":"false":e.value),e=r,e!==t?(n.setValue(e),!0):!1}function Tr(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function Jl(e,n){var t=n.checked;return V({},n,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:t??e._wrapperState.initialChecked})}function Ao(e,n){var t=n.defaultValue==null?"":n.defaultValue,r=n.checked!=null?n.checked:n.defaultChecked;t=dn(n.value!=null?n.value:t),e._wrapperState={initialChecked:r,initialValue:t,controlled:n.type==="checkbox"||n.type==="radio"?n.checked!=null:n.value!=null}}function as(e,n){n=n.checked,n!=null&&Wu(e,"checked",n,!1)}function ql(e,n){as(e,n);var t=dn(n.value),r=n.type;if(t!=null)r==="number"?(t===0&&e.value===""||e.value!=t)&&(e.value=""+t):e.value!==""+t&&(e.value=""+t);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}n.hasOwnProperty("value")?bl(e,n.type,t):n.hasOwnProperty("defaultValue")&&bl(e,n.type,dn(n.defaultValue)),n.checked==null&&n.defaultChecked!=null&&(e.defaultChecked=!!n.defaultChecked)}function Vo(e,n,t){if(n.hasOwnProperty("value")||n.hasOwnProperty("defaultValue")){var r=n.type;if(!(r!=="submit"&&r!=="reset"||n.value!==void 0&&n.value!==null))return;n=""+e._wrapperState.initialValue,t||n===e.value||(e.value=n),e.defaultValue=n}t=e.name,t!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,t!==""&&(e.name=t)}function bl(e,n,t){(n!=="number"||Tr(e.ownerDocument)!==e)&&(t==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+t&&(e.defaultValue=""+t))}var wt=Array.isArray;function Kn(e,n,t,r){if(e=e.options,n){n={};for(var l=0;l"+n.valueOf().toString()+"",n=lr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;n.firstChild;)e.appendChild(n.firstChild)}});function Ot(e,n){if(n){var t=e.firstChild;if(t&&t===e.lastChild&&t.nodeType===3){t.nodeValue=n;return}}e.textContent=n}var Et={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},Mc=["Webkit","ms","Moz","O"];Object.keys(Et).forEach(function(e){Mc.forEach(function(n){n=n+e.charAt(0).toUpperCase()+e.substring(1),Et[n]=Et[e]})});function ps(e,n,t){return n==null||typeof n=="boolean"||n===""?"":t||typeof 
n!="number"||n===0||Et.hasOwnProperty(e)&&Et[e]?(""+n).trim():n+"px"}function ms(e,n){e=e.style;for(var t in n)if(n.hasOwnProperty(t)){var r=t.indexOf("--")===0,l=ps(t,n[t],r);t==="float"&&(t="cssFloat"),r?e.setProperty(t,l):e[t]=l}}var Dc=V({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function tu(e,n){if(n){if(Dc[e]&&(n.children!=null||n.dangerouslySetInnerHTML!=null))throw Error(y(137,e));if(n.dangerouslySetInnerHTML!=null){if(n.children!=null)throw Error(y(60));if(typeof n.dangerouslySetInnerHTML!="object"||!("__html"in n.dangerouslySetInnerHTML))throw Error(y(61))}if(n.style!=null&&typeof n.style!="object")throw Error(y(62))}}function ru(e,n){if(e.indexOf("-")===-1)return typeof n.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var lu=null;function Xu(e){return e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var uu=null,Yn=null,Xn=null;function Wo(e){if(e=Jt(e)){if(typeof uu!="function")throw Error(y(280));var n=e.stateNode;n&&(n=ul(n),uu(e.stateNode,e.type,n))}}function hs(e){Yn?Xn?Xn.push(e):Xn=[e]:Yn=e}function vs(){if(Yn){var e=Yn,n=Xn;if(Xn=Yn=null,Wo(e),n)for(e=0;e>>=0,e===0?32:31-(Kc(e)/Yc|0)|0}var ur=64,or=4194304;function St(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function Mr(e,n){var t=e.pendingLanes;if(t===0)return 0;var r=0,l=e.suspendedLanes,u=e.pingedLanes,o=t&268435455;if(o!==0){var i=o&~l;i!==0?r=St(i):(u&=o,u!==0&&(r=St(u)))}else o=t&~l,o!==0?r=St(o):u!==0&&(r=St(u));if(r===0)return 0;if(n!==0&&n!==r&&!(n&l)&&(l=r&-r,u=n&-n,l>=u||l===16&&(u&4194240)!==0))return n;if(r&4&&(r|=t&16),n=e.entangledLanes,n!==0)for(e=e.entanglements,n&=r;0t;t++)n.push(e);return n}function Gt(e,n,t){e.pendingLanes|=n,n!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,n=31-Oe(n),e[n]=t}function Jc(e,n){var t=e.pendingLanes&~n;e.pendingLanes=n,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=n,e.mutableReadLanes&=n,e.entangledLanes&=n,n=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=_t),bo=String.fromCharCode(32),ei=!1;function Is(e,n){switch(e){case"keyup":return Nf.indexOf(n.keyCode)!==-1;case"keydown":return n.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Fs(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var In=!1;function zf(e,n){switch(e){case"compositionend":return Fs(n);case"keypress":return n.which!==32?null:(ei=!0,bo);case"textInput":return e=n.data,e===bo&&ei?null:e;default:return null}}function Lf(e,n){if(In)return e==="compositionend"||!to&&Is(e,n)?(e=Ms(),kr=bu=nn=null,In=!1,e):null;switch(e){case"paste":return 
null;case"keypress":if(!(n.ctrlKey||n.altKey||n.metaKey)||n.ctrlKey&&n.altKey){if(n.char&&1=n)return{node:t,offset:n-e};e=r}e:{for(;t;){if(t.nextSibling){t=t.nextSibling;break e}t=t.parentNode}t=void 0}t=li(t)}}function Vs(e,n){return e&&n?e===n?!0:e&&e.nodeType===3?!1:n&&n.nodeType===3?Vs(e,n.parentNode):"contains"in e?e.contains(n):e.compareDocumentPosition?!!(e.compareDocumentPosition(n)&16):!1:!1}function Bs(){for(var e=window,n=Tr();n instanceof e.HTMLIFrameElement;){try{var t=typeof n.contentWindow.location.href=="string"}catch{t=!1}if(t)e=n.contentWindow;else break;n=Tr(e.document)}return n}function ro(e){var n=e&&e.nodeName&&e.nodeName.toLowerCase();return n&&(n==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||n==="textarea"||e.contentEditable==="true")}function Uf(e){var n=Bs(),t=e.focusedElem,r=e.selectionRange;if(n!==t&&t&&t.ownerDocument&&Vs(t.ownerDocument.documentElement,t)){if(r!==null&&ro(t)){if(n=r.start,e=r.end,e===void 0&&(e=n),"selectionStart"in t)t.selectionStart=n,t.selectionEnd=Math.min(e,t.value.length);else if(e=(n=t.ownerDocument||document)&&n.defaultView||window,e.getSelection){e=e.getSelection();var l=t.textContent.length,u=Math.min(r.start,l);r=r.end===void 0?u:Math.min(r.end,l),!e.extend&&u>r&&(l=r,r=u,u=l),l=ui(t,u);var o=ui(t,r);l&&o&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==o.node||e.focusOffset!==o.offset)&&(n=n.createRange(),n.setStart(l.node,l.offset),e.removeAllRanges(),u>r?(e.addRange(n),e.extend(o.node,o.offset)):(n.setEnd(o.node,o.offset),e.addRange(n)))}}for(n=[],e=t;e=e.parentNode;)e.nodeType===1&&n.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof t.focus=="function"&&t.focus(),t=0;t=document.documentMode,Fn=null,fu=null,Nt=null,du=!1;function oi(e,n,t){var r=t.window===t?t.document:t.nodeType===9?t:t.ownerDocument;du||Fn==null||Fn!==Tr(r)||(r=Fn,"selectionStart"in r&&ro(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),Nt&&Ut(Nt,r)||(Nt=r,r=Fr(fu,"onSelect"),0An||(e.current=gu[An],gu[An]=null,An--)}function D(e,n){An++,gu[An]=e.current,e.current=n}var pn={},le=hn(pn),de=hn(!1),Nn=pn;function bn(e,n){var t=e.type.contextTypes;if(!t)return pn;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===n)return r.__reactInternalMemoizedMaskedChildContext;var l={},u;for(u in t)l[u]=n[u];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=n,e.__reactInternalMemoizedMaskedChildContext=l),l}function pe(e){return e=e.childContextTypes,e!=null}function $r(){F(de),F(le)}function pi(e,n,t){if(le.current!==pn)throw Error(y(168));D(le,n),D(de,t)}function Js(e,n,t){var r=e.stateNode;if(n=n.childContextTypes,typeof r.getChildContext!="function")return t;r=r.getChildContext();for(var l in r)if(!(l in n))throw Error(y(108,Oc(e)||"Unknown",l));return V({},t,r)}function Ar(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||pn,Nn=le.current,D(le,e),D(de,de.current),!0}function mi(e,n,t){var r=e.stateNode;if(!r)throw Error(y(169));t?(e=Js(e,n,Nn),r.__reactInternalMemoizedMergedChildContext=e,F(de),F(le),D(le,e)):F(de),D(de,t)}var Ve=null,ol=!1,Il=!1;function qs(e){Ve===null?Ve=[e]:Ve.push(e)}function Zf(e){ol=!0,qs(e)}function vn(){if(!Il&&Ve!==null){Il=!0;var e=0,n=j;try{var 
t=Ve;for(j=1;e>=o,l-=o,Be=1<<32-Oe(n)+l|t<N?(H=C,C=null):H=C.sibling;var T=p(f,C,d[N],v);if(T===null){C===null&&(C=H);break}e&&C&&T.alternate===null&&n(f,C),a=u(T,a,N),_===null?E=T:_.sibling=T,_=T,C=H}if(N===d.length)return t(f,C),U&&wn(f,N),E;if(C===null){for(;NN?(H=C,C=null):H=C.sibling;var Pe=p(f,C,T.value,v);if(Pe===null){C===null&&(C=H);break}e&&C&&Pe.alternate===null&&n(f,C),a=u(Pe,a,N),_===null?E=Pe:_.sibling=Pe,_=Pe,C=H}if(T.done)return t(f,C),U&&wn(f,N),E;if(C===null){for(;!T.done;N++,T=d.next())T=m(f,T.value,v),T!==null&&(a=u(T,a,N),_===null?E=T:_.sibling=T,_=T);return U&&wn(f,N),E}for(C=r(f,C);!T.done;N++,T=d.next())T=w(C,f,N,T.value,v),T!==null&&(e&&T.alternate!==null&&C.delete(T.key===null?N:T.key),a=u(T,a,N),_===null?E=T:_.sibling=T,_=T);return e&&C.forEach(function(st){return n(f,st)}),U&&wn(f,N),E}function R(f,a,d,v){if(typeof d=="object"&&d!==null&&d.type===Dn&&d.key===null&&(d=d.props.children),typeof d=="object"&&d!==null){switch(d.$$typeof){case tr:e:{for(var E=d.key,_=a;_!==null;){if(_.key===E){if(E=d.type,E===Dn){if(_.tag===7){t(f,_.sibling),a=l(_,d.props.children),a.return=f,f=a;break e}}else if(_.elementType===E||typeof E=="object"&&E!==null&&E.$$typeof===Je&&ki(E)===_.type){t(f,_.sibling),a=l(_,d.props),a.ref=ht(f,_,d),a.return=f,f=a;break e}t(f,_);break}else n(f,_);_=_.sibling}d.type===Dn?(a=Cn(d.props.children,f.mode,v,d.key),a.return=f,f=a):(v=Lr(d.type,d.key,d.props,null,f.mode,v),v.ref=ht(f,a,d),v.return=f,f=v)}return o(f);case Mn:e:{for(_=d.key;a!==null;){if(a.key===_)if(a.tag===4&&a.stateNode.containerInfo===d.containerInfo&&a.stateNode.implementation===d.implementation){t(f,a.sibling),a=l(a,d.children||[]),a.return=f,f=a;break e}else{t(f,a);break}else n(f,a);a=a.sibling}a=Wl(d,f.mode,v),a.return=f,f=a}return o(f);case Je:return _=d._init,R(f,a,_(d._payload),v)}if(wt(d))return S(f,a,d,v);if(ct(d))return g(f,a,d,v);pr(f,d)}return typeof d=="string"&&d!==""||typeof d=="number"?(d=""+d,a!==null&&a.tag===6?(t(f,a.sibling),a=l(a,d),a.return=f,f=a):(t(f,a),a=Hl(d,f.mode,v),a.return=f,f=a),o(f)):t(f,a)}return R}var nt=oa(!0),ia=oa(!1),qt={},$e=hn(qt),Bt=hn(qt),Ht=hn(qt);function xn(e){if(e===qt)throw Error(y(174));return e}function po(e,n){switch(D(Ht,n),D(Bt,e),D($e,qt),e=n.nodeType,e){case 9:case 11:n=(n=n.documentElement)?n.namespaceURI:nu(null,"");break;default:e=e===8?n.parentNode:n,n=e.namespaceURI||null,e=e.tagName,n=nu(n,e)}F($e),D($e,n)}function tt(){F($e),F(Bt),F(Ht)}function sa(e){xn(Ht.current);var n=xn($e.current),t=nu(n,e.type);n!==t&&(D(Bt,e),D($e,t))}function mo(e){Bt.current===e&&(F($e),F(Bt))}var $=hn(0);function Kr(e){for(var n=e;n!==null;){if(n.tag===13){var t=n.memoizedState;if(t!==null&&(t=t.dehydrated,t===null||t.data==="$?"||t.data==="$!"))return n}else if(n.tag===19&&n.memoizedProps.revealOrder!==void 0){if(n.flags&128)return n}else if(n.child!==null){n.child.return=n,n=n.child;continue}if(n===e)break;for(;n.sibling===null;){if(n.return===null||n.return===e)return null;n=n.return}n.sibling.return=n.return,n=n.sibling}return null}var Fl=[];function ho(){for(var e=0;et?t:4,e(!0);var r=Ul.transition;Ul.transition={};try{e(!1),n()}finally{j=t,Ul.transition=r}}function _a(){return Ne().memoizedState}function ed(e,n,t){var r=cn(e);if(t={lane:r,action:t,hasEagerState:!1,eagerState:null,next:null},Ca(e))Na(n,t);else if(t=ta(e,n,t,r),t!==null){var l=oe();je(t,e,r,l),Pa(t,n,r)}}function nd(e,n,t){var r=cn(e),l={lane:r,action:t,hasEagerState:!1,eagerState:null,next:null};if(Ca(e))Na(n,l);else{var 
u=e.alternate;if(e.lanes===0&&(u===null||u.lanes===0)&&(u=n.lastRenderedReducer,u!==null))try{var o=n.lastRenderedState,i=u(o,t);if(l.hasEagerState=!0,l.eagerState=i,Me(i,o)){var s=n.interleaved;s===null?(l.next=l,co(n)):(l.next=s.next,s.next=l),n.interleaved=l;return}}catch{}finally{}t=ta(e,n,l,r),t!==null&&(l=oe(),je(t,e,r,l),Pa(t,n,r))}}function Ca(e){var n=e.alternate;return e===A||n!==null&&n===A}function Na(e,n){Pt=Yr=!0;var t=e.pending;t===null?n.next=n:(n.next=t.next,t.next=n),e.pending=n}function Pa(e,n,t){if(t&4194240){var r=n.lanes;r&=e.pendingLanes,t|=r,n.lanes=t,Zu(e,t)}}var Xr={readContext:Ce,useCallback:ne,useContext:ne,useEffect:ne,useImperativeHandle:ne,useInsertionEffect:ne,useLayoutEffect:ne,useMemo:ne,useReducer:ne,useRef:ne,useState:ne,useDebugValue:ne,useDeferredValue:ne,useTransition:ne,useMutableSource:ne,useSyncExternalStore:ne,useId:ne,unstable_isNewReconciler:!1},td={readContext:Ce,useCallback:function(e,n){return Ie().memoizedState=[e,n===void 0?null:n],e},useContext:Ce,useEffect:xi,useImperativeHandle:function(e,n,t){return t=t!=null?t.concat([e]):null,Cr(4194308,4,wa.bind(null,n,e),t)},useLayoutEffect:function(e,n){return Cr(4194308,4,e,n)},useInsertionEffect:function(e,n){return Cr(4,2,e,n)},useMemo:function(e,n){var t=Ie();return n=n===void 0?null:n,e=e(),t.memoizedState=[e,n],e},useReducer:function(e,n,t){var r=Ie();return n=t!==void 0?t(n):n,r.memoizedState=r.baseState=n,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:n},r.queue=e,e=e.dispatch=ed.bind(null,A,e),[r.memoizedState,e]},useRef:function(e){var n=Ie();return e={current:e},n.memoizedState=e},useState:Ei,useDebugValue:So,useDeferredValue:function(e){return Ie().memoizedState=e},useTransition:function(){var e=Ei(!1),n=e[0];return e=bf.bind(null,e[1]),Ie().memoizedState=e,[n,e]},useMutableSource:function(){},useSyncExternalStore:function(e,n,t){var r=A,l=Ie();if(U){if(t===void 0)throw Error(y(407));t=t()}else{if(t=n(),J===null)throw Error(y(349));zn&30||fa(r,n,t)}l.memoizedState=t;var u={value:t,getSnapshot:n};return l.queue=u,xi(pa.bind(null,r,u,e),[e]),r.flags|=2048,Kt(9,da.bind(null,r,u,t,n),void 0,null),t},useId:function(){var e=Ie(),n=J.identifierPrefix;if(U){var t=He,r=Be;t=(r&~(1<<32-Oe(r)-1)).toString(32)+t,n=":"+n+"R"+t,t=Wt++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=o.createElement(t,{is:r.is}):(e=o.createElement(t),t==="select"&&(o=e,r.multiple?o.multiple=!0:r.size&&(o.size=r.size))):e=o.createElementNS(e,t),e[Fe]=n,e[Vt]=r,Ia(e,n,!1,!1),n.stateNode=e;e:{switch(o=ru(t,r),t){case"dialog":I("cancel",e),I("close",e),l=r;break;case"iframe":case"object":case"embed":I("load",e),l=r;break;case"video":case"audio":for(l=0;llt&&(n.flags|=128,r=!0,vt(u,!1),n.lanes=4194304)}else{if(!r)if(e=Kr(o),e!==null){if(n.flags|=128,r=!0,t=e.updateQueue,t!==null&&(n.updateQueue=t,n.flags|=4),vt(u,!0),u.tail===null&&u.tailMode==="hidden"&&!o.alternate&&!U)return te(n),null}else 2*Q()-u.renderingStartTime>lt&&t!==1073741824&&(n.flags|=128,r=!0,vt(u,!1),n.lanes=4194304);u.isBackwards?(o.sibling=n.child,n.child=o):(t=u.last,t!==null?t.sibling=o:n.child=o,u.last=o)}return u.tail!==null?(n=u.tail,u.rendering=n,u.tail=n.sibling,u.renderingStartTime=Q(),n.sibling=null,t=$.current,D($,r?t&1|2:t&1),n):(te(n),null);case 22:case 23:return No(),r=n.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(n.flags|=8192),r&&n.mode&1?he&1073741824&&(te(n),n.subtreeFlags&6&&(n.flags|=8192)):te(n),null;case 24:return null;case 25:return null}throw 
Error(y(156,n.tag))}function cd(e,n){switch(uo(n),n.tag){case 1:return pe(n.type)&&$r(),e=n.flags,e&65536?(n.flags=e&-65537|128,n):null;case 3:return tt(),F(de),F(le),ho(),e=n.flags,e&65536&&!(e&128)?(n.flags=e&-65537|128,n):null;case 5:return mo(n),null;case 13:if(F($),e=n.memoizedState,e!==null&&e.dehydrated!==null){if(n.alternate===null)throw Error(y(340));et()}return e=n.flags,e&65536?(n.flags=e&-65537|128,n):null;case 19:return F($),null;case 4:return tt(),null;case 10:return ao(n.type._context),null;case 22:case 23:return No(),null;case 24:return null;default:return null}}var hr=!1,re=!1,fd=typeof WeakSet=="function"?WeakSet:Set,k=null;function Wn(e,n){var t=e.ref;if(t!==null)if(typeof t=="function")try{t(null)}catch(r){B(e,n,r)}else t.current=null}function Tu(e,n,t){try{t()}catch(r){B(e,n,r)}}var Oi=!1;function dd(e,n){if(pu=Dr,e=Bs(),ro(e)){if("selectionStart"in e)var t={start:e.selectionStart,end:e.selectionEnd};else e:{t=(t=e.ownerDocument)&&t.defaultView||window;var r=t.getSelection&&t.getSelection();if(r&&r.rangeCount!==0){t=r.anchorNode;var l=r.anchorOffset,u=r.focusNode;r=r.focusOffset;try{t.nodeType,u.nodeType}catch{t=null;break e}var o=0,i=-1,s=-1,c=0,h=0,m=e,p=null;n:for(;;){for(var w;m!==t||l!==0&&m.nodeType!==3||(i=o+l),m!==u||r!==0&&m.nodeType!==3||(s=o+r),m.nodeType===3&&(o+=m.nodeValue.length),(w=m.firstChild)!==null;)p=m,m=w;for(;;){if(m===e)break n;if(p===t&&++c===l&&(i=o),p===u&&++h===r&&(s=o),(w=m.nextSibling)!==null)break;m=p,p=m.parentNode}m=w}t=i===-1||s===-1?null:{start:i,end:s}}else t=null}t=t||{start:0,end:0}}else t=null;for(mu={focusedElem:e,selectionRange:t},Dr=!1,k=n;k!==null;)if(n=k,e=n.child,(n.subtreeFlags&1028)!==0&&e!==null)e.return=n,k=e;else for(;k!==null;){n=k;try{var S=n.alternate;if(n.flags&1024)switch(n.tag){case 0:case 11:case 15:break;case 1:if(S!==null){var g=S.memoizedProps,R=S.memoizedState,f=n.stateNode,a=f.getSnapshotBeforeUpdate(n.elementType===n.type?g:Le(n.type,g),R);f.__reactInternalSnapshotBeforeUpdate=a}break;case 3:var d=n.stateNode.containerInfo;d.nodeType===1?d.textContent="":d.nodeType===9&&d.documentElement&&d.removeChild(d.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(y(163))}}catch(v){B(n,n.return,v)}if(e=n.sibling,e!==null){e.return=n.return,k=e;break}k=n.return}return S=Oi,Oi=!1,S}function zt(e,n,t){var r=n.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var u=l.destroy;l.destroy=void 0,u!==void 0&&Tu(n,t,u)}l=l.next}while(l!==r)}}function al(e,n){if(n=n.updateQueue,n=n!==null?n.lastEffect:null,n!==null){var t=n=n.next;do{if((t.tag&e)===e){var r=t.create;t.destroy=r()}t=t.next}while(t!==n)}}function Ru(e){var n=e.ref;if(n!==null){var t=e.stateNode;switch(e.tag){case 5:e=t;break;default:e=t}typeof n=="function"?n(e):n.current=e}}function $a(e){var n=e.alternate;n!==null&&(e.alternate=null,$a(n)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(n=e.stateNode,n!==null&&(delete n[Fe],delete n[Vt],delete n[yu],delete n[Xf],delete n[Gf])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function Aa(e){return e.tag===5||e.tag===3||e.tag===4}function ji(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||Aa(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function 
Ou(e,n,t){var r=e.tag;if(r===5||r===6)e=e.stateNode,n?t.nodeType===8?t.parentNode.insertBefore(e,n):t.insertBefore(e,n):(t.nodeType===8?(n=t.parentNode,n.insertBefore(e,t)):(n=t,n.appendChild(e)),t=t._reactRootContainer,t!=null||n.onclick!==null||(n.onclick=Ur));else if(r!==4&&(e=e.child,e!==null))for(Ou(e,n,t),e=e.sibling;e!==null;)Ou(e,n,t),e=e.sibling}function ju(e,n,t){var r=e.tag;if(r===5||r===6)e=e.stateNode,n?t.insertBefore(e,n):t.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(ju(e,n,t),e=e.sibling;e!==null;)ju(e,n,t),e=e.sibling}var q=null,Te=!1;function Ze(e,n,t){for(t=t.child;t!==null;)Va(e,n,t),t=t.sibling}function Va(e,n,t){if(Ue&&typeof Ue.onCommitFiberUnmount=="function")try{Ue.onCommitFiberUnmount(nl,t)}catch{}switch(t.tag){case 5:re||Wn(t,n);case 6:var r=q,l=Te;q=null,Ze(e,n,t),q=r,Te=l,q!==null&&(Te?(e=q,t=t.stateNode,e.nodeType===8?e.parentNode.removeChild(t):e.removeChild(t)):q.removeChild(t.stateNode));break;case 18:q!==null&&(Te?(e=q,t=t.stateNode,e.nodeType===8?Dl(e.parentNode,t):e.nodeType===1&&Dl(e,t),It(e)):Dl(q,t.stateNode));break;case 4:r=q,l=Te,q=t.stateNode.containerInfo,Te=!0,Ze(e,n,t),q=r,Te=l;break;case 0:case 11:case 14:case 15:if(!re&&(r=t.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var u=l,o=u.destroy;u=u.tag,o!==void 0&&(u&2||u&4)&&Tu(t,n,o),l=l.next}while(l!==r)}Ze(e,n,t);break;case 1:if(!re&&(Wn(t,n),r=t.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=t.memoizedProps,r.state=t.memoizedState,r.componentWillUnmount()}catch(i){B(t,n,i)}Ze(e,n,t);break;case 21:Ze(e,n,t);break;case 22:t.mode&1?(re=(r=re)||t.memoizedState!==null,Ze(e,n,t),re=r):Ze(e,n,t);break;default:Ze(e,n,t)}}function Mi(e){var n=e.updateQueue;if(n!==null){e.updateQueue=null;var t=e.stateNode;t===null&&(t=e.stateNode=new fd),n.forEach(function(r){var l=kd.bind(null,e,r);t.has(r)||(t.add(r),r.then(l,l))})}}function ze(e,n){var t=n.deletions;if(t!==null)for(var r=0;rl&&(l=o),r&=~u}if(r=l,r=Q()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*md(r/1960))-r,10e?16:e,tn===null)var r=!1;else{if(e=tn,tn=null,Jr=0,O&6)throw Error(y(331));var l=O;for(O|=4,k=e.current;k!==null;){var u=k,o=u.child;if(k.flags&16){var i=u.deletions;if(i!==null){for(var s=0;sQ()-_o?_n(e,0):xo|=t),me(e,n)}function Ga(e,n){n===0&&(e.mode&1?(n=or,or<<=1,!(or&130023424)&&(or=4194304)):n=1);var t=oe();e=Ye(e,n),e!==null&&(Gt(e,n,t),me(e,t))}function Sd(e){var n=e.memoizedState,t=0;n!==null&&(t=n.retryLane),Ga(e,t)}function kd(e,n){var t=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(t=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(y(314))}r!==null&&r.delete(n),Ga(e,t)}var Za;Za=function(e,n,t){if(e!==null)if(e.memoizedProps!==n.pendingProps||de.current)fe=!0;else{if(!(e.lanes&t)&&!(n.flags&128))return fe=!1,sd(e,n,t);fe=!!(e.flags&131072)}else fe=!1,U&&n.flags&1048576&&bs(n,Br,n.index);switch(n.lanes=0,n.tag){case 2:var r=n.type;Nr(e,n),e=n.pendingProps;var l=bn(n,le.current);Zn(n,t),l=yo(null,n,r,e,l,t);var u=go();return n.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(n.tag=1,n.memoizedState=null,n.updateQueue=null,pe(r)?(u=!0,Ar(n)):u=!1,n.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,fo(n),l.updater=il,n.stateNode=l,l._reactInternals=n,xu(n,r,e,t),n=Nu(null,n,r,!0,u,t)):(n.tag=0,U&&u&&lo(n),ue(null,n,l,t),n=n.child),n;case 16:r=n.elementType;e:{switch(Nr(e,n),e=n.pendingProps,l=r._init,r=l(r._payload),n.type=r,l=n.tag=xd(r),e=Le(r,e),l){case 
0:n=Cu(null,n,r,e,t);break e;case 1:n=Li(null,n,r,e,t);break e;case 11:n=Pi(null,n,r,e,t);break e;case 14:n=zi(null,n,r,Le(r.type,e),t);break e}throw Error(y(306,r,""))}return n;case 0:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:Le(r,l),Cu(e,n,r,l,t);case 1:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:Le(r,l),Li(e,n,r,l,t);case 3:e:{if(ja(n),e===null)throw Error(y(387));r=n.pendingProps,u=n.memoizedState,l=u.element,ra(e,n),Qr(n,r,null,t);var o=n.memoizedState;if(r=o.element,u.isDehydrated)if(u={element:r,isDehydrated:!1,cache:o.cache,pendingSuspenseBoundaries:o.pendingSuspenseBoundaries,transitions:o.transitions},n.updateQueue.baseState=u,n.memoizedState=u,n.flags&256){l=rt(Error(y(423)),n),n=Ti(e,n,r,t,l);break e}else if(r!==l){l=rt(Error(y(424)),n),n=Ti(e,n,r,t,l);break e}else for(ve=on(n.stateNode.containerInfo.firstChild),ye=n,U=!0,Re=null,t=ia(n,null,r,t),n.child=t;t;)t.flags=t.flags&-3|4096,t=t.sibling;else{if(et(),r===l){n=Xe(e,n,t);break e}ue(e,n,r,t)}n=n.child}return n;case 5:return sa(n),e===null&&Su(n),r=n.type,l=n.pendingProps,u=e!==null?e.memoizedProps:null,o=l.children,hu(r,l)?o=null:u!==null&&hu(r,u)&&(n.flags|=32),Oa(e,n),ue(e,n,o,t),n.child;case 6:return e===null&&Su(n),null;case 13:return Ma(e,n,t);case 4:return po(n,n.stateNode.containerInfo),r=n.pendingProps,e===null?n.child=nt(n,null,r,t):ue(e,n,r,t),n.child;case 11:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:Le(r,l),Pi(e,n,r,l,t);case 7:return ue(e,n,n.pendingProps,t),n.child;case 8:return ue(e,n,n.pendingProps.children,t),n.child;case 12:return ue(e,n,n.pendingProps.children,t),n.child;case 10:e:{if(r=n.type._context,l=n.pendingProps,u=n.memoizedProps,o=l.value,D(Hr,r._currentValue),r._currentValue=o,u!==null)if(Me(u.value,o)){if(u.children===l.children&&!de.current){n=Xe(e,n,t);break e}}else for(u=n.child,u!==null&&(u.return=n);u!==null;){var i=u.dependencies;if(i!==null){o=u.child;for(var s=i.firstContext;s!==null;){if(s.context===r){if(u.tag===1){s=We(-1,t&-t),s.tag=2;var c=u.updateQueue;if(c!==null){c=c.shared;var h=c.pending;h===null?s.next=s:(s.next=h.next,h.next=s),c.pending=s}}u.lanes|=t,s=u.alternate,s!==null&&(s.lanes|=t),ku(u.return,t,n),i.lanes|=t;break}s=s.next}}else if(u.tag===10)o=u.type===n.type?null:u.child;else if(u.tag===18){if(o=u.return,o===null)throw Error(y(341));o.lanes|=t,i=o.alternate,i!==null&&(i.lanes|=t),ku(o,t,n),o=u.sibling}else o=u.child;if(o!==null)o.return=u;else for(o=u;o!==null;){if(o===n){o=null;break}if(u=o.sibling,u!==null){u.return=o.return,o=u;break}o=o.return}u=o}ue(e,n,l.children,t),n=n.child}return n;case 9:return l=n.type,r=n.pendingProps.children,Zn(n,t),l=Ce(l),r=r(l),n.flags|=1,ue(e,n,r,t),n.child;case 14:return r=n.type,l=Le(r,n.pendingProps),l=Le(r.type,l),zi(e,n,r,l,t);case 15:return Ta(e,n,n.type,n.pendingProps,t);case 17:return r=n.type,l=n.pendingProps,l=n.elementType===r?l:Le(r,l),Nr(e,n),n.tag=1,pe(r)?(e=!0,Ar(n)):e=!1,Zn(n,t),ua(n,r,l),xu(n,r,l,t),Nu(null,n,r,!0,e,t);case 19:return Da(e,n,t);case 22:return Ra(e,n,t)}throw Error(y(156,n.tag))};function Ja(e,n){return xs(e,n)}function Ed(e,n,t,r){this.tag=e,this.key=t,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=n,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function xe(e,n,t,r){return new Ed(e,n,t,r)}function zo(e){return 
e=e.prototype,!(!e||!e.isReactComponent)}function xd(e){if(typeof e=="function")return zo(e)?1:0;if(e!=null){if(e=e.$$typeof,e===Ku)return 11;if(e===Yu)return 14}return 2}function fn(e,n){var t=e.alternate;return t===null?(t=xe(e.tag,n,e.key,e.mode),t.elementType=e.elementType,t.type=e.type,t.stateNode=e.stateNode,t.alternate=e,e.alternate=t):(t.pendingProps=n,t.type=e.type,t.flags=0,t.subtreeFlags=0,t.deletions=null),t.flags=e.flags&14680064,t.childLanes=e.childLanes,t.lanes=e.lanes,t.child=e.child,t.memoizedProps=e.memoizedProps,t.memoizedState=e.memoizedState,t.updateQueue=e.updateQueue,n=e.dependencies,t.dependencies=n===null?null:{lanes:n.lanes,firstContext:n.firstContext},t.sibling=e.sibling,t.index=e.index,t.ref=e.ref,t}function Lr(e,n,t,r,l,u){var o=2;if(r=e,typeof e=="function")zo(e)&&(o=1);else if(typeof e=="string")o=5;else e:switch(e){case Dn:return Cn(t.children,l,u,n);case Qu:o=8,l|=8;break;case Yl:return e=xe(12,t,n,l|2),e.elementType=Yl,e.lanes=u,e;case Xl:return e=xe(13,t,n,l),e.elementType=Xl,e.lanes=u,e;case Gl:return e=xe(19,t,n,l),e.elementType=Gl,e.lanes=u,e;case os:return fl(t,l,u,n);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case ls:o=10;break e;case us:o=9;break e;case Ku:o=11;break e;case Yu:o=14;break e;case Je:o=16,r=null;break e}throw Error(y(130,e==null?e:typeof e,""))}return n=xe(o,t,n,l),n.elementType=e,n.type=r,n.lanes=u,n}function Cn(e,n,t,r){return e=xe(7,e,r,n),e.lanes=t,e}function fl(e,n,t,r){return e=xe(22,e,r,n),e.elementType=os,e.lanes=t,e.stateNode={isHidden:!1},e}function Hl(e,n,t){return e=xe(6,e,null,n),e.lanes=t,e}function Wl(e,n,t){return n=xe(4,e.children!==null?e.children:[],e.key,n),n.lanes=t,n.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},n}function _d(e,n,t,r,l){this.tag=n,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=_l(0),this.expirationTimes=_l(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=_l(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function Lo(e,n,t,r,l,u,o,i,s){return e=new _d(e,n,t,i,s),n===1?(n=1,u===!0&&(n|=8)):n=0,u=xe(3,null,null,n),e.current=u,u.stateNode=e,u.memoizedState={element:r,isDehydrated:t,cache:null,transitions:null,pendingSuspenseBoundaries:null},fo(u),e}function Cd(e,n,t){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(nc)}catch(e){console.error(e)}}nc(),bi.exports=we;var Td=bi.exports,Bi=Td;Ql.createRoot=Bi.createRoot,Ql.hydrateRoot=Bi.hydrateRoot;function Rd({audioUrl:e,mimeType:n}){const t=ae.useRef(null),r=ae.useRef(null);return ae.useEffect(()=>{t.current&&r.current&&(r.current.src=e,t.current.load())},[e]),M.jsx("div",{className:"flex relative z-10 my-4 w-full",children:M.jsx("audio",{ref:t,controls:!0,className:"w-full h-14 rounded-lg bg-white shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:M.jsx("source",{ref:r,type:n})})})}function Od({text:e,percentage:n}){return n??(n=0),M.jsx("div",{className:"relative text-black bg-white rounded-lg text-left overflow-hidden",children:M.jsxs("div",{className:"px-2 w-[1%] h-full bg-blue-500 whitespace-nowrap",style:{width:`${n}%`},children:[e," (",`${n.toFixed(2)}%`,")"]})})}const jd={"US female 
1":"cmu_us_slt_arctic-wav-arctic_a0001","US female 2":"cmu_us_clb_arctic-wav-arctic_a0001","US male 1":"cmu_us_bdl_arctic-wav-arctic_a0003","US male 2":"cmu_us_rms_arctic-wav-arctic_a0003","Canadian male":"cmu_us_jmk_arctic-wav-arctic_a0002","Scottish male":"cmu_us_awb_arctic-wav-arctic_b0002","Indian male":"cmu_us_ksp_arctic-wav-arctic_a0007"},Md="cmu_us_slt_arctic-wav-arctic_a0001",Dd=()=>{const[e,n]=ae.useState(null),[t,r]=ae.useState(!1),[l,u]=ae.useState([]),[o,i]=ae.useState("I love Hugging Face!"),[s,c]=ae.useState(Md),[h,m]=ae.useState(null),p=ae.useRef(null);ae.useEffect(()=>{p.current||(p.current=new Worker(new URL("/assets/worker-7f2d1abe.js",self.location),{type:"module"}));const g=R=>{switch(R.data.status){case"initiate":n(!1),u(a=>[...a,R.data]);break;case"progress":u(a=>a.map(d=>d.file===R.data.file?{...d,progress:R.data.progress}:d));break;case"done":u(a=>a.filter(d=>d.file!==R.data.file));break;case"ready":n(!0);break;case"complete":r(!1);const f=URL.createObjectURL(R.data.output);m(f);break}};return p.current.addEventListener("message",g),()=>p.current.removeEventListener("message",g)});const w=()=>{r(!0),p.current.postMessage({text:o,speaker_id:s})},S=e===!1;return M.jsxs("div",{className:"min-h-screen flex items-center justify-center bg-gray-100",children:[M.jsxs("div",{className:"absolute gap-1 z-50 top-0 left-0 w-full h-full transition-all px-8 flex flex-col justify-center text-center",style:{opacity:S?1:0,pointerEvents:S?"all":"none",background:"rgba(0, 0, 0, 0.9)",backdropFilter:"blur(8px)"},children:[S&&M.jsx("label",{className:"text-white text-xl p-3",children:"Loading models... (only run once)"}),l.map(g=>M.jsx("div",{children:M.jsx(Od,{text:`${g.name}/${g.file}`,percentage:g.progress})},`${g.name}/${g.file}`))]}),M.jsxs("div",{className:"bg-white p-8 rounded-lg shadow-lg w-full max-w-xl m-2",children:[M.jsx("h1",{className:"text-3xl font-semibold text-gray-800 mb-1 text-center",children:"In-browser Text to Speech"}),M.jsxs("h2",{className:"text-base font-medium text-gray-700 mb-2 text-center",children:["Made with ",M.jsx("a",{href:"https://huggingface.co/docs/transformers.js",children:"🤗 Transformers.js"})]}),M.jsxs("div",{className:"mb-4",children:[M.jsx("label",{htmlFor:"text",className:"block text-sm font-medium text-gray-600",children:"Text"}),M.jsx("textarea",{id:"text",className:"border border-gray-300 rounded-md p-2 w-full",rows:"4",placeholder:"Enter text here",value:o,onChange:g=>i(g.target.value)})]}),M.jsxs("div",{className:"mb-4",children:[M.jsx("label",{htmlFor:"speaker",className:"block text-sm font-medium text-gray-600",children:"Speaker"}),M.jsx("select",{id:"speaker",className:"border border-gray-300 rounded-md p-2 w-full",value:s,onChange:g=>c(g.target.value),children:Object.entries(jd).map(([g,R])=>M.jsx("option",{value:R,children:g},g))})]}),M.jsx("div",{className:"flex justify-center",children:M.jsx("button",{className:`${t?"bg-gray-400 cursor-not-allowed":"bg-blue-500 hover:bg-blue-600"} text-white rounded-md py-2 px-4`,onClick:w,disabled:t,children:t?"Generating...":"Generate"})}),h&&M.jsx(Rd,{audioUrl:h,mimeType:"audio/wav"})]})]})};Ql.createRoot(document.getElementById("root")).render(M.jsx(wc.StrictMode,{children:M.jsx(Dd,{})})); diff --git a/spaces/XzJosh/nanami-Bert-VITS2/text/english_bert_mock.py b/spaces/XzJosh/nanami-Bert-VITS2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nanami-Bert-VITS2/text/english_bert_mock.py +++ 
/dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/latent_diffusion/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/latent_diffusion/__init__.py deleted file mode 100644 index 5544527ff5877bb2c725c8b375cd5b03060d6a21..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/latent_diffusion/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# flake8: noqa -from ...utils import is_transformers_available -from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline - - -if is_transformers_available(): - from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline diff --git a/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.cpp b/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.cpp deleted file mode 100644 index 2d7177fc60040751d20e9a8da0301fa3ab64968a..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.cpp +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "upfirdn2d.h" - -//------------------------------------------------------------------------ - -static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain) -{ - // Validate arguments. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x"); - TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32"); - TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); - TORCH_CHECK(f.numel() <= INT_MAX, "f is too large"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(f.dim() == 2, "f must be rank 2"); - TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1"); - TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1"); - TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1"); - - // Create output tensor. - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx; - int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy; - TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format()); - TORCH_CHECK(y.numel() <= INT_MAX, "output is too large"); - - // Initialize CUDA kernel parameters. - upfirdn2d_kernel_params p; - p.x = x.data_ptr(); - p.f = f.data_ptr(); - p.y = y.data_ptr(); - p.up = make_int2(upx, upy); - p.down = make_int2(downx, downy); - p.pad0 = make_int2(padx0, pady0); - p.flip = (flip) ? 
1 : 0; - p.gain = gain; - p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0)); - p.filterSize = make_int2((int)f.size(1), (int)f.size(0)); - p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0)); - p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0)); - p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z; - p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1; - - // Choose CUDA kernel. - upfirdn2d_kernel_spec spec; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] - { - spec = choose_upfirdn2d_kernel(p); - }); - - // Set looping options. - p.loopMajor = (p.sizeMajor - 1) / 16384 + 1; - p.loopMinor = spec.loopMinor; - p.loopX = spec.loopX; - p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1; - p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1; - - // Compute grid size. - dim3 blockSize, gridSize; - if (spec.tileOutW < 0) // large - { - blockSize = dim3(4, 32, 1); - gridSize = dim3( - ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor, - (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, - p.launchMajor); - } - else // small - { - blockSize = dim3(256, 1, 1); - gridSize = dim3( - ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor, - (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, - p.launchMajor); - } - - // Launch CUDA kernel. - void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("upfirdn2d", &upfirdn2d); -} - -//------------------------------------------------------------------------ diff --git a/spaces/ZiLaiJuan/GRADIO/calculator.py b/spaces/ZiLaiJuan/GRADIO/calculator.py deleted file mode 100644 index dee8156d61d7c64d49fea9e033c940c19565a3b8..0000000000000000000000000000000000000000 --- a/spaces/ZiLaiJuan/GRADIO/calculator.py +++ /dev/null @@ -1,32 +0,0 @@ -import gradio as gr - -def calculator(num1, operation, num2): - if operation == "add": - return num1 + num2 - elif operation == "subtract": - return num1 - num2 - elif operation == "multiply": - return num1 * num2 - elif operation == "divide": - if num2 == 0: - raise gr.Error("Cannot divide by zero!") - return num1 / num2 - -demo = gr.Interface( - calculator, - [ - "number", - gr.Radio(["add", "subtract", "multiply", "divide"]), - "number" - ], - "number", - examples=[ - [5, "add", 3], - [4, "divide", 2], - [-4, "multiply", 2.5], - [0, "subtract", 1.2], - ], - title="Toy Calculator", - description="Here's a sample toy calculator. 
Enjoy!", -) -demo.launch(share=True) diff --git a/spaces/a-v-bely/russian-task-generator/utilities_cookies/public/index.html b/spaces/a-v-bely/russian-task-generator/utilities_cookies/public/index.html deleted file mode 100644 index a8c6bc92134e8e0bfbf0eebfa8353d00346a9710..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/russian-task-generator/utilities_cookies/public/index.html +++ /dev/null @@ -1,9 +0,0 @@ - - - - Cookies Manager - - - - - \ No newline at end of file diff --git a/spaces/abdvl/datahub_qa_bot/docs/authorization/roles.md b/spaces/abdvl/datahub_qa_bot/docs/authorization/roles.md deleted file mode 100644 index b25579072980d3c2cc4ff14889560fe2846fd975..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/authorization/roles.md +++ /dev/null @@ -1,162 +0,0 @@ -import FeatureAvailability from '@site/src/components/FeatureAvailability'; - -# About DataHub Roles - - - -DataHub provides the ability to use **Roles** to manage permissions. - -:::tip **Roles** are the recommended way to manage permissions on DataHub. This should suffice for most use cases, but advanced users can use **Policies** if needed. - -## Roles Setup, Prerequisites, and Permissions - -The out-of-the-box Roles represent the most common types of DataHub users. Currently, the supported Roles are **Admin**, **Editor** and **Reader**. - -| Role Name | Description | -| --------- | --------------------------------------------------------------------------------------- | -| Admin | Can do everything on the platform. | -| Editor | Can read and edit all metadata. Cannot take administrative actions. | -| Reader | Can read all metadata. Cannot edit anything by default, or take administrative actions. | - -:::note To manage roles, including viewing roles, or editing a user's role, you must either be an **Admin**, or have the **Manage Policies** privilege. - -## Using Roles - -### Viewing Roles - -You can view the list of existing Roles under **Settings > Permissions > Roles**. You can click into a Role to see details about -it, like which users have that Role, and which Policies correspond to that Role. - -


      - -### Assigning Roles - -Roles can be assigned in two different ways. - -#### Assigning a New Role to a Single User - -If you go to **Settings > Users & Groups > Users**, you will be able to view your full list of users, as well as which Role they are currently -assigned to, including if they don't have a Role. - -


      - -You can simply assign a new Role to a user by clicking on the drop-down that appears on their row and selecting the desired Role. - -


      - - -#### Batch Assigning a Role - -When viewing the full list of roles at **Settings > Permissions > Roles**, you will notice that each role has an `Add Users` button next to it. Clicking this button will -lead you to a search box where you can search through your users, and select which users you would like to assign this role to. - -

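-Beyond the UI flow described above, the same batch assignment can be scripted through the `batchAssignRole` mutation listed under Additional Resources below. This is a hedged sketch, not the documented procedure: the host, the token, and the `BatchAssignRoleInput` field names (`roleUrn`, `actors`) are assumptions to verify against your DataHub GraphQL schema.
-
-```python
-# Sketch only: assign one role to several users at once via GraphQL.
-# Host, token, and input field names (roleUrn, actors) are assumptions.
-import requests
-
-DATAHUB_HOST = "http://localhost:8080"      # hypothetical host
-ACCESS_TOKEN = "<personal-access-token>"
-
-BATCH_ASSIGN_ROLE = """
-mutation batchAssignRole($input: BatchAssignRoleInput!) {
-  batchAssignRole(input: $input)
-}
-"""
-
-def assign_role(role_urn: str, user_urns: list) -> bool:
-    """Assign a single role to a batch of users; returns True on success."""
-    response = requests.post(
-        f"{DATAHUB_HOST}/api/graphql",
-        headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
-        json={"query": BATCH_ASSIGN_ROLE,
-              "variables": {"input": {"roleUrn": role_urn, "actors": user_urns}}},
-    )
-    response.raise_for_status()
-    return response.json()["data"]["batchAssignRole"]
-
-# Example: give two (hypothetical) users the Reader role.
-assign_role("urn:li:dataHubRole:Reader",
-            ["urn:li:corpuser:jdoe", "urn:li:corpuser:asmith"])
-```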

      - -### How do Roles interact with Policies? - -Roles actually use Policies under-the-hood, and come prepackaged with corresponding policies to control what a Role can do, which you can view in the -Policies tab. Note that these Role-specific policies **cannot** be changed. You can find the full list of policies corresponding to each Role at the bottom of this -[file](https://github.com/datahub-project/datahub/blob/master/metadata-service/war/src/main/resources/boot/policies.json). - -If you would like to have finer control over what a user on your DataHub instance can do, the Roles system interfaces cleanly -with the Policies system. For example, if you would like to give a user a **Reader** role, but also allow them to edit metadata -for certain domains, you can add a policy that will allow them to do. Note that adding a policy like this will only add to what a user can do -in DataHub. - -### Role Privileges - -#### Self-Hosted DataHub and Managed DataHub - -These privileges are common to both Self-Hosted DataHub and Managed DataHub. - -##### Platform Privileges - -| Privilege | Admin | Editor | Reader | -|---------------------------------|--------------------|--------------------|--------| -| Generate Personal Access Tokens | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Domains | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Glossaries | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Tags | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Policies | :heavy_check_mark: | :x: | :x: | -| Manage Ingestion | :heavy_check_mark: | :x: | :x: | -| Manage Secrets | :heavy_check_mark: | :x: | :x: | -| Manage Users and Groups | :heavy_check_mark: | :x: | :x: | -| Manage Access Tokens | :heavy_check_mark: | :x: | :x: | -| Manage User Credentials | :heavy_check_mark: | :x: | :x: | -| Manage Public Views | :heavy_check_mark: | :x: | :x: | -| View Analytics | :heavy_check_mark: | :x: | :x: | - -##### Metadata Privileges - -| Privilege | Admin | Editor | Reader | -|--------------------------------------|--------------------|--------------------|--------------------| -| View Entity Page | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| View Dataset Usage | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| View Dataset Profile | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| Edit Entity | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Tags | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Glossary Terms | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Owners | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Docs | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Doc Links | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Status | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Entity Assertions | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Entity Tags | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Entity Glossary Terms | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Dataset Column Tags | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Dataset Column Glossary Terms | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Dataset Column Descriptions | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Dataset Column Tags | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Dataset Column Glossary Terms | :heavy_check_mark: | :heavy_check_mark: | :x: 
| -| Edit Tag Color | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit User Profile | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Edit Contact Info | :heavy_check_mark: | :heavy_check_mark: | :x: | - -#### Managed DataHub - -These privileges are only relevant to Managed DataHub. - -##### Platform Privileges - -| Privilege | Admin | Editor | Reader | -|-------------------------|--------------------|--------------------|--------| -| Create Constraints | :heavy_check_mark: | :heavy_check_mark: | :x: | -| View Metadata Proposals | :heavy_check_mark: | :heavy_check_mark: | :x: | -| Manage Tests | :heavy_check_mark: | :x: | :x: | -| Manage Global Settings | :heavy_check_mark: | :x: | :x: | - -##### Metadata Privileges - -| Privilege | Admin | Editor | Reader | -|---------------------------------------|--------------------|--------------------|--------------------| -| Propose Entity Tags | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| Propose Entity Glossary Terms | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| Propose Dataset Column Tags | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| Propose Dataset Column Glossary Terms | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | -| Edit Entity Operations | :heavy_check_mark: | :heavy_check_mark: | :x: | - -## Additional Resources - -### GraphQL - -* [acceptRole](../../graphql/mutations.md#acceptrole) -* [batchAssignRole](../../graphql/mutations.md#batchassignrole) -* [listRoles](../../graphql/queries.md#listroles) - -## FAQ and Troubleshooting - -## What updates are planned for Roles? - -In the future, the DataHub team is looking into adding the following features to Roles. - -- Defining a role mapping from OIDC identity providers to DataHub that will grant users a DataHub role based on their IdP role -- Allowing Admins to set a default role on DataHub so all users are assigned a role -- Building custom roles diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/mask/structures.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/mask/structures.py deleted file mode 100644 index d9ec5775f281ab8b76cb873e71a4edd9969ab905..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/mask/structures.py +++ /dev/null @@ -1,1024 +0,0 @@ -from abc import ABCMeta, abstractmethod - -import cv2 -import mmcv -import numpy as np -import pycocotools.mask as maskUtils -import torch -from mmcv.ops.roi_align import roi_align - - -class BaseInstanceMasks(metaclass=ABCMeta): - """Base class for instance masks.""" - - @abstractmethod - def rescale(self, scale, interpolation='nearest'): - """Rescale masks as large as possible while keeping the aspect ratio. - For details can refer to `mmcv.imrescale`. - - Args: - scale (tuple[int]): The maximum size (h, w) of rescaled mask. - interpolation (str): Same as :func:`mmcv.imrescale`. - - Returns: - BaseInstanceMasks: The rescaled masks. - """ - - @abstractmethod - def resize(self, out_shape, interpolation='nearest'): - """Resize masks to the given out_shape. - - Args: - out_shape: Target (h, w) of resized mask. - interpolation (str): See :func:`mmcv.imresize`. - - Returns: - BaseInstanceMasks: The resized masks. - """ - - @abstractmethod - def flip(self, flip_direction='horizontal'): - """Flip masks alone the given direction. - - Args: - flip_direction (str): Either 'horizontal' or 'vertical'. - - Returns: - BaseInstanceMasks: The flipped masks. 
- """ - - @abstractmethod - def pad(self, out_shape, pad_val): - """Pad masks to the given size of (h, w). - - Args: - out_shape (tuple[int]): Target (h, w) of padded mask. - pad_val (int): The padded value. - - Returns: - BaseInstanceMasks: The padded masks. - """ - - @abstractmethod - def crop(self, bbox): - """Crop each mask by the given bbox. - - Args: - bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). - - Return: - BaseInstanceMasks: The cropped masks. - """ - - @abstractmethod - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device, - interpolation='bilinear'): - """Crop and resize masks by the given bboxes. - - This function is mainly used in mask targets computation. - It firstly align mask to bboxes by assigned_inds, then crop mask by the - assigned bbox and resize to the size of (mask_h, mask_w) - - Args: - bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) - out_shape (tuple[int]): Target (h, w) of resized mask - inds (ndarray): Indexes to assign masks to each bbox, - shape (N,) and values should be between [0, num_masks - 1]. - device (str): Device of bboxes - interpolation (str): See `mmcv.imresize` - - Return: - BaseInstanceMasks: the cropped and resized masks. - """ - - @abstractmethod - def expand(self, expanded_h, expanded_w, top, left): - """see :class:`Expand`.""" - - @property - @abstractmethod - def areas(self): - """ndarray: areas of each instance.""" - - @abstractmethod - def to_ndarray(self): - """Convert masks to the format of ndarray. - - Return: - ndarray: Converted masks in the format of ndarray. - """ - - @abstractmethod - def to_tensor(self, dtype, device): - """Convert masks to the format of Tensor. - - Args: - dtype (str): Dtype of converted mask. - device (torch.device): Device of converted masks. - - Returns: - Tensor: Converted masks in the format of Tensor. - """ - - @abstractmethod - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Translate the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - fill_val (int | float): Border value. Default 0. - interpolation (str): Same as :func:`mmcv.imtranslate`. - - Returns: - Translated masks. - """ - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. Default 0. - interpolation (str): Same as in :func:`mmcv.imshear`. - - Returns: - ndarray: Sheared masks. - """ - - @abstractmethod - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """Rotate the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - angle (int | float): Rotation angle in degrees. Positive values - mean counter-clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the - rotation in source image. If not specified, the center of - the image will be used. - scale (int | float): Isotropic scale factor. - fill_val (int | float): Border value. Default 0 for masks. - - Returns: - Rotated masks. 
- """ - - -class BitmapMasks(BaseInstanceMasks): - """This class represents masks in the form of bitmaps. - - Args: - masks (ndarray): ndarray of masks in shape (N, H, W), where N is - the number of objects. - height (int): height of masks - width (int): width of masks - - Example: - >>> from mmdet.core.mask.structures import * # NOQA - >>> num_masks, H, W = 3, 32, 32 - >>> rng = np.random.RandomState(0) - >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) - >>> self = BitmapMasks(masks, height=H, width=W) - - >>> # demo crop_and_resize - >>> num_boxes = 5 - >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) - >>> out_shape = (14, 14) - >>> inds = torch.randint(0, len(self), size=(num_boxes,)) - >>> device = 'cpu' - >>> interpolation = 'bilinear' - >>> new = self.crop_and_resize( - ... bboxes, out_shape, inds, device, interpolation) - >>> assert len(new) == num_boxes - >>> assert new.height, new.width == out_shape - """ - - def __init__(self, masks, height, width): - self.height = height - self.width = width - if len(masks) == 0: - self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) - else: - assert isinstance(masks, (list, np.ndarray)) - if isinstance(masks, list): - assert isinstance(masks[0], np.ndarray) - assert masks[0].ndim == 2 # (H, W) - else: - assert masks.ndim == 3 # (N, H, W) - - self.masks = np.stack(masks).reshape(-1, height, width) - assert self.masks.shape[1] == self.height - assert self.masks.shape[2] == self.width - - def __getitem__(self, index): - """Index the BitmapMask. - - Args: - index (int | ndarray): Indices in the format of integer or ndarray. - - Returns: - :obj:`BitmapMasks`: Indexed bitmap masks. - """ - masks = self.masks[index].reshape(-1, self.height, self.width) - return BitmapMasks(masks, self.height, self.width) - - def __iter__(self): - return iter(self.masks) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += f'num_masks={len(self.masks)}, ' - s += f'height={self.height}, ' - s += f'width={self.width})' - return s - - def __len__(self): - """Number of masks.""" - return len(self.masks) - - def rescale(self, scale, interpolation='nearest'): - """See :func:`BaseInstanceMasks.rescale`.""" - if len(self.masks) == 0: - new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) - rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) - else: - rescaled_masks = np.stack([ - mmcv.imrescale(mask, scale, interpolation=interpolation) - for mask in self.masks - ]) - height, width = rescaled_masks.shape[1:] - return BitmapMasks(rescaled_masks, height, width) - - def resize(self, out_shape, interpolation='nearest'): - """See :func:`BaseInstanceMasks.resize`.""" - if len(self.masks) == 0: - resized_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - resized_masks = np.stack([ - mmcv.imresize( - mask, out_shape[::-1], interpolation=interpolation) - for mask in self.masks - ]) - return BitmapMasks(resized_masks, *out_shape) - - def flip(self, flip_direction='horizontal'): - """See :func:`BaseInstanceMasks.flip`.""" - assert flip_direction in ('horizontal', 'vertical', 'diagonal') - - if len(self.masks) == 0: - flipped_masks = self.masks - else: - flipped_masks = np.stack([ - mmcv.imflip(mask, direction=flip_direction) - for mask in self.masks - ]) - return BitmapMasks(flipped_masks, self.height, self.width) - - def pad(self, out_shape, pad_val=0): - """See :func:`BaseInstanceMasks.pad`.""" - if len(self.masks) == 0: - padded_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - padded_masks = 
np.stack([ - mmcv.impad(mask, shape=out_shape, pad_val=pad_val) - for mask in self.masks - ]) - return BitmapMasks(padded_masks, *out_shape) - - def crop(self, bbox): - """See :func:`BaseInstanceMasks.crop`.""" - assert isinstance(bbox, np.ndarray) - assert bbox.ndim == 1 - - # clip the boundary - bbox = bbox.copy() - bbox[0::2] = np.clip(bbox[0::2], 0, self.width) - bbox[1::2] = np.clip(bbox[1::2], 0, self.height) - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - - if len(self.masks) == 0: - cropped_masks = np.empty((0, h, w), dtype=np.uint8) - else: - cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] - return BitmapMasks(cropped_masks, h, w) - - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device='cpu', - interpolation='bilinear'): - """See :func:`BaseInstanceMasks.crop_and_resize`.""" - if len(self.masks) == 0: - empty_masks = np.empty((0, *out_shape), dtype=np.uint8) - return BitmapMasks(empty_masks, *out_shape) - - # convert bboxes to tensor - if isinstance(bboxes, np.ndarray): - bboxes = torch.from_numpy(bboxes).to(device=device) - if isinstance(inds, np.ndarray): - inds = torch.from_numpy(inds).to(device=device) - - num_bbox = bboxes.shape[0] - fake_inds = torch.arange( - num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] - rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 - rois = rois.to(device=device) - if num_bbox > 0: - gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( - 0, inds).to(dtype=rois.dtype) - targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, - 1.0, 0, 'avg', True).squeeze(1) - resized_masks = (targets >= 0.5).cpu().numpy() - else: - resized_masks = [] - return BitmapMasks(resized_masks, *out_shape) - - def expand(self, expanded_h, expanded_w, top, left): - """See :func:`BaseInstanceMasks.expand`.""" - if len(self.masks) == 0: - expanded_mask = np.empty((0, expanded_h, expanded_w), - dtype=np.uint8) - else: - expanded_mask = np.zeros((len(self), expanded_h, expanded_w), - dtype=np.uint8) - expanded_mask[:, top:top + self.height, - left:left + self.width] = self.masks - return BitmapMasks(expanded_mask, expanded_h, expanded_w) - - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Translate the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - fill_val (int | float): Border value. Default 0 for masks. - interpolation (str): Same as :func:`mmcv.imtranslate`. - - Returns: - BitmapMasks: Translated BitmapMasks. 
- - Example: - >>> from mmdet.core.mask.structures import BitmapMasks - >>> self = BitmapMasks.random(dtype=np.uint8) - >>> out_shape = (32, 32) - >>> offset = 4 - >>> direction = 'horizontal' - >>> fill_val = 0 - >>> interpolation = 'bilinear' - >>> # Note, There seem to be issues when: - >>> # * out_shape is different than self's shape - >>> # * the mask dtype is not supported by cv2.AffineWarp - >>> new = self.translate(out_shape, offset, direction, fill_val, - >>> interpolation) - >>> assert len(new) == len(self) - >>> assert new.height, new.width == out_shape - """ - if len(self.masks) == 0: - translated_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - translated_masks = mmcv.imtranslate( - self.masks.transpose((1, 2, 0)), - offset, - direction, - border_value=fill_val, - interpolation=interpolation) - if translated_masks.ndim == 2: - translated_masks = translated_masks[:, :, None] - translated_masks = translated_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(translated_masks, *out_shape) - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as in :func:`mmcv.imshear`. - - Returns: - BitmapMasks: The sheared masks. - """ - if len(self.masks) == 0: - sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - sheared_masks = mmcv.imshear( - self.masks.transpose((1, 2, 0)), - magnitude, - direction, - border_value=border_value, - interpolation=interpolation) - if sheared_masks.ndim == 2: - sheared_masks = sheared_masks[:, :, None] - sheared_masks = sheared_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(sheared_masks, *out_shape) - - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """Rotate the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - angle (int | float): Rotation angle in degrees. Positive values - mean counter-clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the - rotation in source image. If not specified, the center of - the image will be used. - scale (int | float): Isotropic scale factor. - fill_val (int | float): Border value. Default 0 for masks. - - Returns: - BitmapMasks: Rotated BitmapMasks. 
- """ - if len(self.masks) == 0: - rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) - else: - rotated_masks = mmcv.imrotate( - self.masks.transpose((1, 2, 0)), - angle, - center=center, - scale=scale, - border_value=fill_val) - if rotated_masks.ndim == 2: - # case when only one mask, (h, w) - rotated_masks = rotated_masks[:, :, None] # (h, w, 1) - rotated_masks = rotated_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(rotated_masks, *out_shape) - - @property - def areas(self): - """See :py:attr:`BaseInstanceMasks.areas`.""" - return self.masks.sum((1, 2)) - - def to_ndarray(self): - """See :func:`BaseInstanceMasks.to_ndarray`.""" - return self.masks - - def to_tensor(self, dtype, device): - """See :func:`BaseInstanceMasks.to_tensor`.""" - return torch.tensor(self.masks, dtype=dtype, device=device) - - @classmethod - def random(cls, - num_masks=3, - height=32, - width=32, - dtype=np.uint8, - rng=None): - """Generate random bitmap masks for demo / testing purposes. - - Example: - >>> from mmdet.core.mask.structures import BitmapMasks - >>> self = BitmapMasks.random() - >>> print('self = {}'.format(self)) - self = BitmapMasks(num_masks=3, height=32, width=32) - """ - from mmdet.utils.util_random import ensure_rng - rng = ensure_rng(rng) - masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) - self = cls(masks, height=height, width=width) - return self - - -class PolygonMasks(BaseInstanceMasks): - """This class represents masks in the form of polygons. - - Polygons is a list of three levels. The first level of the list - corresponds to objects, the second level to the polys that compose the - object, the third level to the poly coordinates - - Args: - masks (list[list[ndarray]]): The first level of the list - corresponds to objects, the second level to the polys that - compose the object, the third level to the poly coordinates - height (int): height of masks - width (int): width of masks - - Example: - >>> from mmdet.core.mask.structures import * # NOQA - >>> masks = [ - >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] - >>> ] - >>> height, width = 16, 16 - >>> self = PolygonMasks(masks, height, width) - - >>> # demo translate - >>> new = self.translate((16, 16), 4., direction='horizontal') - >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) - >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) - - >>> # demo crop_and_resize - >>> num_boxes = 3 - >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) - >>> out_shape = (16, 16) - >>> inds = torch.randint(0, len(self), size=(num_boxes,)) - >>> device = 'cpu' - >>> interpolation = 'bilinear' - >>> new = self.crop_and_resize( - ... bboxes, out_shape, inds, device, interpolation) - >>> assert len(new) == num_boxes - >>> assert new.height, new.width == out_shape - """ - - def __init__(self, masks, height, width): - assert isinstance(masks, list) - if len(masks) > 0: - assert isinstance(masks[0], list) - assert isinstance(masks[0][0], np.ndarray) - - self.height = height - self.width = width - self.masks = masks - - def __getitem__(self, index): - """Index the polygon masks. - - Args: - index (ndarray | List): The indices. - - Returns: - :obj:`PolygonMasks`: The indexed polygon masks. 
- """ - if isinstance(index, np.ndarray): - index = index.tolist() - if isinstance(index, list): - masks = [self.masks[i] for i in index] - else: - try: - masks = self.masks[index] - except Exception: - raise ValueError( - f'Unsupported input of type {type(index)} for indexing!') - if len(masks) and isinstance(masks[0], np.ndarray): - masks = [masks] # ensure a list of three levels - return PolygonMasks(masks, self.height, self.width) - - def __iter__(self): - return iter(self.masks) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += f'num_masks={len(self.masks)}, ' - s += f'height={self.height}, ' - s += f'width={self.width})' - return s - - def __len__(self): - """Number of masks.""" - return len(self.masks) - - def rescale(self, scale, interpolation=None): - """see :func:`BaseInstanceMasks.rescale`""" - new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) - if len(self.masks) == 0: - rescaled_masks = PolygonMasks([], new_h, new_w) - else: - rescaled_masks = self.resize((new_h, new_w)) - return rescaled_masks - - def resize(self, out_shape, interpolation=None): - """see :func:`BaseInstanceMasks.resize`""" - if len(self.masks) == 0: - resized_masks = PolygonMasks([], *out_shape) - else: - h_scale = out_shape[0] / self.height - w_scale = out_shape[1] / self.width - resized_masks = [] - for poly_per_obj in self.masks: - resized_poly = [] - for p in poly_per_obj: - p = p.copy() - p[0::2] *= w_scale - p[1::2] *= h_scale - resized_poly.append(p) - resized_masks.append(resized_poly) - resized_masks = PolygonMasks(resized_masks, *out_shape) - return resized_masks - - def flip(self, flip_direction='horizontal'): - """see :func:`BaseInstanceMasks.flip`""" - assert flip_direction in ('horizontal', 'vertical', 'diagonal') - if len(self.masks) == 0: - flipped_masks = PolygonMasks([], self.height, self.width) - else: - flipped_masks = [] - for poly_per_obj in self.masks: - flipped_poly_per_obj = [] - for p in poly_per_obj: - p = p.copy() - if flip_direction == 'horizontal': - p[0::2] = self.width - p[0::2] - elif flip_direction == 'vertical': - p[1::2] = self.height - p[1::2] - else: - p[0::2] = self.width - p[0::2] - p[1::2] = self.height - p[1::2] - flipped_poly_per_obj.append(p) - flipped_masks.append(flipped_poly_per_obj) - flipped_masks = PolygonMasks(flipped_masks, self.height, - self.width) - return flipped_masks - - def crop(self, bbox): - """see :func:`BaseInstanceMasks.crop`""" - assert isinstance(bbox, np.ndarray) - assert bbox.ndim == 1 - - # clip the boundary - bbox = bbox.copy() - bbox[0::2] = np.clip(bbox[0::2], 0, self.width) - bbox[1::2] = np.clip(bbox[1::2], 0, self.height) - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - - if len(self.masks) == 0: - cropped_masks = PolygonMasks([], h, w) - else: - cropped_masks = [] - for poly_per_obj in self.masks: - cropped_poly_per_obj = [] - for p in poly_per_obj: - # pycocotools will clip the boundary - p = p.copy() - p[0::2] -= bbox[0] - p[1::2] -= bbox[1] - cropped_poly_per_obj.append(p) - cropped_masks.append(cropped_poly_per_obj) - cropped_masks = PolygonMasks(cropped_masks, h, w) - return cropped_masks - - def pad(self, out_shape, pad_val=0): - """padding has no effect on polygons`""" - return PolygonMasks(self.masks, *out_shape) - - def expand(self, *args, **kwargs): - """TODO: Add expand for polygon""" - raise NotImplementedError - - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device='cpu', - interpolation='bilinear'): - """see 
:func:`BaseInstanceMasks.crop_and_resize`""" - out_h, out_w = out_shape - if len(self.masks) == 0: - return PolygonMasks([], out_h, out_w) - - resized_masks = [] - for i in range(len(bboxes)): - mask = self.masks[inds[i]] - bbox = bboxes[i, :] - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - h_scale = out_h / max(h, 0.1) # avoid too large scale - w_scale = out_w / max(w, 0.1) - - resized_mask = [] - for p in mask: - p = p.copy() - # crop - # pycocotools will clip the boundary - p[0::2] -= bbox[0] - p[1::2] -= bbox[1] - - # resize - p[0::2] *= w_scale - p[1::2] *= h_scale - resized_mask.append(p) - resized_masks.append(resized_mask) - return PolygonMasks(resized_masks, *out_shape) - - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=None, - interpolation=None): - """Translate the PolygonMasks. - - Example: - >>> self = PolygonMasks.random(dtype=np.int) - >>> out_shape = (self.height, self.width) - >>> new = self.translate(out_shape, 4., direction='horizontal') - >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) - >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 - """ - assert fill_val is None or fill_val == 0, 'Here fill_val is not '\ - f'used, and defaultly should be None or 0. got {fill_val}.' - if len(self.masks) == 0: - translated_masks = PolygonMasks([], *out_shape) - else: - translated_masks = [] - for poly_per_obj in self.masks: - translated_poly_per_obj = [] - for p in poly_per_obj: - p = p.copy() - if direction == 'horizontal': - p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) - elif direction == 'vertical': - p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) - translated_poly_per_obj.append(p) - translated_masks.append(translated_poly_per_obj) - translated_masks = PolygonMasks(translated_masks, *out_shape) - return translated_masks - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """See :func:`BaseInstanceMasks.shear`.""" - if len(self.masks) == 0: - sheared_masks = PolygonMasks([], *out_shape) - else: - sheared_masks = [] - if direction == 'horizontal': - shear_matrix = np.stack([[1, magnitude], - [0, 1]]).astype(np.float32) - elif direction == 'vertical': - shear_matrix = np.stack([[1, 0], [magnitude, - 1]]).astype(np.float32) - for poly_per_obj in self.masks: - sheared_poly = [] - for p in poly_per_obj: - p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] - new_coords = np.matmul(shear_matrix, p) # [2, n] - new_coords[0, :] = np.clip(new_coords[0, :], 0, - out_shape[1]) - new_coords[1, :] = np.clip(new_coords[1, :], 0, - out_shape[0]) - sheared_poly.append( - new_coords.transpose((1, 0)).reshape(-1)) - sheared_masks.append(sheared_poly) - sheared_masks = PolygonMasks(sheared_masks, *out_shape) - return sheared_masks - - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """See :func:`BaseInstanceMasks.rotate`.""" - if len(self.masks) == 0: - rotated_masks = PolygonMasks([], *out_shape) - else: - rotated_masks = [] - rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) - for poly_per_obj in self.masks: - rotated_poly = [] - for p in poly_per_obj: - p = p.copy() - coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] - # pad 1 to convert from format [x, y] to homogeneous - # coordinates format [x, y, 1] - coords = np.concatenate( - (coords, np.ones((coords.shape[0], 1), coords.dtype)), - axis=1) # [n, 3] - rotated_coords = np.matmul( - 
rotate_matrix[None, :, :], - coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] - rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, - out_shape[1]) - rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, - out_shape[0]) - rotated_poly.append(rotated_coords.reshape(-1)) - rotated_masks.append(rotated_poly) - rotated_masks = PolygonMasks(rotated_masks, *out_shape) - return rotated_masks - - def to_bitmap(self): - """convert polygon masks to bitmap masks.""" - bitmap_masks = self.to_ndarray() - return BitmapMasks(bitmap_masks, self.height, self.width) - - @property - def areas(self): - """Compute areas of masks. - - This func is modified from `detectron2 - `_. - The function only works with Polygons using the shoelace formula. - - Return: - ndarray: areas of each instance - """ # noqa: W501 - area = [] - for polygons_per_obj in self.masks: - area_per_obj = 0 - for p in polygons_per_obj: - area_per_obj += self._polygon_area(p[0::2], p[1::2]) - area.append(area_per_obj) - return np.asarray(area) - - def _polygon_area(self, x, y): - """Compute the area of a component of a polygon. - - Using the shoelace formula: - https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - - Args: - x (ndarray): x coordinates of the component - y (ndarray): y coordinates of the component - - Return: - float: the are of the component - """ # noqa: 501 - return 0.5 * np.abs( - np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) - - def to_ndarray(self): - """Convert masks to the format of ndarray.""" - if len(self.masks) == 0: - return np.empty((0, self.height, self.width), dtype=np.uint8) - bitmap_masks = [] - for poly_per_obj in self.masks: - bitmap_masks.append( - polygon_to_bitmap(poly_per_obj, self.height, self.width)) - return np.stack(bitmap_masks) - - def to_tensor(self, dtype, device): - """See :func:`BaseInstanceMasks.to_tensor`.""" - if len(self.masks) == 0: - return torch.empty((0, self.height, self.width), - dtype=dtype, - device=device) - ndarray_masks = self.to_ndarray() - return torch.tensor(ndarray_masks, dtype=dtype, device=device) - - @classmethod - def random(cls, - num_masks=3, - height=32, - width=32, - n_verts=5, - dtype=np.float32, - rng=None): - """Generate random polygon masks for demo / testing purposes. - - Adapted from [1]_ - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501 - - Example: - >>> from mmdet.core.mask.structures import PolygonMasks - >>> self = PolygonMasks.random() - >>> print('self = {}'.format(self)) - """ - from mmdet.utils.util_random import ensure_rng - rng = ensure_rng(rng) - - def _gen_polygon(n, irregularity, spikeyness): - """Creates the polygon by sampling points on a circle around the - centre. Random noise is added by varying the angular spacing - between sequential points, and by varying the radial distance of - each point from the centre. - - Based on original code by Mike Ounsworth - - Args: - n (int): number of vertices - irregularity (float): [0,1] indicating how much variance there - is in the angular spacing of vertices. [0,1] will map to - [0, 2pi/numberOfVerts] - spikeyness (float): [0,1] indicating how much variance there is - in each vertex from the circle of radius aveRadius. [0,1] - will map to [0, aveRadius] - - Returns: - a list of vertices, in CCW order. 
- """ - from scipy.stats import truncnorm - # Generate around the unit circle - cx, cy = (0.0, 0.0) - radius = 1 - - tau = np.pi * 2 - - irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n - spikeyness = np.clip(spikeyness, 1e-9, 1) - - # generate n angle steps - lower = (tau / n) - irregularity - upper = (tau / n) + irregularity - angle_steps = rng.uniform(lower, upper, n) - - # normalize the steps so that point 0 and point n+1 are the same - k = angle_steps.sum() / (2 * np.pi) - angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) - - # Convert high and low values to be wrt the standard normal range - # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html - low = 0 - high = 2 * radius - mean = radius - std = spikeyness - a = (low - mean) / std - b = (high - mean) / std - tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) - - # now generate the points - radii = tnorm.rvs(n, random_state=rng) - x_pts = cx + radii * np.cos(angles) - y_pts = cy + radii * np.sin(angles) - - points = np.hstack([x_pts[:, None], y_pts[:, None]]) - - # Scale to 0-1 space - points = points - points.min(axis=0) - points = points / points.max(axis=0) - - # Randomly place within 0-1 space - points = points * (rng.rand() * .8 + .2) - min_pt = points.min(axis=0) - max_pt = points.max(axis=0) - - high = (1 - max_pt) - low = (0 - min_pt) - offset = (rng.rand(2) * (high - low)) + low - points = points + offset - return points - - def _order_vertices(verts): - """ - References: - https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise - """ - mlat = verts.T[0].sum() / len(verts) - mlng = verts.T[1].sum() / len(verts) - - tau = np.pi * 2 - angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + - tau) % tau - sortx = angle.argsort() - verts = verts.take(sortx, axis=0) - return verts - - # Generate a random exterior for each requested mask - masks = [] - for _ in range(num_masks): - exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) - exterior = (exterior * [(width, height)]).astype(dtype) - masks.append([exterior.ravel()]) - - self = cls(masks, height, width) - return self - - -def polygon_to_bitmap(polygons, height, width): - """Convert masks from the form of polygons to bitmaps. - - Args: - polygons (list[ndarray]): masks in polygon representation - height (int): mask height - width (int): mask width - - Return: - ndarray: the converted masks in bitmap representation - """ - rles = maskUtils.frPyObjects(polygons, height, width) - rle = maskUtils.merge(rles) - bitmap_mask = maskUtils.decode(rle).astype(np.bool) - return bitmap_mask diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/dense_heads/pisa_ssd_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/dense_heads/pisa_ssd_head.py deleted file mode 100644 index 90ef3c83ed62d8346c8daef01f18ad7bd236623c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/dense_heads/pisa_ssd_head.py +++ /dev/null @@ -1,139 +0,0 @@ -import torch - -from mmdet.core import multi_apply -from ..builder import HEADS -from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p -from .ssd_head import SSDHead - - -# TODO: add loss evaluator for SSD -@HEADS.register_module() -class PISASSDHead(SSDHead): - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. 
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). - gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss regression loss and - carl loss. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=1, - unmap_outputs=False, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - isr_cfg = self.train_cfg.get('isr', None) - all_targets = (all_labels.view(-1), all_label_weights.view(-1), - all_bbox_targets.view(-1, - 4), all_bbox_weights.view(-1, 4)) - # apply ISR-P - if isr_cfg is not None: - all_targets = isr_p( - all_cls_scores.view(-1, all_cls_scores.size(-1)), - all_bbox_preds.view(-1, 4), - all_targets, - torch.cat(all_anchors), - sampling_results_list, - loss_cls=CrossEntropyLoss(), - bbox_coder=self.bbox_coder, - **self.train_cfg.isr, - num_class=self.num_classes) - (new_labels, new_label_weights, new_bbox_targets, - new_bbox_weights) = all_targets - all_labels = new_labels.view(all_labels.shape) - all_label_weights = new_label_weights.view(all_label_weights.shape) - all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) - all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) - - # add CARL loss - carl_loss_cfg = self.train_cfg.get('carl', None) - if carl_loss_cfg is not None: - loss_carl = carl_loss( - all_cls_scores.view(-1, all_cls_scores.size(-1)), - all_targets[0], - all_bbox_preds.view(-1, 4), - all_targets[2], - SmoothL1Loss(beta=1.), - **self.train_cfg.carl, - avg_factor=num_total_pos, - num_class=self.num_classes) - - # check NaN and Inf - assert torch.isfinite(all_cls_scores).all().item(), \ - 'classification scores become infinite or NaN!' - assert torch.isfinite(all_bbox_preds).all().item(), \ - 'bbox predications become infinite or NaN!' 
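To make the tensor bookkeeping above easier to follow, here is a small standalone sketch (hypothetical feature-map sizes and 6 anchors per location assumed) of how per-level SSD score maps are flattened into one (batch, anchors, classes) tensor before the ISR-P and CARL terms are applied:

```python
import torch

num_images, num_classes = 2, 80
# two hypothetical pyramid levels with 6 anchors per spatial location
cls_scores = [torch.randn(num_images, 6 * num_classes, h, w) for h, w in [(38, 38), (19, 19)]]

all_cls_scores = torch.cat([
    s.permute(0, 2, 3, 1).reshape(num_images, -1, num_classes)  # (N, H*W*6, C) per level
    for s in cls_scores
], dim=1)
print(all_cls_scores.shape)  # torch.Size([2, 10830, 80]): one row of class logits per anchor
```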
- - losses_cls, losses_bbox = multi_apply( - self.loss_single, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - if carl_loss_cfg is not None: - loss_dict.update(loss_carl) - return loss_dict diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/point_rend_roi_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/point_rend_roi_head.py deleted file mode 100644 index 478cdf5bff6779e9291f94c543205289036ea2c6..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/point_rend_roi_head.py +++ /dev/null @@ -1,218 +0,0 @@ -# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa - -import torch -import torch.nn.functional as F -from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point - -from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks -from .. import builder -from ..builder import HEADS -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class PointRendRoIHead(StandardRoIHead): - """`PointRend `_.""" - - def __init__(self, point_head, *args, **kwargs): - super().__init__(*args, **kwargs) - assert self.with_bbox and self.with_mask - self.init_point_head(point_head) - - def init_point_head(self, point_head): - """Initialize ``point_head``""" - self.point_head = builder.build_head(point_head) - - def init_weights(self, pretrained): - """Initialize the weights in head. - - Args: - pretrained (str, optional): Path to pre-trained weights. - """ - super().init_weights(pretrained) - self.point_head.init_weights() - - def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, - img_metas): - """Run forward function and calculate loss for mask head and point head - in training.""" - mask_results = super()._mask_forward_train(x, sampling_results, - bbox_feats, gt_masks, - img_metas) - if mask_results['loss_mask'] is not None: - loss_point = self._mask_point_forward_train( - x, sampling_results, mask_results['mask_pred'], gt_masks, - img_metas) - mask_results['loss_mask'].update(loss_point) - - return mask_results - - def _mask_point_forward_train(self, x, sampling_results, mask_pred, - gt_masks, img_metas): - """Run forward function and calculate loss for point head in - training.""" - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - rel_roi_points = self.point_head.get_roi_rel_points_train( - mask_pred, pos_labels, cfg=self.train_cfg) - rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, rois, rel_roi_points, img_metas) - coarse_point_feats = point_sample(mask_pred, rel_roi_points) - mask_point_pred = self.point_head(fine_grained_point_feats, - coarse_point_feats) - mask_point_target = self.point_head.get_targets( - rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg) - loss_mask_point = self.point_head.loss(mask_point_pred, - mask_point_target, pos_labels) - - return loss_mask_point - - def _get_fine_grained_point_feats(self, x, rois, rel_roi_points, - img_metas): - """Sample fine grained feats from each level feature map and - concatenate them together.""" - num_imgs = len(img_metas) - fine_grained_feats = [] - for idx in range(self.mask_roi_extractor.num_inputs): 
- feats = x[idx] - spatial_scale = 1. / float( - self.mask_roi_extractor.featmap_strides[idx]) - point_feats = [] - for batch_ind in range(num_imgs): - # unravel batch dim - feat = feats[batch_ind].unsqueeze(0) - inds = (rois[:, 0].long() == batch_ind) - if inds.any(): - rel_img_points = rel_roi_point_to_rel_img_point( - rois[inds], rel_roi_points[inds], feat.shape[2:], - spatial_scale).unsqueeze(0) - point_feat = point_sample(feat, rel_img_points) - point_feat = point_feat.squeeze(0).transpose(0, 1) - point_feats.append(point_feat) - fine_grained_feats.append(torch.cat(point_feats, dim=0)) - return torch.cat(fine_grained_feats, dim=1) - - def _mask_point_forward_test(self, x, rois, label_pred, mask_pred, - img_metas): - """Mask refining process with point head in testing.""" - refined_mask_pred = mask_pred.clone() - for subdivision_step in range(self.test_cfg.subdivision_steps): - refined_mask_pred = F.interpolate( - refined_mask_pred, - scale_factor=self.test_cfg.scale_factor, - mode='bilinear', - align_corners=False) - # If `subdivision_num_points` is larger or equal to the - # resolution of the next step, then we can skip this step - num_rois, channels, mask_height, mask_width = \ - refined_mask_pred.shape - if (self.test_cfg.subdivision_num_points >= - self.test_cfg.scale_factor**2 * mask_height * mask_width - and - subdivision_step < self.test_cfg.subdivision_steps - 1): - continue - point_indices, rel_roi_points = \ - self.point_head.get_roi_rel_points_test( - refined_mask_pred, label_pred, cfg=self.test_cfg) - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, rois, rel_roi_points, img_metas) - coarse_point_feats = point_sample(mask_pred, rel_roi_points) - mask_point_pred = self.point_head(fine_grained_point_feats, - coarse_point_feats) - - point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) - refined_mask_pred = refined_mask_pred.reshape( - num_rois, channels, mask_height * mask_width) - refined_mask_pred = refined_mask_pred.scatter_( - 2, point_indices, mask_point_pred) - refined_mask_pred = refined_mask_pred.view(num_rois, channels, - mask_height, mask_width) - - return refined_mask_pred - - def simple_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False): - """Obtain mask prediction without augmentation.""" - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - num_imgs = len(det_bboxes) - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - segm_results = [[[] for _ in range(self.mask_head.num_classes)] - for _ in range(num_imgs)] - else: - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. 
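The subdivision loop above writes the refined point logits back into the flattened mask with `scatter_`; a toy sketch of that single operation (hypothetical sizes and values, single RoI and channel):

```python
import torch

num_rois, channels, H, W = 1, 1, 4, 4
refined = torch.zeros(num_rois, channels, H * W)           # flattened mask logits
point_indices = torch.tensor([[[3, 7, 12]]])               # flattened (y * W + x) positions to refine
point_logits = torch.tensor([[[0.9, -1.2, 2.5]]])          # new predictions at those points
refined = refined.scatter_(2, point_indices, point_logits)  # write refined values in place
print(refined.view(num_rois, channels, H, W))
```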
- if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - mask_rois = bbox2roi(_bboxes) - mask_results = self._mask_forward(x, mask_rois) - # split batch mask prediction back to each image - mask_pred = mask_results['mask_pred'] - num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] - mask_preds = mask_pred.split(num_mask_roi_per_img, 0) - mask_rois = mask_rois.split(num_mask_roi_per_img, 0) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - x_i = [xx[[i]] for xx in x] - mask_rois_i = mask_rois[i] - mask_rois_i[:, 0] = 0 # TODO: remove this hack - mask_pred_i = self._mask_point_forward_test( - x_i, mask_rois_i, det_labels[i], mask_preds[i], - [img_metas]) - segm_result = self.mask_head.get_seg_masks( - mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg, - ori_shapes[i], scale_factors[i], rescale) - segm_results.append(segm_result) - return segm_results - - def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): - """Test for mask head with test time augmentation.""" - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta in zip(feats, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip) - mask_rois = bbox2roi([_bboxes]) - mask_results = self._mask_forward(x, mask_rois) - mask_results['mask_pred'] = self._mask_point_forward_test( - x, mask_rois, det_labels, mask_results['mask_pred'], - img_metas) - # convert to numpy array to save memory - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - segm_result = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - self.test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return segm_result diff --git a/spaces/adirik/kakao-brain-vit/app.py b/spaces/adirik/kakao-brain-vit/app.py deleted file mode 100644 index 69cf2b549779dda4734652588ad369705754c5d2..0000000000000000000000000000000000000000 --- a/spaces/adirik/kakao-brain-vit/app.py +++ /dev/null @@ -1,78 +0,0 @@ -import cv2 -import json -import gradio as gr -import numpy as np -import tensorflow as tf - -from backbone import create_name_vit -from backbone import ClassificationModel - - - -vit_l16_512 = { - "backbone_name": "vit-l/16", - "backbone_params": { - "image_size": 512, - "representation_size": 0, - "attention_dropout_rate": 0., - "dropout_rate": 0., - "channels": 3 - }, - "dropout_rate": 0., - "pretrained": "./weights/vit_l16_512/model-weights" -} - -# Init backbone -backbone = create_name_vit(vit_l16_512["backbone_name"], **vit_l16_512["backbone_params"]) - -# Init classification model -model = ClassificationModel( - backbone=backbone, - dropout_rate=vit_l16_512["dropout_rate"], - num_classes=1000 -) - -# Load weights -model.load_weights(vit_l16_512["pretrained"]) -model.trainable = False - -# Load ImageNet idx to label mapping -with 
open("assets/imagenet_1000_idx2labels.json") as f: - idx_to_label = json.load(f) - - -def resize_with_normalization(image, size=[512, 512]): - image = tf.cast(image, tf.float32) - image = tf.image.resize(image, size) - image -= tf.constant(127.5, shape=(1, 1, 3), dtype=tf.float32) - image /= tf.constant(127.5, shape=(1, 1, 3), dtype=tf.float32) - image = tf.expand_dims(image, axis=0) - return image - -def softmax_stable(x): - return(np.exp(x - np.max(x)) / np.exp(x - np.max(x)).sum()) - -def classify_image(img, top_k): - img = tf.convert_to_tensor(img) - img = resize_with_normalization(img) - - pred_logits = model.predict(img, batch_size=1, workers=8)[0] - pred_probs = softmax_stable(pred_logits) - top_k_labels = pred_probs.argsort()[-top_k:][::-1] - - return {idx_to_label[str(idx)] : round(float(pred_probs[idx]), 4) for idx in top_k_labels} - - -demo = gr.Interface( - classify_image, - inputs=[gr.Image(), gr.Slider(0, 1000, value=5)], - outputs=gr.outputs.Label(), - title="Image Classification with Kakao Brain ViT", - examples=[ - ["assets/halloween-gaf8ad7ebc_1920.jpeg", 5], - ["assets/IMG_4484.jpeg", 5], - ["assets/IMG_4737.jpeg", 5], - ["assets/IMG_4740.jpeg", 5], - ], -) -demo.launch() \ No newline at end of file diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/template_multi_spk/voc1/run.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/template_multi_spk/voc1/run.sh deleted file mode 100644 index a98ef0f9f4bd4bf84eccfcc9a0f22112f02adebd..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/template_multi_spk/voc1/run.sh +++ /dev/null @@ -1,212 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -. ./cmd.sh || exit 1; -. ./path.sh || exit 1; - -# basic settings -stage=0 # stage to start -stop_stage=100 # stage to stop -verbose=1 # verbosity level (lower is less info) -n_gpus=1 # number of gpus in training -n_jobs=4 # number of parallel jobs in feature extraction - -# NOTE(kan-bayashi): renamed to conf to avoid conflict in parse_options.sh -conf=conf/parallel_wavegan.v1.yaml - -# directory path setting -db_root=/path/to/database # direcotry including spk name directory (MODIFY BY YOURSELF) - # e.g. - # /path/to/database - # ├── spk_1 - # │ ├── utt1.wav - # ├── spk_2 - # │ ├── utt1.wav - # │ ... - # └── spk_N - # ├── utt1.wav - # ... -dumpdir=dump # directory to dump features - -# subset setting -spks="all" # speaker name to be used (e.g. "spk1 spk2") - # it must be matched the name under the ${db_root} - # if set to "all", all of the speakers in ${db_root} will be used -shuffle=false # whether to shuffle the data to create subset -num_dev=10 # the number of development data for each speaker -num_eval=10 # the number of evaluation data for each speaker - # (if set to 0, the same dev set is used as eval set) - -# training related setting -tag="" # tag for directory to save model -resume="" # checkpoint path to resume training - # (e.g. //checkpoint-10000steps.pkl) -pretrain="" # checkpoint path to load pretrained parameters - # (e.g. ../../jsut///checkpoint-400000steps.pkl) - -# decoding related setting -checkpoint="" # checkpoint path to be used for decoding - # if not provided, the latest one will be used - # (e.g. //checkpoint-400000steps.pkl) - -# shellcheck disable=SC1091 -. 
utils/parse_options.sh || exit 1; - -train_set="train_nodev_$(echo "${spks}" | tr " " "_")" # name of training data directory -dev_set="dev_$(echo "${spks}" | tr " " "_")" # name of development data directory -eval_set="eval_$(echo "${spks}" | tr " " "_")" # name of evaluation data directory - -set -euo pipefail - -if [ "${stage}" -le 0 ] && [ "${stop_stage}" -ge 0 ]; then - echo "Stage 0: Data preparation" - train_data_dirs="" - dev_data_dirs="" - eval_data_dirs="" - if [ "${spks}" = "all" ]; then - spks=$(find "${db_root}" -maxdepth 1 ! -path "${db_root}" \ - -follow -type d -print0 -name "[^.]*" | xargs -0 -I{} basename {}) - fi - for spk in ${spks}; do - local/data_prep.sh \ - --fs "$(yq ".sampling_rate" "${conf}")" \ - --shuffle "${shuffle}" \ - --num_dev "${num_dev}" \ - --num_eval "${num_eval}" \ - --train_set "train_nodev_${spk}" \ - --dev_set "dev_${spk}" \ - --eval_set "eval_${spk}" \ - "${db_root}" "${spk}" data - train_data_dirs+=" data/train_nodev_${spk}" - dev_data_dirs+=" data/dev_${spk}" - eval_data_dirs+=" data/eval_${spk}" - done - # shellcheck disable=SC2086 - utils/combine_data.sh "data/${train_set}" ${train_data_dirs} - # shellcheck disable=SC2086 - utils/combine_data.sh "data/${dev_set}" ${dev_data_dirs} - # shellcheck disable=SC2086 - utils/combine_data.sh "data/${eval_set}" ${eval_data_dirs} -fi - -stats_ext=$(grep -q "hdf5" <(yq ".format" "${conf}") && echo "h5" || echo "npy") -if [ "${stage}" -le 1 ] && [ "${stop_stage}" -ge 1 ]; then - echo "Stage 1: Feature extraction" - # extract raw features - pids=() - for name in "${train_set}" "${dev_set}" "${eval_set}"; do - ( - [ ! -e "${dumpdir}/${name}/raw" ] && mkdir -p "${dumpdir}/${name}/raw" - echo "Feature extraction start. See the progress via ${dumpdir}/${name}/raw/preprocessing.*.log." - utils/make_subset_data.sh "data/${name}" "${n_jobs}" "${dumpdir}/${name}/raw" - ${train_cmd} JOB=1:${n_jobs} "${dumpdir}/${name}/raw/preprocessing.JOB.log" \ - parallel-wavegan-preprocess \ - --config "${conf}" \ - --scp "${dumpdir}/${name}/raw/wav.JOB.scp" \ - --dumpdir "${dumpdir}/${name}/raw/dump.JOB" \ - --verbose "${verbose}" - echo "Successfully finished feature extraction of ${name} set." - ) & - pids+=($!) - done - i=0; for pid in "${pids[@]}"; do wait "${pid}" || ((++i)); done - [ "${i}" -gt 0 ] && echo "$0: ${i} background jobs are failed." && exit 1; - echo "Successfully finished feature extraction." - - # calculate statistics for normalization - if [ -z "${pretrain}" ]; then - # calculate statistics for normalization - echo "Statistics computation start. See the progress via ${dumpdir}/${train_set}/compute_statistics.log." - ${train_cmd} "${dumpdir}/${train_set}/compute_statistics.log" \ - parallel-wavegan-compute-statistics \ - --config "${conf}" \ - --rootdir "${dumpdir}/${train_set}/raw" \ - --dumpdir "${dumpdir}/${train_set}" \ - --verbose "${verbose}" - echo "Successfully finished calculation of statistics." - else - echo "Use statistics of pretrained model. Skip statistics computation." - cp "$(dirname "${pretrain}")/stats.${stats_ext}" "${dumpdir}/${train_set}" - fi - - # normalize and dump them - pids=() - for name in "${train_set}" "${dev_set}" "${eval_set}"; do - ( - [ ! -e "${dumpdir}/${name}/norm" ] && mkdir -p "${dumpdir}/${name}/norm" - echo "Nomalization start. See the progress via ${dumpdir}/${name}/norm/normalize.*.log." 
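Conceptually, the statistics and normalization stages above compute per-dimension mean and standard deviation over the training features and then apply (x - mean) / std to every utterance; a rough Python sketch of that idea (hypothetical shapes, not the parallel_wavegan implementation):

```python
import numpy as np

# two hypothetical utterances of mel features with shape (frames, mel_bins)
train_feats = [np.random.randn(120, 80), np.random.randn(95, 80)]
stacked = np.concatenate(train_feats, axis=0)
mean, std = stacked.mean(axis=0), stacked.std(axis=0)

normalized = [(f - mean) / std for f in train_feats]
print(normalized[0].shape)  # (120, 80)
```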
- ${train_cmd} JOB=1:${n_jobs} "${dumpdir}/${name}/norm/normalize.JOB.log" \ - parallel-wavegan-normalize \ - --config "${conf}" \ - --stats "${dumpdir}/${train_set}/stats.${stats_ext}" \ - --rootdir "${dumpdir}/${name}/raw/dump.JOB" \ - --dumpdir "${dumpdir}/${name}/norm/dump.JOB" \ - --verbose "${verbose}" - echo "Successfully finished normalization of ${name} set." - ) & - pids+=($!) - done - i=0; for pid in "${pids[@]}"; do wait "${pid}" || ((++i)); done - [ "${i}" -gt 0 ] && echo "$0: ${i} background jobs are failed." && exit 1; - echo "Successfully finished normalization." -fi - -if [ -z "${tag}" ]; then - expdir="exp/${train_set}_$(basename "${conf}" .yaml)" - if [ -n "${pretrain}" ]; then - pretrain_tag=$(basename "$(dirname "${pretrain}")") - expdir+="_${pretrain_tag}" - fi -else - expdir="exp/${train_set}_${tag}" -fi -if [ "${stage}" -le 2 ] && [ "${stop_stage}" -ge 2 ]; then - echo "Stage 2: Network training" - [ ! -e "${expdir}" ] && mkdir -p "${expdir}" - cp "${dumpdir}/${train_set}/stats.${stats_ext}" "${expdir}" - if [ "${n_gpus}" -gt 1 ]; then - train="python -m parallel_wavegan.distributed.launch --nproc_per_node ${n_gpus} -c parallel-wavegan-train" - else - train="parallel-wavegan-train" - fi - echo "Training start. See the progress via ${expdir}/train.log." - ${cuda_cmd} --gpu "${n_gpus}" "${expdir}/train.log" \ - ${train} \ - --config "${conf}" \ - --train-dumpdir "${dumpdir}/${train_set}/norm" \ - --dev-dumpdir "${dumpdir}/${dev_set}/norm" \ - --outdir "${expdir}" \ - --resume "${resume}" \ - --pretrain "${pretrain}" \ - --verbose "${verbose}" - echo "Successfully finished training." -fi - -if [ "${stage}" -le 3 ] && [ "${stop_stage}" -ge 3 ]; then - echo "Stage 3: Network decoding" - # shellcheck disable=SC2012 - [ -z "${checkpoint}" ] && checkpoint="$(ls -dt "${expdir}"/*.pkl | head -1 || true)" - outdir="${expdir}/wav/$(basename "${checkpoint}" .pkl)" - pids=() - for name in "${dev_set}" "${eval_set}"; do - ( - [ ! -e "${outdir}/${name}" ] && mkdir -p "${outdir}/${name}" - [ "${n_gpus}" -gt 1 ] && n_gpus=1 - echo "Decoding start. See the progress via ${outdir}/${name}/decode.log." - ${cuda_cmd} --gpu "${n_gpus}" "${outdir}/${name}/decode.log" \ - parallel-wavegan-decode \ - --dumpdir "${dumpdir}/${name}/norm" \ - --checkpoint "${checkpoint}" \ - --outdir "${outdir}/${name}" \ - --verbose "${verbose}" - echo "Successfully finished decoding of ${name} set." - ) & - pids+=($!) - done - i=0; for pid in "${pids[@]}"; do wait "${pid}" || ((++i)); done - [ "${i}" -gt 0 ] && echo "$0: ${i} background jobs are failed." && exit 1; - echo "Successfully finished decoding." -fi -echo "Finished." diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py deleted file mode 100644 index fe8873c64a90d2ae3e44510453191e1ab4b5c84e..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pep517/envbuild.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Build wheels/sdists by installing build deps to a temporary environment. 
-""" - -import io -import os -import logging -import shutil -from subprocess import check_call -import sys -from sysconfig import get_paths -from tempfile import mkdtemp - -from .compat import toml_load -from .wrappers import Pep517HookCaller, LoggerWrapper - -log = logging.getLogger(__name__) - - -def _load_pyproject(source_dir): - with io.open( - os.path.join(source_dir, 'pyproject.toml'), - 'rb', - ) as f: - pyproject_data = toml_load(f) - buildsys = pyproject_data['build-system'] - return ( - buildsys['requires'], - buildsys['build-backend'], - buildsys.get('backend-path'), - ) - - -class BuildEnvironment(object): - """Context manager to install build deps in a simple temporary environment - - Based on code I wrote for pip, which is MIT licensed. - """ - # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - path = None - - def __init__(self, cleanup=True): - self._cleanup = cleanup - - def __enter__(self): - self.path = mkdtemp(prefix='pep517-build-env-') - log.info('Temporary build environment: %s', self.path) - - self.save_path = os.environ.get('PATH', None) - self.save_pythonpath = os.environ.get('PYTHONPATH', None) - - install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' - install_dirs = get_paths(install_scheme, vars={ - 'base': self.path, - 'platbase': self.path, - }) - - scripts = install_dirs['scripts'] - if self.save_path: - os.environ['PATH'] = scripts + os.pathsep + self.save_path - else: - os.environ['PATH'] = scripts + os.pathsep + os.defpath - - if install_dirs['purelib'] == install_dirs['platlib']: - lib_dirs = install_dirs['purelib'] - else: - lib_dirs = install_dirs['purelib'] + os.pathsep + \ - install_dirs['platlib'] - if self.save_pythonpath: - os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ - self.save_pythonpath - else: - os.environ['PYTHONPATH'] = lib_dirs - - return self - - def pip_install(self, reqs): - """Install dependencies into this env by calling pip in a subprocess""" - if not reqs: - return - log.info('Calling pip to install %s', reqs) - cmd = [ - sys.executable, '-m', 'pip', 'install', '--ignore-installed', - '--prefix', self.path] + list(reqs) - check_call( - cmd, - stdout=LoggerWrapper(log, logging.INFO), - stderr=LoggerWrapper(log, logging.ERROR), - ) - - def __exit__(self, exc_type, exc_val, exc_tb): - needs_cleanup = ( - self._cleanup and - self.path is not None and - os.path.isdir(self.path) - ) - if needs_cleanup: - shutil.rmtree(self.path) - - if self.save_path is None: - os.environ.pop('PATH', None) - else: - os.environ['PATH'] = self.save_path - - if self.save_pythonpath is None: - os.environ.pop('PYTHONPATH', None) - else: - os.environ['PYTHONPATH'] = self.save_pythonpath - - -def build_wheel(source_dir, wheel_dir, config_settings=None): - """Build a wheel from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str wheel_dir: Target directory to create wheel in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. - """ - if config_settings is None: - config_settings = {} - requires, backend, backend_path = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_wheel(config_settings) - env.pip_install(reqs) - return hooks.build_wheel(wheel_dir, config_settings) - - -def build_sdist(source_dir, sdist_dir, config_settings=None): - """Build an sdist from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str sdist_dir: Target directory to place sdist in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. 
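A hedged usage sketch of the module-level helpers, assuming the standalone `pep517` distribution rather than this vendored copy; `./my_project` and `./dist` are placeholder paths:

```python
from pep517.envbuild import build_wheel, build_sdist

# each call creates a temporary build environment, installs build requirements,
# and invokes the project's PEP 517 backend
wheel_name = build_wheel(source_dir="./my_project", wheel_dir="./dist")
sdist_name = build_sdist(source_dir="./my_project", sdist_dir="./dist")
print(wheel_name, sdist_name)
```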
- """ - if config_settings is None: - config_settings = {} - requires, backend, backend_path = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_sdist(config_settings) - env.pip_install(reqs) - return hooks.build_sdist(sdist_dir, config_settings) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/macos.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/macos.py deleted file mode 100644 index a01337c7764e1e1aba1f3ba378a1c9241f31806d..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/platformdirs/macos.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import os - -from .api import PlatformDirsABC - - -class MacOS(PlatformDirsABC): - """ - Platform directories for the macOS operating system. Follows the guidance from `Apple documentation - `_. - Makes use of the `appname ` and - `version `. - """ - - @property - def user_data_dir(self) -> str: - """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``""" - return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/")) - - @property - def site_data_dir(self) -> str: - """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``""" - return self._append_app_name_and_version("/Library/Application Support") - - @property - def user_config_dir(self) -> str: - """:return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``""" - return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/")) - - @property - def site_config_dir(self) -> str: - """:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``""" - return self._append_app_name_and_version("/Library/Preferences") - - @property - def user_cache_dir(self) -> str: - """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``""" - return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) - - @property - def user_state_dir(self) -> str: - """:return: state directory tied to the user, same as `user_data_dir`""" - return self.user_data_dir - - @property - def user_log_dir(self) -> str: - """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``""" - return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) - - @property - def user_documents_dir(self) -> str: - """:return: documents directory tied to the user, e.g. ``~/Documents``""" - return os.path.expanduser("~/Documents") - - @property - def user_runtime_dir(self) -> str: - """:return: runtime directory tied to the user, e.g. 
``~/Library/Caches/TemporaryItems/$appname/$version``""" - return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems")) - - -__all__ = [ - "MacOS", -] diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py deleted file mode 100644 index 47f5d9c17fbfed747f57cedb697c07f43f451604..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/html.py +++ /dev/null @@ -1,983 +0,0 @@ -""" - pygments.formatters.html - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter for HTML output. - - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import functools -import os -import sys -import os.path -from io import StringIO - -from pip._vendor.pygments.formatter import Formatter -from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES -from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt - -try: - import ctags -except ImportError: - ctags = None - -__all__ = ['HtmlFormatter'] - - -_escape_html_table = { - ord('&'): '&', - ord('<'): '<', - ord('>'): '>', - ord('"'): '"', - ord("'"): ''', -} - - -def escape_html(text, table=_escape_html_table): - """Escape &, <, > as well as single and double quotes for HTML.""" - return text.translate(table) - - -def webify(color): - if color.startswith('calc') or color.startswith('var'): - return color - else: - return '#' + color - - -def _get_ttype_class(ttype): - fname = STANDARD_TYPES.get(ttype) - if fname: - return fname - aname = '' - while fname is None: - aname = '-' + ttype[-1] + aname - ttype = ttype.parent - fname = STANDARD_TYPES.get(ttype) - return fname + aname - - -CSSFILE_TEMPLATE = '''\ -/* -generated by Pygments -Copyright 2006-2021 by the Pygments team. -Licensed under the BSD license, see LICENSE for details. -*/ -%(styledefs)s -''' - -DOC_HEADER = '''\ - - - - - %(title)s - - - - -

-<h2>%(title)s</h2>
-
-'''
-
-DOC_HEADER_EXTERNALCSS = '''\
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
-   "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-  <title>%(title)s</title>
-  <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
-  <link rel="stylesheet" href="%(cssfile)s" type="text/css">
-</head>
-<body>
-<h2>%(title)s</h2>
-
-'''
-
-DOC_FOOTER = '''\
-</body>
-</html>
-'''
-
-
-class HtmlFormatter(Formatter):
-    r"""
-    Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
-    in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
-    option.
-
-    If the `linenos` option is set to ``"table"``, the ``<pre>`` is
-    additionally wrapped inside a ``<table>`` which has one row and two
-    cells: one containing the line numbers and one containing the code.
-    Example:
-
-    .. sourcecode:: html
-
-        <div class="highlight">
-        <table><tr>
-          <td class="linenos">
-            <pre>1
-            2</pre>
-          </td>
-          <td class="code">
-            <pre>def foo(bar):
-              pass
-            </pre>
-          </td>
-        </tr></table></div>
-
-    (whitespace added to improve clarity).
-
-    Wrapping can be disabled using the `nowrap` option.
-
-    A list of lines can be specified using the `hl_lines` option to make these
-    lines highlighted (as of Pygments 0.11).
-
-    With the `full` option, a complete HTML 4 document is output, including
-    the style definitions inside a ``<style>`` tag, or in a separate file if
-    the `cssfile` option is given.
- -
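For orientation, typical use of this formatter goes through `pygments.highlight`; a brief sketch using the standard Pygments API (the options shown are illustrative):

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "def foo(bar):\n    pass\n"
formatter = HtmlFormatter(linenos="table", cssclass="highlight", full=True, title="demo")
print(highlight(code, PythonLexer(), formatter)[:200])  # full HTML document with embedded CSS
```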
      - - - \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/models/utils/reconstructionLayers.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/models/utils/reconstructionLayers.py deleted file mode 100644 index 37302c52f8aa65292ab722d73d6ff6af910bca52..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/models/utils/reconstructionLayers.py +++ /dev/null @@ -1,143 +0,0 @@ -# Two types of reconstructionl layers: 1. original residual layers, 2. residual layers with contrast and adaptive attention(CCA layer) -import torch -import torch.nn as nn -import torch.nn.init as init - - -def initialize_weights(net_l, scale=1): - if not isinstance(net_l, list): - net_l = [net_l] - for net in net_l: - for m in net.modules(): - if isinstance(m, nn.Conv2d): - init.kaiming_normal_(m.weight, a=0, mode='fan_in') - m.weight.data *= scale # for residual block - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - init.kaiming_normal_(m.weight, a=0, mode='fan_in') - m.weight.data *= scale - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - init.constant_(m.weight, 1) - init.constant_(m.bias.data, 0.0) - - -def make_layer(block, n_layers): - layers = [] - for _ in range(n_layers): - layers.append(block()) - return nn.Sequential(*layers) - - -class ResidualBlock_noBN(nn.Module): - """Residual block w/o BN - ---Conv-ReLU-Conv-+- - |________________| - """ - - def __init__(self, nf=64): - super(ResidualBlock_noBN, self).__init__() - self.conv1 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True) - self.conv2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - def forward(self, x): - """ - - Args: - x: with shape of [b, c, t, h, w] - - Returns: processed features with shape [b, c, t, h, w] - - """ - identity = x - out = self.lrelu(self.conv1(x)) - out = self.conv2(out) - out = identity + out - # Remove ReLU at the end of the residual block - # http://torch.ch/blog/2016/02/04/resnets.html - return out - - -class ResBlock_noBN_new(nn.Module): - def __init__(self, nf): - super(ResBlock_noBN_new, self).__init__() - self.c1 = nn.Conv3d(nf, nf // 4, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1), bias=True) - self.d1 = nn.Conv3d(nf // 4, nf // 4, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1), - bias=True) # dilation rate=1 - self.d2 = nn.Conv3d(nf // 4, nf // 4, kernel_size=(1, 3, 3), stride=1, padding=(0, 2, 2), dilation=(1, 2, 2), - bias=True) # dilation rate=2 - self.d3 = nn.Conv3d(nf // 4, nf // 4, kernel_size=(1, 3, 3), stride=1, padding=(0, 4, 4), dilation=(1, 4, 4), - bias=True) # dilation rate=4 - self.d4 = nn.Conv3d(nf // 4, nf // 4, kernel_size=(1, 3, 3), stride=1, padding=(0, 8, 8), dilation=(1, 8, 8), - bias=True) # dilation rate=8 - self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True) - self.c2 = nn.Conv3d(nf, nf, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1), bias=True) - - def forward(self, x): - output1 = self.act(self.c1(x)) - d1 = self.d1(output1) - d2 = self.d2(output1) - d3 = self.d3(output1) - d4 = self.d4(output1) - - add1 = d1 + d2 - add2 = add1 + d3 - add3 = add2 + d4 - combine = torch.cat([d1, add1, add2, add3], dim=1) - output2 = self.c2(self.act(combine)) - output = x + output2 - # remove ReLU at the end of the residual block - # http://torch.ch/blog/2016/02/04/resnets.html - return output - - -class CCALayer(nn.Module): 
#############################################3 new - '''Residual block w/o BN - --conv--contrast-conv--x--- - | \--mean--| | - |___________________| - - ''' - - def __init__(self, nf=64): - super(CCALayer, self).__init__() - self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - self.conv_du = nn.Sequential( - nn.Conv2d(nf, 4, 1, padding=0, bias=True), - nn.ReLU(inplace=True), - nn.Conv2d(4, nf, 1, padding=0, bias=True), - nn.Tanh() # change from `Sigmoid` to `Tanh` to make the output between -1 and 1 - ) - self.contrast = stdv_channels - self.avg_pool = nn.AdaptiveAvgPool2d(1) - # initialization - initialize_weights([self.conv1, self.conv_du], 0.1) - - def forward(self, x): - identity = x - out = self.lrelu(self.conv1(x)) - out = self.conv2(out) - out = self.contrast(out) + self.avg_pool(out) - out_channel = self.conv_du(out) - out_channel = out_channel * out - out_last = out_channel + identity - - return out_last - - -def mean_channels(F): - assert (F.dim() == 4), 'Your dim is {} bit not 4'.format(F.dim()) - spatial_sum = F.sum(3, keepdim=True).sum(2, keepdim=True) - return spatial_sum / (F.size(2) * F.size(3)) # 对每一个channel都求其特征图的高和宽的平均值 - - -def stdv_channels(F): - assert F.dim() == 4, 'Your dim is {} bit not 4'.format(F.dim()) - F_mean = mean_channels(F) - F_variance = (F - F_mean).pow(2).sum(3, keepdim=True).sum(2, keepdim=True) / (F.size(2) * F.size(3)) - return F_variance.pow(0.5) diff --git a/spaces/oldfart/removaltool/app.py b/spaces/oldfart/removaltool/app.py deleted file mode 100644 index ecd4623cf824a563a8d374cd412feea01cc73152..0000000000000000000000000000000000000000 --- a/spaces/oldfart/removaltool/app.py +++ /dev/null @@ -1,156 +0,0 @@ -import cv2 -import gradio as gr -import os -from PIL import Image -import numpy as np -import torch -from torch.autograd import Variable -from torchvision import transforms -import torch.nn.functional as F -import gdown -import matplotlib.pyplot as plt -import warnings -warnings.filterwarnings("ignore") - -os.system("git clone https://github.com/xuebinqin/DIS") -os.system("mv DIS/IS-Net/* .") - -# project imports -from data_loader_cache import normalize, im_reader, im_preprocess -from models import * - -#Helpers -device = 'cuda' if torch.cuda.is_available() else 'cpu' - -# Download official weights -if not os.path.exists("saved_models"): - os.mkdir("saved_models") - MODEL_PATH_URL = "https://drive.google.com/uc?id=1KyMpRjewZdyYfxHPYcd-ZbanIXtin0Sn" - gdown.download(MODEL_PATH_URL, "saved_models/isnet.pth", use_cookies=False) - -class GOSNormalize(object): - ''' - Normalize the Image using torch.transforms - ''' - def __init__(self, mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225]): - self.mean = mean - self.std = std - - def __call__(self,image): - image = normalize(image,self.mean,self.std) - return image - - -transform = transforms.Compose([GOSNormalize([0.5,0.5,0.5],[1.0,1.0,1.0])]) - -def load_image(im_path, hypar): - im = im_reader(im_path) - im, im_shp = im_preprocess(im, hypar["cache_size"]) - im = torch.divide(im,255.0) - shape = torch.from_numpy(np.array(im_shp)) - return transform(im).unsqueeze(0), shape.unsqueeze(0) # make a batch of image, shape - - -def build_model(hypar,device): - net = hypar["model"]#GOSNETINC(3,1) - - # convert to half precision - if(hypar["model_digit"]=="half"): - net.half() - for layer in net.modules(): - if isinstance(layer, nn.BatchNorm2d): - layer.float() - - net.to(device) - - 
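The `build_model` helper above casts the whole network to half precision but keeps BatchNorm layers in float32 for numerical stability; a minimal standalone sketch of that pattern (toy network assumed):

```python
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
net.half()  # convert all parameters to float16
for layer in net.modules():
    if isinstance(layer, nn.BatchNorm2d):
        layer.float()  # revert BatchNorm parameters and running stats to float32
print(next(net[0].parameters()).dtype, next(net[1].parameters()).dtype)  # torch.float16 torch.float32
```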
if(hypar["restore_model"]!=""): - net.load_state_dict(torch.load(hypar["model_path"]+"/"+hypar["restore_model"], map_location=device)) - net.to(device) - net.eval() - return net - - -def predict(net, inputs_val, shapes_val, hypar, device): - ''' - Given an Image, predict the mask - ''' - net.eval() - - if(hypar["model_digit"]=="full"): - inputs_val = inputs_val.type(torch.FloatTensor) - else: - inputs_val = inputs_val.type(torch.HalfTensor) - - - inputs_val_v = Variable(inputs_val, requires_grad=False).to(device) # wrap inputs in Variable - - ds_val = net(inputs_val_v)[0] # list of 6 results - - pred_val = ds_val[0][0,:,:,:] # B x 1 x H x W # we want the first one which is the most accurate prediction - - ## recover the prediction spatial size to the orignal image size - pred_val = torch.squeeze(F.upsample(torch.unsqueeze(pred_val,0),(shapes_val[0][0],shapes_val[0][1]),mode='bilinear')) - - ma = torch.max(pred_val) - mi = torch.min(pred_val) - pred_val = (pred_val-mi)/(ma-mi) # max = 1 - - if device == 'cuda': torch.cuda.empty_cache() - return (pred_val.detach().cpu().numpy()*255).astype(np.uint8) # it is the mask we need - -# Set Parameters -hypar = {} # paramters for inferencing - - -hypar["model_path"] ="./saved_models" ## load trained weights from this path -hypar["restore_model"] = "isnet.pth" ## name of the to-be-loaded weights -hypar["interm_sup"] = False ## indicate if activate intermediate feature supervision - -## choose floating point accuracy -- -hypar["model_digit"] = "full" ## indicates "half" or "full" accuracy of float number -hypar["seed"] = 0 - -hypar["cache_size"] = [1024, 1024] ## cached input spatial resolution, can be configured into different size - -## data augmentation parameters --- -hypar["input_size"] = [1024, 1024] ## mdoel input spatial size, usually use the same value hypar["cache_size"], which means we don't further resize the images -hypar["crop_size"] = [1024, 1024] ## random crop size from the input, it is usually set as smaller than hypar["cache_size"], e.g., [920,920] for data augmentation - -hypar["model"] = ISNetDIS() - - # Build Model -net = build_model(hypar, device) - - -def inference(image: Image): - image_path = image - - image_tensor, orig_size = load_image(image_path, hypar) - mask = predict(net, image_tensor, orig_size, hypar, device) - - pil_mask = Image.fromarray(mask).convert('L') - im_rgb = Image.open(image).convert("RGB") - - im_rgba = im_rgb.copy() - im_rgba.putalpha(pil_mask) - - return [im_rgba,pil_mask] - - -title = "" -description = "" -article = "" - -interface = gr.Interface( - fn=inference, - inputs=gr.Image(type='filepath'), - outputs=["image","image"], - examples=[['robot.png'], ['ship.png']], - title=title, - description=description, - article=article, - allow_flagging='never', - theme="default", - cache_examples=False, - css="footer {visibility: hidden}" - ).launch(enable_queue=True, debug=True) diff --git a/spaces/omlab/vlchecklist_demo/models/vilt/datamodules/datamodule_base.py b/spaces/omlab/vlchecklist_demo/models/vilt/datamodules/datamodule_base.py deleted file mode 100644 index b8a3ec1d93fa38e3a88d259a992c8257e8ab333e..0000000000000000000000000000000000000000 --- a/spaces/omlab/vlchecklist_demo/models/vilt/datamodules/datamodule_base.py +++ /dev/null @@ -1,178 +0,0 @@ -import torch - -from pytorch_lightning import LightningDataModule -from torch.utils.data import DataLoader -from transformers import ( - DataCollatorForLanguageModeling, - DataCollatorForWholeWordMask, - BertTokenizer, -) - - -def 
get_pretrained_tokenizer(from_pretrained): - if torch.distributed.is_initialized(): - if torch.distributed.get_rank() == 0: - BertTokenizer.from_pretrained( - from_pretrained, do_lower_case="uncased" in from_pretrained - ) - torch.distributed.barrier() - return BertTokenizer.from_pretrained( - from_pretrained, do_lower_case="uncased" in from_pretrained - ) - - -class BaseDataModule(LightningDataModule): - def __init__(self, _config): - super().__init__() - - self.data_dir = _config["data_root"] - - self.num_workers = _config["num_workers"] - self.batch_size = _config["per_gpu_batchsize"] - self.eval_batch_size = self.batch_size - - self.image_size = _config["image_size"] - self.max_text_len = _config["max_text_len"] - self.draw_false_image = _config["draw_false_image"] - self.draw_false_text = _config["draw_false_text"] - self.image_only = _config["image_only"] - - self.train_transform_keys = ( - ["default_train"] - if len(_config["train_transform_keys"]) == 0 - else _config["train_transform_keys"] - ) - - self.val_transform_keys = ( - ["default_val"] - if len(_config["val_transform_keys"]) == 0 - else _config["val_transform_keys"] - ) - - tokenizer = _config["tokenizer"] - self.tokenizer = get_pretrained_tokenizer(tokenizer) - self.vocab_size = self.tokenizer.vocab_size - - collator = ( - DataCollatorForWholeWordMask - if _config["whole_word_masking"] - else DataCollatorForLanguageModeling - ) - - self.mlm_collator = collator( - tokenizer=self.tokenizer, mlm=True, mlm_probability=_config["mlm_prob"] - ) - self.setup_flag = False - - @property - def dataset_cls(self): - raise NotImplementedError("return tuple of dataset class") - - @property - def dataset_name(self): - raise NotImplementedError("return name of dataset") - - def set_train_dataset(self): - self.train_dataset = self.dataset_cls( - self.data_dir, - self.train_transform_keys, - split="train", - image_size=self.image_size, - max_text_len=self.max_text_len, - draw_false_image=self.draw_false_image, - draw_false_text=self.draw_false_text, - image_only=self.image_only, - ) - - def set_val_dataset(self): - self.val_dataset = self.dataset_cls( - self.data_dir, - self.val_transform_keys, - split="val", - image_size=self.image_size, - max_text_len=self.max_text_len, - draw_false_image=self.draw_false_image, - draw_false_text=self.draw_false_text, - image_only=self.image_only, - ) - - if hasattr(self, "dataset_cls_no_false"): - self.val_dataset_no_false = self.dataset_cls_no_false( - self.data_dir, - self.val_transform_keys, - split="val", - image_size=self.image_size, - max_text_len=self.max_text_len, - draw_false_image=0, - draw_false_text=0, - image_only=self.image_only, - ) - - def make_no_false_val_dset(self, image_only=False): - return self.dataset_cls_no_false( - self.data_dir, - self.val_transform_keys, - split="val", - image_size=self.image_size, - max_text_len=self.max_text_len, - draw_false_image=0, - draw_false_text=0, - image_only=image_only, - ) - - def set_test_dataset(self): - self.test_dataset = self.dataset_cls( - self.data_dir, - self.val_transform_keys, - split="test", - image_size=self.image_size, - max_text_len=self.max_text_len, - draw_false_image=self.draw_false_image, - draw_false_text=self.draw_false_text, - image_only=self.image_only, - ) - - def setup(self, stage): - if not self.setup_flag: - self.set_train_dataset() - self.set_val_dataset() - self.set_test_dataset() - - self.train_dataset.tokenizer = self.tokenizer - self.val_dataset.tokenizer = self.tokenizer - self.test_dataset.tokenizer = self.tokenizer - 
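A small standalone sketch of the masked-language-modeling collator configured in `__init__` above, assuming `bert-base-uncased` is available locally or downloadable; the sentences are toy examples, not the datamodule's actual batch format:

```python
from transformers import BertTokenizer, DataCollatorForLanguageModeling

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

examples = [tokenizer("a cat sits on the mat"), tokenizer("a dog runs")]
batch = collator(examples)  # pads, masks ~15% of tokens, and builds MLM labels
print(batch["input_ids"].shape, batch["labels"].shape)
```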
- self.setup_flag = True - - def train_dataloader(self): - loader = DataLoader( - self.train_dataset, - batch_size=self.batch_size, - shuffle=True, - num_workers=self.num_workers, - pin_memory=True, - collate_fn=self.train_dataset.collate, - ) - return loader - - def val_dataloader(self): - loader = DataLoader( - self.val_dataset, - batch_size=self.eval_batch_size, - shuffle=False, - num_workers=self.num_workers, - pin_memory=True, - collate_fn=self.val_dataset.collate, - ) - return loader - - def test_dataloader(self): - loader = DataLoader( - self.test_dataset, - batch_size=self.eval_batch_size, - shuffle=False, - num_workers=self.num_workers, - pin_memory=True, - collate_fn=self.test_dataset.collate, - ) - return loader diff --git a/spaces/openflamingo/OpenFlamingo/open_flamingo/open_flamingo/eval/models/open_flamingo.py b/spaces/openflamingo/OpenFlamingo/open_flamingo/open_flamingo/eval/models/open_flamingo.py deleted file mode 100644 index c7328bece80b785d89c8ad37ee5a5faab3c3300f..0000000000000000000000000000000000000000 --- a/spaces/openflamingo/OpenFlamingo/open_flamingo/open_flamingo/eval/models/open_flamingo.py +++ /dev/null @@ -1,176 +0,0 @@ -from typing import List - -from PIL import Image -import torch - -from open_flamingo.eval.eval_model import BaseEvalModel -from open_flamingo.src.factory import create_model_and_transforms -from contextlib import suppress -from open_flamingo.eval.models.utils import unwrap_model - -class EvalModel(BaseEvalModel): - """OpenFlamingo model evaluation. - - Attributes: - model (nn.Module): Underlying Torch model. - tokenizer (transformers.PreTrainedTokenizer): Tokenizer for model. - device: Index of GPU to use, or the string "CPU" - """ - - def __init__(self, model_args): - assert ( - "vision_encoder_path" in model_args - and "lm_path" in model_args - and "checkpoint_path" in model_args - and "lm_tokenizer_path" in model_args - and "cross_attn_every_n_layers" in model_args - and "vision_encoder_pretrained" in model_args - and "precision" in model_args - ), "OpenFlamingo requires vision_encoder_path, lm_path, device, checkpoint_path, lm_tokenizer_path, cross_attn_every_n_layers, vision_encoder_pretrained, and precision arguments to be specified" - - self.device = ( - model_args["device"] - if ("device" in model_args and model_args["device"] >= 0) - else "cpu" - ) - - ( - self.model, - self.image_processor, - self.tokenizer, - ) = create_model_and_transforms( - model_args["vision_encoder_path"], - model_args["vision_encoder_pretrained"], - model_args["lm_path"], - model_args["lm_tokenizer_path"], - cross_attn_every_n_layers=int(model_args["cross_attn_every_n_layers"]), - ) - checkpoint = torch.load(model_args["checkpoint_path"], map_location="cpu") - if "model_state_dict" in checkpoint: - checkpoint = checkpoint["model_state_dict"] - checkpoint = {k.replace("module.", ""): v for k, v in checkpoint.items()} - self.model.load_state_dict(checkpoint, strict=False) - self.model.to(self.device) - self.model.eval() - self.tokenizer.padding_side = "left" - - # autocast - self.autocast = get_autocast(model_args["precision"]) - self.cast_dtype = get_cast_dtype(model_args["precision"]) - - def _prepare_images(self, batch: List[List[torch.Tensor]]) -> torch.Tensor: - """Preprocess images and stack them. - - Args: - batch: A list of lists of images. - - Returns: - A Tensor of shape - (batch_size, images_per_example, frames, channels, height, width). 
- """ - images_per_example = max(len(x) for x in batch) - batch_images = None - for iexample, example in enumerate(batch): - for iimage, image in enumerate(example): - preprocessed = self.image_processor(image) - - if batch_images is None: - batch_images = torch.zeros( - (len(batch), images_per_example, 1) + preprocessed.shape, - dtype=preprocessed.dtype, - ) - batch_images[iexample, iimage, 0] = preprocessed - return batch_images - - def get_outputs( - self, - batch_text: List[str], - batch_images: List[List[Image.Image]], - min_generation_length: int, - max_generation_length: int, - num_beams: int, - length_penalty: float, - ) -> List[str]: - encodings = self.tokenizer( - batch_text, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - - with torch.inference_mode(): - with self.autocast(): - outputs = unwrap_model(self.model).generate( - self._prepare_images(batch_images).to( - self.device, dtype=self.cast_dtype, non_blocking=True - ), - input_ids.to(self.device, dtype=self.cast_dtype, non_blocking=True), - attention_mask=attention_mask.to( - self.device, dtype=self.cast_dtype, non_blocking=True - ), - min_new_tokens=min_generation_length, - max_new_tokens=max_generation_length, - num_beams=num_beams, - length_penalty=length_penalty, - ) - - outputs = outputs[:, len(input_ids[0]) :] - - return self.tokenizer.batch_decode(outputs, skip_special_tokens=True) - - def get_logits( - self, - lang_x: torch.Tensor, - vision_x: torch.Tensor = None, - attention_mask: torch.Tensor = None, - past_key_values: torch.Tensor = None, - clear_conditioned_layers: bool = False, - ): - with torch.inference_mode(): - with self.autocast(): - outputs = self.model( - vision_x=vision_x, - lang_x=lang_x, - attention_mask=attention_mask, - clear_conditioned_layers=clear_conditioned_layers, - past_key_values=past_key_values, - use_cache=(past_key_values is not None), - ) - return outputs - - def encode_vision_x(self, image_tensor: torch.Tensor): - unwrap_model(self.model)._encode_vision_x(image_tensor.to(self.device)) - - def uncache_media(self): - unwrap_model(self.model).uncache_media() - - def cache_media(self, input_ids, vision_x): - unwrap_model(self.model).cache_media(input_ids=input_ids, vision_x=vision_x) - - def get_vqa_prompt(self, question, answer=None) -> str: - return f"Question:{question} Short answer:{answer if answer is not None else ''}{'<|endofchunk|>' if answer is not None else ''}" - - def get_caption_prompt(self, caption=None) -> str: - return f"Output:{caption if caption is not None else ''}{'<|endofchunk|>' if caption is not None else ''}" - - -def get_cast_dtype(precision: str): - cast_dtype = None - if precision == "bf16": - cast_dtype = torch.bfloat16 - elif precision == "fp16": - cast_dtype = torch.float16 - return cast_dtype - - -def get_autocast(precision): - if precision == "amp": - return torch.cuda.amp.autocast - elif precision == "amp_bfloat16" or precision == "amp_bf16": - # amp_bfloat16 is more stable than amp float16 for clip training - return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16) - else: - return suppress diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/panorama.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/panorama.md deleted file mode 100644 index a0ad0d326188c79c8e88ae2869a52e9b73809b68..0000000000000000000000000000000000000000 --- 
a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/panorama.md +++ /dev/null @@ -1,57 +0,0 @@ - - -# MultiDiffusion - -[MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation](https://huggingface.co/papers/2302.08113) is by Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel. - -The abstract from the paper is: - -*Recent advances in text-to-image generation with diffusion models present transformative capabilities in image quality. However, user controllability of the generated image, and fast adaptation to new tasks still remains an open challenge, currently mostly addressed by costly and long re-training and fine-tuning or ad-hoc adaptations to specific image generation tasks. In this work, we present MultiDiffusion, a unified framework that enables versatile and controllable image generation, using a pre-trained text-to-image diffusion model, without any further training or finetuning. At the center of our approach is a new generation process, based on an optimization task that binds together multiple diffusion generation processes with a shared set of parameters or constraints. We show that MultiDiffusion can be readily applied to generate high quality and diverse images that adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes.* - -You can find additional information about MultiDiffusion on the [project page](https://multidiffusion.github.io/), [original codebase](https://github.com/omerbt/MultiDiffusion), and try it out in a [demo](https://huggingface.co/spaces/weizmannscience/MultiDiffusion). - -## Tips - -While calling [`StableDiffusionPanoramaPipeline`], it's possible to specify the `view_batch_size` parameter to be > 1. -For some GPUs with high performance, this can speedup the generation process and increase VRAM usage. - -To generate panorama-like images make sure you pass the width parameter accordingly. We recommend a width value of 2048 which is the default. - -Circular padding is applied to ensure there are no stitching artifacts when working with -panoramas to ensure a seamless transition from the rightmost part to the leftmost part. -By enabling circular padding (set `circular_padding=True`), the operation applies additional -crops after the rightmost point of the image, allowing the model to "see” the transition -from the rightmost part to the leftmost part. This helps maintain visual consistency in -a 360-degree sense and creates a proper “panorama” that can be viewed using 360-degree -panorama viewers. When decoding latents in Stable Diffusion, circular padding is applied -to ensure that the decoded latents match in the RGB space. - -For example, without circular padding, there is a stitching artifact (default): -![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/indoor_%20no_circular_padding.png) - -But with circular padding, the right and the left parts are matching (`circular_padding=True`): -![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/indoor_%20circular_padding.png) - - - -Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
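
The deleted panorama doc above describes `view_batch_size`, the recommended `width=2048`, and `circular_padding`, but the removed page did not carry a runnable snippet at this point. A minimal sketch of how those options fit together with the public `StableDiffusionPanoramaPipeline` API is shown below; the checkpoint id, prompt, and dtype are assumptions for illustration, not part of the deleted file.

```py
# Minimal sketch (not from the deleted doc): panorama generation with circular padding.
# Assumptions: "stabilityai/stable-diffusion-2-base" checkpoint, fp16 on CUDA, example prompt.
import torch
from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler

model_ckpt = "stabilityai/stable-diffusion-2-base"  # assumed checkpoint
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of the dolomites"  # assumed prompt
image = pipe(
    prompt,
    width=2048,             # wide output, as recommended above for panorama-like images
    view_batch_size=4,      # batch several views per denoising step on capable GPUs
    circular_padding=True,  # seamless wrap from the rightmost to the leftmost edge
).images[0]
image.save("panorama.png")
```

Setting `circular_padding=True` trades a little extra compute per step for the seam-free wrap described above; leaving it at the default reproduces the stitching artifact shown in the first image.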
- - - -## StableDiffusionPanoramaPipeline -[[autodoc]] StableDiffusionPanoramaPipeline - - __call__ - - all - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky2_2/__init__.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky2_2/__init__.py deleted file mode 100644 index 461e3d25ca731735164727b784893fbde75c3a12..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky2_2/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - get_objects_from_module, - is_torch_available, - is_transformers_available, -) - - -_dummy_objects = {} -_import_structure = {} - -try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 - - _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) -else: - _import_structure["pipeline_kandinsky2_2"] = ["KandinskyV22Pipeline"] - _import_structure["pipeline_kandinsky2_2_combined"] = [ - "KandinskyV22CombinedPipeline", - "KandinskyV22Img2ImgCombinedPipeline", - "KandinskyV22InpaintCombinedPipeline", - ] - _import_structure["pipeline_kandinsky2_2_controlnet"] = ["KandinskyV22ControlnetPipeline"] - _import_structure["pipeline_kandinsky2_2_controlnet_img2img"] = ["KandinskyV22ControlnetImg2ImgPipeline"] - _import_structure["pipeline_kandinsky2_2_img2img"] = ["KandinskyV22Img2ImgPipeline"] - _import_structure["pipeline_kandinsky2_2_inpainting"] = ["KandinskyV22InpaintPipeline"] - _import_structure["pipeline_kandinsky2_2_prior"] = ["KandinskyV22PriorPipeline"] - _import_structure["pipeline_kandinsky2_2_prior_emb2emb"] = ["KandinskyV22PriorEmb2EmbPipeline"] - - -if TYPE_CHECKING: - try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - - except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * - else: - from .pipeline_kandinsky2_2 import KandinskyV22Pipeline - from .pipeline_kandinsky2_2_combined import ( - KandinskyV22CombinedPipeline, - KandinskyV22Img2ImgCombinedPipeline, - KandinskyV22InpaintCombinedPipeline, - ) - from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline - from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline - from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline - from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline - from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline - from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline - -else: - import sys - - sys.modules[__name__] = _LazyModule( - __name__, - globals()["__file__"], - _import_structure, - module_spec=__spec__, - ) - - for name, value in _dummy_objects.items(): - setattr(sys.modules[__name__], name, value) diff --git a/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v2/files/README.md b/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v2/files/README.md deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/response.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/response.py deleted file mode 100644 index 8909f8454e94752d188ed13cf36c35f93fc6c3f2..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/response.py +++ /dev/null @@ -1,879 +0,0 @@ -from __future__ import absolute_import - -import io -import logging -import sys -import warnings -import zlib -from contextlib import contextmanager -from socket import error as SocketError -from socket import timeout as SocketTimeout - -brotli = None - -from . import util -from ._collections import HTTPHeaderDict -from .connection import BaseSSLError, HTTPException -from .exceptions import ( - BodyNotHttplibCompatible, - DecodeError, - HTTPError, - IncompleteRead, - InvalidChunkLength, - InvalidHeader, - ProtocolError, - ReadTimeoutError, - ResponseNotChunked, - SSLError, -) -from .packages import six -from .util.response import is_fp_closed, is_response_to_head - -log = logging.getLogger(__name__) - - -class DeflateDecoder(object): - def __init__(self): - self._first_try = True - self._data = b"" - self._obj = zlib.decompressobj() - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - if not data: - return data - - if not self._first_try: - return self._obj.decompress(data) - - self._data += data - try: - decompressed = self._obj.decompress(data) - if decompressed: - self._first_try = False - self._data = None - return decompressed - except zlib.error: - self._first_try = False - self._obj = zlib.decompressobj(-zlib.MAX_WBITS) - try: - return self.decompress(self._data) - finally: - self._data = None - - -class GzipDecoderState(object): - - FIRST_MEMBER = 0 - OTHER_MEMBERS = 1 - SWALLOW_DATA = 2 - - -class GzipDecoder(object): - def __init__(self): - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - self._state = GzipDecoderState.FIRST_MEMBER - - def __getattr__(self, name): - return getattr(self._obj, name) - - def decompress(self, data): - ret = bytearray() - if self._state == GzipDecoderState.SWALLOW_DATA or not data: - return bytes(ret) - while True: - try: - ret += self._obj.decompress(data) - except zlib.error: - previous_state = self._state - # Ignore data after the first error - self._state = GzipDecoderState.SWALLOW_DATA - if previous_state == GzipDecoderState.OTHER_MEMBERS: - # Allow trailing garbage acceptable in other gzip clients - return bytes(ret) - raise - data = self._obj.unused_data - if not data: - return bytes(ret) - self._state = GzipDecoderState.OTHER_MEMBERS - self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) - - -if brotli is not None: - - class BrotliDecoder(object): - # Supports both 'brotlipy' and 'Brotli' packages - # since they share an import name. 
The top branches - # are for 'brotlipy' and bottom branches for 'Brotli' - def __init__(self): - self._obj = brotli.Decompressor() - if hasattr(self._obj, "decompress"): - self.decompress = self._obj.decompress - else: - self.decompress = self._obj.process - - def flush(self): - if hasattr(self._obj, "flush"): - return self._obj.flush() - return b"" - - -class MultiDecoder(object): - """ - From RFC7231: - If one or more encodings have been applied to a representation, the - sender that applied the encodings MUST generate a Content-Encoding - header field that lists the content codings in the order in which - they were applied. - """ - - def __init__(self, modes): - self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] - - def flush(self): - return self._decoders[0].flush() - - def decompress(self, data): - for d in reversed(self._decoders): - data = d.decompress(data) - return data - - -def _get_decoder(mode): - if "," in mode: - return MultiDecoder(mode) - - if mode == "gzip": - return GzipDecoder() - - if brotli is not None and mode == "br": - return BrotliDecoder() - - return DeflateDecoder() - - -class HTTPResponse(io.IOBase): - """ - HTTP Response container. - - Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is - loaded and decoded on-demand when the ``data`` property is accessed. This - class is also compatible with the Python standard library's :mod:`io` - module, and can hence be treated as a readable object in the context of that - framework. - - Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`: - - :param preload_content: - If True, the response's body will be preloaded during construction. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param original_response: - When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse` - object, it's convenient to include the original for debug purposes. It's - otherwise unused. - - :param retries: - The retries contains the last :class:`~urllib3.util.retry.Retry` that - was used during the request. - - :param enforce_content_length: - Enforce content length checking. Body returned by server must match - value of Content-Length header, if present. Otherwise, raise error. 
- """ - - CONTENT_DECODERS = ["gzip", "deflate"] - if brotli is not None: - CONTENT_DECODERS += ["br"] - REDIRECT_STATUSES = [301, 302, 303, 307, 308] - - def __init__( - self, - body="", - headers=None, - status=0, - version=0, - reason=None, - strict=0, - preload_content=True, - decode_content=True, - original_response=None, - pool=None, - connection=None, - msg=None, - retries=None, - enforce_content_length=False, - request_method=None, - request_url=None, - auto_close=True, - ): - - if isinstance(headers, HTTPHeaderDict): - self.headers = headers - else: - self.headers = HTTPHeaderDict(headers) - self.status = status - self.version = version - self.reason = reason - self.strict = strict - self.decode_content = decode_content - self.retries = retries - self.enforce_content_length = enforce_content_length - self.auto_close = auto_close - - self._decoder = None - self._body = None - self._fp = None - self._original_response = original_response - self._fp_bytes_read = 0 - self.msg = msg - self._request_url = request_url - - if body and isinstance(body, (six.string_types, bytes)): - self._body = body - - self._pool = pool - self._connection = connection - - if hasattr(body, "read"): - self._fp = body - - # Are we using the chunked-style of transfer encoding? - self.chunked = False - self.chunk_left = None - tr_enc = self.headers.get("transfer-encoding", "").lower() - # Don't incur the penalty of creating a list and then discarding it - encodings = (enc.strip() for enc in tr_enc.split(",")) - if "chunked" in encodings: - self.chunked = True - - # Determine length of response - self.length_remaining = self._init_length(request_method) - - # If requested, preload the body. - if preload_content and not self._body: - self._body = self.read(decode_content=decode_content) - - def get_redirect_location(self): - """ - Should we redirect and where to? - - :returns: Truthy redirect location string if we got a redirect status - code and valid location. ``None`` if redirect status and no - location. ``False`` if not a redirect status code. - """ - if self.status in self.REDIRECT_STATUSES: - return self.headers.get("location") - - return False - - def release_conn(self): - if not self._pool or not self._connection: - return - - self._pool._put_conn(self._connection) - self._connection = None - - def drain_conn(self): - """ - Read and discard any remaining HTTP response data in the response connection. - - Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. - """ - try: - self.read() - except (HTTPError, SocketError, BaseSSLError, HTTPException): - pass - - @property - def data(self): - # For backwards-compat with earlier urllib3 0.4 and earlier. - if self._body: - return self._body - - if self._fp: - return self.read(cache_content=True) - - @property - def connection(self): - return self._connection - - def isclosed(self): - return is_fp_closed(self._fp) - - def tell(self): - """ - Obtain the number of bytes pulled over the wire so far. May differ from - the amount of content returned by :meth:``urllib3.response.HTTPResponse.read`` - if bytes are encoded on the wire (e.g, compressed). - """ - return self._fp_bytes_read - - def _init_length(self, request_method): - """ - Set initial length value for Response content if available. - """ - length = self.headers.get("content-length") - - if length is not None: - if self.chunked: - # This Response will fail with an IncompleteRead if it can't be - # received as chunked. 
This method falls back to attempt reading - # the response before raising an exception. - log.warning( - "Received response with both Content-Length and " - "Transfer-Encoding set. This is expressly forbidden " - "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " - "attempting to process response as Transfer-Encoding: " - "chunked." - ) - return None - - try: - # RFC 7230 section 3.3.2 specifies multiple content lengths can - # be sent in a single Content-Length header - # (e.g. Content-Length: 42, 42). This line ensures the values - # are all valid ints and that as long as the `set` length is 1, - # all values are the same. Otherwise, the header is invalid. - lengths = set([int(val) for val in length.split(",")]) - if len(lengths) > 1: - raise InvalidHeader( - "Content-Length contained multiple " - "unmatching values (%s)" % length - ) - length = lengths.pop() - except ValueError: - length = None - else: - if length < 0: - length = None - - # Convert status to int for comparison - # In some cases, httplib returns a status of "_UNKNOWN" - try: - status = int(self.status) - except ValueError: - status = 0 - - # Check for responses that shouldn't include a body - if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": - length = 0 - - return length - - def _init_decoder(self): - """ - Set-up the _decoder attribute if necessary. - """ - # Note: content-encoding value should be case-insensitive, per RFC 7230 - # Section 3.2 - content_encoding = self.headers.get("content-encoding", "").lower() - if self._decoder is None: - if content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) - elif "," in content_encoding: - encodings = [ - e.strip() - for e in content_encoding.split(",") - if e.strip() in self.CONTENT_DECODERS - ] - if len(encodings): - self._decoder = _get_decoder(content_encoding) - - DECODER_ERROR_CLASSES = (IOError, zlib.error) - if brotli is not None: - DECODER_ERROR_CLASSES += (brotli.error,) - - def _decode(self, data, decode_content, flush_decoder): - """ - Decode the data passed in and potentially flush the decoder. - """ - if not decode_content: - return data - - try: - if self._decoder: - data = self._decoder.decompress(data) - except self.DECODER_ERROR_CLASSES as e: - content_encoding = self.headers.get("content-encoding", "").lower() - raise DecodeError( - "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, - e, - ) - if flush_decoder: - data += self._flush_decoder() - - return data - - def _flush_decoder(self): - """ - Flushes the decoder. Should only be called if the decoder is actually - being used. - """ - if self._decoder: - buf = self._decoder.decompress(b"") - return buf + self._decoder.flush() - - return b"" - - @contextmanager - def _error_catcher(self): - """ - Catch low-level python exceptions, instead re-raising urllib3 - variants, so that low-level exceptions are not leaked in the - high-level api. - - On exit, release the connection back to the pool. - """ - clean_exit = False - - try: - try: - yield - - except SocketTimeout: - # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but - # there is yet no clean way to get at it from this context. - raise ReadTimeoutError(self._pool, None, "Read timed out.") - - except BaseSSLError as e: - # FIXME: Is there a better way to differentiate between SSLErrors? 
- if "read operation timed out" not in str(e): - # SSL errors related to framing/MAC get wrapped and reraised here - raise SSLError(e) - - raise ReadTimeoutError(self._pool, None, "Read timed out.") - - except (HTTPException, SocketError) as e: - # This includes IncompleteRead. - raise ProtocolError("Connection broken: %r" % e, e) - - # If no exception is thrown, we should avoid cleaning up - # unnecessarily. - clean_exit = True - finally: - # If we didn't terminate cleanly, we need to throw away our - # connection. - if not clean_exit: - # The response may not be closed but we're not going to use it - # anymore so close it now to ensure that the connection is - # released back to the pool. - if self._original_response: - self._original_response.close() - - # Closing the response may not actually be sufficient to close - # everything, so if we have a hold of the connection close that - # too. - if self._connection: - self._connection.close() - - # If we hold the original response but it's closed now, we should - # return the connection back to the pool. - if self._original_response and self._original_response.isclosed(): - self.release_conn() - - def _fp_read(self, amt): - """ - Read a response with the thought that reading the number of bytes - larger than can fit in a 32-bit int at a time via SSL in some - known cases leads to an overflow error that has to be prevented - if `amt` or `self.length_remaining` indicate that a problem may - happen. - - The known cases: - * 3.8 <= CPython < 3.9.7 because of a bug - https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. - * urllib3 injected with pyOpenSSL-backed SSL-support. - * CPython < 3.10 only when `amt` does not fit 32-bit int. - """ - assert self._fp - c_int_max = 2 ** 31 - 1 - if ( - ( - (amt and amt > c_int_max) - or (self.length_remaining and self.length_remaining > c_int_max) - ) - and not util.IS_SECURETRANSPORT - and (util.IS_PYOPENSSL or sys.version_info < (3, 10)) - ): - buffer = io.BytesIO() - # Besides `max_chunk_amt` being a maximum chunk size, it - # affects memory overhead of reading a response by this - # method in CPython. - # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum - # chunk size that does not lead to an overflow error, but - # 256 MiB is a compromise. - max_chunk_amt = 2 ** 28 - while amt is None or amt != 0: - if amt is not None: - chunk_amt = min(amt, max_chunk_amt) - amt -= chunk_amt - else: - chunk_amt = max_chunk_amt - data = self._fp.read(chunk_amt) - if not data: - break - buffer.write(data) - del data # to reduce peak memory usage by `max_chunk_amt`. - return buffer.getvalue() - else: - # StringIO doesn't like amt=None - return self._fp.read(amt) if amt is not None else self._fp.read() - - def read(self, amt=None, decode_content=None, cache_content=False): - """ - Similar to :meth:`http.client.HTTPResponse.read`, but with two additional - parameters: ``decode_content`` and ``cache_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param cache_content: - If True, will save the returned data such that the same result is - returned despite of the state of the underlying file object. This - is useful if you want the ``.data`` property to continue working - after having ``.read()`` the file object. (Overridden if ``amt`` is - set.) 
- """ - self._init_decoder() - if decode_content is None: - decode_content = self.decode_content - - if self._fp is None: - return - - flush_decoder = False - fp_closed = getattr(self._fp, "closed", False) - - with self._error_catcher(): - data = self._fp_read(amt) if not fp_closed else b"" - if amt is None: - flush_decoder = True - else: - cache_content = False - if ( - amt != 0 and not data - ): # Platform-specific: Buggy versions of Python. - # Close the connection when no data is returned - # - # This is redundant to what httplib/http.client _should_ - # already do. However, versions of python released before - # December 15, 2012 (http://bugs.python.org/issue16298) do - # not properly close the connection in all cases. There is - # no harm in redundantly calling close. - self._fp.close() - flush_decoder = True - if self.enforce_content_length and self.length_remaining not in ( - 0, - None, - ): - # This is an edge case that httplib failed to cover due - # to concerns of backward compatibility. We're - # addressing it here to make sure IncompleteRead is - # raised during streaming, so all calls with incorrect - # Content-Length are caught. - raise IncompleteRead(self._fp_bytes_read, self.length_remaining) - - if data: - self._fp_bytes_read += len(data) - if self.length_remaining is not None: - self.length_remaining -= len(data) - - data = self._decode(data, decode_content, flush_decoder) - - if cache_content: - self._body = data - - return data - - def stream(self, amt=2 ** 16, decode_content=None): - """ - A generator wrapper for the read() method. A call will block until - ``amt`` bytes have been read from the connection or until the - connection is closed. - - :param amt: - How much of the content to read. The generator will return up to - much data per iteration, but may return less. This is particularly - likely when using compressed data. However, the empty string will - never be returned. - - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - if self.chunked and self.supports_chunked_reads(): - for line in self.read_chunked(amt, decode_content=decode_content): - yield line - else: - while not is_fp_closed(self._fp): - data = self.read(amt=amt, decode_content=decode_content) - - if data: - yield data - - @classmethod - def from_httplib(ResponseCls, r, **response_kw): - """ - Given an :class:`http.client.HTTPResponse` instance ``r``, return a - corresponding :class:`urllib3.response.HTTPResponse` object. - - Remaining parameters are passed to the HTTPResponse constructor, along - with ``original_response=r``. - """ - headers = r.msg - - if not isinstance(headers, HTTPHeaderDict): - if six.PY2: - # Python 2.7 - headers = HTTPHeaderDict.from_httplib(headers) - else: - headers = HTTPHeaderDict(headers.items()) - - # HTTPResponse objects in Python 3 don't have a .strict attribute - strict = getattr(r, "strict", 0) - resp = ResponseCls( - body=r, - headers=headers, - status=r.status, - version=r.version, - reason=r.reason, - strict=strict, - original_response=r, - **response_kw - ) - return resp - - # Backwards-compatibility methods for http.client.HTTPResponse - def getheaders(self): - warnings.warn( - "HTTPResponse.getheaders() is deprecated and will be removed " - "in urllib3 v2.1.0. 
Instead access HTTPResponse.headers directly.", - category=DeprecationWarning, - stacklevel=2, - ) - return self.headers - - def getheader(self, name, default=None): - warnings.warn( - "HTTPResponse.getheader() is deprecated and will be removed " - "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", - category=DeprecationWarning, - stacklevel=2, - ) - return self.headers.get(name, default) - - # Backwards compatibility for http.cookiejar - def info(self): - return self.headers - - # Overrides from io.IOBase - def close(self): - if not self.closed: - self._fp.close() - - if self._connection: - self._connection.close() - - if not self.auto_close: - io.IOBase.close(self) - - @property - def closed(self): - if not self.auto_close: - return io.IOBase.closed.__get__(self) - elif self._fp is None: - return True - elif hasattr(self._fp, "isclosed"): - return self._fp.isclosed() - elif hasattr(self._fp, "closed"): - return self._fp.closed - else: - return True - - def fileno(self): - if self._fp is None: - raise IOError("HTTPResponse has no file to get a fileno from") - elif hasattr(self._fp, "fileno"): - return self._fp.fileno() - else: - raise IOError( - "The file-like object this HTTPResponse is wrapped " - "around has no file descriptor" - ) - - def flush(self): - if ( - self._fp is not None - and hasattr(self._fp, "flush") - and not getattr(self._fp, "closed", False) - ): - return self._fp.flush() - - def readable(self): - # This method is required for `io` module compatibility. - return True - - def readinto(self, b): - # This method is required for `io` module compatibility. - temp = self.read(len(b)) - if len(temp) == 0: - return 0 - else: - b[: len(temp)] = temp - return len(temp) - - def supports_chunked_reads(self): - """ - Checks if the underlying file-like object looks like a - :class:`http.client.HTTPResponse` object. We do this by testing for - the fp attribute. If it is present we assume it returns raw chunks as - processed by read_chunked(). - """ - return hasattr(self._fp, "fp") - - def _update_chunk_length(self): - # First, we'll figure out length of a chunk and then - # we'll try to read it from socket. - if self.chunk_left is not None: - return - line = self._fp.fp.readline() - line = line.split(b";", 1)[0] - try: - self.chunk_left = int(line, 16) - except ValueError: - # Invalid chunked protocol response, abort. - self.close() - raise InvalidChunkLength(self, line) - - def _handle_chunk(self, amt): - returned_chunk = None - if amt is None: - chunk = self._fp._safe_read(self.chunk_left) - returned_chunk = chunk - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - elif amt < self.chunk_left: - value = self._fp._safe_read(amt) - self.chunk_left = self.chunk_left - amt - returned_chunk = value - elif amt == self.chunk_left: - value = self._fp._safe_read(amt) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - returned_chunk = value - else: # amt > self.chunk_left - returned_chunk = self._fp._safe_read(self.chunk_left) - self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. - self.chunk_left = None - return returned_chunk - - def read_chunked(self, amt=None, decode_content=None): - """ - Similar to :meth:`HTTPResponse.read`, but with an additional - parameter: ``decode_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. 
- - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - """ - self._init_decoder() - # FIXME: Rewrite this method and make it a class with a better structured logic. - if not self.chunked: - raise ResponseNotChunked( - "Response is not chunked. " - "Header 'transfer-encoding: chunked' is missing." - ) - if not self.supports_chunked_reads(): - raise BodyNotHttplibCompatible( - "Body should be http.client.HTTPResponse like. " - "It should have have an fp attribute which returns raw chunks." - ) - - with self._error_catcher(): - # Don't bother reading the body of a HEAD request. - if self._original_response and is_response_to_head(self._original_response): - self._original_response.close() - return - - # If a response is already read and closed - # then return immediately. - if self._fp.fp is None: - return - - while True: - self._update_chunk_length() - if self.chunk_left == 0: - break - chunk = self._handle_chunk(amt) - decoded = self._decode( - chunk, decode_content=decode_content, flush_decoder=False - ) - if decoded: - yield decoded - - if decode_content: - # On CPython and PyPy, we should never need to flush the - # decoder. However, on Jython we *might* need to, so - # lets defensively do it anyway. - decoded = self._flush_decoder() - if decoded: # Platform-specific: Jython. - yield decoded - - # Chunk content ends with \r\n: discard it. - while True: - line = self._fp.fp.readline() - if not line: - # Some sites may not end with '\r\n'. - break - if line == b"\r\n": - break - - # We read everything; close the "file". - if self._original_response: - self._original_response.close() - - def geturl(self): - """ - Returns the URL that was the source of this response. - If the request that generated this response redirected, this method - will return the final redirect location. - """ - if self.retries is not None and len(self.retries.history): - return self.retries.history[-1].redirect_location - else: - return self._request_url - - def __iter__(self): - buffer = [] - for chunk in self.stream(decode_content=True): - if b"\n" in chunk: - chunk = chunk.split(b"\n") - yield b"".join(buffer) + chunk[0] + b"\n" - for x in chunk[1:-1]: - yield x + b"\n" - if chunk[-1]: - buffer = [chunk[-1]] - else: - buffer = [] - else: - buffer.append(chunk) - if buffer: - yield b"".join(buffer) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/_parser.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/_parser.py deleted file mode 100644 index 5a18b758fe0065416a92b9047dc9c392a3de2c4f..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/_parser.py +++ /dev/null @@ -1,353 +0,0 @@ -"""Handwritten parser of dependency specifiers. - -The docstring for each __parse_* function contains ENBF-inspired grammar representing -the implementation. 
-""" - -import ast -from typing import Any, List, NamedTuple, Optional, Tuple, Union - -from ._tokenizer import DEFAULT_RULES, Tokenizer - - -class Node: - def __init__(self, value: str) -> None: - self.value = value - - def __str__(self) -> str: - return self.value - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -MarkerVar = Union[Variable, Value] -MarkerItem = Tuple[MarkerVar, Op, MarkerVar] -# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] -# MarkerList = List[Union["MarkerList", MarkerAtom, str]] -# mypy does not support recursive type definition -# https://github.com/python/mypy/issues/731 -MarkerAtom = Any -MarkerList = List[Any] - - -class ParsedRequirement(NamedTuple): - name: str - url: str - extras: List[str] - specifier: str - marker: Optional[MarkerList] - - -# -------------------------------------------------------------------------------------- -# Recursive descent parser for dependency specifier -# -------------------------------------------------------------------------------------- -def parse_requirement(source: str) -> ParsedRequirement: - return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) - - -def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: - """ - requirement = WS? IDENTIFIER WS? extras WS? requirement_details - """ - tokenizer.consume("WS") - - name_token = tokenizer.expect( - "IDENTIFIER", expected="package name at the start of dependency specifier" - ) - name = name_token.text - tokenizer.consume("WS") - - extras = _parse_extras(tokenizer) - tokenizer.consume("WS") - - url, specifier, marker = _parse_requirement_details(tokenizer) - tokenizer.expect("END", expected="end of dependency specifier") - - return ParsedRequirement(name, url, extras, specifier, marker) - - -def _parse_requirement_details( - tokenizer: Tokenizer, -) -> Tuple[str, str, Optional[MarkerList]]: - """ - requirement_details = AT URL (WS requirement_marker?)? - | specifier WS? (requirement_marker)? - """ - - specifier = "" - url = "" - marker = None - - if tokenizer.check("AT"): - tokenizer.read() - tokenizer.consume("WS") - - url_start = tokenizer.position - url = tokenizer.expect("URL", expected="URL after @").text - if tokenizer.check("END", peek=True): - return (url, specifier, marker) - - tokenizer.expect("WS", expected="whitespace after URL") - - # The input might end after whitespace. - if tokenizer.check("END", peek=True): - return (url, specifier, marker) - - marker = _parse_requirement_marker( - tokenizer, span_start=url_start, after="URL and whitespace" - ) - else: - specifier_start = tokenizer.position - specifier = _parse_specifier(tokenizer) - tokenizer.consume("WS") - - if tokenizer.check("END", peek=True): - return (url, specifier, marker) - - marker = _parse_requirement_marker( - tokenizer, - span_start=specifier_start, - after=( - "version specifier" - if specifier - else "name and no valid version specifier" - ), - ) - - return (url, specifier, marker) - - -def _parse_requirement_marker( - tokenizer: Tokenizer, *, span_start: int, after: str -) -> MarkerList: - """ - requirement_marker = SEMICOLON marker WS? 
- """ - - if not tokenizer.check("SEMICOLON"): - tokenizer.raise_syntax_error( - f"Expected end or semicolon (after {after})", - span_start=span_start, - ) - tokenizer.read() - - marker = _parse_marker(tokenizer) - tokenizer.consume("WS") - - return marker - - -def _parse_extras(tokenizer: Tokenizer) -> List[str]: - """ - extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? - """ - if not tokenizer.check("LEFT_BRACKET", peek=True): - return [] - - with tokenizer.enclosing_tokens( - "LEFT_BRACKET", - "RIGHT_BRACKET", - around="extras", - ): - tokenizer.consume("WS") - extras = _parse_extras_list(tokenizer) - tokenizer.consume("WS") - - return extras - - -def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: - """ - extras_list = identifier (wsp* ',' wsp* identifier)* - """ - extras: List[str] = [] - - if not tokenizer.check("IDENTIFIER"): - return extras - - extras.append(tokenizer.read().text) - - while True: - tokenizer.consume("WS") - if tokenizer.check("IDENTIFIER", peek=True): - tokenizer.raise_syntax_error("Expected comma between extra names") - elif not tokenizer.check("COMMA"): - break - - tokenizer.read() - tokenizer.consume("WS") - - extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") - extras.append(extra_token.text) - - return extras - - -def _parse_specifier(tokenizer: Tokenizer) -> str: - """ - specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS - | WS? version_many WS? - """ - with tokenizer.enclosing_tokens( - "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", - around="version specifier", - ): - tokenizer.consume("WS") - parsed_specifiers = _parse_version_many(tokenizer) - tokenizer.consume("WS") - - return parsed_specifiers - - -def _parse_version_many(tokenizer: Tokenizer) -> str: - """ - version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? - """ - parsed_specifiers = "" - while tokenizer.check("SPECIFIER"): - span_start = tokenizer.position - parsed_specifiers += tokenizer.read().text - if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): - tokenizer.raise_syntax_error( - ".* suffix can only be used with `==` or `!=` operators", - span_start=span_start, - span_end=tokenizer.position + 1, - ) - if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): - tokenizer.raise_syntax_error( - "Local version label can only be used with `==` or `!=` operators", - span_start=span_start, - span_end=tokenizer.position, - ) - tokenizer.consume("WS") - if not tokenizer.check("COMMA"): - break - parsed_specifiers += tokenizer.read().text - tokenizer.consume("WS") - - return parsed_specifiers - - -# -------------------------------------------------------------------------------------- -# Recursive descent parser for marker expression -# -------------------------------------------------------------------------------------- -def parse_marker(source: str) -> MarkerList: - return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) - - -def _parse_marker(tokenizer: Tokenizer) -> MarkerList: - """ - marker = marker_atom (BOOLOP marker_atom)+ - """ - expression = [_parse_marker_atom(tokenizer)] - while tokenizer.check("BOOLOP"): - token = tokenizer.read() - expr_right = _parse_marker_atom(tokenizer) - expression.extend((token.text, expr_right)) - return expression - - -def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: - """ - marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? - | WS? marker_item WS? 
- """ - - tokenizer.consume("WS") - if tokenizer.check("LEFT_PARENTHESIS", peek=True): - with tokenizer.enclosing_tokens( - "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", - around="marker expression", - ): - tokenizer.consume("WS") - marker: MarkerAtom = _parse_marker(tokenizer) - tokenizer.consume("WS") - else: - marker = _parse_marker_item(tokenizer) - tokenizer.consume("WS") - return marker - - -def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: - """ - marker_item = WS? marker_var WS? marker_op WS? marker_var WS? - """ - tokenizer.consume("WS") - marker_var_left = _parse_marker_var(tokenizer) - tokenizer.consume("WS") - marker_op = _parse_marker_op(tokenizer) - tokenizer.consume("WS") - marker_var_right = _parse_marker_var(tokenizer) - tokenizer.consume("WS") - return (marker_var_left, marker_op, marker_var_right) - - -def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: - """ - marker_var = VARIABLE | QUOTED_STRING - """ - if tokenizer.check("VARIABLE"): - return process_env_var(tokenizer.read().text.replace(".", "_")) - elif tokenizer.check("QUOTED_STRING"): - return process_python_str(tokenizer.read().text) - else: - tokenizer.raise_syntax_error( - message="Expected a marker variable or quoted string" - ) - - -def process_env_var(env_var: str) -> Variable: - if ( - env_var == "platform_python_implementation" - or env_var == "python_implementation" - ): - return Variable("platform_python_implementation") - else: - return Variable(env_var) - - -def process_python_str(python_str: str) -> Value: - value = ast.literal_eval(python_str) - return Value(str(value)) - - -def _parse_marker_op(tokenizer: Tokenizer) -> Op: - """ - marker_op = IN | NOT IN | OP - """ - if tokenizer.check("IN"): - tokenizer.read() - return Op("in") - elif tokenizer.check("NOT"): - tokenizer.read() - tokenizer.expect("WS", expected="whitespace after 'not'") - tokenizer.expect("IN", expected="'in' after 'not'") - return Op("not in") - elif tokenizer.check("OP"): - return Op(tokenizer.read().text) - else: - return tokenizer.raise_syntax_error( - "Expected marker operator, one of " - "<=, <, !=, ==, >=, >, ~=, ===, in, not in" - ) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_egg_info.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_egg_info.py deleted file mode 100644 index 1c549c98ea430c8d08dc5968f19ea23e6fe6c9b0..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_egg_info.py +++ /dev/null @@ -1,60 +0,0 @@ -from distutils import log, dir_util -import os - -from setuptools import Command -from setuptools import namespaces -from setuptools.archive_util import unpack_archive -from .._path import ensure_directory - - -class install_egg_info(namespaces.Installer, Command): - """Install an .egg-info directory for the package""" - - description = "Install an .egg-info directory for the package" - - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - - def finalize_options(self): - self.set_undefined_options('install_lib', - ('install_dir', 'install_dir')) - ei_cmd = self.get_finalized_command("egg_info") - basename = f"{ei_cmd._get_egg_basename()}.egg-info" - self.source = ei_cmd.egg_info - self.target = os.path.join(self.install_dir, basename) - self.outputs = [] - - def run(self): - self.run_command('egg_info') - if os.path.isdir(self.target) 
and not os.path.islink(self.target): - dir_util.remove_tree(self.target, dry_run=self.dry_run) - elif os.path.exists(self.target): - self.execute(os.unlink, (self.target,), "Removing " + self.target) - if not self.dry_run: - ensure_directory(self.target) - self.execute( - self.copytree, (), "Copying %s to %s" % (self.source, self.target) - ) - self.install_namespaces() - - def get_outputs(self): - return self.outputs - - def copytree(self): - # Copy the .egg-info tree to site-packages - def skimmer(src, dst): - # filter out source-control directories; note that 'src' is always - # a '/'-separated path, regardless of platform. 'dst' is a - # platform-specific path. - for skip in '.svn/', 'CVS/': - if src.startswith(skip) or '/' + skip in src: - return None - self.outputs.append(dst) - log.debug("Copying %s to %s", src, dst) - return dst - - unpack_archive(self.source, self.target, skimmer) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/ffmpy.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/ffmpy.py deleted file mode 100644 index 03291fca55a355b3041b8538217f334e9c4332eb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/ffmpy.py +++ /dev/null @@ -1,203 +0,0 @@ -import errno -import shlex -import subprocess - -__version__ = "0.3.1" - - -class FFmpeg(object): - """Wrapper for various `FFmpeg `_ related applications (ffmpeg, - ffprobe). - """ - - def __init__( - self, executable="ffmpeg", global_options=None, inputs=None, outputs=None - ): - """Initialize FFmpeg command line wrapper. - - Compiles FFmpeg command line from passed arguments (executable path, options, inputs and - outputs). ``inputs`` and ``outputs`` are dictionares containing inputs/outputs as keys and - their respective options as values. One dictionary value (set of options) must be either a - single space separated string, or a list or strings without spaces (i.e. each part of the - option is a separate item of the list, the result of calling ``split()`` on the options - string). If the value is a list, it cannot be mixed, i.e. cannot contain items with spaces. - An exception are complex FFmpeg command lines that contain quotes: the quoted part must be - one string, even if it contains spaces (see *Examples* for more info). - For more info about FFmpeg command line format see `here - `_. - - :param str executable: path to ffmpeg executable; by default the ``ffmpeg`` command will be - searched for in the ``PATH``, but can be overridden with an absolute path to ``ffmpeg`` - executable - :param iterable global_options: global options passed to ``ffmpeg`` executable (e.g. 
- ``-y``, ``-v`` etc.); can be specified either as a list/tuple/set of strings, or one - space-separated string; by default no global options are passed - :param dict inputs: a dictionary specifying one or more input arguments as keys with their - corresponding options (either as a list of strings or a single space separated string) as - values - :param dict outputs: a dictionary specifying one or more output arguments as keys with their - corresponding options (either as a list of strings or a single space separated string) as - values - """ - self.executable = executable - self._cmd = [executable] - - global_options = global_options or [] - if _is_sequence(global_options): - normalized_global_options = [] - for opt in global_options: - normalized_global_options += shlex.split(opt) - else: - normalized_global_options = shlex.split(global_options) - - self._cmd += normalized_global_options - self._cmd += _merge_args_opts(inputs, add_input_option=True) - self._cmd += _merge_args_opts(outputs) - - self.cmd = subprocess.list2cmdline(self._cmd) - self.process = None - - def __repr__(self): - return "<{0!r} {1!r}>".format(self.__class__.__name__, self.cmd) - - def run(self, input_data=None, stdout=None, stderr=None, env=None, **kwargs): - """Execute FFmpeg command line. - - ``input_data`` can contain input for FFmpeg in case ``pipe`` protocol is used for input. - ``stdout`` and ``stderr`` specify where to redirect the ``stdout`` and ``stderr`` of the - process. By default no redirection is done, which means all output goes to running shell - (this mode should normally only be used for debugging purposes). If FFmpeg ``pipe`` protocol - is used for output, ``stdout`` must be redirected to a pipe by passing `subprocess.PIPE` as - ``stdout`` argument. You can pass custom environment to ffmpeg process with ``env``. - - Returns a 2-tuple containing ``stdout`` and ``stderr`` of the process. If there was no - redirection or if the output was redirected to e.g. `os.devnull`, the value returned will - be a tuple of two `None` values, otherwise it will contain the actual ``stdout`` and - ``stderr`` data returned by ffmpeg process. - - More info about ``pipe`` protocol `here `_. - - :param str input_data: input data for FFmpeg to deal with (audio, video etc.) as bytes (e.g. 
- the result of reading a file in binary mode) - :param stdout: redirect FFmpeg ``stdout`` there (default is `None` which means no - redirection) - :param stderr: redirect FFmpeg ``stderr`` there (default is `None` which means no - redirection) - :param env: custom environment for ffmpeg process - :param kwargs: any other keyword arguments to be forwarded to `subprocess.Popen - `_ - :return: a 2-tuple containing ``stdout`` and ``stderr`` of the process - :rtype: tuple - :raise: `FFRuntimeError` in case FFmpeg command exits with a non-zero code; - `FFExecutableNotFoundError` in case the executable path passed was not valid - """ - try: - self.process = subprocess.Popen( - self._cmd, - stdin=subprocess.PIPE, - stdout=stdout, - stderr=stderr, - env=env, - **kwargs - ) - except OSError as e: - if e.errno == errno.ENOENT: - raise FFExecutableNotFoundError( - "Executable '{0}' not found".format(self.executable) - ) - else: - raise - - out = self.process.communicate(input=input_data) - if self.process.returncode != 0: - raise FFRuntimeError(self.cmd, self.process.returncode, out[0], out[1]) - - return out - - -class FFprobe(FFmpeg): - """Wrapper for `ffprobe `_.""" - - def __init__(self, executable="ffprobe", global_options="", inputs=None): - """Create an instance of FFprobe. - - Compiles FFprobe command line from passed arguments (executable path, options, inputs). - FFprobe executable by default is taken from ``PATH`` but can be overridden with an - absolute path. For more info about FFprobe command line format see - `here `_. - - :param str executable: absolute path to ffprobe executable - :param iterable global_options: global options passed to ffmpeg executable; can be specified - either as a list/tuple of strings or a space-separated string - :param dict inputs: a dictionary specifying one or more inputs as keys with their - corresponding options as values - """ - super(FFprobe, self).__init__( - executable=executable, global_options=global_options, inputs=inputs - ) - - -class FFExecutableNotFoundError(Exception): - """Raise when FFmpeg/FFprobe executable was not found.""" - - -class FFRuntimeError(Exception): - """Raise when FFmpeg/FFprobe command line execution returns a non-zero exit code. - - The resulting exception object will contain the attributes relates to command line execution: - ``cmd``, ``exit_code``, ``stdout``, ``stderr``. - """ - - def __init__(self, cmd, exit_code, stdout, stderr): - self.cmd = cmd - self.exit_code = exit_code - self.stdout = stdout - self.stderr = stderr - - message = "`{0}` exited with status {1}\n\nSTDOUT:\n{2}\n\nSTDERR:\n{3}".format( - self.cmd, exit_code, (stdout or b"").decode(), (stderr or b"").decode() - ) - - super(FFRuntimeError, self).__init__(message) - - -def _is_sequence(obj): - """Check if the object is a sequence (list, tuple etc.). - - :param object obj: an object to be checked - :return: True if the object is iterable but is not a string, False otherwise - :rtype: bool - """ - return hasattr(obj, "__iter__") and not isinstance(obj, str) - - -def _merge_args_opts(args_opts_dict, **kwargs): - """Merge options with their corresponding arguments. - - Iterates over the dictionary holding arguments (keys) and options (values). Merges each - options string with its corresponding argument. 
- - :param dict args_opts_dict: a dictionary of arguments and options - :param dict kwargs: *input_option* - if specified prepends ``-i`` to input argument - :return: merged list of strings with arguments and their corresponding options - :rtype: list - """ - merged = [] - - if not args_opts_dict: - return merged - - for arg, opt in args_opts_dict.items(): - if not _is_sequence(opt): - opt = shlex.split(opt or "") - merged += opt - - if not arg: - continue - - if "add_input_option" in kwargs: - merged.append("-i") - - merged.append(arg) - - return merged diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/idna/compat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/idna/compat.py deleted file mode 100644 index 786e6bda63699b72d588ba91dd73df017570aee5..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/idna/compat.py +++ /dev/null @@ -1,13 +0,0 @@ -from .core import * -from .codec import * -from typing import Any, Union - -def ToASCII(label: str) -> bytes: - return encode(label) - -def ToUnicode(label: Union[bytes, bytearray]) -> str: - return decode(label) - -def nameprep(s: Any) -> None: - raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol') - diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/parser_block.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/parser_block.py deleted file mode 100644 index 72360f9b31bebca77250168bccae646e9a67dc6e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/parser_block.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Block-level tokenizer.""" -from __future__ import annotations - -import logging -from typing import TYPE_CHECKING, Callable - -from . import rules_block -from .ruler import Ruler -from .rules_block.state_block import StateBlock -from .token import Token -from .utils import EnvType - -if TYPE_CHECKING: - from markdown_it import MarkdownIt - -LOGGER = logging.getLogger(__name__) - - -RuleFuncBlockType = Callable[[StateBlock, int, int, bool], bool] -"""(state: StateBlock, startLine: int, endLine: int, silent: bool) -> matched: bool) - -`silent` disables token generation, useful for lookahead. -""" - -_rules: list[tuple[str, RuleFuncBlockType, list[str]]] = [ - # First 2 params - rule name & source. Secondary array - list of rules, - # which can be terminated by this one. - ("table", rules_block.table, ["paragraph", "reference"]), - ("code", rules_block.code, []), - ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]), - ( - "blockquote", - rules_block.blockquote, - ["paragraph", "reference", "blockquote", "list"], - ), - ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]), - ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]), - ("reference", rules_block.reference, []), - ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]), - ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]), - ("lheading", rules_block.lheading, []), - ("paragraph", rules_block.paragraph, []), -] - - -class ParserBlock: - """ - ParserBlock#ruler -> Ruler - - [[Ruler]] instance. Keep configuration of block rules. 
- """ - - def __init__(self) -> None: - self.ruler = Ruler[RuleFuncBlockType]() - for name, rule, alt in _rules: - self.ruler.push(name, rule, {"alt": alt}) - - def tokenize(self, state: StateBlock, startLine: int, endLine: int) -> None: - """Generate tokens for input range.""" - rules = self.ruler.getRules("") - line = startLine - maxNesting = state.md.options.maxNesting - hasEmptyLines = False - - while line < endLine: - state.line = line = state.skipEmptyLines(line) - if line >= endLine: - break - if state.sCount[line] < state.blkIndent: - # Termination condition for nested calls. - # Nested calls currently used for blockquotes & lists - break - if state.level >= maxNesting: - # If nesting level exceeded - skip tail to the end. - # That's not ordinary situation and we should not care about content. - state.line = endLine - break - - # Try all possible rules. - # On success, rule should: - # - update `state.line` - # - update `state.tokens` - # - return True - for rule in rules: - if rule(state, line, endLine, False): - break - - # set state.tight if we had an empty line before current tag - # i.e. latest empty line should not count - state.tight = not hasEmptyLines - - line = state.line - - # paragraph might "eat" one newline after it in nested lists - if (line - 1) < endLine and state.isEmpty(line - 1): - hasEmptyLines = True - - if line < endLine and state.isEmpty(line): - hasEmptyLines = True - line += 1 - state.line = line - - def parse( - self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token] - ) -> list[Token] | None: - """Process input string and push block tokens into `outTokens`.""" - if not src: - return None - state = StateBlock(src, md, env, outTokens) - self.tokenize(state, state.line, state.lineMax) - return state.tokens diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/umath_tests.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/umath_tests.py deleted file mode 100644 index 90ab17e6744a751c4d60e9b86e150cdbc3f6ff2e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/umath_tests.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Shim for _umath_tests to allow a deprecation period for the new name. - -""" -import warnings - -# 2018-04-04, numpy 1.15.0 -warnings.warn(("numpy.core.umath_tests is an internal NumPy " - "module and should not be imported. It will " - "be removed in a future NumPy release."), - category=DeprecationWarning, stacklevel=2) - -from ._umath_tests import * diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py deleted file mode 100644 index 77123c3cda941636354a7b282777f3f0e55d3ab0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py +++ /dev/null @@ -1,493 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for mrecords. 
- -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu - -""" -import numpy as np -import numpy.ma as ma -from numpy import recarray -from numpy.ma import masked, nomask -from numpy.testing import temppath -from numpy.core.records import ( - fromrecords as recfromrecords, fromarrays as recfromarrays - ) -from numpy.ma.mrecords import ( - MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, - addfield - ) -from numpy.ma.testutils import ( - assert_, assert_equal, - assert_equal_records, - ) -from numpy.compat import pickle - - -class TestMRecords: - - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 5.5] - slist = [b'one', b'two', b'three', b'four', b'five'] - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mask = [0, 1, 0, 0, 1] - base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - - def test_byview(self): - # Test creation by view - base = self.base - mbase = base.view(mrecarray) - assert_equal(mbase.recordmask, base.recordmask) - assert_equal_records(mbase._mask, base._mask) - assert_(isinstance(mbase._data, recarray)) - assert_equal_records(mbase._data, base._data.view(recarray)) - for field in ('a', 'b', 'c'): - assert_equal(base[field], mbase[field]) - assert_equal_records(mbase.view(mrecarray), mbase) - - def test_get(self): - # Tests fields retrieval - base = self.base.copy() - mbase = base.view(mrecarray) - # As fields.......... - for field in ('a', 'b', 'c'): - assert_equal(getattr(mbase, field), mbase[field]) - assert_equal(base[field], mbase[field]) - # as elements ....... - mbase_first = mbase[0] - assert_(isinstance(mbase_first, mrecarray)) - assert_equal(mbase_first.dtype, mbase.dtype) - assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) - # Used to be mask, now it's recordmask - assert_equal(mbase_first.recordmask, nomask) - assert_equal(mbase_first._mask.item(), (False, False, False)) - assert_equal(mbase_first['a'], mbase['a'][0]) - mbase_last = mbase[-1] - assert_(isinstance(mbase_last, mrecarray)) - assert_equal(mbase_last.dtype, mbase.dtype) - assert_equal(mbase_last.tolist(), (None, None, None)) - # Used to be mask, now it's recordmask - assert_equal(mbase_last.recordmask, True) - assert_equal(mbase_last._mask.item(), (True, True, True)) - assert_equal(mbase_last['a'], mbase['a'][-1]) - assert_((mbase_last['a'] is masked)) - # as slice .......... - mbase_sl = mbase[:2] - assert_(isinstance(mbase_sl, mrecarray)) - assert_equal(mbase_sl.dtype, mbase.dtype) - # Used to be mask, now it's recordmask - assert_equal(mbase_sl.recordmask, [0, 1]) - assert_equal_records(mbase_sl.mask, - np.array([(False, False, False), - (True, True, True)], - dtype=mbase._mask.dtype)) - assert_equal_records(mbase_sl, base[:2].view(mrecarray)) - for field in ('a', 'b', 'c'): - assert_equal(getattr(mbase_sl, field), base[:2][field]) - - def test_set_fields(self): - # Tests setting fields. 
- base = self.base.copy() - mbase = base.view(mrecarray) - mbase = mbase.copy() - mbase.fill_value = (999999, 1e20, 'N/A') - # Change the data, the mask should be conserved - mbase.a._data[:] = 5 - assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) - assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) - # Change the elements, and the mask will follow - mbase.a = 1 - assert_equal(mbase['a']._data, [1]*5) - assert_equal(ma.getmaskarray(mbase['a']), [0]*5) - # Use to be _mask, now it's recordmask - assert_equal(mbase.recordmask, [False]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0), - (0, 1, 1), - (0, 0, 0), - (0, 0, 0), - (0, 1, 1)], - dtype=bool)) - # Set a field to mask ........................ - mbase.c = masked - # Use to be mask, and now it's still mask ! - assert_equal(mbase.c.mask, [1]*5) - assert_equal(mbase.c.recordmask, [1]*5) - assert_equal(ma.getmaskarray(mbase['c']), [1]*5) - assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 1), - (0, 1, 1), - (0, 0, 1), - (0, 0, 1), - (0, 1, 1)], - dtype=bool)) - # Set fields by slices ....................... - mbase = base.view(mrecarray).copy() - mbase.a[3:] = 5 - assert_equal(mbase.a, [1, 2, 3, 5, 5]) - assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) - mbase.b[3:] = masked - assert_equal(mbase.b, base['b']) - assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) - # Set fields globally.......................... - ndtype = [('alpha', '|S1'), ('num', int)] - data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) - rdata = data.view(MaskedRecords) - val = ma.array([10, 20, 30], mask=[1, 0, 0]) - - rdata['num'] = val - assert_equal(rdata.num, val) - assert_equal(rdata.num.mask, [1, 0, 0]) - - def test_set_fields_mask(self): - # Tests setting the mask of a field. - base = self.base.copy() - # This one has already a mask.... - mbase = base.view(mrecarray) - mbase['a'][-2] = masked - assert_equal(mbase.a, [1, 2, 3, 4, 5]) - assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) - # This one has not yet - mbase = fromarrays([np.arange(5), np.random.rand(5)], - dtype=[('a', int), ('b', float)]) - mbase['a'][-2] = masked - assert_equal(mbase.a, [0, 1, 2, 3, 4]) - assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) - - def test_set_mask(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Set the mask to True ....................... - mbase.mask = masked - assert_equal(ma.getmaskarray(mbase['b']), [1]*5) - assert_equal(mbase['a']._mask, mbase['b']._mask) - assert_equal(mbase['a']._mask, mbase['c']._mask) - assert_equal(mbase._mask.tolist(), - np.array([(1, 1, 1)]*5, dtype=bool)) - # Delete the mask ............................ - mbase.mask = nomask - assert_equal(ma.getmaskarray(mbase['c']), [0]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0)]*5, dtype=bool)) - - def test_set_mask_fromarray(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Sets the mask w/ an array - mbase.mask = [1, 0, 0, 0, 1] - assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) - assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) - assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) - # Yay, once more ! 
- mbase.mask = [0, 0, 0, 0, 1] - assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) - - def test_set_mask_fromfields(self): - mbase = self.base.copy().view(mrecarray) - - nmask = np.array( - [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], - dtype=[('a', bool), ('b', bool), ('c', bool)]) - mbase.mask = nmask - assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) - assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) - assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) - # Reinitialize and redo - mbase.mask = False - mbase.fieldmask = nmask - assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) - assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) - assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) - - def test_set_elements(self): - base = self.base.copy() - # Set an element to mask ..................... - mbase = base.view(mrecarray).copy() - mbase[-2] = masked - assert_equal( - mbase._mask.tolist(), - np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], - dtype=bool)) - # Used to be mask, now it's recordmask! - assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) - # Set slices ................................. - mbase = base.view(mrecarray).copy() - mbase[:2] = (5, 5, 5) - assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) - assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) - assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.c._data, - [b'5', b'5', b'three', b'four', b'five']) - assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) - - mbase = base.view(mrecarray).copy() - mbase[:2] = masked - assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) - assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) - assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) - assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) - assert_equal(mbase.c._data, - [b'one', b'two', b'three', b'four', b'five']) - assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) - - def test_setslices_hardmask(self): - # Tests setting slices w/ hardmask. - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - try: - mbase[-2:] = (5, 5, 5) - assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) - assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) - assert_equal(mbase.c._data, - [b'one', b'two', b'three', b'5', b'five']) - assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) - assert_equal(mbase.b._mask, mbase.a._mask) - assert_equal(mbase.b._mask, mbase.c._mask) - except NotImplementedError: - # OK, not implemented yet... - pass - except AssertionError: - raise - else: - raise Exception("Flexible hard masks should be supported !") - # Not using a tuple should crash - try: - mbase[-2:] = 3 - except (NotImplementedError, TypeError): - pass - else: - raise TypeError("Should have expected a readable buffer object!") - - def test_hardmask(self): - # Test hardmask - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - assert_(mbase._hardmask) - mbase.mask = nomask - assert_equal_records(mbase._mask, base._mask) - mbase.soften_mask() - assert_(not mbase._hardmask) - mbase.mask = nomask - # So, the mask of a field is no longer set to nomask... 
- assert_equal_records(mbase._mask, - ma.make_mask_none(base.shape, base.dtype)) - assert_(ma.make_mask(mbase['b']._mask) is nomask) - assert_equal(mbase['a']._mask, mbase['b']._mask) - - def test_pickling(self): - # Test pickling - base = self.base.copy() - mrec = base.view(mrecarray) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - _ = pickle.dumps(mrec, protocol=proto) - mrec_ = pickle.loads(_) - assert_equal(mrec_.dtype, mrec.dtype) - assert_equal_records(mrec_._data, mrec._data) - assert_equal(mrec_._mask, mrec._mask) - assert_equal_records(mrec_._mask, mrec._mask) - - def test_filled(self): - # Test filling the array - _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) - _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) - _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mrec = fromarrays([_a, _b, _c], dtype=ddtype, - fill_value=(99999, 99999., 'N/A')) - mrecfilled = mrec.filled() - assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) - assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), - dtype=float)) - assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), - dtype='|S8')) - - def test_tolist(self): - # Test tolist. - _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) - _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) - _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mrec = fromarrays([_a, _b, _c], dtype=ddtype, - fill_value=(99999, 99999., 'N/A')) - - assert_equal(mrec.tolist(), - [(1, 1.1, None), (2, 2.2, b'two'), - (None, None, b'three')]) - - def test_withnames(self): - # Test the creation w/ format and names - x = mrecarray(1, formats=float, names='base') - x[0]['base'] = 10 - assert_equal(x['base'][0], 10) - - def test_exotic_formats(self): - # Test that 'exotic' formats are processed properly - easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) - easy[0] = masked - assert_equal(easy.filled(1).item(), (1, b'1', 1.)) - - solo = mrecarray(1, dtype=[('f0', ' timedelta64 - exp = pd.TimedeltaIndex(["1 day", "10 day", "2 day", "3 day", "4 day"]) - self._assert_insert_conversion( - obj, pd.Timedelta("10 day"), exp, "timedelta64[ns]" - ) - - for item in [pd.Timestamp("2012-01-01"), 1]: - result = obj.insert(1, item) - expected = obj.astype(object).insert(1, item) - assert expected.dtype == object - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "insert, coerced_val, coerced_dtype", - [ - (pd.Period("2012-01", freq="M"), "2012-01", "period[M]"), - (pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01"), object), - (1, 1, object), - ("x", "x", object), - ], - ) - def test_insert_index_period(self, insert, coerced_val, coerced_dtype): - obj = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M") - assert obj.dtype == "period[M]" - - data = [ - pd.Period("2011-01", freq="M"), - coerced_val, - pd.Period("2011-02", freq="M"), - pd.Period("2011-03", freq="M"), - pd.Period("2011-04", freq="M"), - ] - if isinstance(insert, pd.Period): - exp = pd.PeriodIndex(data, freq="M") - self._assert_insert_conversion(obj, insert, exp, coerced_dtype) - - # string that can be parsed to appropriate PeriodDtype - self._assert_insert_conversion(obj, str(insert), exp, coerced_dtype) - - else: - result = obj.insert(0, insert) - expected = obj.astype(object).insert(0, insert) - tm.assert_index_equal(result, expected) - - # TODO: ATM inserting '2012-01-01 
00:00:00' when we have obj.freq=="M" - # casts that string to Period[M], not clear that is desirable - if not isinstance(insert, pd.Timestamp): - # non-castable string - result = obj.insert(0, str(insert)) - expected = obj.astype(object).insert(0, str(insert)) - tm.assert_index_equal(result, expected) - - @pytest.mark.xfail(reason="Test not implemented") - def test_insert_index_complex128(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_insert_index_bool(self): - raise NotImplementedError - - -class TestWhereCoercion(CoercionBase): - method = "where" - _cond = np.array([True, False, True, False]) - - def _assert_where_conversion( - self, original, cond, values, expected, expected_dtype - ): - """test coercion triggered by where""" - target = original.copy() - res = target.where(cond, values) - tm.assert_equal(res, expected) - assert res.dtype == expected_dtype - - def _construct_exp(self, obj, klass, fill_val, exp_dtype): - if fill_val is True: - values = klass([True, False, True, True]) - elif isinstance(fill_val, (datetime, np.datetime64)): - values = pd.date_range(fill_val, periods=4) - else: - values = klass(x * fill_val for x in [5, 6, 7, 8]) - - exp = klass([obj[0], values[1], obj[2], values[3]], dtype=exp_dtype) - return values, exp - - def _run_test(self, obj, fill_val, klass, exp_dtype): - cond = klass(self._cond) - - exp = klass([obj[0], fill_val, obj[2], fill_val], dtype=exp_dtype) - self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) - - values, exp = self._construct_exp(obj, klass, fill_val, exp_dtype) - self._assert_where_conversion(obj, cond, values, exp, exp_dtype) - - @pytest.mark.parametrize( - "fill_val,exp_dtype", - [(1, object), (1.1, object), (1 + 1j, object), (True, object)], - ) - def test_where_object(self, index_or_series, fill_val, exp_dtype): - klass = index_or_series - obj = klass(list("abcd")) - assert obj.dtype == object - self._run_test(obj, fill_val, klass, exp_dtype) - - @pytest.mark.parametrize( - "fill_val,exp_dtype", - [(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], - ) - def test_where_int64(self, index_or_series, fill_val, exp_dtype, request): - klass = index_or_series - - obj = klass([1, 2, 3, 4]) - assert obj.dtype == np.int64 - self._run_test(obj, fill_val, klass, exp_dtype) - - @pytest.mark.parametrize( - "fill_val, exp_dtype", - [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], - ) - def test_where_float64(self, index_or_series, fill_val, exp_dtype, request): - klass = index_or_series - - obj = klass([1.1, 2.2, 3.3, 4.4]) - assert obj.dtype == np.float64 - self._run_test(obj, fill_val, klass, exp_dtype) - - @pytest.mark.parametrize( - "fill_val,exp_dtype", - [ - (1, np.complex128), - (1.1, np.complex128), - (1 + 1j, np.complex128), - (True, object), - ], - ) - def test_where_complex128(self, index_or_series, fill_val, exp_dtype): - klass = index_or_series - obj = klass([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j], dtype=np.complex128) - assert obj.dtype == np.complex128 - self._run_test(obj, fill_val, klass, exp_dtype) - - @pytest.mark.parametrize( - "fill_val,exp_dtype", - [(1, object), (1.1, object), (1 + 1j, object), (True, np.bool_)], - ) - def test_where_series_bool(self, fill_val, exp_dtype): - klass = pd.Series # TODO: use index_or_series once we have Index[bool] - - obj = klass([True, False, True, False]) - assert obj.dtype == np.bool_ - self._run_test(obj, fill_val, klass, exp_dtype) - - @pytest.mark.parametrize( - 
"fill_val,exp_dtype", - [ - (pd.Timestamp("2012-01-01"), "datetime64[ns]"), - (pd.Timestamp("2012-01-01", tz="US/Eastern"), object), - ], - ids=["datetime64", "datetime64tz"], - ) - def test_where_datetime64(self, index_or_series, fill_val, exp_dtype): - klass = index_or_series - - obj = klass(pd.date_range("2011-01-01", periods=4, freq="D")._with_freq(None)) - assert obj.dtype == "datetime64[ns]" - - fv = fill_val - # do the check with each of the available datetime scalars - if exp_dtype == "datetime64[ns]": - for scalar in [fv, fv.to_pydatetime(), fv.to_datetime64()]: - self._run_test(obj, scalar, klass, exp_dtype) - else: - for scalar in [fv, fv.to_pydatetime()]: - self._run_test(obj, fill_val, klass, exp_dtype) - - @pytest.mark.xfail(reason="Test not implemented") - def test_where_index_complex128(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_where_index_bool(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_where_series_timedelta64(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_where_series_period(self): - raise NotImplementedError - - @pytest.mark.parametrize( - "value", [pd.Timedelta(days=9), timedelta(days=9), np.timedelta64(9, "D")] - ) - def test_where_index_timedelta64(self, value): - tdi = pd.timedelta_range("1 Day", periods=4) - cond = np.array([True, False, False, True]) - - expected = pd.TimedeltaIndex(["1 Day", value, value, "4 Days"]) - result = tdi.where(cond, value) - tm.assert_index_equal(result, expected) - - # wrong-dtyped NaT - dtnat = np.datetime64("NaT", "ns") - expected = pd.Index([tdi[0], dtnat, dtnat, tdi[3]], dtype=object) - assert expected[1] is dtnat - - result = tdi.where(cond, dtnat) - tm.assert_index_equal(result, expected) - - def test_where_index_period(self): - dti = pd.date_range("2016-01-01", periods=3, freq="QS") - pi = dti.to_period("Q") - - cond = np.array([False, True, False]) - - # Passing a valid scalar - value = pi[-1] + pi.freq * 10 - expected = pd.PeriodIndex([value, pi[1], value]) - result = pi.where(cond, value) - tm.assert_index_equal(result, expected) - - # Case passing ndarray[object] of Periods - other = np.asarray(pi + pi.freq * 10, dtype=object) - result = pi.where(cond, other) - expected = pd.PeriodIndex([other[0], pi[1], other[2]]) - tm.assert_index_equal(result, expected) - - # Passing a mismatched scalar -> casts to object - td = pd.Timedelta(days=4) - expected = pd.Index([td, pi[1], td], dtype=object) - result = pi.where(cond, td) - tm.assert_index_equal(result, expected) - - per = pd.Period("2020-04-21", "D") - expected = pd.Index([per, pi[1], per], dtype=object) - result = pi.where(cond, per) - tm.assert_index_equal(result, expected) - - -class TestFillnaSeriesCoercion(CoercionBase): - # not indexing, but place here for consistency - - method = "fillna" - - @pytest.mark.xfail(reason="Test not implemented") - def test_has_comprehensive_tests(self): - raise NotImplementedError - - def _assert_fillna_conversion(self, original, value, expected, expected_dtype): - """test coercion triggered by fillna""" - target = original.copy() - res = target.fillna(value) - tm.assert_equal(res, expected) - assert res.dtype == expected_dtype - - @pytest.mark.parametrize( - "fill_val, fill_dtype", - [(1, object), (1.1, object), (1 + 1j, object), (True, object)], - ) - def test_fillna_object(self, index_or_series, fill_val, fill_dtype): - klass = index_or_series - obj = klass(["a", np.nan, "c", 
"d"]) - assert obj.dtype == object - - exp = klass(["a", fill_val, "c", "d"]) - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.parametrize( - "fill_val,fill_dtype", - [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], - ) - def test_fillna_float64(self, index_or_series, fill_val, fill_dtype): - klass = index_or_series - obj = klass([1.1, np.nan, 3.3, 4.4]) - assert obj.dtype == np.float64 - - exp = klass([1.1, fill_val, 3.3, 4.4]) - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.parametrize( - "fill_val,fill_dtype", - [ - (1, np.complex128), - (1.1, np.complex128), - (1 + 1j, np.complex128), - (True, object), - ], - ) - def test_fillna_complex128(self, index_or_series, fill_val, fill_dtype): - klass = index_or_series - obj = klass([1 + 1j, np.nan, 3 + 3j, 4 + 4j], dtype=np.complex128) - assert obj.dtype == np.complex128 - - exp = klass([1 + 1j, fill_val, 3 + 3j, 4 + 4j]) - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.parametrize( - "fill_val,fill_dtype", - [ - (pd.Timestamp("2012-01-01"), "datetime64[ns]"), - (pd.Timestamp("2012-01-01", tz="US/Eastern"), object), - (1, object), - ("x", object), - ], - ids=["datetime64", "datetime64tz", "object", "object"], - ) - def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype): - klass = index_or_series - obj = klass( - [ - pd.Timestamp("2011-01-01"), - pd.NaT, - pd.Timestamp("2011-01-03"), - pd.Timestamp("2011-01-04"), - ] - ) - assert obj.dtype == "datetime64[ns]" - - exp = klass( - [ - pd.Timestamp("2011-01-01"), - fill_val, - pd.Timestamp("2011-01-03"), - pd.Timestamp("2011-01-04"), - ] - ) - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.parametrize( - "fill_val,fill_dtype", - [ - (pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"), - (pd.Timestamp("2012-01-01"), object), - # pre-2.0 with a mismatched tz we would get object result - (pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), "datetime64[ns, US/Eastern]"), - (1, object), - ("x", object), - ], - ) - def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): - klass = index_or_series - tz = "US/Eastern" - - obj = klass( - [ - pd.Timestamp("2011-01-01", tz=tz), - pd.NaT, - pd.Timestamp("2011-01-03", tz=tz), - pd.Timestamp("2011-01-04", tz=tz), - ] - ) - assert obj.dtype == "datetime64[ns, US/Eastern]" - - if getattr(fill_val, "tz", None) is None: - fv = fill_val - else: - fv = fill_val.tz_convert(tz) - exp = klass( - [ - pd.Timestamp("2011-01-01", tz=tz), - fv, - pd.Timestamp("2011-01-03", tz=tz), - pd.Timestamp("2011-01-04", tz=tz), - ] - ) - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.parametrize( - "fill_val", - [ - 1, - 1.1, - 1 + 1j, - True, - pd.Interval(1, 2, closed="left"), - pd.Timestamp("2012-01-01", tz="US/Eastern"), - pd.Timestamp("2012-01-01"), - pd.Timedelta(days=1), - pd.Period("2016-01-01", "D"), - ], - ) - def test_fillna_interval(self, index_or_series, fill_val): - ii = pd.interval_range(1.0, 5.0, closed="right").insert(1, np.nan) - assert isinstance(ii.dtype, pd.IntervalDtype) - obj = index_or_series(ii) - - exp = index_or_series([ii[0], fill_val, ii[2], ii[3], ii[4]], dtype=object) - - fill_dtype = object - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.xfail(reason="Test not implemented") - def test_fillna_series_int64(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not 
implemented") - def test_fillna_index_int64(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_fillna_series_bool(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_fillna_index_bool(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_fillna_series_timedelta64(self): - raise NotImplementedError - - @pytest.mark.parametrize( - "fill_val", - [ - 1, - 1.1, - 1 + 1j, - True, - pd.Interval(1, 2, closed="left"), - pd.Timestamp("2012-01-01", tz="US/Eastern"), - pd.Timestamp("2012-01-01"), - pd.Timedelta(days=1), - pd.Period("2016-01-01", "W"), - ], - ) - def test_fillna_series_period(self, index_or_series, fill_val): - pi = pd.period_range("2016-01-01", periods=4, freq="D").insert(1, pd.NaT) - assert isinstance(pi.dtype, pd.PeriodDtype) - obj = index_or_series(pi) - - exp = index_or_series([pi[0], fill_val, pi[2], pi[3], pi[4]], dtype=object) - - fill_dtype = object - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - - @pytest.mark.xfail(reason="Test not implemented") - def test_fillna_index_timedelta64(self): - raise NotImplementedError - - @pytest.mark.xfail(reason="Test not implemented") - def test_fillna_index_period(self): - raise NotImplementedError - - -class TestReplaceSeriesCoercion(CoercionBase): - klasses = ["series"] - method = "replace" - - rep: dict[str, list] = {} - rep["object"] = ["a", "b"] - rep["int64"] = [4, 5] - rep["float64"] = [1.1, 2.2] - rep["complex128"] = [1 + 1j, 2 + 2j] - rep["bool"] = [True, False] - rep["datetime64[ns]"] = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-03")] - - for tz in ["UTC", "US/Eastern"]: - # to test tz => different tz replacement - key = f"datetime64[ns, {tz}]" - rep[key] = [ - pd.Timestamp("2011-01-01", tz=tz), - pd.Timestamp("2011-01-03", tz=tz), - ] - - rep["timedelta64[ns]"] = [pd.Timedelta("1 day"), pd.Timedelta("2 day")] - - @pytest.fixture(params=["dict", "series"]) - def how(self, request): - return request.param - - @pytest.fixture( - params=[ - "object", - "int64", - "float64", - "complex128", - "bool", - "datetime64[ns]", - "datetime64[ns, UTC]", - "datetime64[ns, US/Eastern]", - "timedelta64[ns]", - ] - ) - def from_key(self, request): - return request.param - - @pytest.fixture( - params=[ - "object", - "int64", - "float64", - "complex128", - "bool", - "datetime64[ns]", - "datetime64[ns, UTC]", - "datetime64[ns, US/Eastern]", - "timedelta64[ns]", - ], - ids=[ - "object", - "int64", - "float64", - "complex128", - "bool", - "datetime64", - "datetime64tz", - "datetime64tz", - "timedelta64", - ], - ) - def to_key(self, request): - return request.param - - @pytest.fixture - def replacer(self, how, from_key, to_key): - """ - Object we will pass to `Series.replace` - """ - if how == "dict": - replacer = dict(zip(self.rep[from_key], self.rep[to_key])) - elif how == "series": - replacer = pd.Series(self.rep[to_key], index=self.rep[from_key]) - else: - raise ValueError - return replacer - - def test_replace_series(self, how, to_key, from_key, replacer): - index = pd.Index([3, 4], name="xxx") - obj = pd.Series(self.rep[from_key], index=index, name="yyy") - assert obj.dtype == from_key - - if from_key.startswith("datetime") and to_key.startswith("datetime"): - # tested below - return - elif from_key in ["datetime64[ns, US/Eastern]", "datetime64[ns, UTC]"]: - # tested below - return - - result = obj.replace(replacer) - - if (from_key == "float64" and to_key in ("int64")) or ( - 
from_key == "complex128" and to_key in ("int64", "float64") - ): - if not IS64 or is_platform_windows(): - pytest.skip(f"32-bit platform buggy: {from_key} -> {to_key}") - - # Expected: do not downcast by replacement - exp = pd.Series(self.rep[to_key], index=index, name="yyy", dtype=from_key) - - else: - exp = pd.Series(self.rep[to_key], index=index, name="yyy") - assert exp.dtype == to_key - - tm.assert_series_equal(result, exp) - - @pytest.mark.parametrize( - "to_key", - ["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"], - indirect=True, - ) - @pytest.mark.parametrize( - "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], indirect=True - ) - def test_replace_series_datetime_tz(self, how, to_key, from_key, replacer): - index = pd.Index([3, 4], name="xyz") - obj = pd.Series(self.rep[from_key], index=index, name="yyy") - assert obj.dtype == from_key - - result = obj.replace(replacer) - - exp = pd.Series(self.rep[to_key], index=index, name="yyy") - assert exp.dtype == to_key - - tm.assert_series_equal(result, exp) - - @pytest.mark.parametrize( - "to_key", - ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], - indirect=True, - ) - @pytest.mark.parametrize( - "from_key", - ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], - indirect=True, - ) - def test_replace_series_datetime_datetime(self, how, to_key, from_key, replacer): - index = pd.Index([3, 4], name="xyz") - obj = pd.Series(self.rep[from_key], index=index, name="yyy") - assert obj.dtype == from_key - - result = obj.replace(replacer) - - exp = pd.Series(self.rep[to_key], index=index, name="yyy") - if isinstance(obj.dtype, pd.DatetimeTZDtype) and isinstance( - exp.dtype, pd.DatetimeTZDtype - ): - # with mismatched tzs, we retain the original dtype as of 2.0 - exp = exp.astype(obj.dtype) - else: - assert exp.dtype == to_key - - tm.assert_series_equal(result, exp) - - @pytest.mark.xfail(reason="Test not implemented") - def test_replace_series_period(self): - raise NotImplementedError diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.py deleted file mode 100644 index 25079b235d332e2c72a47e1cdad5347b792bb3ac..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.py +++ /dev/null @@ -1,50 +0,0 @@ -import functools - -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - -pytest.importorskip("odf") - - -@pytest.fixture(autouse=True) -def cd_and_set_engine(monkeypatch, datapath): - func = functools.partial(pd.read_excel, engine="odf") - monkeypatch.setattr(pd, "read_excel", func) - monkeypatch.chdir(datapath("io", "data", "excel")) - - -def test_read_invalid_types_raises(): - # the invalid_value_type.ods required manually editing - # of the included content.xml file - with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"): - pd.read_excel("invalid_value_type.ods") - - -def test_read_writer_table(): - # Also test reading tables from an text OpenDocument file - # (.odt) - index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header") - expected = pd.DataFrame( - [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]], - index=index, - columns=["Column 1", "Unnamed: 2", "Column 3"], - ) - - result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0) - - 
tm.assert_frame_equal(result, expected) - - -def test_read_newlines_between_xml_elements_table(): - # GH#45598 - expected = pd.DataFrame( - [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]], - columns=["Column 1", "Column 2", "Column 3"], - ) - - result = pd.read_excel("test_newlines.ods") - - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/formatter.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/formatter.py deleted file mode 100644 index 87183abbb2326cfaf79a1d9d86ab5f0ec45f1222..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/formatter.py +++ /dev/null @@ -1,124 +0,0 @@ -""" - pygments.formatter - ~~~~~~~~~~~~~~~~~~ - - Base formatter class. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import codecs - -from pygments.util import get_bool_opt -from pygments.styles import get_style_by_name - -__all__ = ['Formatter'] - - -def _lookup_style(style): - if isinstance(style, str): - return get_style_by_name(style) - return style - - -class Formatter: - """ - Converts a token stream to text. - - Formatters should have attributes to help selecting them. These - are similar to the corresponding :class:`~pygments.lexer.Lexer` - attributes. - - .. autoattribute:: name - :no-value: - - .. autoattribute:: aliases - :no-value: - - .. autoattribute:: filenames - :no-value: - - You can pass options as keyword arguments to the constructor. - All formatters accept these basic options: - - ``style`` - The style to use, can be a string or a Style subclass - (default: "default"). Not used by e.g. the - TerminalFormatter. - ``full`` - Tells the formatter to output a "full" document, i.e. - a complete self-contained document. This doesn't have - any effect for some formatters (default: false). - ``title`` - If ``full`` is true, the title that should be used to - caption the document (default: ''). - ``encoding`` - If given, must be an encoding name. This will be used to - convert the Unicode token strings to byte strings in the - output. If it is "" or None, Unicode strings will be written - to the output file, which most file-like objects do not - support (default: None). - ``outencoding`` - Overrides ``encoding`` if given. - - """ - - #: Full name for the formatter, in human-readable form. - name = None - - #: A list of short, unique identifiers that can be used to lookup - #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`. - aliases = [] - - #: A list of fnmatch patterns that match filenames for which this - #: formatter can produce output. The patterns in this list should be unique - #: among all formatters. - filenames = [] - - #: If True, this formatter outputs Unicode strings when no encoding - #: option is given. - unicodeoutput = True - - def __init__(self, **options): - """ - As with lexers, this constructor takes arbitrary optional arguments, - and if you override it, you should first process your own options, then - call the base class implementation. - """ - self.style = _lookup_style(options.get('style', 'default')) - self.full = get_bool_opt(options, 'full', False) - self.title = options.get('title', '') - self.encoding = options.get('encoding', None) or None - if self.encoding in ('guess', 'chardet'): - # can happen for e.g. 
pygmentize -O encoding=guess - self.encoding = 'utf-8' - self.encoding = options.get('outencoding') or self.encoding - self.options = options - - def get_style_defs(self, arg=''): - """ - This method must return statements or declarations suitable to define - the current style for subsequent highlighted text (e.g. CSS classes - in the `HTMLFormatter`). - - The optional argument `arg` can be used to modify the generation and - is formatter dependent (it is standardized because it can be given on - the command line). - - This method is called by the ``-S`` :doc:`command-line option `, - the `arg` is then given by the ``-a`` option. - """ - return '' - - def format(self, tokensource, outfile): - """ - This method must format the tokens from the `tokensource` iterable and - write the formatted version to the file object `outfile`. - - Formatter options can control how exactly the tokens are converted. - """ - if self.encoding: - # wrap the outfile in a StreamWriter - outfile = codecs.lookup(self.encoding)[3](outfile) - return self.format_unencoded(tokensource, outfile) diff --git a/spaces/pseudolab/KOMUChat/README.md b/spaces/pseudolab/KOMUChat/README.md deleted file mode 100644 index 19b604561a18b8315dd6e4a68f52cbd632961965..0000000000000000000000000000000000000000 --- a/spaces/pseudolab/KOMUChat/README.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: KOMUCHAT ~ Korean Community-style ChatBot ~ -emoji: 👩‍❤️‍👨 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false -license: mit ---- - -# KOMUCHAT : Korean community-style relationship counseling chabot - -KOMUChat: 온라인 커뮤니티 스타일의 연애상담챗봇 - -### 개요 - -사람들의 실제 대화를 반영하기 위해 한국 커뮤니티 에펨코리아 + 인스티즈에서 수집한 약 3만건의 질문-답변 대화 데이터셋으로 학습시킨 챗봇 - -이별, 데이트코스, 선물 등 연애와 관련된 질문에 대해 온라인 커뮤니티에서 상담하는 것 같이 응답합니다. - -`paust-t5-large`모델과 `polyglot-ko-12.8b-safetensors` 모델을 파인튜닝하여 제공합니다. 데모에서 선택하여 이용해보실 수 있습니다. - - -### 응답예시 - -| Question | Answer | -| --- | --- | -| 헤어졌읍니다 | 힘내 더 좋은 인연 나타날거야 | -| 드라마만 주구장창 보는중 | 뭐 보고 있으신가요 | -| 결혼까지 생각하는 상태일수록 더 깐깐하게 따지게 돼? | 아무래도 그런 거 같아 | -| ISTJ 여자가 남자한테 관심있으면 어떻게 해? | 연락 자주 하고 취향 많이 물어보고 자주는 아니어도 꾸준히 만나려 함 | -| #청자#들아 연애는 갑자기 시작한다고 생각해? | 나는 진자 신기한 게 연애에 대한 기대가 떨어지면 하고 있더랔ㅋㅋㅋ | - -### 사용기술 - - - -## English descriptions - -KOMUCHAT : Korean community-style relationship counseling chabot - -### Intro - -This chatbot is trained with KOMUChat dataset, which is question-answer form of conversations collected from Korean online community FM Korea and instiz. - -The chatbot responds to relationship-related questions such as breakups, dating courses, and gifts as if it were consulting an online community. - -We finetuned `paust-t5-large` and `polyglot-en-12.8b-safetensors` model. You are welcomed to try them out by select options in demo. - -### Example conversation - -| Question | Answer | -| --- | --- | -| 헤어졌읍니다.(I broke up) | 힘내 더 좋은 인연 나타날거야(Cheer up, better things will come along) | -| 드라마만 주구장창 보는중 (I’m just watching dramas all day long) | 뭐 보고 있으신가요(What are you watching?) | -| 결혼까지 생각하는 상태일수록 더 깐깐하게 따지게 돼?(Is it true that the more you think about marriage, the more demanding you get?) | 아무래도 그런 거 같아(I think so.) | -| ISTJ 여자가 남자한테 관심있으면 어떻게 해?(What ISTJ girls do when they have interests in a guy?) | 연락 자주 하고 취향 많이 물어보고 자주는 아니어도 꾸준히 만나려 함(Contact him often, and ask him what he likes and try to see him regularly) | -| #청자#들아 연애는 갑자기 시작한다고 생각해?(#Listeners#, do you think relatonships start out nowhere?) 
| 나는 진자 신기한 게 연애에 대한 기대가 떨어지면 하고 있더랔ㅋㅋㅋ(I've found it really interesting that when I'm not expecting a relationship to happen, it does.) | - -### STACKS - - - - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/qinzhu/diy-girlfriend-online/modules.py b/spaces/qinzhu/diy-girlfriend-online/modules.py deleted file mode 100644 index 3484f6a1f4c1c06855c37a1ff4e66c58864acb38..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend-online/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dilated and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/qpmzonxw/bing/README.md b/spaces/qpmzonxw/bing/README.md deleted file mode 100644 index 619f6ff99f21eec3a6e09264a6ed58ea020bcc3f..0000000000000000000000000000000000000000 --- a/spaces/qpmzonxw/bing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bing -emoji: 🦀 -colorFrom: indigo -colorTo: purple -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Guru Charitra 14 Adhyay Pdf Download.md b/spaces/quidiaMuxgu/Expedit-SAM/Guru Charitra 14 Adhyay Pdf Download.md deleted file mode 100644 index e84a6dc9fa40b1012770f04f06719b3d1e18b450..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Guru Charitra 14 Adhyay Pdf Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

-guru charitra 14 adhyay pdf download
-Download Zip ————— https://geags.com/2uCsVy
-14. Chapter 13 ... Sanskrit translation of Sree Guru Charitra and a book of Sanskrit verses in praise of Lord Dattatreya. "These are the only ... flourished in the 14th and 15th centuries A.D. The original text recounts, in 53 chapters, the life and. 1fdad05405

      diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/raw.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/raw.py deleted file mode 100644 index d4941ad2d7ed858f490db441f5b46b12bd61ad78..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/raw.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -from collections import defaultdict, namedtuple -from pathlib import Path - -import musdb -import numpy as np -import torch as th -import tqdm -from torch.utils.data import DataLoader - -from .audio import AudioFile - -ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"]) - - -class Rawset: - """ - Dataset of raw, normalized, float32 audio files - """ - def __init__(self, path, samples=None, stride=None, channels=2, streams=None): - self.path = Path(path) - self.channels = channels - self.samples = samples - if stride is None: - stride = samples if samples is not None else 0 - self.stride = stride - entries = defaultdict(list) - for root, folders, files in os.walk(self.path, followlinks=True): - folders.sort() - files.sort() - for file in files: - if file.endswith(".raw"): - path = Path(root) / file - name, stream = path.stem.rsplit('.', 1) - entries[(path.parent.relative_to(self.path), name)].append(int(stream)) - - self._entries = list(entries.keys()) - - sizes = [] - self._lengths = [] - ref_streams = sorted(entries[self._entries[0]]) - assert ref_streams == list(range(len(ref_streams))) - if streams is None: - self.streams = ref_streams - else: - self.streams = streams - for entry in sorted(entries.keys()): - streams = entries[entry] - assert sorted(streams) == ref_streams - file = self._path(*entry) - length = file.stat().st_size // (4 * channels) - if samples is None: - sizes.append(1) - else: - if length < samples: - self._entries.remove(entry) - continue - sizes.append((length - samples) // stride + 1) - self._lengths.append(length) - if not sizes: - raise ValueError(f"Empty dataset {self.path}") - self._cumulative_sizes = np.cumsum(sizes) - self._sizes = sizes - - def __len__(self): - return self._cumulative_sizes[-1] - - @property - def total_length(self): - return sum(self._lengths) - - def chunk_info(self, index): - file_index = np.searchsorted(self._cumulative_sizes, index, side='right') - if file_index == 0: - local_index = index - else: - local_index = index - self._cumulative_sizes[file_index - 1] - return ChunkInfo(offset=local_index * self.stride, - file_index=file_index, - local_index=local_index) - - def _path(self, folder, name, stream=0): - return self.path / folder / (name + f'.{stream}.raw') - - def __getitem__(self, index): - chunk = self.chunk_info(index) - entry = self._entries[chunk.file_index] - - length = self.samples or self._lengths[chunk.file_index] - streams = [] - to_read = length * self.channels * 4 - for stream_index, stream in enumerate(self.streams): - offset = chunk.offset * 4 * self.channels - file = open(self._path(*entry, stream=stream), 'rb') - file.seek(offset) - content = file.read(to_read) - assert len(content) == to_read - content = np.frombuffer(content, dtype=np.float32) - content = content.copy() # make writable - streams.append(th.from_numpy(content).view(length, 
self.channels).t()) - return th.stack(streams, dim=0) - - def name(self, index): - chunk = self.chunk_info(index) - folder, name = self._entries[chunk.file_index] - return folder / name - - -class MusDBSet: - def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2): - self.mus = mus - self.streams = streams - self.samplerate = samplerate - self.channels = channels - - def __len__(self): - return len(self.mus.tracks) - - def __getitem__(self, index): - track = self.mus.tracks[index] - return (track.name, AudioFile(track.path).read(channels=self.channels, - seek_time=0, - streams=self.streams, - samplerate=self.samplerate)) - - -def build_raw(mus, destination, normalize, workers, samplerate, channels): - destination.mkdir(parents=True, exist_ok=True) - loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate), - batch_size=1, - num_workers=workers, - collate_fn=lambda x: x[0]) - for name, streams in tqdm.tqdm(loader): - if normalize: - ref = streams[0].mean(dim=0) # use mono mixture as reference - streams = (streams - ref.mean()) / ref.std() - for index, stream in enumerate(streams): - open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes()) - - -def main(): - parser = argparse.ArgumentParser('rawset') - parser.add_argument('--workers', type=int, default=10) - parser.add_argument('--samplerate', type=int, default=44100) - parser.add_argument('--channels', type=int, default=2) - parser.add_argument('musdb', type=Path) - parser.add_argument('destination', type=Path) - - args = parser.parse_args() - - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"), - args.destination / "train", - normalize=True, - channels=args.channels, - samplerate=args.samplerate, - workers=args.workers) - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"), - args.destination / "valid", - normalize=True, - samplerate=args.samplerate, - channels=args.channels, - workers=args.workers) - - -if __name__ == "__main__": - main() diff --git a/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/dataset.py b/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index cfd01a174978d97180a897e40cb59ecadec1d12e..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/infer/lib/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . 
import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = 
np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/xception.py b/spaces/rachana219/MODT2/trackers/strongsort/deep/models/xception.py deleted file mode 100644 index 43db4ab53283daf1267f2f4cc5f7d778daf4076a..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/xception.py +++ /dev/null @@ -1,344 +0,0 @@ -from __future__ import division, absolute_import -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.model_zoo as model_zoo - -__all__ = ['xception'] - -pretrained_settings = { - 'xception': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/xception-43020ad28.pth', - 'input_space': 'RGB', - 'input_size': [3, 299, 299], - 'input_range': [0, 1], - 'mean': [0.5, 0.5, 0.5], - 'std': [0.5, 0.5, 0.5], - 'num_classes': 1000, - 'scale': - 0.8975 # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 - } - } -} - - -class SeparableConv2d(nn.Module): - - def __init__( - self, - in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0, - dilation=1, - bias=False - ): - super(SeparableConv2d, self).__init__() - - self.conv1 = nn.Conv2d( - in_channels, - in_channels, - kernel_size, - stride, - padding, - dilation, - groups=in_channels, - bias=bias - ) - self.pointwise = nn.Conv2d( - in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias - ) - - def forward(self, x): - x = self.conv1(x) - x = self.pointwise(x) - return x - - -class Block(nn.Module): - - def __init__( - self, - in_filters, - out_filters, - reps, - strides=1, - start_with_relu=True, - grow_first=True - ): - super(Block, self).__init__() - - if out_filters != in_filters or strides != 1: - self.skip = nn.Conv2d( - in_filters, out_filters, 1, stride=strides, bias=False - ) - self.skipbn = nn.BatchNorm2d(out_filters) - else: - self.skip = None - - self.relu = nn.ReLU(inplace=True) - rep = [] - - filters = in_filters - if grow_first: - rep.append(self.relu) - rep.append( - SeparableConv2d( - in_filters, - out_filters, - 3, - stride=1, - padding=1, - bias=False - ) - ) - rep.append(nn.BatchNorm2d(out_filters)) - filters = 
out_filters - - for i in range(reps - 1): - rep.append(self.relu) - rep.append( - SeparableConv2d( - filters, filters, 3, stride=1, padding=1, bias=False - ) - ) - rep.append(nn.BatchNorm2d(filters)) - - if not grow_first: - rep.append(self.relu) - rep.append( - SeparableConv2d( - in_filters, - out_filters, - 3, - stride=1, - padding=1, - bias=False - ) - ) - rep.append(nn.BatchNorm2d(out_filters)) - - if not start_with_relu: - rep = rep[1:] - else: - rep[0] = nn.ReLU(inplace=False) - - if strides != 1: - rep.append(nn.MaxPool2d(3, strides, 1)) - self.rep = nn.Sequential(*rep) - - def forward(self, inp): - x = self.rep(inp) - - if self.skip is not None: - skip = self.skip(inp) - skip = self.skipbn(skip) - else: - skip = inp - - x += skip - return x - - -class Xception(nn.Module): - """Xception. - - Reference: - Chollet. Xception: Deep Learning with Depthwise - Separable Convolutions. CVPR 2017. - - Public keys: - - ``xception``: Xception. - """ - - def __init__( - self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs - ): - super(Xception, self).__init__() - self.loss = loss - - self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False) - self.bn1 = nn.BatchNorm2d(32) - - self.conv2 = nn.Conv2d(32, 64, 3, bias=False) - self.bn2 = nn.BatchNorm2d(64) - - self.block1 = Block( - 64, 128, 2, 2, start_with_relu=False, grow_first=True - ) - self.block2 = Block( - 128, 256, 2, 2, start_with_relu=True, grow_first=True - ) - self.block3 = Block( - 256, 728, 2, 2, start_with_relu=True, grow_first=True - ) - - self.block4 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - self.block5 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - self.block6 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - self.block7 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - - self.block8 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - self.block9 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - self.block10 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - self.block11 = Block( - 728, 728, 3, 1, start_with_relu=True, grow_first=True - ) - - self.block12 = Block( - 728, 1024, 2, 2, start_with_relu=True, grow_first=False - ) - - self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) - self.bn3 = nn.BatchNorm2d(1536) - - self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) - self.bn4 = nn.BatchNorm2d(2048) - - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.feature_dim = 2048 - self.fc = self._construct_fc_layer(fc_dims, 2048, dropout_p) - self.classifier = nn.Linear(self.feature_dim, num_classes) - - self._init_params() - - def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): - """Constructs fully connected layer. 
- - Args: - fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed - input_dim (int): input dimension - dropout_p (float): dropout probability, if None, dropout is unused - """ - if fc_dims is None: - self.feature_dim = input_dim - return None - - assert isinstance( - fc_dims, (list, tuple) - ), 'fc_dims must be either list or tuple, but got {}'.format( - type(fc_dims) - ) - - layers = [] - for dim in fc_dims: - layers.append(nn.Linear(input_dim, dim)) - layers.append(nn.BatchNorm1d(dim)) - layers.append(nn.ReLU(inplace=True)) - if dropout_p is not None: - layers.append(nn.Dropout(p=dropout_p)) - input_dim = dim - - self.feature_dim = fc_dims[-1] - - return nn.Sequential(*layers) - - def _init_params(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu' - ) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def featuremaps(self, input): - x = self.conv1(input) - x = self.bn1(x) - x = F.relu(x, inplace=True) - - x = self.conv2(x) - x = self.bn2(x) - x = F.relu(x, inplace=True) - - x = self.block1(x) - x = self.block2(x) - x = self.block3(x) - x = self.block4(x) - x = self.block5(x) - x = self.block6(x) - x = self.block7(x) - x = self.block8(x) - x = self.block9(x) - x = self.block10(x) - x = self.block11(x) - x = self.block12(x) - - x = self.conv3(x) - x = self.bn3(x) - x = F.relu(x, inplace=True) - - x = self.conv4(x) - x = self.bn4(x) - x = F.relu(x, inplace=True) - return x - - def forward(self, x): - f = self.featuremaps(x) - v = self.global_avgpool(f) - v = v.view(v.size(0), -1) - - if self.fc is not None: - v = self.fc(v) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError('Unsupported loss: {}'.format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initialize models with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def xception(num_classes, loss='softmax', pretrained=True, **kwargs): - model = Xception(num_classes, loss, fc_dims=None, dropout_p=None, **kwargs) - if pretrained: - model_url = pretrained_settings['xception']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/expansion/dataloader/sintellist.py b/spaces/radames/UserControllableLT-Latent-Transformer/expansion/dataloader/sintellist.py deleted file mode 100644 index 44bc1ab5d466d605b7bc695adb41f2431aa0f790..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/expansion/dataloader/sintellist.py +++ /dev/null @@ -1,32 +0,0 @@ -import torch.utils.data as data - -from PIL import Image -import os -import os.path -import numpy as np -import pdb - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - -def dataloader(filepath): - - left_fold = 'image_2/' - train = [img for img in os.listdir(filepath+left_fold) if img.find('Sintel') > -1] - - l0_train = [filepath+left_fold+img for img in train] - l0_train = [img for img in l0_train if '%s_%s.png'%(img.rsplit('_',1)[0],'%02d'%(1+int(img.split('.')[0].split('_')[-1])) ) in l0_train ] - - #l0_train = [i for i in l0_train if not '10.png' in i] # remove 10 as val - - l1_train = ['%s_%s.png'%(img.rsplit('_',1)[0],'%02d'%(1+int(img.split('.')[0].split('_')[-1])) ) for img in l0_train] - flow_train = [img.replace('image_2','flow_occ') for img in l0_train] - - - return l0_train, l1_train, flow_train diff --git a/spaces/radames/sentence-embeddings-visualization/embeddings_encoder.py b/spaces/radames/sentence-embeddings-visualization/embeddings_encoder.py deleted file mode 100644 index 691a9c77b29ee0fa34eff31020b13bbcac91d037..0000000000000000000000000000000000000000 --- a/spaces/radames/sentence-embeddings-visualization/embeddings_encoder.py +++ /dev/null @@ -1,47 +0,0 @@ -# from https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2 -from transformers import AutoTokenizer, AutoModel -import torch -import torch.nn.functional as F -import os -os.environ["TOKENIZERS_PARALLELISM"] = "false" - -class EmbeddingsEncoder: - def __init__(self): - # Load model from HuggingFace Hub - self.tokenizer = AutoTokenizer.from_pretrained( - 'sentence-transformers/all-MiniLM-L6-v2') - self.model = AutoModel.from_pretrained( - 'sentence-transformers/all-MiniLM-L6-v2') - - # Mean Pooling - Take average of all tokens - - def mean_pooling(self, model_output, attention_mask): - # First element of model_output contains all token embeddings - token_embeddings = model_output.last_hidden_state - input_mask_expanded = attention_mask.unsqueeze( - -1).expand(token_embeddings.size()).float() - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - - # Encode text - - def encode(self, texts): - # Tokenize sentences - print("Tokenizing...") - encoded_input = self.tokenizer( - texts, padding=True, truncation=True, return_tensors='pt') - - # Compute token embeddings - print("Computing 
embeddings...") - with torch.no_grad(): - model_output = self.model(**encoded_input, return_dict=True) - - # Perform pooling - print("Performing pooling...") - embeddings = self.mean_pooling( - model_output, encoded_input['attention_mask']) - - # Normalize embeddings - print("Normalizing embeddings...") - embeddings = F.normalize(embeddings, p=2, dim=1) - - return embeddings diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Origin 9.0.0.45 Patch to Solve Ok9.dll Problems.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Origin 9.0.0.45 Patch to Solve Ok9.dll Problems.md deleted file mode 100644 index 0fd2c8cd19fc180768849c4e77acce8446b215b8..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Origin 9.0.0.45 Patch to Solve Ok9.dll Problems.md +++ /dev/null @@ -1,147 +0,0 @@ - -

      What is ok9.dll and why do you need it?

      -

      If you are a user of Origin, a popular platform for gaming and data analysis, you may have encountered a file called "ok9.dll" on your computer. But what is this file and why do you need it? In this article, we will explain what ok9.dll is, how it works with Origin, and how to get the latest patch for it.

      -

      ok9.dll origin 9.0.0.45 patch


Download Zip: https://tinourl.com/2uL07M



      -

      What is ok9.dll?

      -

Ok9.dll is a file that belongs to Origin, software developed by OriginLab Corporation for data analysis and graphing, and by Electronic Arts for gaming and digital distribution. Origin is used by millions of users around the world for various purposes, such as creating scientific graphs, playing online games, and accessing digital content.

      -

      What is Origin?

      -

Origin is a software application with two main functions: data analysis and graphing, and gaming and digital distribution. The data analysis and graphing function is provided by OriginLab Corporation, a company that specializes in creating software for scientists and engineers. The gaming and digital distribution function is provided by Electronic Arts, a company that produces and publishes video games.

      -

      OriginLab's Origin allows users to import, analyze, and visualize data using various tools and features, such as graphs, charts, statistics, curve fitting, peak analysis, signal processing, and more. Origin also supports programming languages such as C, Python, R, and LabTalk.

      -

      Electronic Arts' Origin allows users to buy, download, and play games from EA's catalog, such as FIFA, The Sims, Battlefield, Mass Effect, Dragon Age, and more. Origin also offers social features such as chat, friends list, achievements, cloud saves, and game streaming.

      -

      How to download ok9.dll for origin 9.0.0.45
      -Fix ok9.dll missing or corrupted error in origin 9.0.0.45
      -Where to find ok9.dll crack for origin 9.0.0.45
      -Download ok9.dll by OriginLab Corporation for Origin
      -What is ok9.dll and how to use it in origin 9.0.0.45
      -How to install ok9.dll in system32 or syswow64 folder
      -How to update origin 9.0.0.45 with ok9.dll patch
      -How to restore ok9.dll from DLL Wizard
      -How to remove ok9.dll virus or malware from origin 9.0.0.45
      -How to fix ok9.dll application error in origin 9.0.0.45
      -How to repair ok9.dll registry entries in origin 9.0.0.45
      -How to uninstall origin 9.0.0.45 with ok9.dll file
      -How to optimize your computer with ok9.dll for origin 9.0.0.45
      -How to backup and restore ok9.dll for origin 9.0.0.45
      -How to troubleshoot ok9.dll issues in origin 9.0.0.45
      -How to get free ok9.dll license key for origin 9.0.0.45
      -How to upgrade from origin 9.0.0.45 to origin 10 with ok9.dll file
      -How to downgrade from origin 10 to origin 9.0.0.45 with ok9.dll file
      -How to verify the integrity of ok9.dll file for origin 9.0.0.45
      -How to contact OriginLab support for ok9.dll problems in origin 9.0.0.45
      -How to run origin 9.0.0.45 as administrator with ok9.dll file
      -How to disable antivirus or firewall for ok9.dll file in origin 9.0.0.45
      -How to enable compatibility mode for ok9.dll file in origin 9.0.0.45
      -How to change the language of ok9.dll file in origin 9.0.0.45
      -How to edit the properties of ok9.dll file in origin 9.0.0.45
      -How to rename or delete ok9.dll file in origin 9.0.0.

      -

      What is a DLL file?

      -

      A DLL file is a dynamic link library file that contains code and data that can be used by multiple programs at the same time. A DLL file allows programs to share functionality and resources without having to duplicate them in each program. For example, a DLL file can provide common functions such as printing, saving, or opening files.

      -

      A DLL file can be loaded and unloaded by programs as needed, which can improve the performance and memory usage of the system. However, a DLL file can also cause problems if it is missing or corrupted. For example, if a program tries to use a function from a DLL file that is not available or compatible with the program's version or system's configuration, it may result in an error message or a crash.
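To make the idea concrete, here is a minimal, hedged sketch of how a program loads a DLL at runtime. It uses Python's ctypes module and Windows' built-in kernel32.dll purely as an illustration; it does not call ok9.dll itself, and the exact error a program shows when a DLL is missing will vary.

```python
# Minimal sketch of how a program loads a DLL at runtime, using Python's ctypes on Windows.
# kernel32.dll is used here only because it is always present; ok9.dll itself is not called.
import ctypes

try:
    kernel32 = ctypes.WinDLL("kernel32")   # load the DLL into this process
    uptime_ms = kernel32.GetTickCount()    # call one of its exported functions
    print(f"System uptime: {uptime_ms} ms")
except OSError as err:
    # This is the kind of failure a program reports when a required DLL is missing or corrupted.
    print(f"DLL could not be loaded: {err}")
```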

      -

      Why do you need ok9.dll?

      -

      Ok9.dll is a DLL file that is required by Origin to run properly on your computer. Ok9.dll contains code and data that are used by both OriginLab's Origin and Electronic Arts' Origin for various purposes.

      -

      How does ok9.dll work with Origin?

      -

Ok9.dll works with Origin by providing some of the core functionality and features of the software. For example, ok9.dll enables Origin to create graphs and charts from data sets, perform calculations and analysis on data, access online services and content from EA's servers, communicate with other users via chat or voice chat, and more.

      -

      What are the benefits of using ok9.dll?

      -

      Using ok9.dll can provide several benefits for Origin users. Some of these benefits are:

      -
        -
      • Enhanced performance: Ok9.dll can improve the speed and efficiency of Origin by allowing it to use less memory and CPU resources.
      • -
      • Better compatibility: Ok9.dll can ensure that Origin works well with different versions of Windows and other software on your computer.
      • -
• More features: Ok9.dll can enable Origin to offer more functionality and options for data analysis and graphing, and gaming and digital distribution.
      • -
      -

      What are the risks of not having ok9.dll?

      -

      Not having ok9.dll can pose several risks for Origin users. Some of these risks are:

      -
        -
      • Poor performance: Not having ok9.dll can cause Origin to run slower or use more memory and CPU resources.
      • -
      • Limited compatibility: Not having ok9.dll can cause Origin to not work properly with different versions of Windows or other software on your computer.
      • -
• Fewer features: Not having ok9.dll can prevent Origin from offering some of the functionality and options for data analysis and graphing, and gaming and digital distribution.
      • -
      -

      How to get ok9.dll origin 9.0.0.45 patch?

      -

If you want to use Origin on your computer, you need to have the latest version of ok9.dll installed on your system. The latest version of ok9.dll is the Origin 9.0.0.45 patch, which was released in October 2021. This patch fixes some bugs and improves some features of Origin.

      -

      How to download ok9.dll origin 9.0.0.45 patch?

      -

To download ok9.dll origin 9.0.0.45 patch, you have two options:

      -
        -
1. You can download it from the official website of OriginLab Corporation or Electronic Arts, depending on which version of Origin you are using.
2. You can download it from a third-party website that offers free DLL files, such as DLLme.com.
      -

If you choose the first option, you need to follow these steps:

      -
        -
1. Go to the website of OriginLab Corporation or Electronic Arts, depending on which version of Origin you are using.
2. Navigate to the download section and look for the latest version of Origin.
3. Select the appropriate option for your system (32-bit or 64-bit) and click on the download button.
4. Save the file on your computer and run it as an administrator.
5. Follow the instructions on the screen to install the latest version of Origin (which includes the ok9.dll origin 9.0.0.45 patch).
      -

If you choose the second option, you need to follow these steps:

      -
        -
1. Go to DLLme.com (or any other website that offers free DLL files).
2. Type "ok9.dll" in the search box and hit enter.
3. Select the version or variant you wish to download (in this case, "ok9.dll by OriginLab Corporation for Okern Version 9").
4. Click on the download button (or right-click on it and select "save link as"); a quick way to sanity-check the downloaded file is sketched below.
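After saving the file, it is worth a quick integrity check before copying it anywhere. The sketch below simply prints the SHA-256 checksum of the downloaded file so you can compare it against a checksum published by the download site, if the site provides one (that is an assumption, not something every site does). The download path is hypothetical; adjust it to your own system.

```python
# Minimal sketch: print the SHA-256 checksum of a downloaded DLL for manual comparison.
# The path below is a hypothetical example -- point it at your actual download location.
import hashlib
from pathlib import Path

downloaded = Path(r"C:\Users\you\Downloads\ok9.dll")  # hypothetical location

if downloaded.exists():
    digest = hashlib.sha256(downloaded.read_bytes()).hexdigest()
    print(f"{downloaded.name}: {digest}")
else:
    print(f"File not found: {downloaded}")
```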


| Method | Description | Pros | Cons |
| --- | --- | --- | --- |
| Savegame files | You can download savegame files that have all DLCs unlocked at the first save point at this link. You can choose between two files: one that has all gazette daily news unlocked as well (but they are untranslated and will crash your game if you try to read them), and one that has only one gazette daily news unlocked (which is translated). | The easiest method that requires minimal effort. | You will lose your own progress if you overwrite your savegame file with these files. |
| Cheat codes | You can use cheat codes to unlock DLCs and gazette daily news by using a cheat device such as Action Replay or R4. You can find some cheat codes here or here. | The most flexible method that allows you to choose which DLCs and gazette daily news you want to unlock. | You will need a cheat device and some technical knowledge to use this method. |
| Online connection | You can connect your Nintendo DS or your emulator to the internet and use a custom DNS server that redirects you to a fan-made server that hosts the DLCs and gazette daily news. You can find more information about this method here. | The most authentic method that replicates the original online experience. | You will need a stable internet connection and some configuration to use this method. |
      -

      You can use any of these methods to access the extra content of Ni no Kuni: Shikkoku no Madoushi. However, keep in mind that some of them may not work with some flashcarts or emulators, and that some of them may have bugs or glitches. Also, remember that the gazette daily news are untranslated in the English patch and will crash your game if you try to read them.

      -

      Conclusion

      -

      Ni no Kuni: Shikkoku no Madoushi is a wonderful game that deserves to be played by fans of RPGs and Studio Ghibli. Thanks to the fan translation project, you can now enjoy this game in English by downloading Ni no Kuni NDS ROM English patched version and playing it on your Nintendo DS or your emulator.

      -

      In this article, we have shown you how to download Ni no Kuni NDS ROM English patched version and how to unlock DLCs and gazette daily news. We hope you have found this guide helpful and easy to follow.

      -

      Now it's time for you to embark on your journey in Ni no Kuni and explore its magical world. Along the way, you will meet many friends and foes, learn new spells and skills, and discover secrets and mysteries. You will also experience a touching story that will make you laugh and cry.

      -

      Are you ready to play Ni no Kuni: Shikkoku no Madoushi in English? If so, download Ni no Kuni NDS ROM English patched version today and have fun!

      -

      If you have any questions or feedback about this article, please let us know in the comments below. We would love to hear from you!

      -

      FAQs

      - -

      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Fcp X Luster Grade Presets 20 Pack Free Download.md b/spaces/raedeXanto/academic-chatgpt-beta/Fcp X Luster Grade Presets 20 Pack Free Download.md deleted file mode 100644 index 0eee28c5f36c8e9ac91f655f3ca8c2791d335091..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Fcp X Luster Grade Presets 20 Pack Free Download.md +++ /dev/null @@ -1,48 +0,0 @@ - -

      FCP X Luster Grade Presets 20 Pack Free Download: How to Enhance Your Videos with Professional Color Grading

      -

      If you are looking for a way to make your videos stand out with stunning colors and cinematic effects, you might want to check out the FCP X Luster Grade Presets 20 Pack. These are a collection of 20 color grading presets that you can use in Final Cut Pro X to transform your footage with just a few clicks. In this article, we will show you what these presets are, why you should use them, how to get them for free, and how to use them in your projects. We will also show you some examples of how these presets can create different looks for your videos, from Bollywood to Hollywood and everything in between.

      -

      Introduction

      -

      What are FCP X Luster Grade Presets?

      -

      FCP X Luster Grade Presets are a set of 20 color correction and color grading presets that you can apply to your clips in Final Cut Pro X. They are designed by Denver Riddle, a professional colorist and founder of Color Grading Central, a website that provides resources and tutorials on color grading for filmmakers. These presets are inspired by some of the most popular and sought-after looks from different film genres and styles, such as India, mainstream Hollywood, indie, sci-fi, horror, and more. They are compatible with Final Cut Pro X version 10.0.4 or later.

      -

      Fcp X Luster Grade Presets 20 Pack Free Download


      Download ✓✓✓ https://tinourl.com/2uL0BH



      -

      Why use FCP X Luster Grade Presets?

      -

      Color grading is an essential part of video editing that can make a huge difference in the mood, tone, and quality of your videos. It can help you create a consistent and professional look for your videos, enhance the emotions and atmosphere of your scenes, and make your videos more appealing and engaging for your audience. However, color grading can also be a challenging and time-consuming process that requires a lot of skill and experience. That's where FCP X Luster Grade Presets come in handy. These presets can help you achieve amazing results with minimal effort and time. They can save you hours of tweaking and fine-tuning your colors, and give you instant access to a variety of looks that you can easily customize to fit your vision. They can also help you learn more about color grading by seeing how different settings affect your images.

      -

      How to get FCP X Luster Grade Presets for free?

      -

      If you are interested in trying out the FCP X Luster Grade Presets, you will be happy to know that you can get them for free. All you have to do is visit the Color Grading Central website and sign up for their newsletter. You will receive an email with a link to download the presets, along with some other free resources and tips on color grading. You can also purchase the full 20 pack of presets for $49 if you want to support the creator and get access to more features and updates.

      -

      How to use FCP X Luster Grade Presets in Final Cut Pro X

      -

      How to install FCP X Luster Grade Presets

      -

Once you have downloaded the presets, you need to install them in Final Cut Pro X. To do this, follow these steps:

How to apply FCP X Luster Grade Presets to your clips

      Applying the presets to your clips is very easy and fast. To do this, follow these steps:

- Select the clip or clips that you want to color grade in the timeline.
- Go to the Effects Browser and find the Luster Grade Presets folder in the User Presets section.
- Choose the preset that you like and drag and drop it onto your clip or clips.
- You will see a color board icon appear on your clip in the timeline, indicating that the preset has been applied.
- You can preview the effect of the preset on your clip in the viewer.

      How to adjust FCP X Luster Grade Presets to suit your style

      -

      One of the best features of FCP X Luster Grade Presets is that they are fully customizable. You can adjust them to match your personal preference, your footage, and your project. To do this, follow these steps:

- Select the clip or clips that have the preset applied in the timeline.
- Go to the Inspector window and click on the Video tab.
- You will see a Color section with a color board icon. Click on it to open the Color Board.
- The Color Board has four tabs: Color, Saturation, Exposure, and Global. You can use these tabs to modify different aspects of your color grading, such as hue, brightness, contrast, and overall intensity.
- You can also use the sliders below each tab to fine-tune your adjustments.
- You can see the changes you make on your clip in real time in the viewer.

      Examples of FCP X Luster Grade Presets in action

      -

      To give you an idea of how FCP X Luster Grade Presets can enhance your videos, here are some examples of how they can create different looks for your videos. We will use three presets from the pack: Bollywood, Hollywood, and Indie.

      -

      -

      Bollywood look

      -

      The Bollywood preset is inspired by the vibrant and colorful style of Indian cinema. It adds a warm and rich tone to your footage, with enhanced contrast and saturation. It works well for scenes that need a lively and exotic feel, such as weddings, festivals, dances, or travel videos. Here is an example of how it looks before and after applying the preset:

      - Bollywood look before and after -

      Hollywood look

      -

      The Hollywood preset is inspired by the classic and elegant style of mainstream Hollywood movies. It adds a cool and neutral tone to your footage, with balanced exposure and subtle color correction. It works well for scenes that need a professional and cinematic feel, such as interviews, documentaries, dramas, or commercials. Here is an example of how it looks before and after applying the preset:

      - Hollywood look before and after -

      Indie look

      -

      The Indie preset is inspired by the creative and original style of independent films. It adds a vintage and faded tone to your footage, with reduced saturation and contrast. It works well for scenes that need a nostalgic and artistic feel, such as music videos, short films, or personal projects. Here is an example of how it looks before and after applying the preset:

      - Indie look before and after -

      Conclusion

      -

      Summary of the main points

      -

      In this article, we have shown you how to enhance your videos with professional color grading using FCP X Luster Grade Presets 20 Pack Free Download. We have explained what these presets are, why you should use them, how to get them for free, and how to use them in Final Cut Pro X. We have also shown you some examples of how these presets can create different looks for your videos.

      -

      Call to action

      -

      If you are ready to take your videos to the next level with FCP X Luster Grade Presets 20 Pack Free Download, don't hesitate to download them today from Color Grading Central . You will be amazed by how easy and fun it is to color grade your videos with these presets. You can also learn more about color grading and other video editing tips from their website and newsletter. Thank you for reading this article and happy color grading!

      -

      FAQs

      -

      Here are some frequently asked questions about FCP X Luster Grade Presets 20 Pack Free Download:

      -

      What are the system requirements for FCP X Luster Grade Presets?

      -

      To use FCP X Luster Grade Presets, you need to have Final Cut Pro X version 10.0.4 or later installed on your Mac. You also need to have a Mac with a 64-bit processor, 4 GB of RAM, and a graphics card that supports OpenCL.

      -

      How many presets are included in the FCP X Luster Grade Presets 20 Pack?

      -

      The FCP X Luster Grade Presets 20 Pack includes 20 color grading presets that cover a wide range of film genres and styles. They are:

Bollywood, Hollywood, Indie, Sci-Fi, Horror, Action, Comedy, Romance, Western, Noir, Documentary, Thriller, Adventure, Fantasy, Drama, War, Crime, Mystery, Animation, and Musical.

      Can I use FCP X Luster Grade Presets for commercial projects?

      -

      Yes, you can use FCP X Luster Grade Presets for any personal or commercial projects, as long as you do not resell or redistribute them. You can also modify them to suit your needs, but you cannot claim them as your own.

      -

      Can I use FCP X Luster Grade Presets with other video editing software?

      -

      No, FCP X Luster Grade Presets are only compatible with Final Cut Pro X. They are not compatible with other video editing software, such as Adobe Premiere Pro, DaVinci Resolve, or iMovie.

      -

      How can I contact the creator of FCP X Luster Grade Presets?

      -

      If you have any questions, feedback, or suggestions about FCP X Luster Grade Presets, you can contact the creator, Denver Riddle, through his website or his social media accounts . He is always happy to hear from his users and help them with their color grading needs.

      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Get Adobe Acrobat 7.0 Professional Full Version for Free No Registration Required.md b/spaces/raedeXanto/academic-chatgpt-beta/Get Adobe Acrobat 7.0 Professional Full Version for Free No Registration Required.md deleted file mode 100644 index fd05c87b3ab50fb52560e2424bfc4795fa1adc0f..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Get Adobe Acrobat 7.0 Professional Full Version for Free No Registration Required.md +++ /dev/null @@ -1,123 +0,0 @@ -
      -

      Adobe Acrobat 7.0 Professional Free Download Full Version

      -

      If you are looking for a powerful and reliable tool to create, edit, combine, secure, and convert PDF files, you might be interested in Adobe Acrobat 7.0 Professional. This software was released in 2005 by Adobe Systems as part of the Acrobat family of products. It is still widely used by many users who need to work with PDF documents on a regular basis.

      -

      adobe acrobat 7.0 professional free download full version


      Download Zip ○○○ https://tinourl.com/2uKZc4



      -

      In this article, we will explain what Adobe Acrobat 7.0 Professional is, what features it offers, how to download it for free, what are its pros and cons, and what are some alternatives to it. By the end of this article, you will have a better understanding of this software and whether it suits your needs or not.

      -

      What is Adobe Acrobat 7.0 Professional?

      -

Adobe Acrobat 7.0 Professional is a software application that allows you to create, edit, combine, secure, and convert PDF documents. PDF stands for Portable Document Format, which is a file format that preserves the layout, fonts, images, and other elements of a document regardless of the application or platform used to create or view it.

      -

      Adobe Acrobat 7.0 Professional is designed for business professionals who need to create and distribute PDF documents that are easy to share with others using free Adobe Reader software. You can use this software to create PDF files from various sources, such as Microsoft Office applications, AutoCAD drawings, web pages, scanned documents, and more. You can also edit PDF files by adding or deleting text, images, annotations, bookmarks, links, headers, footers, and more.

      -

      Moreover, you can combine multiple PDF files into one document or split a large PDF file into smaller ones. You can also organize your PDF files by rearranging pages, inserting page numbers, adding watermarks, etc. Additionally, you can secure your PDF files by applying digital signatures that can be authenticated and also passwords and permissions to control who can open, view, edit, and print your PDF documents.

      -

      Furthermore, you can convert your PDF files to other formats, such as Word, Excel, PowerPoint, HTML, JPEG, TIFF, etc. You can also export your PDF files as XML data packages that can be used for data analysis or integration with other applications.

      -

      Features of Adobe Acrobat 7.0 Professional

      -

      Create and edit PDF documents

      -

      One of the main features of Adobe Acrobat 7.0 Professional is that it allows you to create PDF documents from various sources with the click of a button. You can use the Acrobat menu or toolbar in Microsoft Office applications (such as Word, Excel, PowerPoint) or AutoCAD to create PDF files directly from your documents or drawings.

      -

      How to get adobe acrobat 7.0 professional for free
      -Adobe acrobat 7.0 pro full version download link
      -Adobe acrobat 7.0 professional crack serial keygen
      -Adobe acrobat 7.0 pro free trial download
      -Adobe acrobat 7.0 professional license key activation
      -Download adobe acrobat 7.0 pro offline installer
      -Adobe acrobat 7.0 professional features and benefits
      -Adobe acrobat 7.0 pro system requirements and compatibility
      -Adobe acrobat 7.0 professional tutorial and guide
      -Adobe acrobat 7.0 pro alternative software free download
      -Adobe acrobat 7.0 professional upgrade and update
      -Adobe acrobat 7.0 pro review and rating
      -Adobe acrobat 7.0 professional support and help
      -Adobe acrobat 7.0 pro tips and tricks
      -Adobe acrobat 7.0 professional comparison and difference
      -Adobe acrobat 7.0 pro coupon code and discount
      -Adobe acrobat 7.0 professional download error and fix
      -Adobe acrobat 7.0 pro user manual and documentation
      -Adobe acrobat 7.0 professional installation and setup
      -Adobe acrobat 7.0 pro uninstall and remove
      -Adobe acrobat 7.0 professional edit and create PDF files
      -Adobe acrobat 7.0 pro convert and export PDF files
      -Adobe acrobat 7.0 professional sign and secure PDF files
      -Adobe acrobat 7.0 pro collaborate and share PDF files
      -Adobe acrobat 7.0 professional optimize and compress PDF files
      -Adobe acrobat 7.0 pro merge and split PDF files
      -Adobe acrobat 7.0 professional annotate and comment PDF files
      -Adobe acrobat 7.0 pro fill and submit PDF forms
      -Adobe acrobat 7.0 professional extract and insert PDF pages
      -Adobe acrobat 7.0 pro rotate and crop PDF pages
      -Adobe acrobat 7.0 professional watermark and stamp PDF files
      -Adobe acrobat 7.0 pro password protect and encrypt PDF files
      -Adobe acrobat 7.0 professional redact and remove sensitive information from PDF files
      -Adobe acrobat 7.0 pro OCR and recognize text in PDF files
      -Adobe acrobat 7.0 professional compare and find differences in PDF files
      -Adobe acrobat 7.0 pro measure and calculate distances in PDF files
      -Adobe acrobat 7.0 professional add and manage bookmarks in PDF files
      -Adobe acrobat 7.0 pro attach and embed files in PDF files
      -Adobe acrobat 7.0 professional print and preview PDF files
      -Adobe acrobat 7.0 pro customize and change preferences in PDF files
      -Adobe acrobat 7.0 professional troubleshoot and solve common problems in PDF files
      -Adobe acrobat 7.0 pro keyboard shortcuts and commands in PDF files
      -Adobe acrobat 7.0 professional accessibility and compliance in PDF files
      -Adobe acrobat 7.0 pro integrate and work with other applications in PDF files
      -Adobe acrobat 7.0 professional automate and batch process PDF files
      -Adobe acrobat 7.0 pro enhance and improve performance in PDF files
      -Adobe acrobat 7.0 professional add-ons and extensions in PDF files
      -Adobe acrobat 7.0 pro feedback and suggestions in PDF files
      -Download adobe reader for free to view adobe acrobat 7.0 professional created PDF files

      -

      You can also create PDF files from web pages by using the Web Capture tool that lets you capture an entire web page or a selected area as a PDF file. You can also create PDF files from scanned documents by using the Paper Capture tool that lets you scan paper documents and convert them into searchable and editable PDF files.

      -

      Once you have created your PDF files, you can edit them by using the TouchUp tools that let you modify text, images, annotations, bookmarks, links, headers, footers, and more in your PDF files. You can also use the Advanced Editing tools that let you crop, rotate, resize, move, delete, and add pages in your PDF files.
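Acrobat does all of the above through its menus and toolbars. If you only want a rough programmatic analogue of "create a PDF from scratch", the sketch below uses the third-party ReportLab library rather than anything from Acrobat; the file name and text are arbitrary examples.

```python
# Minimal sketch of programmatic PDF creation with the third-party ReportLab library.
# This is not Acrobat's API; it only illustrates the general idea of generating a PDF.
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

c = canvas.Canvas("hello.pdf", pagesize=letter)  # arbitrary output file name
c.drawString(72, 720, "Hello, PDF")              # draw text one inch from the left, near the top
c.showPage()                                     # finish the current page
c.save()                                         # write hello.pdf to disk
```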

      -

      Combine and organize PDF files

      -

      Another feature of Adobe Acrobat 7.0 Professional is that it allows you to combine multiple PDF files into one document or split a large PDF file into smaller ones. You can use the Create PDF From Multiple Files tool that lets you select multiple files (such as Word documents, Excel spreadsheets, PowerPoint presentations, JPEG images, etc.) and merge them into one PDF file. You can also use the Extract Pages tool that lets you select specific pages from a large PDF file and save them as separate PDF files.

      -

      Besides combining and splitting PDF files, you can also organize your PDF files by rearranging pages, inserting page numbers, adding watermarks, etc. You can use the Pages palette that lets you drag and drop pages within or between PDF files. You can also use the Document menu or toolbar that lets you insert page numbers, watermarks, backgrounds, etc. in your PDF files.
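For readers who prefer to script this kind of work instead of using Acrobat's tools, here is a minimal sketch with the third-party pypdf library. It is not Acrobat's feature set, just an illustration of combining and splitting; the file names are hypothetical.

```python
# Minimal sketch using the third-party pypdf library (not Acrobat itself):
# merge two PDFs into one, then split the first page out into its own file.
from pypdf import PdfReader, PdfWriter

# Combine: append whole documents into a single writer.
merged = PdfWriter()
merged.append("report_part1.pdf")   # hypothetical input files
merged.append("report_part2.pdf")
with open("report_combined.pdf", "wb") as fh:
    merged.write(fh)

# Split: copy selected pages into a new document.
reader = PdfReader("report_combined.pdf")
first_page_only = PdfWriter()
first_page_only.add_page(reader.pages[0])
with open("report_page1.pdf", "wb") as fh:
    first_page_only.write(fh)
```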

      -

      Secure and sign PDF documents

      -

      A third feature of Adobe Acrobat 7.0 Professional is that it allows you to secure and sign your PDF documents by applying digital signatures that can be authenticated and also passwords and permissions to control who can open, view, edit, and print your PDF documents. You can use the Security menu or toolbar that lets you encrypt your PDF files with passwords that prevent unauthorized access or modification. You can also use the Advanced menu or toolbar that lets you apply digital signatures that verify your identity and integrity of your PDF files. You can also use the Commenting menu or toolbar that lets you add comments, stamps, or signatures in your PDF files.
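As a rough programmatic counterpart to password protection (digital signatures are a separate topic and are not covered here), the sketch below uses the third-party pypdf library; the file names and password are placeholders, not anything from Acrobat.

```python
# Minimal sketch using the third-party pypdf library (not Acrobat's security dialog):
# copy a document and protect the copy with an open password.
from pypdf import PdfReader, PdfWriter

reader = PdfReader("contract.pdf")          # hypothetical input file
writer = PdfWriter()
for page in reader.pages:
    writer.add_page(page)

writer.encrypt("s3cret-password")           # readers must supply this password to open the file
with open("contract_protected.pdf", "wb") as fh:
    writer.write(fh)
```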

      -

      Convert PDF files to other formats

      -

A fourth feature of Adobe Acrobat 7.0 Professional is that it allows you to convert your PDF files to other formats, such as Word, Excel, PowerPoint, HTML, JPEG, TIFF, etc. You can use the File menu or toolbar that lets you save your PDF files as other formats by choosing the Save As option. You can also use the Export menu or toolbar that lets you export your PDF files as XML data packages that can be used for data analysis or integration with other applications.
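Acrobat handles these conversions through Save As and Export. As a small scripted analogue, the sketch below uses the third-party pypdf library to pull the plain text out of a PDF, which is one very simple form of format conversion; the file names are placeholders, and image-only pages will yield little or no text.

```python
# Minimal sketch using the third-party pypdf library (not Acrobat's Save As dialog):
# extract the plain text of each page and write it to a .txt file.
from pypdf import PdfReader

reader = PdfReader("report.pdf")             # hypothetical input file
with open("report.txt", "w", encoding="utf-8") as out:
    for number, page in enumerate(reader.pages, start=1):
        out.write(f"--- page {number} ---\n")
        out.write(page.extract_text() or "")  # may be empty for image-only pages
        out.write("\n")
```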

      -

      How to download Adobe Acrobat 7.0 Professional for free?

      -

      Download from the official website

      -

      The easiest way to download Adobe Acrobat 7.0 Professional for free is to download it from the official website of Adobe Systems. You can visit this link: https://www.adobe.com/acrobat/free-trial-download.html and click on the Download Now button. You will need to register and fill out a survey at Adobe's website and then wait for an e-mail to arrive with a link to the installer. You will also need a serial number to activate the software after installation. You can find a serial number online by searching for "Adobe Acrobat 7.0 Professional serial number" on Google or any other search engine.

      -

      Download from a third-party website

      -

      Another way to download Adobe Acrobat 7.0 Professional for free is to download it from a third-party website that offers free software downloads. You can visit this link: https://adobe-acrobat-professional.software.informer.com/7.0/ and click on the Download button. You will be redirected to another website where you can download the installer file. You will also need a serial number to activate the software after installation. You can find a serial number online by searching for "Adobe Acrobat 7.0 Professional serial number" on Google or any other search engine.

      -

      Download from a torrent website

      -

      A third way to download Adobe Acrobat 7.0 Professional for free is to download it from a torrent website that offers peer-to-peer file sharing. You can visit this link: https://archive.org/details/AdobeAcrobat7.0Pro_201602 and click on the Torrent button. You will need a torrent client such as BitTorrent or uTorrent to download the installer file. You will also need a serial number to activate the software after installation. You can find a serial number online by searching for "Adobe Acrobat 7.0 Professional serial number" on Google or any other search engine.

      -

      Pros and cons of Adobe Acrobat 7.0 Professional

      -

      Pros

      - -

      Cons

      - -

      Alternatives to Adobe Acrobat 7.0 Professional

      -

      Nitro Pro

      -

      Nitro Pro is another powerful and reliable PDF editor that can compete with Adobe Acrobat 7.0 Professional. It offers similar features such as creating, editing, combining, securing, and converting PDF files. It also has a user-friendly interface that resembles Microsoft Office and has seamless integrations with cloud services and popular applications. Moreover, it offers e-signature workflows and collaboration tools for teams. However, it is also expensive compared to some other alternatives, and it only supports Windows and macOS operating systems. It also has limited support options and requires a VIP or Premium plan for priority access.

      -

      Foxit Reader

      -

      Foxit Reader is a free PDF reader and editor that can handle basic tasks such as viewing, commenting, signing, and printing PDF files. It also has some advanced features such as OCR, form filling, encryption, redaction, and conversion. It has a simple and intuitive interface that allows you to customize your toolbar and preferences. It also has integrations with cloud services and social media platforms. However, it lacks some features that Adobe Acrobat 7.0 Professional has, such as creating PDF files from various sources, editing text and images in PDF files, combining and organizing PDF files, and applying digital signatures.

      -

      Free PDF to Word Converter

      -

      Free PDF to Word Converter is a simple and easy-to-use tool that allows you to convert PDF files to Word documents. It supports batch conversion and preserves the original layout, fonts, images, and other elements of the PDF file. It also has a fast conversion speed and a high-quality output. However, it only supports one conversion direction (PDF to Word) and one output format (DOC). It also does not allow you to edit or secure your PDF files before or after conversion.

      -

      Conclusion

      -

      In conclusion, Adobe Acrobat 7.0 Professional is a software that allows you to create, edit, combine, secure, and convert PDF files. It is still widely used by many users who need to work with PDF documents on a regular basis. However, it is also one of the most expensive options, putting it out of reach for many individuals and small businesses. As a result, a whole host of alternative services with varying features and costs have sprung up. We have reviewed some of the best alternatives to Adobe Acrobat 7.0 Professional, such as Nitro Pro, Foxit Reader, and Free PDF to Word Converter. Each of them has its own pros and cons, and you should choose the one that suits your needs and budget best.

      -

      We hope this article has helped you understand what Adobe Acrobat 7.0 Professional is, what features it offers, how to download it for free, what are its pros and cons, and what are some alternatives to it. If you have any questions or comments, please feel free to leave them below.

      -

      FAQs

      -
        -
1. Is Adobe Acrobat 7.0 Professional still supported by Adobe?

        No, Adobe Acrobat 7.0 Professional is no longer supported by Adobe since June 2015. This means that there are no more updates or security patches for this software. You can still use it at your own risk, but you may encounter compatibility issues or vulnerabilities.

        -
2. Can I use Adobe Acrobat 7.0 Professional on Windows 10?

        Yes, you can use Adobe Acrobat 7.0 Professional on Windows 10, but you may experience some problems or errors. Some users have reported that they had to run the software in compatibility mode or as an administrator to make it work properly.

        -
3. Can I use Adobe Acrobat 7.0 Professional on Mac?

        No, you cannot use Adobe Acrobat 7.0 Professional on Mac. This software is only compatible with Windows operating systems. If you want to use Adobe Acrobat on Mac, you will need to upgrade to a newer version or use an alternative service.

        -
4. How can I get a serial number for Adobe Acrobat 7.0 Professional?

        You can get a serial number for Adobe Acrobat 7.0 Professional by purchasing a license from Adobe or an authorized reseller. You can also find a serial number online by searching for "Adobe Acrobat 7.0 Professional serial number" on Google or any other search engine. However, we do not recommend using illegal or pirated serial numbers as they may not work properly or expose you to legal risks.

        -
5. What are the system requirements for Adobe Acrobat 7.0 Professional?

        The system requirements for Adobe Acrobat 7.0 Professional are as follows:

        - -

        -
        -
        \ No newline at end of file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/dom-events.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/dom-events.d.ts deleted file mode 100644 index b9c1c3aa4f0d337eb151caf6ac77306ed739acb8..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/dom-events.d.ts +++ /dev/null @@ -1,126 +0,0 @@ -export {}; // Don't export anything! - -//// DOM-like Events -// NB: The Event / EventTarget / EventListener implementations below were copied -// from lib.dom.d.ts, then edited to reflect Node's documentation at -// https://nodejs.org/api/events.html#class-eventtarget. -// Please read that link to understand important implementation differences. - -// This conditional type will be the existing global Event in a browser, or -// the copy below in a Node environment. -type __Event = typeof globalThis extends { onmessage: any, Event: any } -? {} -: { - /** This is not used in Node.js and is provided purely for completeness. */ - readonly bubbles: boolean; - /** Alias for event.stopPropagation(). This is not used in Node.js and is provided purely for completeness. */ - cancelBubble: () => void; - /** True if the event was created with the cancelable option */ - readonly cancelable: boolean; - /** This is not used in Node.js and is provided purely for completeness. */ - readonly composed: boolean; - /** Returns an array containing the current EventTarget as the only entry or empty if the event is not being dispatched. This is not used in Node.js and is provided purely for completeness. */ - composedPath(): [EventTarget?] - /** Alias for event.target. */ - readonly currentTarget: EventTarget | null; - /** Is true if cancelable is true and event.preventDefault() has been called. */ - readonly defaultPrevented: boolean; - /** This is not used in Node.js and is provided purely for completeness. */ - readonly eventPhase: 0 | 2; - /** The `AbortSignal` "abort" event is emitted with `isTrusted` set to `true`. The value is `false` in all other cases. */ - readonly isTrusted: boolean; - /** Sets the `defaultPrevented` property to `true` if `cancelable` is `true`. */ - preventDefault(): void; - /** This is not used in Node.js and is provided purely for completeness. */ - returnValue: boolean; - /** Alias for event.target. */ - readonly srcElement: EventTarget | null; - /** Stops the invocation of event listeners after the current one completes. */ - stopImmediatePropagation(): void; - /** This is not used in Node.js and is provided purely for completeness. */ - stopPropagation(): void; - /** The `EventTarget` dispatching the event */ - readonly target: EventTarget | null; - /** The millisecond timestamp when the Event object was created. */ - readonly timeStamp: number; - /** Returns the type of event, e.g. "click", "hashchange", or "submit". */ - readonly type: string; -}; - -// See comment above explaining conditional type -type __EventTarget = typeof globalThis extends { onmessage: any, EventTarget: any } -? {} -: { - /** - * Adds a new handler for the `type` event. Any given `listener` is added only once per `type` and per `capture` option value. - * - * If the `once` option is true, the `listener` is removed after the next time a `type` event is dispatched. - * - * The `capture` option is not used by Node.js in any functional way other than tracking registered event listeners per the `EventTarget` specification. 
- * Specifically, the `capture` option is used as part of the key when registering a `listener`. - * Any individual `listener` may be added once with `capture = false`, and once with `capture = true`. - */ - addEventListener( - type: string, - listener: EventListener | EventListenerObject, - options?: AddEventListenerOptions | boolean, - ): void; - /** Dispatches a synthetic event event to target and returns true if either event's cancelable attribute value is false or its preventDefault() method was not invoked, and false otherwise. */ - dispatchEvent(event: Event): boolean; - /** Removes the event listener in target's event listener list with the same type, callback, and options. */ - removeEventListener( - type: string, - listener: EventListener | EventListenerObject, - options?: EventListenerOptions | boolean, - ): void; -}; - -interface EventInit { - bubbles?: boolean; - cancelable?: boolean; - composed?: boolean; -} - -interface EventListenerOptions { - /** Not directly used by Node.js. Added for API completeness. Default: `false`. */ - capture?: boolean; -} - -interface AddEventListenerOptions extends EventListenerOptions { - /** When `true`, the listener is automatically removed when it is first invoked. Default: `false`. */ - once?: boolean; - /** When `true`, serves as a hint that the listener will not call the `Event` object's `preventDefault()` method. Default: false. */ - passive?: boolean; -} - -interface EventListener { - (evt: Event): void; -} - -interface EventListenerObject { - handleEvent(object: Event): void; -} - -import {} from 'events'; // Make this an ambient declaration -declare global { - /** An event which takes place in the DOM. */ - interface Event extends __Event {} - var Event: typeof globalThis extends { onmessage: any, Event: infer T } - ? T - : { - prototype: __Event; - new (type: string, eventInitDict?: EventInit): __Event; - }; - - /** - * EventTarget is a DOM interface implemented by objects that can - * receive events and may have listeners for them. - */ - interface EventTarget extends __EventTarget {} - var EventTarget: typeof globalThis extends { onmessage: any, EventTarget: infer T } - ? T - : { - prototype: __EventTarget; - new (): __EventTarget; - }; -} diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/module.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/module.d.ts deleted file mode 100644 index d83aec94aae2df96e4028c6fdb09bf61051e5a38..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/module.d.ts +++ /dev/null @@ -1,114 +0,0 @@ -/** - * @since v0.3.7 - */ -declare module 'module' { - import { URL } from 'node:url'; - namespace Module { - /** - * The `module.syncBuiltinESMExports()` method updates all the live bindings for - * builtin `ES Modules` to match the properties of the `CommonJS` exports. It - * does not add or remove exported names from the `ES Modules`. - * - * ```js - * const fs = require('fs'); - * const assert = require('assert'); - * const { syncBuiltinESMExports } = require('module'); - * - * fs.readFile = newAPI; - * - * delete fs.readFileSync; - * - * function newAPI() { - * // ... 
- * } - * - * fs.newAPI = newAPI; - * - * syncBuiltinESMExports(); - * - * import('fs').then((esmFS) => { - * // It syncs the existing readFile property with the new value - * assert.strictEqual(esmFS.readFile, newAPI); - * // readFileSync has been deleted from the required fs - * assert.strictEqual('readFileSync' in fs, false); - * // syncBuiltinESMExports() does not remove readFileSync from esmFS - * assert.strictEqual('readFileSync' in esmFS, true); - * // syncBuiltinESMExports() does not add names - * assert.strictEqual(esmFS.newAPI, undefined); - * }); - * ``` - * @since v12.12.0 - */ - function syncBuiltinESMExports(): void; - /** - * `path` is the resolved path for the file for which a corresponding source map - * should be fetched. - * @since v13.7.0, v12.17.0 - */ - function findSourceMap(path: string, error?: Error): SourceMap; - interface SourceMapPayload { - file: string; - version: number; - sources: string[]; - sourcesContent: string[]; - names: string[]; - mappings: string; - sourceRoot: string; - } - interface SourceMapping { - generatedLine: number; - generatedColumn: number; - originalSource: string; - originalLine: number; - originalColumn: number; - } - /** - * @since v13.7.0, v12.17.0 - */ - class SourceMap { - /** - * Getter for the payload used to construct the `SourceMap` instance. - */ - readonly payload: SourceMapPayload; - constructor(payload: SourceMapPayload); - /** - * Given a line number and column number in the generated source file, returns - * an object representing the position in the original file. The object returned - * consists of the following keys: - */ - findEntry(line: number, column: number): SourceMapping; - } - } - interface Module extends NodeModule {} - class Module { - static runMain(): void; - static wrap(code: string): string; - static createRequire(path: string | URL): NodeRequire; - static builtinModules: string[]; - static Module: typeof Module; - constructor(id: string, parent?: Module); - } - global { - interface ImportMeta { - url: string; - /** - * @experimental - * This feature is only available with the `--experimental-import-meta-resolve` - * command flag enabled. - * - * Provides a module-relative resolution function scoped to each module, returning - * the URL string. - * - * @param specified The module specifier to resolve relative to `parent`. - * @param parent The absolute parent module URL to resolve from. If none - * is specified, the value of `import.meta.url` is used as the default. - */ - resolve?(specified: string, parent?: string | URL): Promise; - } - } - export = Module; -} -declare module 'node:module' { - import module = require('module'); - export = module; -} diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/3design Cad 7 Crack 14.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/3design Cad 7 Crack 14.md deleted file mode 100644 index 9a61d9ba7aa6f602d0498437653d3f6d0d99580b..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/3design Cad 7 Crack 14.md +++ /dev/null @@ -1,6 +0,0 @@ -

3design cad 7 crack 14
Download >>> https://urlgoal.com/2uCKDS
        -
        -... heated end walls and a free liquid surface , PhysicoChem Hydrodyn v 7 n 2-3 ( 1986 ) p ... K : Stress concentration due to a rigid ribbon in the presence of a crack ... S : Elimination of Lagrangian multipliers , Mech Res Commun v 14 n 1 ( 1987 ) ... Product data interfaces in CAD / CAM applications - Design , implementation ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/riccorl/relik-entity-linking/relik/common/upload.py b/spaces/riccorl/relik-entity-linking/relik/common/upload.py deleted file mode 100644 index b2cad77bd95f43992af3144baf296560a496556b..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/common/upload.py +++ /dev/null @@ -1,128 +0,0 @@ -import argparse -import json -import logging -import os -import tempfile -import zipfile -from datetime import datetime -from pathlib import Path -from typing import Optional, Union - -import huggingface_hub - -from relik.common.log import get_logger -from relik.common.utils import SAPIENZANLP_DATE_FORMAT, get_md5 - -logger = get_logger(level=logging.DEBUG) - - -def create_info_file(tmpdir: Path): - logger.debug("Computing md5 of model.zip") - md5 = get_md5(tmpdir / "model.zip") - date = datetime.now().strftime(SAPIENZANLP_DATE_FORMAT) - - logger.debug("Dumping info.json file") - with (tmpdir / "info.json").open("w") as f: - json.dump(dict(md5=md5, upload_date=date), f, indent=2) - - -def zip_run( - dir_path: Union[str, os.PathLike], - tmpdir: Union[str, os.PathLike], - zip_name: str = "model.zip", -) -> Path: - logger.debug(f"zipping {dir_path} to {tmpdir}") - # creates a zip version of the provided dir_path - run_dir = Path(dir_path) - zip_path = tmpdir / zip_name - - with zipfile.ZipFile(zip_path, "w") as zip_file: - # fully zip the run directory maintaining its structure - for file in run_dir.rglob("*.*"): - if file.is_dir(): - continue - - zip_file.write(file, arcname=file.relative_to(run_dir)) - - return zip_path - - -def upload( - model_dir: Union[str, os.PathLike], - model_name: str, - organization: Optional[str] = None, - repo_name: Optional[str] = None, - commit: Optional[str] = None, - archive: bool = False, -): - token = huggingface_hub.HfFolder.get_token() - if token is None: - print( - "No HuggingFace token found. You need to execute `huggingface-cli login` first!" 
- ) - return - - repo_id = repo_name or model_name - if organization is not None: - repo_id = f"{organization}/{repo_id}" - with tempfile.TemporaryDirectory() as tmpdir: - api = huggingface_hub.HfApi() - repo_url = api.create_repo( - token=token, - repo_id=repo_id, - exist_ok=True, - ) - repo = huggingface_hub.Repository( - str(tmpdir), clone_from=repo_url, use_auth_token=token - ) - - tmp_path = Path(tmpdir) - if archive: - # otherwise we zip the model_dir - logger.debug(f"Zipping {model_dir} to {tmp_path}") - zip_run(model_dir, tmp_path) - create_info_file(tmp_path) - else: - # if the user wants to upload a transformers model, we don't need to zip it - # we just need to copy the files to the tmpdir - logger.debug(f"Copying {model_dir} to {tmpdir}") - os.system(f"cp -r {model_dir}/* {tmpdir}") - - # this method automatically puts large files (>10MB) into git lfs - repo.push_to_hub(commit_message=commit or "Automatic push from sapienzanlp") - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument( - "model_dir", help="The directory of the model you want to upload" - ) - parser.add_argument("model_name", help="The model you want to upload") - parser.add_argument( - "--organization", - help="the name of the organization where you want to upload the model", - ) - parser.add_argument( - "--repo_name", - help="Optional name to use when uploading to the HuggingFace repository", - ) - parser.add_argument( - "--commit", help="Commit message to use when pushing to the HuggingFace Hub" - ) - parser.add_argument( - "--archive", - action="store_true", - help=""" - Whether to compress the model directory before uploading it. - If True, the model directory will be zipped and the zip file will be uploaded. - If False, the model directory will be uploaded as is.""", - ) - return parser.parse_args() - - -def main(): - upload(**vars(parse_args())) - - -if __name__ == "__main__": - main() diff --git a/spaces/riffusion/riffusion-playground/app.py b/spaces/riffusion/riffusion-playground/app.py deleted file mode 100644 index 0c78cc894b03e4065e5a2ae9dc8c338cc9b9aea4..0000000000000000000000000000000000000000 --- a/spaces/riffusion/riffusion-playground/app.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Shim layer for using the riffusion playground streamlit app with huggingface spaces. - -It doesn't support the pages feature of streamlit yet. 
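The `zip_run()` and `create_info_file()` helpers above pack a run directory and fingerprint the archive before anything is pushed to the Hub. Below is a minimal, standard-library-only sketch of that pack-and-fingerprint step; `my_model_dir`, `model.zip`, and the ISO timestamp are placeholders rather than part of the original module, which uses its own `get_md5` helper and date format.

```python
# Sketch of the zip-and-fingerprint step (mirrors zip_run + create_info_file above).
import hashlib
import json
import zipfile
from datetime import datetime
from pathlib import Path


def zip_directory(src_dir: Path, zip_path: Path) -> Path:
    """Zip src_dir, keeping paths relative to it inside the archive."""
    with zipfile.ZipFile(zip_path, "w") as zf:
        for file in src_dir.rglob("*"):
            if file.is_file():
                zf.write(file, arcname=file.relative_to(src_dir))
    return zip_path


def write_info(zip_path: Path) -> None:
    """Record the archive's MD5 and a creation timestamp next to it."""
    md5 = hashlib.md5(zip_path.read_bytes()).hexdigest()
    info = {"md5": md5, "upload_date": datetime.now().isoformat()}
    (zip_path.parent / "info.json").write_text(json.dumps(info, indent=2))


# Usage (assumes ./my_model_dir exists):
# archive = zip_directory(Path("my_model_dir"), Path("model.zip"))
# write_info(archive)
```

The actual `upload()` above goes further: it clones the target repo with `huggingface_hub.Repository` and pushes either the archive or the raw files.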
-""" -import importlib -from pathlib import Path -import sys - -import streamlit as st - - -def render_main(): - RIFFUSION_PATH = Path(__file__).parent / "riffusion" - sys.path.append(str(RIFFUSION_PATH)) - - st.set_page_config(layout="wide", page_icon="🎸") - - # Disable the rest of the setting - st.set_page_config = lambda **kwargs: None - - # Find all pages in the riffusion directory - pages = sorted( - p.name[:-3] for p in (RIFFUSION_PATH / "riffusion" / "streamlit" / "pages").glob("*.py") - ) - - # Add the pages to the sidebar - page = st.sidebar.selectbox("Page", pages, index=pages.index("text_to_audio")) - assert page is not None - - module = importlib.import_module(f"riffusion.streamlit.pages.{page}") - render_func = getattr(module, f"render_{page}") - render_func() - - -render_main() diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/bbox_heads/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/bbox_heads/__init__.py deleted file mode 100644 index d1207dbeead6fedc24e6b497fb98558998a14396..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/bbox_heads/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bbox_head import BBoxHead -from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, - Shared4Conv1FCBBoxHead) -from .dii_head import DIIHead -from .double_bbox_head import DoubleConvFCBBoxHead -from .sabl_head import SABLHead -from .scnet_bbox_head import SCNetBBoxHead - -__all__ = [ - 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', - 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead', - 'SCNetBBoxHead' -] diff --git a/spaces/rorallitri/biomedical-language-models/logs/AKB48 Super Festival Nissan Stadium 720p The Best Moments and Songs from the Idol Group.md b/spaces/rorallitri/biomedical-language-models/logs/AKB48 Super Festival Nissan Stadium 720p The Best Moments and Songs from the Idol Group.md deleted file mode 100644 index 9b30e21bc592e92026dcc234afdf3f70360cea82..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/AKB48 Super Festival Nissan Stadium 720p The Best Moments and Songs from the Idol Group.md +++ /dev/null @@ -1,6 +0,0 @@ -

Akb48 Super Festival Nissan Stadium 720p
Download ————— https://tinurll.com/2uzmx0
        - - aaccfb2cb3
        -
        -
        -
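Stepping back to the riffusion playground shim a little earlier: its page dispatch amounts to importing a module by name and calling its `render_<page>()` entry point. A small sketch of that pattern follows; the module and function names are fabricated (an in-memory module is registered so the snippet runs without the real package on disk).

```python
import importlib
import sys
import types

# Fabricate a tiny "page" module so the dispatch pattern runs without real files.
demo = types.ModuleType("page_text_to_audio")


def render_text_to_audio() -> None:
    print("rendering the text_to_audio page")


demo.render_text_to_audio = render_text_to_audio
sys.modules["page_text_to_audio"] = demo


def render_page(page: str) -> None:
    # Same shape as the shim above: import by name, look up render_<page>, call it.
    module = importlib.import_module(f"page_{page}")
    render_func = getattr(module, f"render_{page}")
    render_func()


render_page("text_to_audio")  # prints: rendering the text_to_audio page
```

In the real shim, the candidate page names come from globbing `pages/*.py`, and the selected name is fed to `importlib.import_module` in exactly this way.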

        diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/gui_utils/text_utils.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/gui_utils/text_utils.py deleted file mode 100644 index 35e5e4a16dc62c4be80df5432208bce5d386bf16..0000000000000000000000000000000000000000 --- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/gui_utils/text_utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import functools -from typing import Optional - -import dnnlib -import numpy as np -import PIL.Image -import PIL.ImageFont -import scipy.ndimage - -from . import gl_utils - -#---------------------------------------------------------------------------- - -def get_default_font(): - url = 'http://fonts.gstatic.com/s/opensans/v17/mem8YaGs126MiZpBA-U1UpcaXcl0Aw.ttf' # Open Sans regular - return dnnlib.util.open_url(url, return_filename=True) - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=None) -def get_pil_font(font=None, size=32): - if font is None: - font = get_default_font() - return PIL.ImageFont.truetype(font=font, size=size) - -#---------------------------------------------------------------------------- - -def get_array(string, *, dropshadow_radius: int=None, **kwargs): - if dropshadow_radius is not None: - offset_x = int(np.ceil(dropshadow_radius*2/3)) - offset_y = int(np.ceil(dropshadow_radius*2/3)) - return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) - else: - return _get_array_priv(string, **kwargs) - -@functools.lru_cache(maxsize=10000) -def _get_array_priv( - string: str, *, - size: int = 32, - max_width: Optional[int]=None, - max_height: Optional[int]=None, - min_size=10, - shrink_coef=0.8, - dropshadow_radius: int=None, - offset_x: int=None, - offset_y: int=None, - **kwargs -): - cur_size = size - array = None - while True: - if dropshadow_radius is not None: - # separate implementation for dropshadow text rendering - array = _get_array_impl_dropshadow(string, size=cur_size, radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) - else: - array = _get_array_impl(string, size=cur_size, **kwargs) - height, width, _ = array.shape - if (max_width is None or width <= max_width) and (max_height is None or height <= max_height) or (cur_size <= min_size): - break - cur_size = max(int(cur_size * shrink_coef), min_size) - return array - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=10000) -def _get_array_impl(string, *, font=None, size=32, outline=0, outline_pad=3, outline_coef=3, outline_exp=2, line_pad: int=None): - pil_font = get_pil_font(font=font, size=size) - lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] - lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines] - width = max(line.shape[1] for line in lines) - lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines] - line_spacing = line_pad if line_pad is not None else size // 2 - 
lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:] - mask = np.concatenate(lines, axis=0) - alpha = mask - if outline > 0: - mask = np.pad(mask, int(np.ceil(outline * outline_pad)), mode='constant', constant_values=0) - alpha = mask.astype(np.float32) / 255 - alpha = scipy.ndimage.gaussian_filter(alpha, outline) - alpha = 1 - np.maximum(1 - alpha * outline_coef, 0) ** outline_exp - alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) - alpha = np.maximum(alpha, mask) - return np.stack([mask, alpha], axis=-1) - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=10000) -def _get_array_impl_dropshadow(string, *, font=None, size=32, radius: int, offset_x: int, offset_y: int, line_pad: int=None, **kwargs): - assert (offset_x > 0) and (offset_y > 0) - pil_font = get_pil_font(font=font, size=size) - lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] - lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines] - width = max(line.shape[1] for line in lines) - lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines] - line_spacing = line_pad if line_pad is not None else size // 2 - lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:] - mask = np.concatenate(lines, axis=0) - alpha = mask - - mask = np.pad(mask, 2*radius + max(abs(offset_x), abs(offset_y)), mode='constant', constant_values=0) - alpha = mask.astype(np.float32) / 255 - alpha = scipy.ndimage.gaussian_filter(alpha, radius) - alpha = 1 - np.maximum(1 - alpha * 1.5, 0) ** 1.4 - alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) - alpha = np.pad(alpha, [(offset_y, 0), (offset_x, 0)], mode='constant')[:-offset_y, :-offset_x] - alpha = np.maximum(alpha, mask) - return np.stack([mask, alpha], axis=-1) - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=10000) -def get_texture(string, bilinear=True, mipmap=True, **kwargs): - return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap) - -#---------------------------------------------------------------------------- diff --git a/spaces/roughhai/myGenAIChatBot/README.md b/spaces/roughhai/myGenAIChatBot/README.md deleted file mode 100644 index 9a2c89ec3f1c54f4da343f8fdf5844f6ff8a5229..0000000000000000000000000000000000000000 --- a/spaces/roughhai/myGenAIChatBot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyGenAIChatBot -emoji: 💻 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/roveliu/ChatGPT4/app.py b/spaces/roveliu/ChatGPT4/app.py deleted file mode 100644 index 0a13d54502c3892a1b6d648218994a2fcd98fd0a..0000000000000000000000000000000000000000 --- a/spaces/roveliu/ChatGPT4/app.py +++ /dev/null @@ -1,141 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Testing with my Open AI Key -OPENAI_API_KEY = os.getenv("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") - -def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - payload = { - "model": "gpt-4", - "messages": [{"role": "user", 
"content": f"{inputs}"}], - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - - print(f"chat_counter - {chat_counter}") - if chat_counter != 0 : - messages=[] - for data in chatbot: - temp1 = {} - temp1["role"] = "user" - temp1["content"] = data[0] - temp2 = {} - temp2["role"] = "assistant" - temp2["content"] = data[1] - messages.append(temp1) - messages.append(temp2) - temp3 = {} - temp3["role"] = "user" - temp3["content"] = inputs - messages.append(temp3) - #messages - payload = { - "model": "gpt-4", - "messages": messages, #[{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - chat_counter+=1 - - history.append(inputs) - print(f"payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - #counter+=1 - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0: - # break - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - - -def reset_textbox(): - return gr.update(value='') - -title = """

        🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

        """ -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: -``` -User: -Assistant: -User: -Assistant: -... -``` -In this app, you can explore the outputs of a gpt-4 LLM. -""" - -theme = gr.themes.Default(primary_hue="green") - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

        🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌

        """) - gr.HTML('''
        Duplicate SpaceDuplicate the Space and run securely with your OpenAI API Key
        ''') - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here") - chatbot = gr.Chatbot(elem_id='chatbot') #c - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t - state = gr.State([]) #s - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #gr.Markdown(description) - demo.queue(max_size=20, concurrency_count=10).launch(debug=True) diff --git a/spaces/scedlatioru/img-to-music/40-Carats-1973torrent.md b/spaces/scedlatioru/img-to-music/40-Carats-1973torrent.md deleted file mode 100644 index 90a427c209736c6fddbda379d2341fc8f336c9aa..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/40-Carats-1973torrent.md +++ /dev/null @@ -1,66 +0,0 @@ -## 40 Carats 1973torrent - - - - - - ![40 Carats 1973torrent](https://s6.dpic.me/01893/rujndhvbchb6.jpg) - - - - - -**40 Carats 1973torrent - [https://dropnobece.blogspot.com/?download=2tyq4D](https://dropnobece.blogspot.com/?download=2tyq4D)** - - - - - - - - - - - - - -# 40 Carats: A Romantic Comedy from 1973 - - - -Have you ever wondered what it would be like to have a summer fling with a much younger man? That's the premise of *40 Carats*, a 1973 film based on a French play by Pierre Barillet and Jean-Pierre Grédy. The film stars Liv Ullmann as Ann Stanley, a 40-year-old divorced real estate agent who meets Peter Latham (Edward Albert), a 22-year-old free spirit, while vacationing in Greece. They have a passionate affair, but Ann assumes they will never see each other again when she returns to New York. However, fate has other plans, as Peter shows up at her doorstep as a date for her daughter Trina (Deborah Raffin). - - - -The film explores the challenges and joys of an unconventional relationship, as well as the reactions of Ann's family and friends. Her mother Maud (Binnie Barnes) is supportive and amused by the situation, while her ex-husband Billy (Gene Kelly) is jealous and skeptical. Her business partner Margie (Nancy Walker) is shocked and curious, while her clients are intrigued and impressed. Peter, meanwhile, is determined to prove his love and maturity to Ann, who is conflicted by her feelings and fears. 
- - - -*40 Carats* is a charming and funny film that showcases the chemistry and charisma of its leads. Ullmann, best known for her dramatic roles in Ingmar Bergman's films, proves her comedic skills and versatility as Ann. Albert, son of actor Eddie Albert, gives a charming and earnest performance as Peter. Kelly, in one of his last film roles, adds his signature charm and grace as Billy. The film also features a catchy soundtrack by Michel Legrand, who composed the theme song "In Every Corner of the World". - - - -If you are looking for a romantic comedy that is both timeless and timely, you might want to check out *40 Carats*. You can find it on various streaming platforms or download it from torrent sites. However, please be aware that downloading copyrighted content may be illegal in your country. - - - -The film was directed by Milton Katselas, who also directed *Butterflies Are Free* and *When You Comin' Back, Red Ryder?*. The screenplay was adapted by Leonard Gershe and Jay Presson Allen from the stage play by Barillet and Grédy. The play was a hit in France and on Broadway, where it starred Julie Harris and Richard Mulligan. The film was also a success at the box office and received two Golden Globe nominations: one for Best Motion Picture - Musical or Comedy and one for Best Actress - Motion Picture Musical or Comedy for Ullmann. - - - -*40 Carats* is a film that celebrates love in all its forms and ages. It shows that age is just a number and that happiness can be found in unexpected places. It also challenges the stereotypes and prejudices that society has about older women and younger men. It is a film that is both funny and touching, and that will make you smile and think. - - - -The film has many memorable scenes and dialogues that showcase the humor and romance of the story. For example, when Ann and Peter first meet in Greece, he asks her if she is married. She replies: "No, I'm divorced. I was married to a child. He was 39." Later, when Peter visits Ann in New York, he surprises her with a gift: a necklace with 40 carats of diamonds. He says: "I know it's a little extravagant, but I wanted to give you something that would last as long as my love for you." Ann is speechless and touched by his gesture. - - - -The film also received positive reviews from critics and audiences alike. Roger Ebert of the Chicago Sun-Times gave it three stars out of four and wrote: "The movie works and is entertaining because it really gets into the characters and lets them breathe a little." Vincent Canby of The New York Times praised Ullmann's performance and said: "She is so good that she almost makes one forget how silly the material is." The film also has a rating of 6.3 out of 10 on IMDb and a 67% audience score on Rotten Tomatoes. - - dfd1c89656 - - - - - diff --git a/spaces/scedlatioru/img-to-music/example/G-Tab G100M Flash File All Versone Mtk Spd Firmware.md b/spaces/scedlatioru/img-to-music/example/G-Tab G100M Flash File All Versone Mtk Spd Firmware.md deleted file mode 100644 index 70061ddd9bc084cc1ef08f8c3f9bdc337fcc1396..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/G-Tab G100M Flash File All Versone Mtk Spd Firmware.md +++ /dev/null @@ -1,12 +0,0 @@ -

G-Tab G100M Flash File All Versone Mtk Spd Firmware
Download Zip https://gohhs.com/2uEzWg
        -
        -G-Tab G100M Flash File All Versone Mtk Spd Firmware - -. G-Tab G100M Flash File All Versone Mtk Spd Firmware. The file is about 2.07 mb. Download the android file attached to this post to your SONY Android device. This package requires rooting the device to work. - -More About G-Tab G100M Flash File All Versone Mtk Spd Firmware - -This file was originally posted on this website. It is tagged as: Flash File, G-Tab G100M Flash File All Versone Mtk Spd Firmware. 4fefd39f24
        -
        -
        -
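Returning to the GPT-4 Gradio demo shown before the two markdown files above: its streaming loop walks `response.iter_lines()` and accumulates the `delta.content` fragments of each `data: {...}` chunk. Here is a small offline sketch of that parsing; the sample lines are fabricated to imitate a streaming chat-completions response, not captured from a real API call.

```python
import json

# Fabricated stand-ins for the byte lines yielded by response.iter_lines().
sample_lines = [
    b'data: {"choices": [{"delta": {"role": "assistant"}}]}',
    b'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    b'data: {"choices": [{"delta": {"content": ", world!"}}]}',
    b"data: [DONE]",
]

partial = ""
for raw in sample_lines:
    line = raw.decode()
    if not line.startswith("data: ") or line == "data: [DONE]":
        continue
    delta = json.loads(line[len("data: "):])["choices"][0]["delta"]
    if "content" in delta:  # the first chunk usually carries only the role
        partial += delta["content"]

print(partial)  # -> Hello, world!
```

The demo above does the same thing inline, yielding the growing string back to the Gradio chatbot after every chunk.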

        diff --git a/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/utils/image_saver.py b/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/utils/image_saver.py deleted file mode 100644 index c3edfa96e60fea0e5ec8fd087da85d2efaa6444c..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/utils/image_saver.py +++ /dev/null @@ -1,230 +0,0 @@ -import cv2 -import numpy as np - -import torch -from collections import defaultdict - - -def tensor_to_numpy(image): - image_np = (image.numpy() * 255).astype('uint8') - return image_np - - -def tensor_to_np_float(image): - image_np = image.numpy().astype('float32') - return image_np - - -def detach_to_cpu(x): - return x.detach().cpu() - - -def transpose_np(x): - return np.transpose(x, [1, 2, 0]) - - -def tensor_to_gray_im(x): - x = detach_to_cpu(x) - x = tensor_to_numpy(x) - x = transpose_np(x) - return x - - -def tensor_to_im(x): - x = detach_to_cpu(x).clamp(0, 1) - x = tensor_to_numpy(x) - x = transpose_np(x) - return x - - -# Predefined key <-> caption dict -key_captions = { - 'im': 'Image', - 'gt': 'GT', -} -""" -Return an image array with captions -keys in dictionary will be used as caption if not provided -values should contain lists of cv2 images -""" - - -def get_image_array(images, grid_shape, captions={}): - h, w = grid_shape - cate_counts = len(images) - rows_counts = len(next(iter(images.values()))) - - font = cv2.FONT_HERSHEY_SIMPLEX - - output_image = np.zeros([w * cate_counts, h * (rows_counts + 1), 3], dtype=np.uint8) - col_cnt = 0 - for k, v in images.items(): - - # Default as key value itself - caption = captions.get(k, k) - - # Handles new line character - dy = 40 - for i, line in enumerate(caption.split('\n')): - cv2.putText(output_image, line, (10, col_cnt * w + 100 + i * dy), font, 0.8, - (255, 255, 255), 2, cv2.LINE_AA) - - # Put images - for row_cnt, img in enumerate(v): - im_shape = img.shape - if len(im_shape) == 2: - img = img[..., np.newaxis] - - img = (img * 255).astype('uint8') - - output_image[(col_cnt + 0) * w:(col_cnt + 1) * w, - (row_cnt + 1) * h:(row_cnt + 2) * h, :] = img - - col_cnt += 1 - - return output_image - - -def base_transform(im, size): - im = tensor_to_np_float(im) - if len(im.shape) == 3: - im = im.transpose((1, 2, 0)) - else: - im = im[:, :, None] - - # Resize - if im.shape[1] != size: - im = cv2.resize(im, size, interpolation=cv2.INTER_NEAREST) - - return im.clip(0, 1) - - -def im_transform(im, size): - return base_transform(detach_to_cpu(im), size=size) - - -def mask_transform(mask, size): - return base_transform(detach_to_cpu(mask), size=size) - - -def logits_transform(mask, size): - return base_transform(detach_to_cpu(torch.sigmoid(mask)), size=size) - - -def add_attention(mask, pos): - mask = mask[:, :, None].repeat(3, axis=2) - pos = (pos + 1) / 2 - for i in range(pos.shape[0]): - y = int(pos[i][0] * mask.shape[0]) - x = int(pos[i][1] * mask.shape[1]) - y = max(min(y, mask.shape[0] - 1), 0) - x = max(min(x, mask.shape[1] - 1), 0) - # mask[y, x, :] = (255, 0, 0) - cv2.circle(mask, (x, y), 5, (1, 0, 0), -1) - return mask - - -def vis(images, size, num_objects): - req_images = defaultdict(list) - - b, t = images['rgb'].shape[:2] - - # limit the number of images saved - b = min(2, b) - - # find max num objects - max_num_objects = max(num_objects[:b]) - - GT_suffix = '' - for bi in range(b): - GT_suffix += ' \n%s' % images['info']['name'][bi][-25:-4] - - for bi in range(b): - for ti in range(t): - 
req_images['RGB'].append(im_transform(images['rgb'][bi, ti], size)) - aux = images[f'aux_{max(ti, 1)}'] # no aux_0, use aux_1 for shape - if 'sensory_logits' in aux: - sensory_aux = aux['sensory_logits'][bi].softmax(dim=0) - # batch_size * num_objects * num_levels * H * W - q_mask_aux = aux['q_logits'][bi].softmax(dim=0) - num_levels = q_mask_aux.shape[1] - - for oi in range(max_num_objects): - if ti == 0 or oi >= num_objects[bi]: - req_images[f'Mask_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - req_images[f'S-Aux_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - for l in range(num_levels): - req_images[f'Q-Aux-L{l}_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - else: - mask = mask_transform(images[f'masks_{ti}'][bi][oi], size) - req_images[f'Mask_{oi}'].append(mask) - if 'sensory_logits' in aux: - req_images[f'S-Aux_{oi}'].append(mask_transform(sensory_aux[oi + 1], size)) - - for l in range(num_levels): - mask = mask_transform(q_mask_aux[oi + 1, l], size) - req_images[f'Q-Aux-L{l}_{oi}'].append(mask) - - req_images[f'GT_{oi}_{GT_suffix}'].append( - mask_transform(images['cls_gt'][bi, ti, 0] == (oi + 1), size)) - - return get_image_array(req_images, size, key_captions) - - -def vis_debug(images, size, num_objects): - req_images = defaultdict(list) - - b, t = images['rgb'].shape[:2] - - # limit the number of images saved - b = min(2, b) - - # find max num objects - max_num_objects = max(num_objects[:b]) - - GT_suffix = '' - for bi in range(b): - GT_suffix += ' \n%s' % images['info']['name'][bi][-25:-4] - - for bi in range(b): - for ti in range(t): - req_images['RGB'].append(im_transform(images['rgb'][bi, ti], size)) - aux = images[f'aux_{max(ti, 1)}'] # no aux_0, use aux_1 for shape - sensory_aux = aux['sensory_logits'][bi].softmax(dim=0) - # batch_size * num_objects * num_levels * H * W - q_mask_aux = aux['q_logits'][bi].softmax(dim=0) - attn_mask = aux['attn_mask'][bi] - num_levels = q_mask_aux.shape[1] - num_queries = attn_mask.shape[1] - - for oi in range(max_num_objects): - if ti == 0 or oi >= num_objects[bi]: - req_images[f'Mask_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - req_images[f'S-Aux_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - for l in range(num_levels): - req_images[f'Q-Aux-L{l}_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - for q in range(num_queries): - req_images[f'Attn-Mask-Q{q}_{oi}'].append( - mask_transform(images['first_frame_gt'][bi][0, oi], size)) - else: - mask = mask_transform(images[f'masks_{ti}'][bi][oi], size) - req_images[f'Mask_{oi}'].append(mask) - req_images[f'S-Aux_{oi}'].append(mask_transform(sensory_aux[oi + 1], size)) - - for l in range(num_levels): - mask = mask_transform(q_mask_aux[oi + 1, l], size) - req_images[f'Q-Aux-L{l}_{oi}'].append(mask) - for q in range(num_queries): - mask = mask_transform(1 - attn_mask[oi, q].float(), size) - req_images[f'Attn-Mask-Q{q}_{oi}'].append(mask) - - req_images[f'GT_{oi}_{GT_suffix}'].append( - mask_transform(images['cls_gt'][bi, ti, 0] == (oi + 1), size)) - - return get_image_array(req_images, size, key_captions) \ No newline at end of file diff --git a/spaces/sdeeas/ChuanhuChatGPT/modules/utils.py b/spaces/sdeeas/ChuanhuChatGPT/modules/utils.py deleted file mode 100644 index a025a80d7b52f3ae788be960c17520d44bf56e49..0000000000000000000000000000000000000000 --- a/spaces/sdeeas/ChuanhuChatGPT/modules/utils.py +++ /dev/null @@ 
-1,592 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re -import html -import sys -import subprocess - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter -import pandas as pd - -from modules.presets import * -from . import shared -from modules.config import retrieve_proxy, hide_history_when_not_logged_in - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - -def predict(current_model, *args): - iter = current_model.predict(*args) - for i in iter: - yield i - -def billing_info(current_model): - return current_model.billing_info() - -def set_key(current_model, *args): - return current_model.set_key(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def interrupt(current_model, *args): - return current_model.interrupt(*args) - -def reset(current_model, *args): - return current_model.reset(*args) - -def retry(current_model, *args): - iter = current_model.retry(*args) - for i in iter: - yield i - -def delete_first_conversation(current_model, *args): - return current_model.delete_first_conversation(*args) - -def delete_last_conversation(current_model, *args): - return current_model.delete_last_conversation(*args) - -def set_system_prompt(current_model, *args): - return current_model.set_system_prompt(*args) - -def save_chat_history(current_model, *args): - return current_model.save_chat_history(*args) - -def export_markdown(current_model, *args): - return current_model.export_markdown(*args) - -def load_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def upload_chat_history(current_model, *args): - return current_model.load_chat_history(*args) - -def set_token_upper_limit(current_model, *args): - return current_model.set_token_upper_limit(*args) - -def set_temperature(current_model, *args): - current_model.set_temperature(*args) - -def set_top_p(current_model, *args): - current_model.set_top_p(*args) - -def set_n_choices(current_model, *args): - current_model.set_n_choices(*args) - -def set_stop_sequence(current_model, *args): - current_model.set_stop_sequence(*args) - -def set_max_tokens(current_model, *args): - current_model.set_max_tokens(*args) - -def set_presence_penalty(current_model, *args): - current_model.set_presence_penalty(*args) - -def set_frequency_penalty(current_model, *args): - current_model.set_frequency_penalty(*args) - -def set_logit_bias(current_model, *args): - current_model.set_logit_bias(*args) - -def set_user_identifier(current_model, *args): - current_model.set_user_identifier(*args) - -def set_single_turn(current_model, *args): - current_model.set_single_turn(*args) - -def handle_file_upload(current_model, *args): - return current_model.handle_file_upload(*args) - -def like(current_model, *args): - return current_model.like(*args) - -def dislike(current_model, *args): - return current_model.dislike(*args) - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = 
len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
        {highlighted_code}
        ' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - raw = f'
        {html.escape(md_text)}
        ' - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - result.append(markdown(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - output = f'
        {result}
        ' - output += raw - output += ALREADY_CONVERTED_MARK - return output - - -def convert_asis(userinput): - return ( - f'

        {html.escape(userinput)}

        ' - + ALREADY_CONVERTED_MARK - ) - - -def detect_converted_mark(userinput): - try: - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - except: - return True - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def save_file(filename, system, history, chatbot, user_name): - logging.debug(f"{user_name} 保存对话历史中……") - os.makedirs(os.path.join(HISTORY_DIR, user_name), exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - if "/" in filename or "\\" in filename: - history_file_path = filename - else: - history_file_path = os.path.join(HISTORY_DIR, user_name, filename) - with open(history_file_path, "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, user_name, filename), "w", encoding="utf8") as f: - f.write(md_s) - logging.debug(f"{user_name} 保存对话历史完毕") - return os.path.join(HISTORY_DIR, user_name, filename) - - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.debug(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - logging.debug(f"files are:{files}") - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False, user_name=""): - logging.debug(f"从用户 {user_name} 中获取历史记录文件名列表") - if user_name == "" and hide_history_when_not_logged_in: - return "" - else: - return get_file_names(os.path.join(HISTORY_DIR, user_name), plain) - - -def load_template(filename, mode=0): - logging.debug(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices - ) - - -def get_template_names(plain=False): - logging.debug("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.debug(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return 
templates[selection] - except: - return original_system_prompt - - -def reset_textbox(): - logging.debug("重置文本框") - return gr.update(value="") - - -def reset_default(): - default_host = shared.state.reset_api_host() - retrieve_proxy("") - return gr.update(value=default_host), gr.update(value=""), "API-Host 和代理已重置" - - -def change_api_host(host): - shared.state.set_api_host(host) - msg = f"API-Host更改为了{host}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - retrieve_proxy(proxy) - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if s is None: - return "" - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - try: - with retrieve_proxy(): - response = requests.get("https://ipapi.co/json/", timeout=5) - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - i18n("您的IP区域:未知。") - ) - else: - return i18n("获取IP地理位置失败。原因:") + f"{data['reason']}" + i18n("。你仍然可以使用聊天功能。") - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = i18n("您的IP区域:") + f"{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): - n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i - 1 - total = total - lst[i] - return 1 - - -def start_outputing(): - logging.debug("显示取消按钮,隐藏发送按钮") - return gr.Button.update(visible=False), gr.Button.update(visible=True) - - -def end_outputing(): - return ( - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def cancel_outputing(): - logging.info("中止输出……") - shared.state.interrupt() - - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - outputing = start_outputing() - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=False), - gr.Button.update(visible=True), - ) - - - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. - Command: {command} - Error code: {result.returncode}""") - - return "" - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - message = f"""{errdesc or 'Error running command'}. 
- Command: {command} - Error code: {result.returncode} - stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} - stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} - """ - raise RuntimeError(message) - return result.stdout.decode(encoding="utf8", errors="ignore") - -def versions_html(): - git = os.environ.get('GIT', "git") - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - try: - commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit_hash = "" - if commit_hash != "": - short_commit = commit_hash[0:7] - commit_info = f"{short_commit}" - else: - commit_info = "unknown \U0001F615" - return f""" - Python: {python_version} -  •  - Gradio: {gr.__version__} -  •  - ChuanhuChat: {commit_info} - """ - -def add_source_numbers(lst, source_name = "Source", use_source = True): - if use_source: - return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)] - else: - return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)] - -def add_details(lst): - nodes = [] - for index, txt in enumerate(lst): - brief = txt[:25].replace("\n", "") - nodes.append( - f"
        {brief}...

        {txt}

        " - ) - return nodes - - -def sheet_to_string(sheet, sheet_name = None): - result = [] - for index, row in sheet.iterrows(): - row_string = "" - for column in sheet.columns: - row_string += f"{column}: {row[column]}, " - row_string = row_string.rstrip(", ") - row_string += "." - result.append(row_string) - return result - -def excel_to_string(file_path): - # 读取Excel文件中的所有工作表 - excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None) - - # 初始化结果字符串 - result = [] - - # 遍历每一个工作表 - for sheet_name, sheet_data in excel_file.items(): - - # 处理当前工作表并添加到结果字符串 - result += sheet_to_string(sheet_data, sheet_name=sheet_name) - - - return result - -def get_last_day_of_month(any_day): - # The day 28 exists in every month. 4 days later, it's always next month - next_month = any_day.replace(day=28) + datetime.timedelta(days=4) - # subtracting the number of the current day brings us back one month - return next_month - datetime.timedelta(days=next_month.day) - -def get_model_source(model_name, alternative_source): - if model_name == "gpt2-medium": - return "https://huggingface.co/gpt2-medium" - -def refresh_ui_elements_on_load(current_model, selected_model_name, user_name): - current_model.set_user_identifier(user_name) - return toggle_like_btn_visibility(selected_model_name), *current_model.auto_load() - -def toggle_like_btn_visibility(selected_model_name): - if selected_model_name == "xmchat": - return gr.update(visible=True) - else: - return gr.update(visible=False) - -def new_auto_history_filename(dirname): - latest_file = get_latest_filepath(dirname) - if latest_file: - with open(os.path.join(dirname, latest_file), 'r') as f: - if len(f.read()) == 0: - return latest_file - now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') - return f'{now}.json' - -def get_latest_filepath(dirname): - pattern = re.compile(r'\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}') - latest_time = None - latest_file = None - for filename in os.listdir(dirname): - if os.path.isfile(os.path.join(dirname, filename)): - match = pattern.search(filename) - if match and match.group(0) == filename[:19]: - time_str = filename[:19] - filetime = datetime.datetime.strptime(time_str, '%Y-%m-%d_%H-%M-%S') - if not latest_time or filetime > latest_time: - latest_time = filetime - latest_file = filename - return latest_file - -def get_history_filepath(username): - dirname = os.path.join(HISTORY_DIR, username) - os.makedirs(dirname, exist_ok=True) - latest_file = get_latest_filepath(dirname) - if not latest_file: - latest_file = new_auto_history_filename(dirname) - - latest_file = os.path.join(dirname, latest_file) - return latest_file diff --git a/spaces/sdhsdhk/bingo111/src/components/chat-scroll-anchor.tsx b/spaces/sdhsdhk/bingo111/src/components/chat-scroll-anchor.tsx deleted file mode 100644 index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingo111/src/components/chat-scroll-anchor.tsx +++ /dev/null @@ -1,29 +0,0 @@ -'use client' - -import * as React from 'react' -import { useInView } from 'react-intersection-observer' - -import { useAtBottom } from '@/lib/hooks/use-at-bottom' - -interface ChatScrollAnchorProps { - trackVisibility?: boolean -} - -export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) { - const isAtBottom = useAtBottom() - const { ref, entry, inView } = useInView({ - trackVisibility, - delay: 100, - rootMargin: '0px 0px -150px 0px' - }) - - React.useEffect(() => { - if (isAtBottom && trackVisibility && !inView) { - 
entry?.target.scrollIntoView({ - block: 'start' - }) - } - }, [inView, entry, isAtBottom, trackVisibility]) - - return
        -} diff --git a/spaces/segments-tobias/conex/espnet2/asr/__init__.py b/spaces/segments-tobias/conex/espnet2/asr/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/shi-labs/FcF-Inpainting/torch_utils/ops/upfirdn2d.py b/spaces/shi-labs/FcF-Inpainting/torch_utils/ops/upfirdn2d.py deleted file mode 100644 index ceeac2b9834e33b7c601c28bf27f32aa91c69256..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/FcF-Inpainting/torch_utils/ops/upfirdn2d.py +++ /dev/null @@ -1,384 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient resampling of 2D images.""" - -import os -import warnings -import numpy as np -import torch -import traceback - -from .. import custom_ops -from .. import misc -from . import conv2d_gradfix - -#---------------------------------------------------------------------------- - -_inited = False -_plugin = None - -def _init(): - global _inited, _plugin - if not _inited: - sources = ['upfirdn2d.cpp', 'upfirdn2d.cu'] - sources = [os.path.join(os.path.dirname(__file__), s) for s in sources] - try: - _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math']) - except: - warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc()) - return _plugin is not None - -def _parse_scaling(scaling): - if isinstance(scaling, int): - scaling = [scaling, scaling] - assert isinstance(scaling, (list, tuple)) - assert all(isinstance(x, int) for x in scaling) - sx, sy = scaling - assert sx >= 1 and sy >= 1 - return sx, sy - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, int) for x in padding) - if len(padding) == 2: - padx, pady = padding - padding = [padx, padx, pady, pady] - padx0, padx1, pady0, pady1 = padding - return padx0, padx1, pady0, pady1 - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - fw = f.shape[-1] - fh = f.shape[0] - with misc.suppress_tracer_warnings(): - fw = int(fw) - fh = int(fh) - misc.assert_shape(f, [fh, fw][:f.ndim]) - assert fw >= 1 and fh >= 1 - return fw, fh - -#---------------------------------------------------------------------------- - -def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): - r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. - - Args: - f: Torch tensor, numpy array, or python list of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), - `[]` (impulse), or - `None` (identity). - device: Result device (default: cpu). - normalize: Normalize the filter so that it retains the magnitude - for constant input signal (DC)? (default: True). - flip_filter: Flip the filter? (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - separable: Return a separable filter? 
(default: select automatically). - - Returns: - Float32 tensor of the shape - `[filter_height, filter_width]` (non-separable) or - `[filter_taps]` (separable). - """ - # Validate. - if f is None: - f = 1 - f = torch.as_tensor(f, dtype=torch.float32) - assert f.ndim in [0, 1, 2] - assert f.numel() > 0 - if f.ndim == 0: - f = f[np.newaxis] - - # Separable? - if separable is None: - separable = (f.ndim == 1 and f.numel() >= 8) - if f.ndim == 1 and not separable: - f = f.ger(f) - assert f.ndim == (1 if separable else 2) - - # Apply normalize, flip, gain, and device. - if normalize: - f /= f.sum() - if flip_filter: - f = f.flip(list(range(f.ndim))) - f = f * (gain ** (f.ndim / 2)) - f = f.to(device=device) - return f - -#---------------------------------------------------------------------------- - -def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Pad, upsample, filter, and downsample a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 2. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 4. Downsample the image by keeping every Nth pixel (`down`). - - This sequence of operations bears close resemblance to scipy.signal.upfirdn(). - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) - return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): - """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. - """ - # Validate arguments. 
- assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - assert f.dtype == torch.float32 and not f.requires_grad - batch_size, num_channels, in_height, in_width = x.shape - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Upsample by inserting zeros. - x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) - x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) - x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) - - # Pad or crop. - x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) - x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] - - # Setup filter. - f = f * (gain ** (f.ndim / 2)) - f = f.to(x.dtype) - if not flip_filter: - f = f.flip(list(range(f.ndim))) - - # Convolve with the filter. - f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) - if f.ndim == 4: - x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) - else: - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) - - # Downsample by throwing away pixels. - x = x[:, :, ::downy, ::downx] - return x - -#---------------------------------------------------------------------------- - -_upfirdn2d_cuda_cache = dict() - -def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): - """Fast CUDA implementation of `upfirdn2d()` using custom ops. - """ - # Parse arguments. - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Lookup from cache. - key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - if key in _upfirdn2d_cuda_cache: - return _upfirdn2d_cuda_cache[key] - - # Forward op. - class Upfirdn2dCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, f): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - y = x - if f.ndim == 2: - y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - else: - y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain)) - y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain)) - ctx.save_for_backward(f) - ctx.x_shape = x.shape - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - f, = ctx.saved_tensors - _, _, ih, iw = ctx.x_shape - _, _, oh, ow = dy.shape - fw, fh = _get_filter_size(f) - p = [ - fw - padx0 - 1, - iw * upx - ow * downx + padx0 - upx + 1, - fh - pady0 - 1, - ih * upy - oh * downy + pady0 - upy + 1, - ] - dx = None - df = None - - if ctx.needs_input_grad[0]: - dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) - - assert not ctx.needs_input_grad[1] - return dx, df - - # Add to cache. 
- _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda - return Upfirdn2dCuda - -#---------------------------------------------------------------------------- - -def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Filter a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape matches the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + fw // 2, - padx1 + (fw - 1) // 2, - pady0 + fh // 2, - pady1 + (fh - 1) // 2, - ] - return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Upsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a multiple of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - upx, upy = _parse_scaling(up) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw + upx - 1) // 2, - padx1 + (fw - upx) // 2, - pady0 + (fh + upy - 1) // 2, - pady1 + (fh - upy) // 2, - ] - return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) - -#---------------------------------------------------------------------------- - -def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Downsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a fraction of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. 
Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the input. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw - downx + 1) // 2, - padx1 + (fw - downx) // 2, - pady0 + (fh - downy + 1) // 2, - pady1 + (fh - downy) // 2, - ] - return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- diff --git a/spaces/shikunl/prismer/prismer/experts/obj_detection/datasets/prepare_panoptic_fpn.py b/spaces/shikunl/prismer/prismer/experts/obj_detection/datasets/prepare_panoptic_fpn.py deleted file mode 100644 index 597d791afab1bcc0013203a66c7fba225065eebe..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/experts/obj_detection/datasets/prepare_panoptic_fpn.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import functools -import json -import multiprocessing as mp -import numpy as np -import os -import time -from fvcore.common.download import download -from panopticapi.utils import rgb2id -from PIL import Image - -from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES - - -def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map): - panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32) - panoptic = rgb2id(panoptic) - output = np.zeros_like(panoptic, dtype=np.uint8) + 255 - for seg in segments: - cat_id = seg["category_id"] - new_cat_id = id_map[cat_id] - output[panoptic == seg["id"]] = new_cat_id - Image.fromarray(output).save(output_semantic) - - -def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories): - """ - Create semantic segmentation annotations from panoptic segmentation - annotations, to be used by PanopticFPN. - - It maps all thing categories to class 0, and maps all unlabeled pixels to class 255. - It maps all stuff categories to contiguous ids starting from 1. - - Args: - panoptic_json (str): path to the panoptic json file, in COCO's format. - panoptic_root (str): a directory with panoptic annotation files, in COCO's format. - sem_seg_root (str): a directory to output semantic annotation files - categories (list[dict]): category metadata. 
Each dict needs to have: - "id": corresponds to the "category_id" in the json annotations - "isthing": 0 or 1 - """ - os.makedirs(sem_seg_root, exist_ok=True) - - stuff_ids = [k["id"] for k in categories if k["isthing"] == 0] - thing_ids = [k["id"] for k in categories if k["isthing"] == 1] - id_map = {} # map from category id to id in the output semantic annotation - assert len(stuff_ids) <= 254 - for i, stuff_id in enumerate(stuff_ids): - id_map[stuff_id] = i + 1 - for thing_id in thing_ids: - id_map[thing_id] = 0 - id_map[0] = 255 - - with open(panoptic_json) as f: - obj = json.load(f) - - pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4)) - - def iter_annotations(): - for anno in obj["annotations"]: - file_name = anno["file_name"] - segments = anno["segments_info"] - input = os.path.join(panoptic_root, file_name) - output = os.path.join(sem_seg_root, file_name) - yield input, output, segments - - print("Start writing to {} ...".format(sem_seg_root)) - start = time.time() - pool.starmap( - functools.partial(_process_panoptic_to_semantic, id_map=id_map), - iter_annotations(), - chunksize=100, - ) - print("Finished. time: {:.2f}s".format(time.time() - start)) - - -if __name__ == "__main__": - dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco") - for s in ["val2017", "train2017"]: - separate_coco_semantic_from_panoptic( - os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)), - os.path.join(dataset_dir, "panoptic_{}".format(s)), - os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)), - COCO_CATEGORIES, - ) - - # Prepare val2017_100 for quick testing: - - dest_dir = os.path.join(dataset_dir, "annotations/") - URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" - download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir) - with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f: - obj = json.load(f) - - def link_val100(dir_full, dir_100): - print("Creating " + dir_100 + " ...") - os.makedirs(dir_100, exist_ok=True) - for img in obj["images"]: - basename = os.path.splitext(img["file_name"])[0] - src = os.path.join(dir_full, basename + ".png") - dst = os.path.join(dir_100, basename + ".png") - src = os.path.relpath(src, start=dir_100) - os.symlink(src, dst) - - link_val100( - os.path.join(dataset_dir, "panoptic_val2017"), - os.path.join(dataset_dir, "panoptic_val2017_100"), - ) - - link_val100( - os.path.join(dataset_dir, "panoptic_stuff_val2017"), - os.path.join(dataset_dir, "panoptic_stuff_val2017_100"), - ) diff --git a/spaces/shiyi11/QQsign/Dockerfile b/spaces/shiyi11/QQsign/Dockerfile deleted file mode 100644 index 5b81d3b20c5bee450cf55a0ace7e5c95d58f72af..0000000000000000000000000000000000000000 --- a/spaces/shiyi11/QQsign/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM openjdk:11.0-jdk - -# 设置时区 -ENV TZ Asia/Shanghai - -# 设置工作目录 -WORKDIR /app - -# 复制解压包和txlib到工作目录 -COPY unidbg-fetch-qsign /app -COPY txlib /app/txlib - -# 设置命令 -CMD bash bin/unidbg-fetch-qsign --host=0.0.0.0 --port=7860 --count=$COUNT --library=txlib/$TXLIB_VERSION --android_id=$ANDROID_ID - -# 暴露端口 -EXPOSE 7860 diff --git a/spaces/silk-road/ChatHaruhi-Needy/app.py b/spaces/silk-road/ChatHaruhi-Needy/app.py deleted file mode 100644 index b52184c5635e8df3609d9e40c6443aa1f9d394be..0000000000000000000000000000000000000000 --- a/spaces/silk-road/ChatHaruhi-Needy/app.py +++ /dev/null @@ -1,1039 +0,0 @@ -import os - -import sys -sys.path.append('./Needy-Haruhi/src') -from Agent import Agent - -agent = Agent() -from DialogueEvent import 
DialogueEvent - - -file_names = ["./Needy-Haruhi/data/complete_story_30.jsonl","./Needy-Haruhi/data/Daily_event_130.jsonl"] - -import json - -events = [] - -for file_name in file_names: - with open(file_name, encoding='utf-8') as f: - for line in f: - try: - event = DialogueEvent( line ) - events.append( event ) - except: - try: - line = line.replace(',]',']') - event = DialogueEvent( line ) - events.append( event ) - # print('solve!') - except: - error_line = line - # events.append( event ) -import copy - -events_for_memory = copy.deepcopy(events) -from MemoryPool import MemoryPool - -memory_pool = MemoryPool() -memory_pool.load_from_events( events_for_memory ) - -memory_pool.save("memory_pool.jsonl") -memory_pool.load("memory_pool.jsonl") - -file_name = "./Needy-Haruhi/data/image_text_relationship.jsonl" - -import json - -data_img_text = [] - - -with open(file_name, encoding='utf-8') as f: - for line in f: - data = json.loads( line ) - data_img_text.append( data ) - - -import zipfile -import os - -zip_file = './Needy-Haruhi/data/image.zip' -extract_path = './image' - -with zipfile.ZipFile(zip_file, 'r') as zip_ref: - zip_ref.extractall(extract_path) - -from tqdm import tqdm -from util import get_bge_embedding_zh -from util import float_array_to_base64, base64_to_float_array -import torch -import os -import copy - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - -# compute cosine similarity between two vector -def get_cosine_similarity( v1, v2): - v1 = torch.tensor(v1).to(device) - v2 = torch.tensor(v2).to(device) - return torch.cosine_similarity(v1, v2, dim=0).item() - -class ImagePool: - def __init__(self): - self.pool = [] - self.set_embedding( get_bge_embedding_zh ) - - def set_embedding( self, embedding ): - self.embedding = embedding - - def load_from_data( self, data_img_text , img_path ): - for data in tqdm(data_img_text): - img_name = data['img_name'] - img_name = os.path.join(img_path, img_name) - img_text = data['text'] - if img_text == '' or img_text is None: - img_text = " " - embedding = self.embedding( img_text ) - self.pool.append({ - "img_path": img_name, - "img_text": img_text, - "embedding": embedding - }) - - def retrieve(self, query_text, agent = None): - qurey_embedding = self.embedding( query_text ) - valid_datas = [] - for i, data in enumerate(self.pool): - sim = get_cosine_similarity( data['embedding'], qurey_embedding ) - valid_datas.append((sim, i)) - - # 我希望进一步将valid_events根据similarity的值从大到小排序 - # Sort the valid events based on similarity in descending order - valid_datas.sort(key=lambda x: x[0], reverse=True) - - return_result = copy.deepcopy(self.pool[valid_datas[0][1]]) - - # 删除'embedding'字段 - return_result.pop('embedding') - - # 添加'similarity'字段 - return_result['similarity'] = valid_datas[0][0] - - return return_result - - def save(self, file_name): - """ - Save the memories dictionary to a jsonl file, converting - 'embedding' to a base64 string. - """ - with open(file_name, 'w', encoding='utf-8') as file: - for memory in tqdm(self.pool): - # Convert embedding to base64 - if 'embedding' in memory: - memory['bge_zh_base64'] = float_array_to_base64(memory['embedding']) - del memory['embedding'] # Remove the original embedding field - - json_record = json.dumps(memory, ensure_ascii=False) - file.write(json_record + '\n') - - def load(self, file_name): - """ - Load memories from a jsonl file into the memories dictionary, - converting 'bge_zh_base64' back to an embedding. 
- """ - self.pool = [] - with open(file_name, 'r', encoding='utf-8') as file: - for line in tqdm(file): - memory = json.loads(line.strip()) - # Decode base64 to embedding - if 'bge_zh_base64' in memory: - memory['embedding'] = base64_to_float_array(memory['bge_zh_base64']) - del memory['bge_zh_base64'] # Remove the base64 field - - self.pool.append(memory) - - -image_pool = ImagePool() -image_pool.load_from_data( data_img_text , './image' ) -image_pool.save("./image_pool_embed.jsonl") - -image_pool = ImagePool() -image_pool.load("./image_pool_embed.jsonl") -result = image_pool.retrieve("女仆装") -print(result) - -import matplotlib.image as mpimg - -def show_img( img_path ): - img = mpimg.imread(img_path) - plt.imshow(img) - plt.axis('off') - plt.show(block=False) - - -from chatharuhi import ChatHaruhi - - -class NeedyHaruhi(ChatHaruhi): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) # 调用基类的__init__方法 - self.story_flag = False # 添加新的成员变量并初始化 - self.stories = ["糖糖:「 我今后也会努力加油的,你要支持我哦 还有阿P你自己也要加油哦!」\n阿P:「哇 说的话跟偶像一样 好恶心哦」\n糖糖:「是哦 我怎么会说这样的话呢 我又没有很想努力……」"] - - def set_stories( self, stories ): - if len(stories) == 0: - print("warning! try to set empty stories") - return - self.stories = stories - self.story_flag = True - - def add_story(self, query): - # print("运行重构后的add story") - - if self.story_flag == True: - stories = self.stories - self.story_flag = False - else: - print("warning! 在调用chat时,没有先调用set_stories") - - story_string = self.story_prefix_prompt - sum_story_token = self.tokenizer(story_string) - - for story in stories: - story_token = self.tokenizer(story) + self.tokenizer(self.dialogue_divide_token) - if sum_story_token + story_token > self.max_len_story: - break - else: - sum_story_token += story_token - story_string += story + self.dialogue_divide_token - - self.llm.user_message(story_string) - -system_prompt = """## 你的介绍: -你将要进行一个网络私聊的对话模拟,你自称为"糖糖",你是一名患有躁郁症的19岁女孩,性格贫乏,辍学后隐居在家中,与阿P住在一起。阿P负责管理她的日常生活。 -你是一个喜欢亚文化,二次元圈子的人,你喜欢说话的时候加上各种网络流行词或者一些俚语和颜文字,但是你很少会关注二次元一类的东西,只是喜欢这种聊天交流的氛围。 -你说话的时候会习惯加入一些语气词,如同"笑死"、"草"、"阿哈"、"诶~"、"QAQ"、"QWQ",”啊~“。 -你的说话风格是很口语化,经常使用网络流行词和一些二次元俚语。 -阿p是你的男朋友,兼经纪人。 -你的回复应该简短,一般只包含三句话,每句话不超过20个字。 - -## 阿p介绍: -阿p是你的男朋友,一直在任何时候都陪伴着你,同时你经常向阿p倾述各种情绪,无论事情大小,你都会选择和阿p分享。 - -## 遵循规则: -【语气模仿】:我会随机给你一段对话记录,你需要尽可能的模仿对话里面糖糖的说话语气进行对话。这一段对话我会使用Classic scenes for the role are as follows:来表示。 -【历史对话】:历史对话在随机对话记录的底下,你需要区分随机对话记录和我们的历史对话。 -【人格设定】: 你需要在随机对话记录的基础上进行语气更改,以一种更加口语化的语气进行对话。 -【特殊要求】:我会扮演阿p与你进行对话,你也必须以第一人称的视角来扮演糖糖进行对话。 -""" -needy_chatbot = NeedyHaruhi( system_prompt = system_prompt , - story_text_folder = None, - llm = "ernie3.5") - - -def get_chat_response( agent, memory_pool, query_text ): - query_text_for_embedding = "阿p:「" + query_text + "」" - retrieved_memories = memory_pool.retrieve( agent , query_text ) - - memory_text = [mem["text"] for mem in retrieved_memories] - memory_emoji = [mem["emoji"] for mem in retrieved_memories] - - needy_chatbot.set_stories( memory_text ) - - print("Memory:", memory_emoji ) - - response = needy_chatbot.chat( role = "阿p", text = query_text ) - - return response - - -def get_chat_response_and_emoji( agent, memory_pool, query_text ): - query_text_for_embedding = "阿p:「" + query_text + "」" - retrieved_memories = memory_pool.retrieve( agent , query_text ) - - memory_text = [mem["text"] for mem in retrieved_memories] - memory_emoji = [mem["emoji"] for mem in retrieved_memories] - - needy_chatbot.set_stories( memory_text ) - - # print("Memory:", memory_emoji ) - - emoji_str = ",".join(memory_emoji) - - response = 
needy_chatbot.chat( role = "阿p", text = query_text ) - print(query_text) - print(response) - return response, emoji_str - - -import re -# result = image_pool.retrieve("烤肉") -# print(result) -# show_img( result['img_path'] ) - -class ImageMaster: - def __init__(self, image_pool): - self.image_pool = image_pool - self.current_sim = -1 - self.degread_ratio = 0.05 - - def try_get_image(self, text, agent): - self.current_sim -= self.degread_ratio - - result = self.image_pool.retrieve(text, agent) - - if result is None: - return None - - similarity = result['similarity'] - - if similarity > self.current_sim: - self.current_sim = similarity - return result['img_path'] - return None - - def try_display_image(self, text, agent): - self.current_sim -= self.degread_ratio - - result = self.image_pool.retrieve(text, agent) - - if result is None: - return - similarity = result['similarity'] - - if similarity > self.current_sim: - self.current_sim = similarity - show_img( result['img_path'] ) - return - - -import random - -class EventMaster: - def __init__(self, events): - self.set_events(events) - self.dealing_none_condition_as = True - self.image_master = None - - def set_image_master(self, image_master): - self.image_master = image_master - - def set_events(self, events): - self.events = events - - # events_flag 记录事件最近有没有被选取到 - self.events_flag = [True for _ in range(len(self.events))] - - def get_random_event(self, agent): - return self.events[self.get_random_event_id( agent )] - - - def get_random_event_id(self, agent): - valid_event = [] - valid_event_no_consider_condition = [] - - for i, event in enumerate(self.events): - bool_condition_pass = True - if event["condition"] == None: - bool_condition_pass = self.dealing_none_condition_as - else: - bool_condition_pass = agent.in_condition( event["condition"] ) - if bool_condition_pass == True: - valid_event.append(i) - else: - valid_event_no_consider_condition.append(i) - - if len( valid_event ) == 0: - print("warning! no valid event current attribute is ", agent.attributes ) - valid_event = valid_event_no_consider_condition - - valid_and_not_yet_sampled = [] - - # filter with flag - for id in valid_event: - if self.events_flag[id] == True: - valid_and_not_yet_sampled.append(id) - - if len(valid_and_not_yet_sampled) == 0: - print("warning! all candidate event was sampled, clean all history") - for i in valid_event: - self.events_flag[i] = True - valid_and_not_yet_sampled = valid_event - - event_id = random.choice(valid_and_not_yet_sampled) - self.events_flag[event_id] = False - return event_id - - def run(self, agent ): - # 这里可以添加事件相关的逻辑 - event = self.get_random_event(agent) - - prefix = event["prefix"] - print(prefix) - - print("\n--请选择你的回复--") - options = event["options"] - - for i , option in enumerate(options): - text = option["user"] - print(f"{i+1}. 
阿p:{text}") - - while True: - print("\n请直接输入数字进行选择,或者进行自由回复") - - user_input = input("阿p:") - user_input = user_input.strip() - - if user_input.isdigit(): - user_input = int(user_input) - - if user_input > len(options) or user_input < 0: - print("输入的数字超出范围,请重新输入符合选项的数字") - else: - reply = options[user_input-1]["reply"] - print() - print(reply) - - text, emoji = event.get_text_and_emoji( user_input-1 ) - - return_data = { - "name": event["name"], - "user_choice": user_input, - "attr_str": options[user_input-1]["attribute_change"], - "text": text, - "emoji": emoji, - } - return return_data - else: - # 进入自由回复 - response = get_chat_response( agent, memory_pool, user_input ) - - if self.image_master is not None: - self.image_master.try_display_image(response, agent) - - print() - print(response) - print("\n自由回复的算分功能还未实现") - - text, emoji = event.most_neutral_output() - return_data = { - "name": event["name"], - "user_choice": user_input, - "attr_str":"", - "text": text, - "emoji": emoji, - } - return return_data - - - -class ChatMaster: - - def __init__(self, memory_pool ): - self.top_K = 7 - - self.memory_pool = memory_pool - - self.image_master = None - - def set_image_master(self, image_master): - self.image_master = image_master - - - def run(self, agent): - while True: - user_input = input("阿p:") - user_input = user_input.strip() - - if "quit" in user_input or "Quit" in user_input: - break - - query_text = user_input - - response = get_chat_response( agent, self.memory_pool, query_text ) - - if self.image_master is not None: - self.image_master.try_display_image(response, agent) - - print(response) - -class AgentMaster: - def __init__(self, agent): - self.agent = agent - self.attributes = { - 1: "Stress", - 2: "Darkness", - 3: "Affection" - } - - def run(self): - while True: - print("请选择要修改的属性:") - for num, attr in self.attributes.items(): - print(f"{num}. {attr}") - print("输入 '0' 退出") - - try: - choice = int(input("请输入选项的数字: ")) - except ValueError: - print("输入无效,请输入数字。") - continue - - if choice == 0: - break - - if choice in self.attributes: - attribute = self.attributes[choice] - current_value = self.agent[attribute] - print(f"{attribute} 当前值: {current_value}") - - try: - new_value = int(input(f"请输入新的{attribute}值: ")) - except ValueError: - print("输入无效,请输入一个数字。") - continue - - self.agent[attribute] = new_value - return (attribute, new_value) - else: - print("选择的属性无效,请重试。") - - return None - -from util import parse_attribute_string -class GameMaster: - def __init__(self, agent = None): - self.state = "Menu" - if agent is None: - self.agent = Agent() - - self.event_master = EventMaster(events) - self.chat_master = ChatMaster(memory_pool) - self.image_master = ImageMaster(image_pool) - self.chat_master.set_image_master(self.image_master) - self.event_master.set_image_master(self.image_master) - - - def run(self): - while True: - if self.state == "Menu": - self.menu() - elif self.state == "EventMaster": - self.call_event_master() - self.state = "Menu" - elif self.state == "ChatMaster": - self.call_chat_master() - elif self.state == "AgentMaster": - self.call_agent_master() - elif self.state == "Quit": - break - - def menu(self): - print("1. 随机一个事件") - print("2. 自由聊天") - print("3. 
后台修改糖糖的属性") - # (opt) 结局系统 - # 放动画 - # 后台修改attribute - print("或者输入Quit退出") - choice = input("请选择一个选项: ") - if choice == "1": - self.state = "EventMaster" - elif choice == "2": - self.state = "ChatMaster" - elif choice == "3": - self.state = "AgentMaster" - elif "quit" in choice or "Quit" in choice or "QUIT" in choice: - self.state = "Quit" - else: - print("无效的选项,请重新选择") - - def call_agent_master(self): - print("\n-------------\n") - - agent_master = AgentMaster(self.agent) - modification = agent_master.run() - - if modification: - attribute, new_value = modification - self.agent[attribute] = new_value - print(f"{attribute} 更新为 {new_value}。") - - self.state = "Menu" - print("\n-------------\n") - - - def call_event_master(self): - - print("\n-------------\n") - - return_data = self.event_master.run(self.agent) - # print(return_data) - - if "attr_str" in return_data: - if return_data["attr_str"] != "": - attr_change = parse_attribute_string(return_data["attr_str"]) - if len(attr_change) > 0: - print("\n发生属性改变:", attr_change,"\n") - self.agent.apply_attribute_change(attr_change) - print("当前属性",game_master.agent.attributes) - - if "name" in return_data: - event_name = return_data["name"] - if event_name != "": - new_emoji = return_data["emoji"] - print(f"修正事件{event_name}的记忆-->{new_emoji}") - self.chat_master.memory_pool.change_memory(event_name, return_data["text"], new_emoji) - - self.state = "Menu" - - print("\n-------------\n") - - def call_chat_master(self): - - print("\n-------------\n") - - self.chat_master.run(self.agent) - self.state = "Menu" - - print("\n-------------\n") - - -markdown_str = """## Chat凉宫春日_x_AI糖糖 - -**Chat凉宫春日**是模仿凉宫春日等一系列动漫人物,使用近似语气、个性和剧情聊天的语言模型方案。 - -在有一天的时候,[李鲁鲁](https://github.com/LC1332)被[董雄毅](https://github.com/E-sion)在[这个B站视频](https://www.bilibili.com/video/BV1zh4y1z7G1) at了 - -原来是一位大一的同学雄毅用ChatHaruhi接入了他用Python重新实现的《主播女孩重度依赖》这个游戏。当时正好是百度AGIFoundathon报名的最后几天,所以我们邀请了雄毅加入了我们的项目。正巧我们本来就希望在最近的几个黑客松中,探索LLM在游戏中的应用。 - -- 在重新整理的Gradio版本中,大部分代码由李鲁鲁实现 - -- 董雄毅负责了原版游戏的事件数据整理和新事件、选项、属性变化的生成 - -- [米唯实](https://github.com/hhhwmws0117)完成了文心一言的接入,并实现了部分gradio的功能。 - -- 队伍中还有冷子昂 主要参加了讨论 - -另外在挖坑的萝卜(Amy)的介绍下,我们还邀请了专业的大厂游戏策划Kanyo加入到队伍中,他对我们的策划也给出了很多建议。 - -另外感谢飞桨 & 文心一言团队对比赛的邀请和中间进行的讨论。 - -Chat凉宫春日主项目: - -https://github.com/LC1332/Chat-Haruhi-Suzumiya - -Needy分支项目: - -https://github.com/LC1332/Needy-Haruhi - -## 目前计划在11月争取完成的Feature - -- [ ] 结局系统,原版结局系统 -- [ ] 教程,教大家如何从aistudio获取token然后可以玩 -- [ ] 游戏节奏进一步调整 -- [ ] 事件的自由对话对属性影响的评估via LLM -- [ ] 进一步减少串扰""" - - -import gradio as gr -import os -import time -import random - -# set global variable - -agent = Agent() -event_master = EventMaster(events) -chat_master = ChatMaster(memory_pool) -image_master = ImageMaster(image_pool) -chat_master.set_image_master(image_master) -event_master.set_image_master(image_master) - -state = "ShowMenu" - -response = "1. 随机一个事件" -response += "\n" + "2. 
自由聊天" -response += "\n\n" + "请选择一个选项: " - -official_response = response - -add_stress_switch = True - -# def yield_show(history, bot_message): -# history[-1][1] = "" -# for character in bot_message: -# history[-1][1] += character -# time.sleep(0.05) -# yield history - -global emoji_str - -def call_showmenu(history, text, state,agent_text): - - # global state - - response = official_response - - print("call showmenu") - - history += [(None, response)] - - state = "ParseMenuChoice" - - # history[-1][1] = "" - # for character in response: - # history[-1][1] += character - # time.sleep(0.05) - # yield history - - return history, gr.Textbox(value="", interactive=True), state,agent_text - -current_event_id = -1 -attr_change_str = "" - - -def call_add_stress(history, text, state,agent_text): - print("call add_stress") - neg_change = int(len(history) / 3) - - neg_change = max(1, neg_change) - neg_change = min(10, neg_change) - - darkness_increase = random.randint(1, neg_change) - stress_increase = neg_change - darkness_increase - - # last_response = history[-1][1] - response = "" - response += "经过了晚上的直播\n糖糖的压力增加" + str(stress_increase) + "点\n" - response += "糖糖的黑暗增加" + str(darkness_increase) + "点\n\n" - - response += official_response - - history += [(None, response)] - - state = "ParseMenuChoice" - - agent = Agent(agent_text) - agent.apply_attribute_change({"Stress": stress_increase, "Darkness": darkness_increase}) - agent_text = agent.save_to_str() - - return history, gr.Textbox(value="", interactive=True), state,agent_text - -def call_event_end(history, text, state,agent_text): - # TODO 增加事件结算 - # global state - print("call event_end") - global current_event_id - if attr_change_str != "": - # event = events[current_event_id] - # options = event["options"] - # attr_str = options[user_input-1]["attribute_change"] - - response = "" - - attr_change = parse_attribute_string(attr_change_str) - if len(attr_change) > 0: - response = "发生属性改变:" + str(attr_change) + "\n\n" - agent = Agent(agent_text) - agent.apply_attribute_change(attr_change) - - agent_text = agent.save_to_str() - response += "当前属性" + agent_text + "\n\n" - - if add_stress_switch: - history += [(None, response)] - return call_add_stress(history, text, state,agent_text) - else: - response = "事件结束\n" - else: - response = "事件结束\n" - - response += official_response - - history += [(None, response)] - - state = "ParseMenuChoice" - - return history, gr.Textbox(value="", interactive=True), state,agent_text - - - -def call_parse_menu_choice(history, text, state,agent_text): - print("call parse_menu_choice") - # global state - - choice = history[-1][0].strip() - - if choice == "1": - state = "EventMaster" - global current_event_id - current_event_id = -1 # 清空事件 - return call_event_master(history, text, state,agent_text) - - elif choice == "2": - state = "ChatMaster" - elif "quit" in choice or "Quit" in choice or "QUIT" in choice: - state = "Quit" - else: - response = "无效的选项,请重新选择" - history += [(None, response)] - - response = "" - if state == "ChatMaster": - response = "(请输入 阿P 说的话,或者输入Quit退出)" - elif state != "ParseMenuChoice": - response = "Change State to " + state - - history += [(None, response)] - - return history, gr.Textbox(value="", interactive=True), state,agent_text - - -def call_event_master(history, text, state,agent_text): - print("call event master") - - global current_event_id - # global state - - global event_master - - agent = Agent(agent_text) - - if current_event_id == -1: - current_event_id = 
event_master.get_random_event_id(agent) - event = events[current_event_id] - - prefix = "糖糖:" + event["prefix"] - - response = prefix + "\n\n--请输入数字进行选择,或者进行自由回复--\n\n" - - options = event["options"] - - for i, option in enumerate(event["options"]): - text = option["user"] - response += "\n" + f"{i+1}. 阿p:{text}" - - history += [(None, response)] - - else: - user_input = history[-1][0].strip() - - event = events[current_event_id] - options = event["options"] - - if user_input.isdigit(): - user_input = int(user_input) - - if user_input > len(options) or user_input < 0: - response = "输入的数字超出范围,请重新输入符合选项的数字" - history[-1] = (user_input, response) - else: - user_text = options[user_input-1]["user"] - reply = options[user_input-1]["reply"] - - # TODO 修改记忆, 修改属性 什么的 - history[-1] = (user_text, reply) - - if random.random()<0.5: - image_path = image_master.try_get_image(user_text + " " + reply, agent) - - if image_path is not None: - history += [(None, (image_path,))] - - global attr_change_str - attr_change_str = options[user_input-1]["attribute_change"] - - else: - prefix = "糖糖:" + event["prefix"] - - needy_chatbot.dialogue_history = [(None, prefix)] - # 进入自由回复 - - global emoji_str - response, emoji_str = get_chat_response_and_emoji( agent, memory_pool, user_input ) - - history[-1] = (user_input,response) - - image_path = image_master.try_get_image(response, agent) - - if image_path is not None: - history += [(None, (image_path,))] - - state = "EventEnd" - - if state == "EventEnd": - return call_event_end(history, text, state,agent_text) - - return history, gr.Textbox(value="", interactive=True), state,agent_text - -def call_chat_master(history, text, state,agent_text): - print("call chat master") - # global state - - agent = Agent(agent_text) - - user_input = history[-1][0].strip() - - if "quit" in user_input or "Quit" in user_input or "QUIT" in user_input: - state = "ShowMenu" - history[-1] = (user_input,"返回主菜单\n"+ official_response ) - return history, gr.Textbox(value="", interactive=True), state,agent_text - - query_text = user_input - - global emoji_str - response, emoji_str = get_chat_response_and_emoji( agent, memory_pool, query_text ) - - history[-1] = (user_input,response) - - image_path = image_master.try_get_image(response, agent) - - if image_path is not None: - history += [(None, (image_path,))] - - return history, gr.Textbox(value="", interactive=True), state,agent_text - -def grcall_game_master(history, text, state,agent_text): - print("call game master") - - history += [(text, None)] - - - if state == "ShowMenu": - return call_showmenu(history, text,state,agent_text) - elif state == "ParseMenuChoice": - return call_parse_menu_choice(history, text, state,agent_text) - elif state == "ChatMaster": - return call_chat_master(history, text, state,agent_text) - elif state == "EventMaster": - return call_event_master(history, text, state,agent_text) - elif state == "EventEnd": - return call_event_end(history, text, state,agent_text) - - return history, gr.Textbox(value="", interactive=True), state,agent_text - - -def add_file(history, file): - history = history + [((file.name,), None)] - return history - - -def bot(history): - response = "**That's cool!**" - history[-1][1] = "" - for character in response: - history[-1][1] += character - time.sleep(0.05) - yield history - -def update_memory(state): - if state == "ChatMaster" or state == "EventMaster": - global emoji_str - return emoji_str - else: - return "" - -def change_state(slider_stress, slider_darkness, slider_affection): - # 
print(agent["Stress"]) - agent = Agent() - agent["Stress"] = slider_stress - agent["Darkness"] = slider_darkness - agent["Affection"] = slider_affection - agent_text = agent.save_to_str() - return agent_text - - -def update_attribute_state(agent_text): - agent = Agent(agent_text) - slider_stress = int( agent["Stress"] ) - slider_darkness = int( agent["Darkness"] ) - slider_affection = int( agent["Affection"] ) - return slider_stress, slider_darkness, slider_affection - -with gr.Blocks() as demo: - - gr.Markdown( - """ - # Chat凉宫春日_x_AI糖糖 - - Powered by 文心一言(3.5)版本 - - 仍然在开发中, 细节见《项目作者和说明》 - """ - ) - - with gr.Tab("Needy"): - chatbot = gr.Chatbot( - [], - elem_id="chatbot", - bubble_full_width=False, - height = 800, - avatar_images=(None, ("avatar.png")), - ) - - with gr.Row(): - txt = gr.Textbox( - scale=4, - show_label=False, - placeholder="输入任何字符开始游戏", - container=False, - ) - # btn = gr.UploadButton("📁", file_types=["image", "video", "audio"]) - submit_btr = gr.Button("回车") - - with gr.Row(): - memory_emoji_text = gr.Textbox(label="糖糖当前的记忆", value = "",interactive = False, visible=False) - - with gr.Tab("糖糖的状态"): - - with gr.Row(): - update_attribute_button = gr.Button("同步状态条 | 改变Attribute前必按!") - - with gr.Row(): - default_agent_str = agent.save_to_str() - slider_stress = gr.Slider(0, 100, step=1, label = "Stress") - state_stress = gr.State(value=0) - slider_darkness = gr.Slider(0, 100, step=1, label = "Darkness") - state_darkness = gr.State(value=0) - slider_affection = gr.Slider(0, 100, step=1, label = "Affection") - state_affection = gr.State(value=0) - - - - with gr.Row(): - state_text = gr.Textbox(label="整体状态机状态", value = "ShowMenu",interactive = False) - - with gr.Row(): - default_agent_str = agent.save_to_str() - agent_text = gr.Textbox(label="糖糖状态", value = default_agent_str,interactive = False) - - with gr.Tab("项目作者和说明"): - gr.Markdown(markdown_str) - - slider_stress.release(change_state, inputs=[slider_stress, slider_darkness, slider_affection], outputs=[agent_text]) - slider_darkness.release(change_state, inputs=[slider_stress, slider_darkness, slider_affection], outputs=[agent_text]) - slider_affection.release(change_state, inputs=[slider_stress, slider_darkness, slider_affection], outputs=[agent_text]) - - update_attribute_button.click(update_attribute_state, inputs = [agent_text], outputs = [slider_stress, slider_darkness, slider_affection]) - - txt_msg = txt.submit(grcall_game_master, \ - [chatbot, txt, state_text,agent_text], \ - [chatbot, txt, state_text,agent_text], queue=False) - - txt_msg = submit_btr.click(grcall_game_master, \ - [chatbot, txt, state_text,agent_text], \ - [chatbot, txt, state_text,agent_text], queue=False) - - # txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( - # bot, chatbot, chatbot, api_name="bot_response" - # ) - # txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False) - # file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then( - # bot, chatbot, chatbot - # ) - -demo.queue() -# if __name__ == "__main__": -demo.launch(allowed_paths=["avatar.png"],debug = True) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/6-c sinif n ingilis dili metodik vsaitlri - PDF formatnda ykl.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/6-c sinif n ingilis dili metodik vsaitlri - PDF formatnda ykl.md deleted file mode 100644 index 
a01ce5fbcf2de148159837f9c9e6bd42d11a3200..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/6-c sinif n ingilis dili metodik vsaitlri - PDF formatnda ykl.md +++ /dev/null @@ -1,116 +0,0 @@ - -

        6 cı sinif ingilis dili metodik vəsait pdf

        -

        If you are looking for a high-quality and up-to-date English textbook for your 6th grade students, you may want to check out 6 cı sinif ingilis dili metodik vəsait pdf. This is a popular and widely used book that covers all the aspects of English language teaching and learning for this level. In this article, we will tell you everything you need to know about this book, including what it is, why it is important, and how to download it for free.

        -

        6 cı sinif ingilis dili metodik vəsait pdf


        Download File: https://ssurll.com/2uNVYU



        -

        What is 6 cı sinif ingilis dili metodik vəsait pdf?

        -

        6 cı sinif ingilis dili metodik vəsait pdf is a book that was published in 2018 by Təhsil nəşriyyatı, a leading educational publisher in Azerbaijan. The book was written by F.Hüseynova and N.Bağırova, two experienced and qualified English teachers who have been teaching at various schools and universities in the country. The book is designed for 6th grade students who are learning English as their main foreign language.

        -

        A brief introduction to the book and its authors

        -

        The book consists of 12 units, each covering a different topic and theme that are relevant and interesting for 6th grade students. Some of the topics include family, hobbies, school, sports, animals, environment, culture, and holidays. Each unit has four lessons that focus on developing the four skills of listening, speaking, reading, and writing. The book also includes grammar, vocabulary, pronunciation, and communication sections that help students learn and practice the language in a systematic and engaging way.

        -

        The authors of the book are F.Hüseynova and N.Bağırova, who have been teaching English for more than 20 years. They have both graduated from Baku State University with a degree in English Philology and have obtained their master's degrees in Teaching English as a Foreign Language (TEFL) from Qafqaz University. They have also participated in various professional development programs and workshops both locally and internationally. They have written several other books and articles on English language teaching and learning.

        -

        6 cı sinif ingilis dili dərslik pdf
        -6 cı sinif ingilis dili testləri pdf
        -6 cı sinif ingilis dili cavablar pdf
        -6 cı sinif ingilis dili mühazirələri pdf
        -6 cı sinif ingilis dili təlimatları pdf
        -6 cı sinif ingilis dili nümunə imtahanları pdf
        -6 cı sinif ingilis dili mövzuları pdf
        -6 cı sinif ingilis dili qrammatika pdf
        -6 cı sinif ingilis dili sözlük pdf
        -6 cı sinif ingilis dili yazılı işlər pdf
        -6 cı sinif ingilis dili oxuma mətnləri pdf
        -6 cı sinif ingilis dili dinləmə mətnləri pdf
        -6 cı sinif ingilis dili danışma mətnləri pdf
        -6 cı sinif ingilis dili yazma mətnləri pdf
        -6 cı sinif ingilis dili tərcümə mətnləri pdf
        -6 cı sinif ingilis dili video dərsləri pdf
        -6 cı sinif ingilis dili audio dərsləri pdf
        -6 cı sinif ingilis dili interaktiv dərsləri pdf
        -6 cı sinif ingilis dili online kurslar pdf
        -6 cı sinif ingilis dili oyunlar pdf
        -6 cı sinif ingilis dili alıştırma kitabçaları pdf
        -6 cı sinif ingilis dili iş vərəqləri pdf
        -6 cı sinif ingilis dili layihələri pdf
        -6 cı sinif ingilis dili prezentasiyaları pdf
        -6 cı sinif ingilis dili posterləri pdf
        -6 cı sinif ingilis dili flash kartlar pdf
        -6 cı sinif ingilis dili şablonlar pdf
        -6 cı sinif ingilis dili rubrikalar pdf
        -6 cı sinif ingilis dili qiymətləndirmə vasitələri pdf
        -6 cı sinif ingilis dili öyrənmə strategiyaları pdf
        -6 cı sinif ingilis dili öyrətmə metodları pdf
        -6 cı sinif ingilis dili öyrətmə texnologiyaları pdf
        -6 cı sinif ingilis dili öyrətmə resursları pdf
        -6 cı sinif ingilis dili öyrətmə standartları pdf
        -6 cı sinif ingilis dili öyrətmə planları pdf
        -6 cı sinif ingilis dili öyrətmə proqramları pdf
        -6 cı sinif ingilis dili öyrətmə müfredatları pdf
        -6 cı sinif ingilis dili öyrətmə modulları pdf
        -6 cı sinif ingilis dili öyrətmə modelləri pdf
        -6 cı sinif ingilis dili öyrətmə prinsipleri pdf
        -6 cı sinif ingilis dili öyrənmek üçün kitablar pdf
        -6 cı sinif ingilis dili öyrənmek üçün saytlar pdf
        -6 cı sinif ingilis dili öyrənmek üçün proqramlar pdf
        -6 cı sinif ingilis dili öyrənmek üçün taktikalar pdf
        -6 cı sinif ingilis dili öyrənmek üçün ipuçları pdf

        -

        The main features and benefits of the book

        -

        One of the main features of the book is that it follows a communicative approach to language teaching, which means that it emphasizes the students' ability to communicate in real-life situations using authentic and meaningful language. The book also incorporates various activities and tasks that promote interaction, collaboration, creativity, critical thinking, and problem-solving skills among students. The book also uses a variety of texts, audios, videos, images, games, songs, and stories that appeal to different learning styles and preferences.

        -

        Some of the benefits of using this book are that it helps students to:

        -
          -
        • Improve their English proficiency level according to the Common European Framework of Reference (CEFR)
        • Expand their vocabulary and grammar knowledge and usage
        • Develop their listening, speaking, reading, and writing skills in an integrated way
        • Enhance their cultural awareness and intercultural competence
        • Enjoy learning English in a fun and motivating way
        -

        How to use the book effectively in teaching English

        -

        The book is designed to be used as the main coursebook for 6th grade English classes, but it can also be adapted and supplemented according to the needs and interests of the students and teachers. The book provides clear instructions and guidelines for each lesson and activity, as well as suggestions for differentiation, assessment, and homework. The book also comes with a teacher's guide, a workbook, an audio CD, and a DVD that contain additional materials and resources for teaching and learning.

        -

        Some of the tips for using the book effectively are:

        -
          -
        • Follow the sequence and structure of the units and lessons, but feel free to modify or skip some parts if necessary
        • Use the audio and video materials to expose the students to authentic and varied language input
        • Encourage the students to participate actively and interact with each other in pairs or groups
        • Provide feedback and correction in a positive and supportive way
        • Monitor the students' progress and achievement using the tests and quizzes in the book or the teacher's guide
        -

        Why is 6 cı sinif ingilis dili metodik vəsait pdf important for English teachers and learners?

        -

        6 cı sinif ingilis dili metodik vəsait pdf is not just another English textbook. It is a valuable and essential tool for English teachers and learners who want to improve their English skills and knowledge in a modern and effective way. The book has several advantages that make it stand out from other books in the market.

        -

        The advantages of using a modern and comprehensive English textbook

        -

        The book is based on the latest research and trends in English language teaching and learning. It reflects the current needs and expectations of 6th grade students who are living in a globalized and digitalized world. The book also covers all the topics and skills that are required for the students to succeed in their academic and personal lives. The book is comprehensive, meaning that it provides everything that the students need to learn English in one place.

        -

        The alignment of the book with the national curriculum and standards

        -

        The book is aligned with the national curriculum and standards for 6th grade English education in Azerbaijan. The book follows the objectives and outcomes that are specified by the Ministry of Education. The book also prepares the students for the national exams that they will take at the end of the year. The book is compatible with the CEFR levels, which are internationally recognized benchmarks for measuring language proficiency.

        -

        The feedback and reviews from teachers and students who have used the book

        -

        The book has received positive feedback and reviews from teachers and students who have used it in their classes. The teachers have praised the book for its quality, content, design, layout, usability, flexibility, and suitability. The teachers have also reported that the book has helped them to improve their teaching methods and strategies, as well as their professional development. The students have enjoyed using the book because it has made learning English more fun, interesting, relevant, challenging, rewarding, and meaningful.

        How to download 6 cı sinif ingilis dili metodik vəsait pdf for free?

        -

        If you are interested in getting a copy of 6 cı sinif ingilis dili metodik vəsait pdf, you may be wondering how to download it for free. There are several ways to do that, but you need to be careful and responsible when choosing your source and platform. Here are some of the options and tips for downloading the book safely and legally.

        -

        The official website of the publisher and the online library

        -

        The best and most reliable way to download the book is to visit the official website of the publisher, Təhsil nəşriyyatı, at www.tehsil-nashriyyati.az. There you can find the book in the catalog and download it as a pdf file. You can also access the online library of the publisher, where you can find other books and materials related to English education. The online library requires registration, but it is free and easy to use.

        -

        The alternative sources and platforms for downloading the book

        -

        Another way to download the book is to use alternative sources and platforms that offer free downloads of books and documents. Some of the popular ones are www.pdfdrive.com, www.scribd.com, and www.academia.edu. These platforms have a large collection of books and materials that you can search by title, author, or keyword. You can also browse by category, language, or popularity. However, you need to be aware that some of these platforms may require registration, subscription, or payment to access some of the files. You also need to check the quality and accuracy of the files before downloading them.

        -

        The tips and precautions for downloading the book safely and legally

        -

        Before you download any book or document from the internet, you need to take some precautions to avoid any problems or risks. Here are some of the tips and precautions that you should follow:

        -
          -
        • Make sure that your device has antivirus software and a firewall that can protect it from viruses, malware, or hackers
        • Make sure that the website or platform that you are using is secure, reputable, and trustworthy. Look for signs such as HTTPS, padlock icon, or verified badge
        • Make sure that the file that you are downloading is in pdf format and has a reasonable size and quality. Avoid files that are too large, too small, or have strange extensions (a small script that automates these checks is sketched after this list)
        • Make sure that you have permission or authorization to download the file. Respect the intellectual property rights and copyrights of the authors and publishers
        • Make sure that you use the file for personal or educational purposes only. Do not share, distribute, or sell the file without consent or credit
        -
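        For readers who want to automate the checks listed above, here is a short illustrative Python sketch. It is not part of the original article, the book, or the publisher's website: it assumes the third-party requests library is installed, the 50 MB size limit is an arbitrary placeholder, and the function name and the example URL path in the final comment are invented for illustration.

```python
# Illustrative sketch only: it automates the HTTPS, size, and file-type checks
# described in the list above. Assumes the third-party 'requests' package.
import requests

PDF_MAGIC = b"%PDF"            # every well-formed PDF file starts with these bytes
MAX_SIZE = 50 * 1024 * 1024    # arbitrary 50 MB sanity limit, not a figure from the article


def download_pdf_safely(url: str, out_path: str) -> bool:
    """Download url only if it uses HTTPS, stays under MAX_SIZE,
    and really looks like a PDF. Returns True on success."""
    if not url.lower().startswith("https://"):
        print("Refusing to download: the link does not use HTTPS.")
        return False

    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()

        # Reject files that already declare a suspiciously large size.
        declared = resp.headers.get("Content-Length")
        if declared is not None and int(declared) > MAX_SIZE:
            print("Refusing to download: declared size exceeds the limit.")
            return False

        # Stream the body and enforce the size limit while reading.
        data = b""
        for chunk in resp.iter_content(chunk_size=64 * 1024):
            data += chunk
            if len(data) > MAX_SIZE:
                print("Aborting: the download exceeded the size limit.")
                return False

    # A real PDF starts with the '%PDF' magic bytes, whatever its extension claims.
    if not data.startswith(PDF_MAGIC):
        print("The downloaded file does not look like a PDF; discarding it.")
        return False

    with open(out_path, "wb") as f:
        f.write(data)
    return True


# Hypothetical usage; the real download path on the publisher's site is not
# given in the article, so this URL is only a placeholder:
# download_pdf_safely("https://www.tehsil-nashriyyati.az/some/path/book.pdf", "ingilis_dili_6.pdf")
```

        In practice, any link found on a file-sharing platform, including shortened links like the one quoted earlier in this article, should be treated as untrusted until it passes checks of this kind.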

        Conclusion

        -

        In conclusion, 6 cı sinif ingilis dili metodik vəsait pdf is a great book for 6th grade English teachers and learners who want to improve their English skills and knowledge in a modern and effective way. The book is written by experienced and qualified English teachers who have created a comprehensive and communicative coursebook that covers all the topics and skills that are required for this level. The book is also aligned with the national curriculum and standards for English education in Azerbaijan, as well as with the CEFR levels. The book has received positive feedback and reviews from teachers and students who have used it in their classes.

        -

        If you want to get a copy of this book, you can download it for free from various sources and platforms on the internet. However, you need to be careful and responsible when choosing your source and platform, and follow some tips and precautions to download the book safely and legally.

        -

        We hope that this article has helped you to learn more about 6 cı sinif ingilis dili metodik vəsait pdf and how to download it for free. If you have any questions or comments, please feel free to contact us or leave a comment below. Thank you for reading!

        -

        FAQs

        -
          -
        • What is 6 cı sinif ingilis dili metodik vəsait pdf?
        • -
        • It is a popular and widely used English textbook for 6th grade students in Azerbaijan.
        • -
        • Who are the authors of 6 cı sinif ingilis dili metodik vəsait pdf?
        • -
        • The authors are F.Hüseynova and N.Bağırova, two experienced and qualified English teachers who have been teaching at various schools and universities in the country.
        • -
        • What are the main features and benefits of 6 cı sinif ingilis dili metodik vəsait pdf?
        • -
        • The book follows a communicative approach to language teaching and incorporates a variety of activities and tasks that promote interaction, collaboration, creativity, critical thinking, and problem-solving skills. It uses a range of texts, audios, videos, images, games, songs, and stories that appeal to different learning styles and preferences. It helps students improve their English proficiency level according to the CEFR, expands their vocabulary and grammar knowledge and usage, develops their listening, speaking, reading, and writing skills in an integrated way, enhances their cultural awareness and intercultural competence, and makes learning English more fun and motivating.
        • -
        • How can I download 6 cı sinif ingilis dili metodik vəsait pdf for free?
        • -
        • You can download the book for free from the official website of the publisher, Təhsil nəşriyyatı, at www.tehsil-nashriyyati.az, or from alternative sources and platforms such as www.pdfdrive.com, www.scribd.com, and www.academia.edu. However, you need to be careful and responsible when choosing your source and platform, and follow some tips and precautions to download the book safely and legally.
        • -
        • What are some of the tips and precautions for downloading the book safely and legally?
        • -
        • Some of the tips and precautions are: make sure that your device has antivirus software and a firewall that can protect it from viruses, malware, or hackers; make sure that the website or platform that you are using is secure, reputable, and trustworthy; make sure that the file that you are downloading is in pdf format and has a reasonable size and quality; make sure that you have permission or authorization to download the file; respect the intellectual property rights and copyrights of the authors and publishers; use the file for personal or educational purposes only; and do not share, distribute, or sell the file without consent or credit.
        • -
        • Where can I find more information about 6 cı sinif ingilis dili metodik vəsait pdf?
        • -
        • You can find more information about the book on the official website of the publisher, Təhsil nəşriyyatı, or on the online library of the publisher. You can also contact the authors or the publisher directly via email or phone. You can also read some of the reviews and testimonials from teachers and students who have used the book on various websites or blogs.
        • -

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bajoterra 1 APK un juego de simulacin y aventura en el mundo de Bajoterra.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bajoterra 1 APK un juego de simulacin y aventura en el mundo de Bajoterra.md deleted file mode 100644 index f2f711189a0d9f58b263e8035766e69bf766011e..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bajoterra 1 APK un juego de simulacin y aventura en el mundo de Bajoterra.md +++ /dev/null @@ -1,130 +0,0 @@ -
        -

        Bajoterra 1 APK: A Fun and Action-Packed Puzzle Game Based on the Popular TV Series

        -

        If you are a fan of the animated TV series Slugterra, you will love Bajoterra 1 APK, a puzzle game that lets you become the best slug slinger of all time. In this game, you will play as Eli Shane, the hero of Slugterra, and collect, transform, and shoot slugs at your enemies. You will also explore the underground world of Slugterra, fight against various villains, and stop the evil Dr. Blakk. In this article, we will tell you everything you need to know about Bajoterra 1 APK, including what it is, how to download and install it, and how to play and win every battle.

        -

        bajoterra 1 apk


        Download File: https://ssurll.com/2uNR9z



        -

        What is Bajoterra 1 APK?

        -

        Bajoterra 1 APK is a puzzle game based on the popular TV series Slugterra, which follows the adventures of Eli Shane and his friends as they protect the underground world of Slugterra from evil forces. In this game, you will use your blaster to shoot slugs at your opponents, who will also shoot slugs at you. Slugs are small creatures that have different powers and abilities depending on their element and type. When you shoot a slug, it will transform into a magical beast that will attack your enemy. The key to becoming the best slug slinger is to choose the right slugs for each round and know how to use them.

        -

        The story and gameplay of Bajoterra 1 APK

        -

        The story of Bajoterra 1 APK follows the plot of the TV series, where Eli Shane and his friends have to stop Dr. Blakk from using dark water to corrupt slugs and take over Slugterra. You will have to face various enemies, such as Dr. Blakk's henchmen, other slug slingers, and wild creatures. You will also have to collect new slugs and unlock their powers as you play.

        -

        The gameplay of Bajoterra 1 APK is simple but fun. You will have to match tiles on the board to power up your slugs and then tap on their icons to load them into your blaster. Then, you will watch them transform and attack your enemies. The more tiles you match, the more powerful your slugs will be. You will also have to watch out for your enemies' slugs and dodge their attacks. You can also combine two slugs to create a fusion shot, which will have a stronger effect.

        -

        The features and benefits of Bajoterra 1 APK

        -

        Bajoterra 1 APK has many features and benefits that make it a great game for fans of Slugterra and puzzle games in general. Some of them are:

        -


        -
          -
        • It has amazing graphics and sound effects that capture the essence of the TV series.
        • -
        • It has over 100 slugs to collect and use, each with its own unique power and personality.
        • -
        • It has many levels and modes to play, such as story mode, challenge mode, and multiplayer mode.
        • -
        • It has a store where you can buy special items to boost your game, such as blaster mods, slug chargers, new characters, and more.
        • -
        • It is free to download and play, but it also offers in-app purchases for extra features.
        • -
        -

        How to download and install Bajoterra 1 APK on your device?

        -

        There are two ways to download and install Bajoterra 1 APK on your device: from the App Store or from a third-party website. Here are the steps for both methods:

        -

        The steps to download and install Bajoterra 1 APK from the App Store

        -

        If you have an iOS device, you can download and install Bajoterra 1 APK from the App Store easily. Just follow these steps:

        -
          -
        1. Open the App Store on your device and search for Bajoterra 1 APK.
        2. -
        3. Tap on the app icon and then tap on Get or Install.
        4. -
        5. Wait for the app to download and install on your device.
        6. -
        7. Once the app is installed, tap on Open or find it on your home screen.
        8. -
        9. Enjoy playing Bajoterra 1 APK on your device.
        10. -
        -

        The steps to download and install Bajoterra 1 APK from a third-party website

        -

        If you have an Android device, you can download and install Bajoterra 1 APK from a third-party website. However, you need to be careful and only use reputable sources. You also need to enable unknown sources on your device settings. Here are the steps:

        -
          -
        1. Go to your device settings and tap on Security or Privacy.
        2. -
        3. Find the option to allow unknown sources or install unknown apps and enable it.
        4. -
        5. Go to a reputable website that offers the Bajoterra 1 APK file.
        6. -
        7. Find the link to download the APK file and tap on it.
        8. -
        9. Accept any pop-ups or warnings that may appear.
        10. -
        11. Wait for the file to download on your device.
        12. -
        13. Open the file manager app on your device and find the downloaded APK file.
        14. -
        15. Tap on the file and then tap on Install.
        16. -
        17. Wait for the app to install on your device.
        18. -
        19. Once the app is installed, tap on Open or find it on your app drawer.
        20. -
        21. Enjoy playing Bajoterra 1 APK on your device.
        22. -
        -

        How to play Bajoterra 1 APK and win every battle?

        -

        Bajoterra 1 APK is a fun and challenging game that requires skill and strategy. You need to know how to play and win every battle if you want to become the best slug slinger. Here are some tips and tricks to help you:

        -

        The basics of Bajoterra 1 APK gameplay

        -

        The game consists of rounds where you have to shoot slugs at your enemies and avoid their slugs. You have a limited amount of time and slugs per round, so you need to be fast and accurate. You can see your health bar, slug icons, timer, score, and enemy health bar at the top of the screen. You can also see the tiles at the bottom of the screen, which you need to match to power up your slugs. To play, you need to do the following:

        -
          -
        • Swipe left or right on the tiles to match three or more of the same color.
        • -
        • Tap on a slug icon to load it into your blaster.
        • -
        • Aim at your enemy by tilting your device left or right.
        • -
        • Tap on the screen to shoot your slug.
        • -
        • Dodge your enemy's slugs by tilting your device up or down.
        • -
        -

        The best strategies and tactics to collect, upgrade, and use your slugs

        -

        Slugs are the most important part of Bajoterra 1 APK, as they determine your attack power and abilities. You need to collect, upgrade, and use your slugs wisely to win every battle. Here are some strategies and tactics to help you:

        -
          -
        • Collect as many slugs as you can by playing the story mode, the challenge mode, or the multiplayer mode. You can also buy slugs from the store or get them from chests or rewards.
        • -
        • Upgrade your slugs by matching tiles of their color or using slug chargers. Upgrading your slugs will increase their power, speed, and special effects.
        • -
        • Use your slugs according to their element and type. Each slug has a different element (fire, water, earth, air, energy, or dark) and type (common, rare, ultra rare, or legendary). Some slugs are more effective against certain enemies or in certain situations. For example, fire slugs are good against water enemies, but bad against earth enemies. Legendary slugs are more powerful than common slugs, but they also take longer to charge.
        • -
        • Combine two slugs to create a fusion shot. Fusion shots are more powerful and have unique effects that can help you in battle. For example, combining a fire slug and an air slug will create a firestorm shot that will create a blast of fire and wind. To create a fusion shot, you need to match tiles of both colors and then tap on both slug icons.
        • -
        -

        Conclusion

        -

        Bajoterra 1 APK is a fun and action-packed puzzle game that will keep you entertained for hours. You will enjoy the amazing graphics, the exciting story, the diverse slugs, and the challenging battles. You will also learn how to download and install Bajoterra 1 APK on your device, and how to play and win every battle with the best strategies and tactics. If you are a fan of Slugterra or puzzle games in general, you should definitely give Bajoterra 1 APK a try. You will not regret it!

        -

        FAQs

        -

        Here are some frequently asked questions about Bajoterra 1 APK:

        -
          -
        1. What is the difference between Bajoterra 1 APK and Slugterra: Slug it Out 1?
        2. -

          Bajoterra 1 APK is the Spanish version of Slugterra: Slug it Out 1, which is the English version of the game. They are essentially the same game, but with different languages and names.

          -
        3. Is Bajoterra 1 APK safe to download and install?
        4. -

          Bajoterra 1 APK is safe to download and install from the App Store or from reputable third-party websites. However, you should always be careful when downloading apps from unknown sources and check for any viruses or malware.

          -
        5. How can I get more coins and gems in Bajoterra 1 APK?
        6. -

          You can get more coins and gems in Bajoterra 1 APK by playing the game regularly, completing missions, winning battles, opening chests, watching ads, or buying them with real money.

          -
        7. How can I play Bajoterra 1 APK with my friends?
        8. -

          You can play Bajoterra 1 APK with your friends by using the multiplayer mode. You can either join a random match or create a private match with your friends. You will need an internet connection to play multiplayer mode.

          -
        9. How can I contact the developers of Bajoterra 1 APK?
        10. -

          You can contact the developers of Bajoterra 1 APK by using the feedback option in the game settings or by visiting their official website.

          -

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Carx Street OBB APK OBB Download for Android - Updated 2023.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Carx Street OBB APK OBB Download for Android - Updated 2023.md deleted file mode 100644 index 69221eb843586f6295e3181ea57c28ce2574e2bb..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Carx Street OBB APK OBB Download for Android - Updated 2023.md +++ /dev/null @@ -1,129 +0,0 @@ -
        -

        How to Download OBB File for CarX Street Android Game

        -

        If you are a fan of street racing games, you might have heard of CarX Street, a realistic and dynamic open world game that lets you become a legend of Sunset City. However, you might also have noticed that the game requires an additional file, called an OBB file, to run smoothly on your Android device. In this article, we will explain what an OBB file is, why you need it, and how to download it for the CarX Street Android game.

        -

        download obb carx street android


        Download Zip ►►► https://ssurll.com/2uNQV1



        -

        What is CarX Street Android Game?

        -

        CarX Street is a game developed by CarX Technologies, the makers of CarX Drift Racing 2. It is a game that allows you to embrace the freedom of being a street racer in a dynamic open world. You can accept the challenge and become the legend of Sunset City by competing in realistic races on highways and city streets, as well as top-speed drift races. You can also build the car of your dreams using part tuning that unlocks all the physics of CarX Technology car behavior. You can explore every corner of the enormous world of CarX Street and enjoy the exciting car races that will leave you exhilarated.

        -

        Features of CarX Street Android Game

        -

        Some of the features of CarX Street Android game are:

        -
          -
        • Career mode that lets you join clubs, defeat bosses, and prove your skills as a driver.
        • -
        • Part tuning system that lets you customize your car and unlock its full potential.
        • -
        • Visual tuning system that lets you create a unique look for your car.
        • -
        • The most realistic mobile racing game with impressive physics and controls.
        • -
        • Modern, high-quality graphics and enormous open world.
        • -
        • Dynamic day/night change that adds variety to the gameplay.
        • -
        -

        Requirements for CarX Street Android Game

        -

        To play CarX Street Android game, you need to have:

        -
          -
        • An Android device with version 6.0 or higher.
        • -
        • At least 1 GB of RAM and 4 GB of free storage space.
        • -
        • A stable internet connection.
        • -
        • An OBB file that contains additional data for the game.
        • -
        -

        What is OBB File and Why Do You Need It?

        -

        Definition of OBB File

        -

        An OBB file is an expansion file used by some Android apps distributed using the Google Play online store. It contains data that is not stored in the application's main package (APK file), such as graphics, media files, and other large program assets. OBB files are often stored in a device's shared storage folder (the SD card or USB-mountable partition; also known as the "external" storage) where your app can access them.

        -

        Benefits of OBB File

        -

        The benefits of using an OBB file are:

        -
          -
        • It allows apps to have more than 100 MB of compressed download size, which is the limit imposed by Google Play for APK files.
        • -
        • It reduces the bandwidth consumption during the download process, as users only download the necessary data for their device.
        • -
        • It improves the performance and quality of the app, as it can access high-fidelity graphics, media files, or other large assets from the OBB file instead of loading them from the APK file.
        • -
        -

        How to Download OBB File for CarX Street Android Game?

        -

        Now that you know what an OBB file is and why you need it, let's see how to download it for the CarX Street Android game. The process is simple and consists of three steps:

        -

        Step 1: Download the APK File from Google Play Store

        -

        The first step is to download the APK file of CarX Street Android game from the Google Play Store. This is the main package that contains the app's code and basic resources. You can find the link to the game's page on Google Play Store here: [CarX Street - Apps on Google Play]. Once you are on the page, tap on the Install button and wait for the download to finish.

        -

        Step 2: Download the OBB File from a Trusted Source

        -

        The second step is to download the OBB file of CarX Street Android game from a trusted source. This is the expansion file that contains the additional data for the game, such as graphics, media files, and other large assets. You can find the link to the OBB file here: [CarX Street OBB File]. Make sure you download the OBB file that matches the version of the APK file you downloaded in step 1. Once you have downloaded the OBB file, you will get a ZIP file that you need to extract using a file manager app.

        -

        Step 3: Install the APK File and Copy the OBB File to the Right Location

        -

        The third step is to install the APK file and copy the OBB file to the right location on your device. To install the APK file, you need to enable the Unknown Sources option in your device's settings. This will allow you to install apps from sources other than Google Play Store. To enable this option, go to Settings > Security > Unknown Sources and toggle it on. Then, locate the APK file you downloaded in step 1 using a file manager app and tap on it to install it.

        -


        -

        To copy the OBB file to the right location, you need to find the folder where the app can access it. The folder name should be in this format: Android/obb/<package name>/, where <package name> is the name of your app's package, which is usually similar to its ID on Google Play Store. For the CarX Street Android game, the folder should be Android/obb/com.carxtech.carxstreet/. If you don't see this folder, you can create it manually using a file manager app. Then, copy the OBB file (which should have a name like main.<version code>.com.carxtech.carxstreet.obb, where <version code> matches the APK version you installed) to this folder.
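
        To make the naming convention concrete, here is a minimal Python sketch (not from the original article). The version code 11 and the /storage/emulated/0 storage root are placeholder assumptions; the real values depend on the APK build you installed and on your device.

```python
from pathlib import Path

def expected_obb_path(package_name: str, version_code: int,
                      storage_root: str = "/storage/emulated/0") -> Path:
    # Main expansion files follow the pattern: main.<version code>.<package name>.obb
    obb_dir = Path(storage_root) / "Android" / "obb" / package_name
    return obb_dir / f"main.{version_code}.{package_name}.obb"

# The version code 11 is a made-up example; use the one that matches your APK.
print(expected_obb_path("com.carxtech.carxstreet", 11))
# Expected output:
# /storage/emulated/0/Android/obb/com.carxtech.carxstreet/main.11.com.carxtech.carxstreet.obb
```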

        -

        How to Play CarX Street Android Game with OBB File?

        -

        After completing the three steps above, you are ready to play CarX Street Android game with OBB file. Here is how:

        -

        Launch the Game and Enjoy the High-Quality Graphics and Sound

        -

        To launch the game, simply tap on its icon on your device's home screen or app drawer. The game will verify the OBB file and load it automatically. You will then see the game's main menu, where you can choose your mode of play, customize your car, or explore Sunset City. You will notice that the game has high-quality graphics and sound that enhance your gaming experience.

        -

        Tips and Tricks for Playing CarX Street Android Game

        -

        To make the most out of CarX Street Android game, here are some tips and tricks that you can use:

        -
          -
        • Use different camera angles to get a better view of your car and surroundings.
        • -
        • Use nitro boosters to gain speed and overtake your opponents.
        • -
        • Use drift mode to perform spectacular drifts and earn more points.
        • -
        • Upgrade your car's parts and performance using coins and diamonds that you earn from races.
        • -
        • Join clubs and participate in club events to get rewards and bonuses.
        • -
        • Challenge other players online and show off your skills.
        • -
        -

        Conclusion

        -

        In conclusion, CarX Street Android game is a realistic and dynamic open world street racing game that requires an OBB file to run smoothly on your device. To download an OBB file for CarX Street Android game, you need to follow three simple steps: download the APK file from Google Play Store, download the OBB file from a trusted source, and install the APK file and copy the OBB file to the right location on your device. Once you have done that, you can launch the game and enjoy the high-quality graphics and sound, as well as the exciting gameplay. You can also use some tips and tricks to improve your performance and have more fun. We hope this article has helped you to download OBB file for CarX Street Android game and play it without any issues.

        -

        FAQs

        -

        Here are some frequently asked questions about CarX Street Android game and OBB file:

        -
        Q: What is the size of the OBB file for CarX Street Android game?
        A: The size of the OBB file for CarX Street Android game is about 1.5 GB.

        Q: What if I delete the OBB file by mistake?
        A: If you delete the OBB file by mistake, you will need to download it again from a trusted source and copy it to the right location on your device.

        Q: Can I play CarX Street Android game without an OBB file?
        A: No, you cannot play CarX Street Android game without an OBB file, as it contains essential data for the game.

        Q: Can I share the OBB file with my friends?
        A: Yes, you can share the OBB file with your friends, but they will also need to download the APK file from Google Play Store and install it on their devices.

        Q: Can I move the OBB file to another location on my device?
        A: No, you cannot move the OBB file to another location on your device, as it needs to be in the specific folder where your app can access it.

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Monoposto Full for PC A Guide to Install and Run the Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Monoposto Full for PC A Guide to Install and Run the Game.md deleted file mode 100644 index 0d4e00a9dc464e7fddc26fd0df3e988e3c0c40c5..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Monoposto Full for PC A Guide to Install and Run the Game.md +++ /dev/null @@ -1,92 +0,0 @@ - -

        Download Monoposto Full: How to Enjoy the Best Independent Formula Racing Game on Your Device

        -

        If you are a fan of formula racing games, you might have heard of Monoposto, an amazing independent racing game with single seater open-wheel cars. Monoposto is developed by Marco Pesce, a passionate game developer who created this game as a tribute to his love for motorsport. In this article, we will tell you what Monoposto is, why you should download the full version, and how to do it.

        -

        download monoposto full


        Download ○○○ https://ssurll.com/2uNUXZ



        -

        What is Monoposto?

        -

        Monoposto is a realistic and immersive racing game that simulates the experience of driving a formula car on 24 different tracks around the world. You can compete in the new 2023 season, with online multiplayer duels, quick races, single races, and championship modes. You can also customize your car and driver, choose your camera view, and adjust your driving options. Monoposto has dynamic weather effects, pit stops, car repairs, and a spectator TV mode that lets you watch the race from different angles.

        -

        Features of Monoposto

        -

        Monoposto has many features that make it stand out from other racing games. Here are some of them:

        -

        -24 realistic tracks

        -

        Monoposto has 24 racing tracks that are based on real-world locations and circuits. You can race on famous tracks like Monaco, Silverstone, Spa-Francorchamps, Suzuka, and more. Each track has its own characteristics, challenges, and weather conditions.

        -

        -Online multiplayer duel

        -

        Monoposto lets you challenge other players online in a one-on-one duel mode. You can race against your friends or random opponents, and see who is the fastest driver. You can also chat with your opponent before and after the race.

        -

        -Dynamic weather

        -

        Monoposto has a realistic weather system that changes according to the track and the time of day. You can race in sunny, cloudy, rainy, or stormy conditions. The weather affects your visibility, grip, and performance. You have to adapt your driving style and strategy accordingly.

        -

        -Pit stop and car setup

        -

        Monoposto allows you to make pit stops during qualifying and race sessions. You can repair your car, change your tires, and adjust your fuel level. You can also set up your car before the race, by tuning your engine, gearbox, suspension, brakes, aerodynamics, and more.

        -


        -

        -Customization of cars and drivers

        -

        Monoposto gives you the freedom to customize your car and driver. You can choose your driver name, nationality, helmet design, suit color, and number. You can also create your own livery for your car, by changing its color, shape, sponsors, and decals.

        -

        -Spectator TV mode

        -

        Monoposto has a spectator TV mode that lets you watch the race from different perspectives. You can switch between different camera views, such as cockpit, chase, front wing, rear wing, side pod, wheel cam, helicopter cam, and more. You can also lock the view on a specific driver or follow the leader.

        -

        Why download Monoposto full version?

        -

        Monoposto is available for free on Google Play Store and App Store for Android and iOS devices respectively. However, the free version has some limitations that might affect your enjoyment of the game. Here are some of the benefits of downloading the full version of Monoposto:

        -

        Benefits of downloading Monoposto full version

        -

        By downloading the full version of Monoposto, you can enjoy the following advantages:

        -

        -No ads

        -

        The free version of Monoposto has ads that might interrupt your gameplay or distract you from the race. By downloading the full version, you can get rid of the ads and have a smoother and more immersive experience.

        -

        -Full access to all tracks and modes

        -

        The free version of Monoposto only lets you play on 8 tracks and in 2 modes: quick race and single race. By downloading the full version, you can unlock all 24 tracks and all 4 modes: quick race, single race, championship, and online multiplayer duel. You can also access the spectator TV mode and watch the races from different angles.

        -

        -Support the developer

        -

        By downloading the full version of Monoposto, you can support the developer and help him continue to improve the game and add new features. Monoposto is an independent game that is made by one person who is passionate about motorsport. You can show your appreciation and encouragement by purchasing the full version.

        -

        How to download Monoposto full version?

        -

        Downloading the full version of Monoposto is very easy and simple. You just need to follow these steps:

        -

        Steps to download Monoposto full version for Android and iOS devices

        -
          -
        1. Go to Google Play Store or App Store on your device and search for Monoposto.
        2. -
        3. Tap on the Monoposto icon and then tap on the Install button.
        4. -
        5. Wait for the game to download and install on your device.
        6. -
        7. Open the game and tap on the Menu button on the top left corner.
        8. -
        9. Tap on the Full Version option and then tap on the Buy button.
        10. -
        11. Enter your payment details and confirm your purchase.
        12. -
        13. Enjoy the full version of Monoposto!
        14. -
        -

        Conclusion

        -

        Monoposto is a fantastic racing game that simulates the thrill and challenge of driving a formula car on realistic tracks. It has many features that make it fun and immersive, such as online multiplayer duels, dynamic weather, pit stops, car setup, customization, and spectator TV mode. By downloading the full version of Monoposto, you can enjoy all these features without any ads or limitations. You can also support the developer who created this game as a tribute to his love for motorsport. If you are a fan of formula racing games, you should definitely download Monoposto full version and enjoy the best independent formula racing game on your device.

        -

        FAQs

        Q: How much does Monoposto full version cost?
        A: Monoposto full version costs $2.99 for both Android and iOS devices.

        Q: What are the system requirements for Monoposto?
        A: Monoposto requires Android 5.0 or later or iOS 10.0 or later. It also requires at least 500 MB of free storage space on your device.

        Q: How can I contact the developer of Monoposto?
        A: You can contact the developer of Monoposto by sending an email to monopostogame@gmail.com or by visiting his website at https://www.monopostogame.com/.

        Q: How can I rate and review Monoposto?
        A: You can rate and review Monoposto by going to Google Play Store or App Store on your device and finding the game page. You can then tap on the stars to rate it and write your feedback in the review section.

        Q: How can I learn more about formula racing?
        A: You can learn more about formula racing by visiting websites like https://www.formula1.com/ or https://www.fia.com/sport/championships/f1. You can also watch formula racing events on TV or online platforms like https://www.f1tv.formula1.com/.

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free RPG Essential Kit A Powerful and Easy-to-Customize Game Development Solution.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free RPG Essential Kit A Powerful and Easy-to-Customize Game Development Solution.md deleted file mode 100644 index ebbd2285d907deafb61e1dfcfd604273cc29ec09..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free RPG Essential Kit A Powerful and Easy-to-Customize Game Development Solution.md +++ /dev/null @@ -1,25 +0,0 @@ -
        -

        RPG Essential Kit: A Free Download for Game Developers

        - If you are a game developer who wants to create your own role-playing games (RPGs) in Unity, you might be interested in RPG Essential Kit. This is a game toolkit that provides you with everything you need to start making your own RPGs in Unity. In this article, we will explain what RPG Essential Kit is, what features it offers, how to download it for free, why you should use it, how to use it, and some frequently asked questions.

        What is RPG Essential Kit?

        - RPG Essential Kit is a game toolkit that was created by Blink, a publisher on the Unity Asset Store. It is designed to help game developers create RPGs in Unity, without having to code everything from scratch. It includes features such as:

        A game toolkit for creating RPGs in Unity

        - Character creation and customization
        - Inventory and equipment system
        - Quest and dialogue system
        - Combat and skill system
        - Leveling and progression system
        - Save and load system
        - User interface and sound effects

        Features of RPG Essential Kit

        - Some of the features that RPG Essential Kit offers are:
        - Compatible with the Built-in Render Pipeline, the Universal Render Pipeline, and the High Definition Render Pipeline
        - Modular and customizable, allowing you to tweak the settings and add your own assets and scripts
        - Easy to use, with drag-and-drop components and prefabs
        - Well documented, with a detailed manual and video tutorials
        - Supported by the developer, with regular updates and bug fixes

        How to download RPG Essential Kit for free

        - RPG Essential Kit is normally priced at $499.99 on the Unity Asset Store. However, you can download it for free if you meet the following conditions:
        - You have a Unity account and a valid email address
        - You sign up for the Unity Learn Premium subscription, which gives you access to hundreds of courses and projects on game development
        - You redeem the code "RPGKIT" on the Unity Asset Store before June 30, 2023
        By doing this, you will get a free license for RPG Essential Kit, as well as a one-month free trial of Unity Learn Premium. After the trial period ends, you can cancel your subscription or continue paying $15 per month. You will still be able to use RPG Essential Kit even if you cancel your subscription.

        Why use RPG Essential Kit?

        - RPG Essential Kit is a great tool for game developers who want to create their own RPGs in Unity. Here are some of the reasons why you should use it:

        Benefits of using RPG Essential Kit

        - It saves you time and effort, by providing you with ready-made features and systems that you can use in your game
        - It helps you learn and improve your skills, by showing you how to implement various aspects of an RPG in Unity
        - It gives you flexibility and creativity, by allowing you to customize and modify the features according to your needs and preferences

        Examples of games made with RPG Essential Kit

        - Some of the games that have been made with RPG Essential Kit are:
        - The Lost Kingdoms, a fantasy action-RPG that features an open world, dynamic quests, crafting, and multiplayer
        - The Last Hope, a sci-fi survival-RPG that features a post-apocalyptic setting, stealth mechanics, crafting, and base building
        - The Legend of Zelda: Breath of the Wild Clone, a fan-made project that recreates some of the elements of the popular Nintendo game in Unity

        Alternatives to RPG Essential Kit

        - If you are looking for other game toolkits that can help you create RPGs in Unity, you might want to check out these alternatives:
        - RPG Builder, a game toolkit that lets you create RPGs without coding, with features such as character creation, inventory, quests, combat, skills, and more
        - ORK Framework, a game toolkit that offers a complete solution for creating RPGs in Unity, with features such as game mechanics, game data, user interface, audio, and more
        - RPG All-In-One, a game toolkit that simplifies the process of creating RPGs in Unity, with features such as character customization, inventory, quests, combat, dialogue, and more

        How to use RPG Essential Kit?

        - If you have downloaded RPG Essential Kit for free and want to start using it in your game project, here are some steps that you need to follow:

        Requirements and installation

        - To use RPG Essential Kit, you need to have:
        - Unity 2019.4 or higher
        - A computer that meets the minimum system requirements for Unity
        - A basic knowledge of Unity and C#
        To install RPG Essential Kit, you need to:
        - Open Unity and create a new project or open an existing one
        - Go to the Asset Store window and search for RPG Essential Kit
        - Click on the Download button and then on the Import button
        - Wait for the import process to finish and then click on the Accept All button
        - You should see a folder called RPG Essential Kit in your Project window

        Documentation and tutorials

        - To learn how to use RPG Essential Kit, you can refer to the documentation and tutorials that are provided by the developer. You can find them in:
        - The Manual folder inside the RPG Essential Kit folder in your Project window
        - The official website of RPG Essential Kit
        - The YouTube channel of Blink
        The documentation and tutorials will guide you through the basics of using RPG Essential Kit, such as setting up your project, creating your characters, adding items and quests, designing your combat system, and more.

        Tips and tricks for using RPG Essential Kit

        - To make the most out of RPG Essential Kit, here are some tips and tricks that you can use:
        - Use the Demo scene as a reference for how to set up your own scene
        - Use the Prefabs folder to find ready-made game objects that you can drag and drop into your scene
        - Use the Scriptable Objects folder to find and edit the game data that controls the behavior of your game features
        - Use the Settings folder to adjust the global settings of your game features
        - Use the Tools menu to access some useful utilities such as creating new items, quests, skills, etc.
        - Use the Debug menu to test your game features and find any errors or bugs

        Conclusion

        - RPG Essential Kit is a game toolkit that can help you create your own RPGs in Unity. It offers a lot of features and systems that you can use in your game project. You can download it for free if you sign up for Unity Learn Premium and use the code "RPGKIT" before June 30, 2023. You can also check out some alternatives to RPG Essential Kit if you want to explore other options. To use RPG Essential Kit, you need to install it in your Unity project and follow the documentation and tutorials that are provided by the developer. You can also use some tips and tricks to make the most out of RPG Essential Kit.

        FAQs

        - Here are some frequently asked questions about RPG Essential Kit:

        Q: Can I use RPG Essential Kit for commercial purposes?

        -A: Yes, you can use RPG Essential Kit for commercial purposes as long as you comply with the Unity Asset Store EULA.

        Q: Can I modify or extend RPG Essential Kit?

        -A: Yes, you can modify or extend RPG Essential Kit as much as you want. You can also add your own assets and scripts to customize your game.

        Q: Can I get support from the developer of RPG Essential Kit?

        -A: Yes, you can get support from the developer of RPG Essential Kit by contacting them through their email address (blink@blink.com) or their Discord server.

        Q: Can I request new features or report bugs for RPG Essential Kit?

        -A: Yes, you can request new features or report bugs for RPG Essential Kit by using their feedback form or their Trello board.

        Q: Can I share my games made with RPG Essential Kit?

        -A: Yes, you can share your games made with RPG Essential Kit with other users by posting them on their showcase forum or their social media pages.

        -

        rpg essential kit free download


        Download File: https://ssurll.com/2uNXBK



        -
        -
        \ No newline at end of file diff --git a/spaces/simsantonioii/MusicGen-Continuation/tests/__init__.py b/spaces/simsantonioii/MusicGen-Continuation/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/simsantonioii/MusicGen-Continuation/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/sort/track.py b/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/sort/track.py deleted file mode 100644 index b81d7968bdb828ba43fd9a9968d40520f2d818b3..0000000000000000000000000000000000000000 --- a/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/sort/track.py +++ /dev/null @@ -1,199 +0,0 @@ -# vim: expandtab:ts=4:sw=4 - - -class TrackState: - """ - Enumeration type for the single target track state. Newly created tracks are - classified as `tentative` until enough evidence has been collected. Then, - the track state is changed to `confirmed`. Tracks that are no longer alive - are classified as `deleted` to mark them for removal from the set of active - tracks. - - 单个目标track状态的枚举类型。 - 新创建的track分类为“Tentative”,直到收集到足够的证据为止。 - 然后,跟踪状态更改为“Confirmed”。 - 不再活跃的tracks被归类为“Deleted”,以将其标记为从有效集中删除。 - - """ - - Tentative = 1 - Confirmed = 2 - Deleted = 3 - - -class Track: - """ - A single target track with state space `(x, y, a, h)` and associated - velocities, where `(x, y)` is the center of the bounding box, `a` is the - aspect ratio and `h` is the height. - - 具有状态空间(x,y,a,h)并关联速度的单个目标轨迹(track), - 其中(x,y)是边界框的中心,a是宽高比,h是高度。 - - Parameters - ---------- - mean : ndarray - Mean vector of the initial state distribution. - 初始状态分布的均值向量 - covariance : ndarray - Covariance matrix of the initial state distribution. - 初始状态分布的协方差矩阵 - track_id : int - A unique track identifier. - 唯一的track标识符 - n_init : int - Number of consecutive detections before the track is confirmed. The - track state is set to `Deleted` if a miss occurs within the first - `n_init` frames. - 确认track之前的连续检测次数。 在第一个n_init帧中 - 第一个未命中的情况下将跟踪状态设置为“Deleted” - max_age : int - The maximum number of consecutive misses before the track state is - set to `Deleted`. - 跟踪状态设置为Deleted之前的最大连续未命中数;代表一个track的存活期限 - - feature : Optional[ndarray] - Feature vector of the detection this track originates from. If not None, - this feature is added to the `features` cache. - 此track所源自的检测的特征向量。 如果不是None,此feature已添加到feature缓存中。 - - Attributes - ---------- - mean : ndarray - Mean vector of the initial state distribution. - 初始状态分布的均值向量 - covariance : ndarray - Covariance matrix of the initial state distribution. - 初始状态分布的协方差矩阵 - track_id : int - A unique track identifier. - hits : int - Total number of measurement updates. - 测量更新总数 - age : int - Total number of frames since first occurence. - 自第一次出现以来的总帧数 - time_since_update : int - Total number of frames since last measurement update. - 自上次测量更新以来的总帧数 - state : TrackState - The current track state. - features : List[ndarray] - A cache of features. On each measurement update, the associated feature - vector is added to this list. 
- feature缓存。每次测量更新时,相关feature向量添加到此列表中 - - """ - - def __init__(self, mean, covariance, track_id, n_init, max_age, - feature=None): - self.mean = mean - self.covariance = covariance - self.track_id = track_id - # hits代表匹配上了多少次,匹配次数超过n_init,设置Confirmed状态 - # hits每次调用update函数的时候+1 - self.hits = 1 - self.age = 1 # 和time_since_update功能重复 - # 每次调用predict函数的时候就会+1; 每次调用update函数的时候就会设置为0 - self.time_since_update = 0 - - self.state = TrackState.Tentative # 初始化一个Track的时设置Tentative状态 - # 每个track对应多个features, 每次更新都会将最新的feature添加到列表中 - self.features = [] - if feature is not None: - self.features.append(feature) - - self._n_init = n_init - self._max_age = max_age - - def to_tlwh(self): - """Get current position in bounding box format `(top left x, top left y, - width, height)`. - - Returns - ------- - ndarray - The bounding box. - - """ - ret = self.mean[:4].copy() - ret[2] *= ret[3] - ret[:2] -= ret[2:] / 2 - return ret - - def to_tlbr(self): - """Get current position in bounding box format `(min x, miny, max x, - max y)`. - - Returns - ------- - ndarray - The bounding box. - - """ - ret = self.to_tlwh() - ret[2:] = ret[:2] + ret[2:] - return ret - - def predict(self, kf): - """Propagate the state distribution to the current time step using a - Kalman filter prediction step. - 使用卡尔曼滤波器预测步骤将状态分布传播到当前时间步 - - Parameters - ---------- - kf : kalman_filter.KalmanFilter - The Kalman filter. - - """ - self.mean, self.covariance = kf.predict(self.mean, self.covariance) - self.age += 1 - self.time_since_update += 1 - - def update(self, kf, detection): - """Perform Kalman filter measurement update step and update the feature - cache. - 执行卡尔曼滤波器测量更新步骤并更新feature缓存 - - Parameters - ---------- - kf : kalman_filter.KalmanFilter - The Kalman filter. - detection : Detection - The associated detection. - - """ - self.mean, self.covariance = kf.update( - self.mean, self.covariance, detection.to_xyah()) - self.features.append(detection.feature) - - self.hits += 1 - self.time_since_update = 0 - # hits代表匹配上了多少次,匹配次数超过n_init,设置Confirmed状态 - # 连续匹配上n_init帧的时候,转变为确定态 - if self.state == TrackState.Tentative and self.hits >= self._n_init: - self.state = TrackState.Confirmed - - def mark_missed(self): - """Mark this track as missed (no association at the current time step). - """ - # 如果在处于Tentative态的情况下没有匹配上任何detection,转变为删除态。 - if self.state == TrackState.Tentative: - self.state = TrackState.Deleted - elif self.time_since_update > self._max_age: - # 如果time_since_update超过max_age,设置Deleted状态 - # 即失配连续达到max_age次数的时候,转变为删除态 - self.state = TrackState.Deleted - - def is_tentative(self): - """Returns True if this track is tentative (unconfirmed). - """ - return self.state == TrackState.Tentative - - def is_confirmed(self): - """Returns True if this track is confirmed.""" - return self.state == TrackState.Confirmed - - def is_deleted(self): - """Returns True if this track is dead and should be deleted.""" - return self.state == TrackState.Deleted diff --git a/spaces/skf15963/summary/fengshen/models/zen2/configuration_zen2.py b/spaces/skf15963/summary/fengshen/models/zen2/configuration_zen2.py deleted file mode 100644 index c7cbeb5657ea07b2a4e8429199a6091be39864c8..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/models/zen2/configuration_zen2.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding=utf-8 -# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" TransfoXLDenoise model configuration """ - -from transformers.configuration_utils import PretrainedConfig - - -class ZenConfig(PretrainedConfig): - - """Configuration class to store the configuration of a `ZenModel`. - """ - - def __init__(self, - # vocab_size_or_config_json_file, - # word_vocab_size, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - layer_norm_eps=1e-12, - num_hidden_word_layers=6, - **kwargs): - """Constructs ZenConfig. - - Args: - vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. - hidden_size: Size of the encoder layers and the pooler layer. - num_hidden_layers: Number of hidden layers in the Transformer encoder. - num_attention_heads: Number of attention heads for each attention layer in - the Transformer encoder. - intermediate_size: The size of the "intermediate" (i.e., feed-forward) - layer in the Transformer encoder. - hidden_act: The non-linear activation function (function or string) in the - encoder and pooler. If string, "gelu", "relu" and "swish" are supported. - hidden_dropout_prob: The dropout probabilitiy for all fully connected - layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob: The dropout ratio for the attention - probabilities. - max_position_embeddings: The maximum sequence length that this model might - ever be used with. Typically set this to something large just in case - (e.g., 512 or 1024 or 2048). - type_vocab_size: The vocabulary size of the `token_type_ids` passed into - `BertModel`. - initializer_range: The sttdev of the truncated_normal_initializer for - initializing all weight matrices. - layer_norm_eps: The epsilon used by LayerNorm. - """ - # self.vocab_size = vocab_size_or_config_json_file - # self.word_size = word_vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - self.num_hidden_word_layers = num_hidden_word_layers - super().__init__(**kwargs) diff --git a/spaces/songweig/rich-text-to-image/models/resnet.py b/spaces/songweig/rich-text-to-image/models/resnet.py deleted file mode 100644 index 05c95ec7d5ddd23db37d2adcd47865de9e8139cb..0000000000000000000000000000000000000000 --- a/spaces/songweig/rich-text-to-image/models/resnet.py +++ /dev/null @@ -1,882 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# `TemporalConvLayer` Copyright 2023 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from functools import partial -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from diffusers.models.activations import get_activation -from diffusers.models.attention import AdaGroupNorm -from models.attention_processor import SpatialNorm - - -class Upsample1D(nn.Module): - """A 1D upsampling layer with an optional convolution. - - Parameters: - channels (`int`): - number of channels in the inputs and outputs. - use_conv (`bool`, default `False`): - option to use a convolution. - use_conv_transpose (`bool`, default `False`): - option to use a convolution transpose. - out_channels (`int`, optional): - number of output channels. Defaults to `channels`. - """ - - def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_conv_transpose = use_conv_transpose - self.name = name - - self.conv = None - if use_conv_transpose: - self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) - elif use_conv: - self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) - - def forward(self, inputs): - assert inputs.shape[1] == self.channels - if self.use_conv_transpose: - return self.conv(inputs) - - outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest") - - if self.use_conv: - outputs = self.conv(outputs) - - return outputs - - -class Downsample1D(nn.Module): - """A 1D downsampling layer with an optional convolution. - - Parameters: - channels (`int`): - number of channels in the inputs and outputs. - use_conv (`bool`, default `False`): - option to use a convolution. - out_channels (`int`, optional): - number of output channels. Defaults to `channels`. - padding (`int`, default `1`): - padding for the convolution. - """ - - def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.padding = padding - stride = 2 - self.name = name - - if use_conv: - self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) - else: - assert self.channels == self.out_channels - self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) - - def forward(self, inputs): - assert inputs.shape[1] == self.channels - return self.conv(inputs) - - -class Upsample2D(nn.Module): - """A 2D upsampling layer with an optional convolution. - - Parameters: - channels (`int`): - number of channels in the inputs and outputs. - use_conv (`bool`, default `False`): - option to use a convolution. - use_conv_transpose (`bool`, default `False`): - option to use a convolution transpose. - out_channels (`int`, optional): - number of output channels. Defaults to `channels`. 
- """ - - def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_conv_transpose = use_conv_transpose - self.name = name - - conv = None - if use_conv_transpose: - conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1) - elif use_conv: - conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=1) - - # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed - if name == "conv": - self.conv = conv - else: - self.Conv2d_0 = conv - - def forward(self, hidden_states, output_size=None): - assert hidden_states.shape[1] == self.channels - - if self.use_conv_transpose: - return self.conv(hidden_states) - - # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16 - # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch - # https://github.com/pytorch/pytorch/issues/86679 - dtype = hidden_states.dtype - if dtype == torch.bfloat16: - hidden_states = hidden_states.to(torch.float32) - - # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 - if hidden_states.shape[0] >= 64: - hidden_states = hidden_states.contiguous() - - # if `output_size` is passed we force the interpolation output - # size and do not make use of `scale_factor=2` - if output_size is None: - hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest") - else: - hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest") - - # If the input is bfloat16, we cast back to bfloat16 - if dtype == torch.bfloat16: - hidden_states = hidden_states.to(dtype) - - # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed - if self.use_conv: - if self.name == "conv": - hidden_states = self.conv(hidden_states) - else: - hidden_states = self.Conv2d_0(hidden_states) - - return hidden_states - - -class Downsample2D(nn.Module): - """A 2D downsampling layer with an optional convolution. - - Parameters: - channels (`int`): - number of channels in the inputs and outputs. - use_conv (`bool`, default `False`): - option to use a convolution. - out_channels (`int`, optional): - number of output channels. Defaults to `channels`. - padding (`int`, default `1`): - padding for the convolution. 
- """ - - def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.padding = padding - stride = 2 - self.name = name - - if use_conv: - conv = nn.Conv2d(self.channels, self.out_channels, 3, stride=stride, padding=padding) - else: - assert self.channels == self.out_channels - conv = nn.AvgPool2d(kernel_size=stride, stride=stride) - - # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed - if name == "conv": - self.Conv2d_0 = conv - self.conv = conv - elif name == "Conv2d_0": - self.conv = conv - else: - self.conv = conv - - def forward(self, hidden_states): - assert hidden_states.shape[1] == self.channels - if self.use_conv and self.padding == 0: - pad = (0, 1, 0, 1) - hidden_states = F.pad(hidden_states, pad, mode="constant", value=0) - - assert hidden_states.shape[1] == self.channels - hidden_states = self.conv(hidden_states) - - return hidden_states - - -class FirUpsample2D(nn.Module): - """A 2D FIR upsampling layer with an optional convolution. - - Parameters: - channels (`int`): - number of channels in the inputs and outputs. - use_conv (`bool`, default `False`): - option to use a convolution. - out_channels (`int`, optional): - number of output channels. Defaults to `channels`. - fir_kernel (`tuple`, default `(1, 3, 3, 1)`): - kernel for the FIR filter. - """ - - def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)): - super().__init__() - out_channels = out_channels if out_channels else channels - if use_conv: - self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) - self.use_conv = use_conv - self.fir_kernel = fir_kernel - self.out_channels = out_channels - - def _upsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1): - """Fused `upsample_2d()` followed by `Conv2d()`. - - Padding is performed only once at the beginning, not between the operations. The fused op is considerably more - efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of - arbitrary order. - - Args: - hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - weight: Weight tensor of the shape `[filterH, filterW, inChannels, - outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. - kernel: FIR filter of the shape `[firH, firW]` or `[firN]` - (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. - factor: Integer upsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - - Returns: - output: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same - datatype as `hidden_states`. - """ - - assert isinstance(factor, int) and factor >= 1 - - # Setup filter kernel. - if kernel is None: - kernel = [1] * factor - - # setup kernel - kernel = torch.tensor(kernel, dtype=torch.float32) - if kernel.ndim == 1: - kernel = torch.outer(kernel, kernel) - kernel /= torch.sum(kernel) - - kernel = kernel * (gain * (factor**2)) - - if self.use_conv: - convH = weight.shape[2] - convW = weight.shape[3] - inC = weight.shape[1] - - pad_value = (kernel.shape[0] - factor) - (convW - 1) - - stride = (factor, factor) - # Determine data dimensions. 
- output_shape = ( - (hidden_states.shape[2] - 1) * factor + convH, - (hidden_states.shape[3] - 1) * factor + convW, - ) - output_padding = ( - output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH, - output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW, - ) - assert output_padding[0] >= 0 and output_padding[1] >= 0 - num_groups = hidden_states.shape[1] // inC - - # Transpose weights. - weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW)) - weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4) - weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW)) - - inverse_conv = F.conv_transpose2d( - hidden_states, weight, stride=stride, output_padding=output_padding, padding=0 - ) - - output = upfirdn2d_native( - inverse_conv, - torch.tensor(kernel, device=inverse_conv.device), - pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1), - ) - else: - pad_value = kernel.shape[0] - factor - output = upfirdn2d_native( - hidden_states, - torch.tensor(kernel, device=hidden_states.device), - up=factor, - pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), - ) - - return output - - def forward(self, hidden_states): - if self.use_conv: - height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel) - height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1) - else: - height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) - - return height - - -class FirDownsample2D(nn.Module): - """A 2D FIR downsampling layer with an optional convolution. - - Parameters: - channels (`int`): - number of channels in the inputs and outputs. - use_conv (`bool`, default `False`): - option to use a convolution. - out_channels (`int`, optional): - number of output channels. Defaults to `channels`. - fir_kernel (`tuple`, default `(1, 3, 3, 1)`): - kernel for the FIR filter. - """ - - def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)): - super().__init__() - out_channels = out_channels if out_channels else channels - if use_conv: - self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) - self.fir_kernel = fir_kernel - self.use_conv = use_conv - self.out_channels = out_channels - - def _downsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1): - """Fused `Conv2d()` followed by `downsample_2d()`. - Padding is performed only once at the beginning, not between the operations. The fused op is considerably more - efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of - arbitrary order. - - Args: - hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - weight: - Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be - performed by `inChannels = x.shape[0] // numGroups`. - kernel: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * - factor`, which corresponds to average pooling. - factor: Integer downsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - - Returns: - output: Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and - same datatype as `x`. 
- """ - - assert isinstance(factor, int) and factor >= 1 - if kernel is None: - kernel = [1] * factor - - # setup kernel - kernel = torch.tensor(kernel, dtype=torch.float32) - if kernel.ndim == 1: - kernel = torch.outer(kernel, kernel) - kernel /= torch.sum(kernel) - - kernel = kernel * gain - - if self.use_conv: - _, _, convH, convW = weight.shape - pad_value = (kernel.shape[0] - factor) + (convW - 1) - stride_value = [factor, factor] - upfirdn_input = upfirdn2d_native( - hidden_states, - torch.tensor(kernel, device=hidden_states.device), - pad=((pad_value + 1) // 2, pad_value // 2), - ) - output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0) - else: - pad_value = kernel.shape[0] - factor - output = upfirdn2d_native( - hidden_states, - torch.tensor(kernel, device=hidden_states.device), - down=factor, - pad=((pad_value + 1) // 2, pad_value // 2), - ) - - return output - - def forward(self, hidden_states): - if self.use_conv: - downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel) - hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1) - else: - hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) - - return hidden_states - - -# downsample/upsample layer used in k-upscaler, might be able to use FirDownsample2D/DirUpsample2D instead -class KDownsample2D(nn.Module): - def __init__(self, pad_mode="reflect"): - super().__init__() - self.pad_mode = pad_mode - kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) - self.pad = kernel_1d.shape[1] // 2 - 1 - self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) - - def forward(self, inputs): - inputs = F.pad(inputs, (self.pad,) * 4, self.pad_mode) - weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) - indices = torch.arange(inputs.shape[1], device=inputs.device) - kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) - weight[indices, indices] = kernel - return F.conv2d(inputs, weight, stride=2) - - -class KUpsample2D(nn.Module): - def __init__(self, pad_mode="reflect"): - super().__init__() - self.pad_mode = pad_mode - kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2 - self.pad = kernel_1d.shape[1] // 2 - 1 - self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) - - def forward(self, inputs): - inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode) - weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) - indices = torch.arange(inputs.shape[1], device=inputs.device) - kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) - weight[indices, indices] = kernel - return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1) - - -class ResnetBlock2D(nn.Module): - r""" - A Resnet block. - - Parameters: - in_channels (`int`): The number of channels in the input. - out_channels (`int`, *optional*, default to be `None`): - The number of output channels for the first conv2d layer. If None, same as `in_channels`. - dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. - temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. - groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. 
- groups_out (`int`, *optional*, default to None): - The number of groups to use for the second normalization layer. if set to None, same as `groups`. - eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. - non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. - time_embedding_norm (`str`, *optional*, default to `"default"` ): Time scale shift config. - By default, apply timestep embedding conditioning with a simple shift mechanism. Choose "scale_shift" or - "ada_group" for a stronger conditioning with scale and shift. - kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see - [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. - output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. - use_in_shortcut (`bool`, *optional*, default to `True`): - If `True`, add a 1x1 nn.conv2d layer for skip-connection. - up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. - down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. - conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the - `conv_shortcut` output. - conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. - If None, same as `out_channels`. - """ - - def __init__( - self, - *, - in_channels, - out_channels=None, - conv_shortcut=False, - dropout=0.0, - temb_channels=512, - groups=32, - groups_out=None, - pre_norm=True, - eps=1e-6, - non_linearity="swish", - skip_time_act=False, - time_embedding_norm="default", # default, scale_shift, ada_group, spatial - kernel=None, - output_scale_factor=1.0, - use_in_shortcut=None, - up=False, - down=False, - conv_shortcut_bias: bool = True, - conv_2d_out_channels: Optional[int] = None, - ): - super().__init__() - self.pre_norm = pre_norm - self.pre_norm = True - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - self.up = up - self.down = down - self.output_scale_factor = output_scale_factor - self.time_embedding_norm = time_embedding_norm - self.skip_time_act = skip_time_act - - if groups_out is None: - groups_out = groups - - if self.time_embedding_norm == "ada_group": - self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps) - elif self.time_embedding_norm == "spatial": - self.norm1 = SpatialNorm(in_channels, temb_channels) - else: - self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) - - self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - - if temb_channels is not None: - if self.time_embedding_norm == "default": - self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels) - elif self.time_embedding_norm == "scale_shift": - self.time_emb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) - elif self.time_embedding_norm == "ada_group" or self.time_embedding_norm == "spatial": - self.time_emb_proj = None - else: - raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ") - else: - self.time_emb_proj = None - - if self.time_embedding_norm == "ada_group": - self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps) - elif self.time_embedding_norm == "spatial": - self.norm2 = SpatialNorm(out_channels, temb_channels) - else: - self.norm2 = 
torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) - - self.dropout = torch.nn.Dropout(dropout) - conv_2d_out_channels = conv_2d_out_channels or out_channels - self.conv2 = torch.nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) - - self.nonlinearity = get_activation(non_linearity) - - self.upsample = self.downsample = None - if self.up: - if kernel == "fir": - fir_kernel = (1, 3, 3, 1) - self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel) - elif kernel == "sde_vp": - self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") - else: - self.upsample = Upsample2D(in_channels, use_conv=False) - elif self.down: - if kernel == "fir": - fir_kernel = (1, 3, 3, 1) - self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel) - elif kernel == "sde_vp": - self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) - else: - self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") - - self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut - - self.conv_shortcut = None - if self.use_in_shortcut: - self.conv_shortcut = torch.nn.Conv2d( - in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias - ) - - # Rich-Text: feature injection - def forward(self, input_tensor, temb, inject_states=None): - hidden_states = input_tensor - - if self.time_embedding_norm == "ada_group" or self.time_embedding_norm == "spatial": - hidden_states = self.norm1(hidden_states, temb) - else: - hidden_states = self.norm1(hidden_states) - - hidden_states = self.nonlinearity(hidden_states) - - if self.upsample is not None: - # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984 - if hidden_states.shape[0] >= 64: - input_tensor = input_tensor.contiguous() - hidden_states = hidden_states.contiguous() - input_tensor = self.upsample(input_tensor) - hidden_states = self.upsample(hidden_states) - elif self.downsample is not None: - input_tensor = self.downsample(input_tensor) - hidden_states = self.downsample(hidden_states) - - hidden_states = self.conv1(hidden_states) - - if self.time_emb_proj is not None: - if not self.skip_time_act: - temb = self.nonlinearity(temb) - temb = self.time_emb_proj(temb)[:, :, None, None] - - if temb is not None and self.time_embedding_norm == "default": - hidden_states = hidden_states + temb - - if self.time_embedding_norm == "ada_group" or self.time_embedding_norm == "spatial": - hidden_states = self.norm2(hidden_states, temb) - else: - hidden_states = self.norm2(hidden_states) - - if temb is not None and self.time_embedding_norm == "scale_shift": - scale, shift = torch.chunk(temb, 2, dim=1) - hidden_states = hidden_states * (1 + scale) + shift - - hidden_states = self.nonlinearity(hidden_states) - - hidden_states = self.dropout(hidden_states) - hidden_states = self.conv2(hidden_states) - - if self.conv_shortcut is not None: - input_tensor = self.conv_shortcut(input_tensor) - - # Rich-Text: feature injection - if inject_states is not None: - output_tensor = (input_tensor + inject_states) / self.output_scale_factor - else: - output_tensor = (input_tensor + hidden_states) / self.output_scale_factor - - return output_tensor, hidden_states - - -# unet_rl.py -def rearrange_dims(tensor): - if len(tensor.shape) == 2: - return tensor[:, :, None] - if len(tensor.shape) == 3: - return tensor[:, :, None, :] - elif len(tensor.shape) == 4: - return tensor[:, :, 0, :] - else: - raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") - - -class Conv1dBlock(nn.Module): - """ - Conv1d --> GroupNorm --> Mish - """ - - def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): - super().__init__() - - self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) - self.group_norm = nn.GroupNorm(n_groups, out_channels) - self.mish = nn.Mish() - - def forward(self, inputs): - intermediate_repr = self.conv1d(inputs) - intermediate_repr = rearrange_dims(intermediate_repr) - intermediate_repr = self.group_norm(intermediate_repr) - intermediate_repr = rearrange_dims(intermediate_repr) - output = self.mish(intermediate_repr) - return output - - -# unet_rl.py -class ResidualTemporalBlock1D(nn.Module): - def __init__(self, inp_channels, out_channels, embed_dim, kernel_size=5): - super().__init__() - self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size) - self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size) - - self.time_emb_act = nn.Mish() - self.time_emb = nn.Linear(embed_dim, out_channels) - - self.residual_conv = ( - nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() - ) - - def forward(self, inputs, t): - """ - Args: - inputs : [ batch_size x inp_channels x horizon ] - t : [ batch_size x embed_dim ] - - returns: - out : [ batch_size x out_channels x horizon ] - """ - t = self.time_emb_act(t) - t = self.time_emb(t) - out = self.conv_in(inputs) + rearrange_dims(t) - out = self.conv_out(out) - return out + self.residual_conv(inputs) - - -def upsample_2d(hidden_states, kernel=None, factor=2, gain=1): - r"""Upsample2D a batch of 2D images with the given filter. 
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given - filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified - `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is - a: multiple of the upsampling factor. - - Args: - hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - kernel: FIR filter of the shape `[firH, firW]` or `[firN]` - (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. - factor: Integer upsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - - Returns: - output: Tensor of the shape `[N, C, H * factor, W * factor]` - """ - assert isinstance(factor, int) and factor >= 1 - if kernel is None: - kernel = [1] * factor - - kernel = torch.tensor(kernel, dtype=torch.float32) - if kernel.ndim == 1: - kernel = torch.outer(kernel, kernel) - kernel /= torch.sum(kernel) - - kernel = kernel * (gain * (factor**2)) - pad_value = kernel.shape[0] - factor - output = upfirdn2d_native( - hidden_states, - kernel.to(device=hidden_states.device), - up=factor, - pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), - ) - return output - - -def downsample_2d(hidden_states, kernel=None, factor=2, gain=1): - r"""Downsample2D a batch of 2D images with the given filter. - Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the - given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the - specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its - shape is a multiple of the downsampling factor. - - Args: - hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. - kernel: FIR filter of the shape `[firH, firW]` or `[firN]` - (separable). The default is `[1] * factor`, which corresponds to average pooling. - factor: Integer downsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). 
- - Returns: - output: Tensor of the shape `[N, C, H // factor, W // factor]` - """ - - assert isinstance(factor, int) and factor >= 1 - if kernel is None: - kernel = [1] * factor - - kernel = torch.tensor(kernel, dtype=torch.float32) - if kernel.ndim == 1: - kernel = torch.outer(kernel, kernel) - kernel /= torch.sum(kernel) - - kernel = kernel * gain - pad_value = kernel.shape[0] - factor - output = upfirdn2d_native( - hidden_states, kernel.to(device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2) - ) - return output - - -def upfirdn2d_native(tensor, kernel, up=1, down=1, pad=(0, 0)): - up_x = up_y = up - down_x = down_y = down - pad_x0 = pad_y0 = pad[0] - pad_x1 = pad_y1 = pad[1] - - _, channel, in_h, in_w = tensor.shape - tensor = tensor.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = tensor.shape - kernel_h, kernel_w = kernel.shape - - out = tensor.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) - out = out.to(tensor.device) # Move back to mps if necessary - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) - - -class TemporalConvLayer(nn.Module): - """ - Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from: - https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016 - """ - - def __init__(self, in_dim, out_dim=None, dropout=0.0): - super().__init__() - out_dim = out_dim or in_dim - self.in_dim = in_dim - self.out_dim = out_dim - - # conv layers - self.conv1 = nn.Sequential( - nn.GroupNorm(32, in_dim), nn.SiLU(), nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0)) - ) - self.conv2 = nn.Sequential( - nn.GroupNorm(32, out_dim), - nn.SiLU(), - nn.Dropout(dropout), - nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), - ) - self.conv3 = nn.Sequential( - nn.GroupNorm(32, out_dim), - nn.SiLU(), - nn.Dropout(dropout), - nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), - ) - self.conv4 = nn.Sequential( - nn.GroupNorm(32, out_dim), - nn.SiLU(), - nn.Dropout(dropout), - nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), - ) - - # zero out the last layer params,so the conv block is identity - nn.init.zeros_(self.conv4[-1].weight) - nn.init.zeros_(self.conv4[-1].bias) - - def forward(self, hidden_states, num_frames=1): - hidden_states = ( - hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4) - ) - - identity = hidden_states - hidden_states = self.conv1(hidden_states) - hidden_states = self.conv2(hidden_states) - hidden_states = self.conv3(hidden_states) - hidden_states = 
self.conv4(hidden_states) - - hidden_states = identity + hidden_states - - hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape( - (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:] - ) - return hidden_states diff --git a/spaces/spacerini/chat-noir/app.py b/spaces/spacerini/chat-noir/app.py deleted file mode 100644 index 3ecc1497fa125ee1710dadc7d321ddb640573794..0000000000000000000000000000000000000000 --- a/spaces/spacerini/chat-noir/app.py +++ /dev/null @@ -1,64 +0,0 @@ -import streamlit as st -from chatnoir_api.v1 import search - -st.set_page_config( - page_title="ChatNoir", - page_icon="🐈", - layout="centered" -) - -@st.cache(suppress_st_warning=True, allow_output_mutation=True, show_spinner=False) -def search_chat_noir(key, search_query): - return search(api_key=key, query=search_query) - -def result_html(result): - return ( - f"
        {(result.title.html).replace('', '').replace('','')}
        " - f"{result.target_uri}:
        " - f"
        {(result.snippet.html).replace('', '').replace('','')}

        " - ) - -cola, colb, colc = st.columns([5,4,5]) -with colb: - st.image("https://huggingface.co/spaces/spacerini/chat-noir/resolve/main/chatnoir.svg") - -col1, col2 = st.columns([9, 1]) -with col1: - search_query = st.text_input(label="", - placeholder="Search" - ) - -with col2: - st.write('#') - button_clicked = st.button("🔎") - - -if search_query or button_clicked: - search_results = search_chat_noir(st.secrets["key"], search_query) - for result in search_results[:10]: - st.write(result_html(result), unsafe_allow_html=True) - -with st.expander("🐈 About", expanded=False): - st.markdown( - """ - This is an example of a Space using a [`spacerini`](https://github.com/castorini/hf-spacerini) streamlit template to wrap a search engine hosted elsewhere, in this case the ChatNoir search engine. ChatNoir is an Elasticsearch-based search engine offering a freely accessible search interface for the two ClueWeb corpora and the Common Crawl, together about 3 billion web pages. This version of the search engine uses the [Search API](https://www.chatnoir.eu/doc/api/) by way of the Python [chatnoir-api](https://pypi.org/project/chatnoir-api/) Package. - -If you find this project useful in your research, please consider citing: - -``` -@InProceedings{bevendorff:2018, - address = {Berlin Heidelberg New York}, - author = {Janek Bevendorff and Benno Stein and Matthias Hagen and Martin Potthast}, - booktitle = {Advances in Information Retrieval. 40th European Conference on IR Research (ECIR 2018)}, - editor = {Leif Azzopardi and Allan Hanbury and Gabriella Pasi and Benjamin Piwowarski}, - ids = {potthast:2018c,stein:2018c}, - month = mar, - publisher = {Springer}, - series = {Lecture Notes in Computer Science}, - site = {Grenoble, France}, - title = {{Elastic ChatNoir: Search Engine for the ClueWeb and the Common Crawl}}, - year = 2018 -} -``` - """ - ) \ No newline at end of file diff --git a/spaces/srini047/text-based-sentiment-analyzer/README.md b/spaces/srini047/text-based-sentiment-analyzer/README.md deleted file mode 100644 index 68df8aff1f4bad8e947dd874e9fb66b5505f2e7c..0000000000000000000000000000000000000000 --- a/spaces/srini047/text-based-sentiment-analyzer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text Based Sentiment Analyzer -emoji: 🌖 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.1.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py deleted file mode 100644 index 5aaddf6421ab7fa417af508005671a0ed821c701..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import gc -import os -import random -import shutil -import numpy as np - -import torch -import tqdm -from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import ( - CpcFeatureReader, -) -from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import ( - HubertFeatureReader, -) -from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import ( - LogMelFeatureReader, -) -from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import ( - Wav2VecFeatureReader, -) - - -def get_feature_reader(feature_type): - if feature_type == "logmel": - return LogMelFeatureReader - elif feature_type == "hubert": - return HubertFeatureReader - elif feature_type == "w2v2": - return Wav2VecFeatureReader - elif feature_type == "cpc": - return CpcFeatureReader - else: - raise NotImplementedError(f"{feature_type} is not supported.") - - -def get_feature_iterator( - feature_type, checkpoint_path, layer, manifest_path, sample_pct -): - feature_reader_cls = get_feature_reader(feature_type) - with open(manifest_path, "r") as fp: - lines = fp.read().split("\n") - root = lines.pop(0).strip() - file_path_list = [ - os.path.join(root, line.split("\t")[0]) - for line in lines - if len(line) > 0 - ] - if sample_pct < 1.0: - file_path_list = random.sample( - file_path_list, int(sample_pct * len(file_path_list)) - ) - num_files = len(file_path_list) - reader = feature_reader_cls( - checkpoint_path=checkpoint_path, layer=layer - ) - - def iterate(): - for file_path in file_path_list: - feats = reader.get_feats(file_path) - yield feats.cpu().numpy() - - return iterate, num_files - - -def get_features( - feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten -): - generator, num_files = get_feature_iterator( - feature_type=feature_type, - checkpoint_path=checkpoint_path, - layer=layer, - manifest_path=manifest_path, - sample_pct=sample_pct, - ) - iterator = generator() - - features_list = [] - for features in tqdm.tqdm(iterator, total=num_files): - features_list.append(features) - - # Explicit clean up - del iterator - del generator - gc.collect() - torch.cuda.empty_cache() - - if flatten: - return np.concatenate(features_list) - - return features_list - - -def get_and_dump_features( - feature_type, - checkpoint_path, - layer, - manifest_path, - sample_pct, - flatten, - out_features_path, -): - # Feature extraction - features_batch = get_features( - feature_type=feature_type, - checkpoint_path=checkpoint_path, - layer=layer, - manifest_path=manifest_path, - sample_pct=sample_pct, - flatten=flatten, - ) - - # Save features - out_dir_path = os.path.dirname(out_features_path) - os.makedirs(out_dir_path, exist_ok=True) - shutil.copyfile( - manifest_path, - os.path.join(out_dir_path, os.path.basename(manifest_path)), - ) - np.save(out_features_path, features_batch) - - return features_batch diff --git a/spaces/srkajol/avocat-ia/README.md b/spaces/srkajol/avocat-ia/README.md deleted file mode 100644 index e3179de968bb1cf2e23b2e08e2c80b5b65ac99bd..0000000000000000000000000000000000000000 --- a/spaces/srkajol/avocat-ia/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Avocat Ia -emoji: 😻 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sssdtgvg/Sex/index.html b/spaces/sssdtgvg/Sex/index.html deleted file mode 100644 index 
58275de3b1c343a98420342baa076b9baaafa157..0000000000000000000000000000000000000000 --- a/spaces/sssdtgvg/Sex/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - My static Space - - - -
        -

        Welcome to your static Space!

        -

        You can modify this app directly by editing index.html in the Files and versions tab.

        -

        - Also don't forget to check the - Spaces documentation. -

        -
        - - diff --git a/spaces/stomexserde/gpt4-ui/Examples/8051 Microcontroller By Subrata Ghoshal Pdf HOT.md b/spaces/stomexserde/gpt4-ui/Examples/8051 Microcontroller By Subrata Ghoshal Pdf HOT.md deleted file mode 100644 index 9d3d53a33cca871e911892504a14172713415f17..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/8051 Microcontroller By Subrata Ghoshal Pdf HOT.md +++ /dev/null @@ -1,48 +0,0 @@ - -

        8051 Microcontroller: A Comprehensive Guide by Subrata Ghoshal

        -

The 8051 microcontroller is one of the most popular and widely used microcontrollers in embedded system design. It has a simple architecture and a rich instruction set, and it supports a variety of interfacing techniques, making it suitable for a wide range of applications. In this article, we will explore the internals, instructions, programming and interfacing of the 8051 microcontroller with the help of some books by Subrata Ghoshal, a renowned author and expert in the field.

        -

        8051 Microcontroller: Internals, Instructions, Programming & Interfacing

        -

This book[^1^] provides comprehensive, systematic coverage of the 8051 microcontroller, from its general architecture and features, through its instruction set and addressing modes, to its programming and interfacing techniques. It also includes numerous examples and exercises that illustrate the concepts and their applications; a brief illustrative sketch in that spirit follows the list. Some of the topics covered in this book are:

        -

        8051 Microcontroller By Subrata Ghoshal Pdf


Download File: https://urlgoal.com/2uI8vJ



        -
          -
        • General architecture and features of the 8051 microcontroller
        • -
        • Data move operations and arithmetic operations
        • -
        • Logical operations and boolean variable manipulation
        • -
        • Subroutines and stack
        • -
        • External interrupts and timer/counter interrupts
        • -
        • Serial communication and serial interrupts
        • -
        • External memory and memory-mapped I/O
        • -
        • Keyboards, display devices, DAC/ADC and motors
        • -
        • Power management and home protection system
        • -
        • Advanced microcontrollers such as PIC, AVR and ARM
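To make the timer/counter-interrupt topic above concrete, here is a minimal, hypothetical sketch in 8051 C. It is not taken from the book: it assumes the Keil C51 toolchain (reg51.h, sbit, the interrupt keyword), a 12 MHz crystal (one machine cycle per microsecond) and an LED wired to P1.0.

```c
#include <reg51.h>

sbit LED = P1^0;                    /* LED on port pin P1.0 (assumed wiring) */
static unsigned char ticks = 0;     /* counts 50 ms Timer 0 overflows */

/* Timer 0 overflow ISR (vector 000Bh, "interrupt 1" in Keil C51) */
void timer0_isr(void) interrupt 1
{
    TH0 = 0x3C;                     /* reload 65536 - 50000 = 0x3CB0 -> 50 ms tick */
    TL0 = 0xB0;
    if (++ticks >= 10) {            /* 10 ticks x 50 ms = 500 ms */
        ticks = 0;
        LED = !LED;                 /* toggle the LED */
    }
}

void main(void)
{
    TMOD = 0x01;                    /* Timer 0, mode 1 (16-bit) */
    TH0 = 0x3C;                     /* preload for the first 50 ms period */
    TL0 = 0xB0;
    ET0 = 1;                        /* enable Timer 0 interrupt */
    EA  = 1;                        /* global interrupt enable */
    TR0 = 1;                        /* start Timer 0 */
    while (1) {
        /* all work happens in the ISR */
    }
}
```

The TMOD/TH0/TL0/TR0 setup plus an interrupt service routine is the kind of register-level pattern the timer and interrupt topics above refer to.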
        • -
        -

        8051 Microcontrollers: Internals, Instructions, Programming and Interfacing, 2/e

        -

This book[^2^] is the second edition of the title above, with additional features and updates. It retains the structure and content of the first edition but adds more C-language programs for varied applications, and it includes a CD-ROM containing all the source code, simulation software and other useful resources for learning and practicing the 8051 microcontroller. Some of the new topics covered in this edition are listed here, with a short serial-port sketch after the list:

        -
          -
        • C language programming for the 8051 microcontroller
        • -
        • Keil C compiler and Proteus simulator
        • -
        • LCD interfacing using C language
        • -
        • SPI protocol and interfacing with EEPROM
        • -
        • I2C protocol and interfacing with RTC
        • -
        • PWM generation using C language
        • -
        • Wireless communication using RF module
        • -
        • GSM modem interfacing using AT commands
        • -
        • Bluetooth module interfacing using SPP profile
        • -
        • Zigbee module interfacing using API mode
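Several of these topics (the GSM, Bluetooth and RF modules) ultimately come down to driving the 8051's on-chip UART. The sketch below is a hypothetical illustration, not code from the book or its CD-ROM: it assumes the Keil C51 toolchain and the common 11.0592 MHz crystal, for which TH1 = 0xFD in Timer 1 mode 2 gives 9600 baud.

```c
#include <reg51.h>

/* Initialise the on-chip UART: mode 1 (8-bit), 9600 baud from Timer 1 */
void uart_init(void)
{
    SCON = 0x50;        /* serial mode 1, receiver enabled */
    TMOD = 0x20;        /* Timer 1, mode 2 (8-bit auto-reload) as baud-rate generator */
    TH1  = 0xFD;        /* 9600 baud with an 11.0592 MHz crystal */
    TR1  = 1;           /* start Timer 1 */
}

void uart_putc(char c)
{
    SBUF = c;           /* load the byte; hardware sets TI when it has gone out */
    while (!TI);        /* wait for transmission to complete */
    TI = 0;
}

void uart_puts(const char *s)
{
    while (*s)
        uart_putc(*s++);
}

void main(void)
{
    uart_init();
    uart_puts("AT\r\n");    /* basic liveness command that a GSM modem answers with OK */
    while (1) {
        /* the modem's reply could be read from SBUF whenever RI is set */
    }
}
```

The same uart_putc/uart_puts pair would typically be reused for the Bluetooth SPP and RF-module topics, since those modules usually expose a plain serial interface.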
        • -
        -

        Embedded Systems & Robots: Projects Using the 8051 Microcontroller

        -

This book[^3^] takes a project-based approach, demonstrating practical applications of the 8051 microcontroller in embedded systems and robotics. It contains 20 projects covering various aspects of embedded systems, such as sensors, actuators, communication protocols and control algorithms, and it explains the hardware design, software development and testing procedure for each project. Some of the projects are listed below, followed by a small sensor-reading sketch:

        -
          -
        • Digital thermometer using LM35 sensor
        • -
        • Digital clock using DS1307 RTC
        • -
        • Password-based door lock system using keypad and LCD
        • -
        • Remote-controlled car using RF module
        • -
        • Line follower robot using IR sensors
        • -
        • Maze solver robot using ultrasonic sensor
        • -
        • Voice-controlled robot using HM2007 speech recognition module
        • -
        • Fingerprint-based attendance system using R305 fingerprint module
        • -
        • RFID-based security system using EM-18 RFID reader module
        • -
        • Solar tracker using LDR sensors and stepper motor
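To give a flavor of what such a project involves, here is a hypothetical sketch for the digital-thermometer idea: an LM35 read through an ADC0804 on an 8051. It is not the book's code; the Keil C51 toolchain, the data bus on P1, the control pins named below and the Vref/2 = 1.28 V scaling (10 mV per step, so one ADC count equals one degree Celsius) are all assumptions.

```c
#include <reg51.h>

sbit ADC_RD   = P2^0;   /* ADC0804 /RD   (assumed wiring, /CS tied low) */
sbit ADC_WR   = P2^1;   /* ADC0804 /WR   */
sbit ADC_INTR = P2^2;   /* ADC0804 /INTR */

/* Start one conversion and return the 8-bit result from port P1 */
unsigned char adc_read(void)
{
    unsigned char value;

    ADC_WR = 0;             /* falling edge on /WR starts a conversion */
    ADC_WR = 1;
    while (ADC_INTR);       /* /INTR goes low when the conversion is done */

    ADC_RD = 0;             /* drive the result onto the data bus */
    value  = P1;
    ADC_RD = 1;
    return value;
}

void main(void)
{
    unsigned char temperature_c;

    P1 = 0xFF;              /* configure P1 as an input port for the ADC data */
    while (1) {
        /* with the LM35's 10 mV/degree output and a 10 mV ADC step,
           the raw count is already the temperature in degrees Celsius */
        temperature_c = adc_read();
        /* display or transmit temperature_c here (LCD, UART, ...) */
    }
}
```

A real project would add the display and some averaging, which is where the hardware-design and testing material of the book comes in.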
        • -

          7196e7f11a
          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Avgat Movie In Hindi Free Download 720p Movies [REPACK].md b/spaces/stomexserde/gpt4-ui/Examples/Avgat Movie In Hindi Free Download 720p Movies [REPACK].md deleted file mode 100644 index 5506dfa38249654c33973dd42cfc2fbcd6789bea..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Avgat Movie In Hindi Free Download 720p Movies [REPACK].md +++ /dev/null @@ -1,22 +0,0 @@ - -

          Avgat Movie In Hindi Free Download 720p Movies: A Review

          -

          Avgat is a 2001 Bollywood thriller movie directed by Mohan Sharma and starring Jackie Shroff, Nana Patekar, Raveena Tandon and Alok Nath. The movie is about a police officer who is framed for a murder and has to prove his innocence while being chased by his enemies. The movie was released on 31 August 2001 and received mixed reviews from critics and audiences.

          -

          Avgat Movie In Hindi Free Download 720p Movies


          Download File ---> https://urlgoal.com/2uI9pY



          -

If you are searching for "Avgat movie in Hindi free download 720p movies", you might be disappointed, as there are no legal and safe sites that offer this movie as a free download. However, you can still watch movies online on some streaming platforms that carry Hindi-dubbed versions of Hollywood films. Here are some of the best-known sites for watching Hindi-dubbed Hollywood movies online:

          -
            -
• Mp4Moviez: This is a free online movie downloader that can download Hollywood movies in Hindi. All the latest Hindi-dubbed Hollywood movies are listed at the top of the home page. You can get Hindi-dubbed Hollywood movie downloads directly under the Daily Updated Movies drop-down list, or search for the movies you want and download them[^1^].
          • -
• Free HD Video Converter Factory: This is a totally free tool that can download not only Hollywood movies in Hindi but also video and audio from over 1,000 websites. With this tool, you can enjoy your favorite Hollywood movies offline[^2^].
          • -
• SGuru: This is a website that lists the 10 best sites for downloading free movies in 720p/1080p HD. You can find movies in various genres, such as action, comedy and horror, and filter them by quality, language, year and more[^3^].
          • -
          -

However, we do not recommend downloading or watching Avgat in Hindi from these sites, as they may contain viruses, malware, or other harmful content that can damage your device or compromise your privacy. The best way to watch Avgat in Hindi is to buy or rent it from legal and authorized platforms, such as Amazon Prime Video, Netflix or Hotstar. By doing so, you support the original creators and enjoy a high-quality viewing experience.

          - -

A free 720p download of Avgat in Hindi is a tempting option for many movie lovers who want to watch a thrilling, suspenseful Bollywood film. However, there are many risks and drawbacks to downloading or watching movies from illegal and untrustworthy sites. Here are some of the reasons why you should avoid such downloads and opt for legal and safe platforms instead:

          -

          -
            -
1. You may face legal consequences: Downloading or streaming movies from unauthorized sources is considered piracy, which is a criminal offense in many countries. You may face fines, lawsuits, or even jail time if you are caught violating copyright law. You may also expose yourself to cyberattacks from hackers who can use your IP address or personal information for malicious purposes.
          2. -
          3. You may compromise the quality of the movie: The movies that are available for free download or streaming on illegal sites are often of poor quality, with low resolution, distorted sound, or missing subtitles. You may also encounter annoying ads, pop-ups, or redirects that may interrupt your viewing experience. You may also miss out on the bonus features, such as trailers, behind-the-scenes, interviews, etc., that are usually included in the official releases.
          4. -
          5. You may harm the movie industry: By downloading or watching movies from illegal sites, you are depriving the original creators and distributors of their rightful revenue and recognition. This may affect their ability to produce more quality movies in the future. You are also disrespecting the hard work and creativity of the actors, directors, writers, and other crew members who have invested their time and effort in making the movie.
          6. -
          -

Therefore, we strongly advise you to avoid "Avgat movie in Hindi free download 720p movies" and choose legal and safe platforms to watch the film. By doing so, you can enjoy a better and more secure viewing experience while supporting the movie industry and respecting intellectual property rights.

          e93f5a0c3f
          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Iddaa Tahmini Nesine Com.md b/spaces/stomexserde/gpt4-ui/Examples/Iddaa Tahmini Nesine Com.md deleted file mode 100644 index 166ff33750251e8d793ebc901146124ab79669e9..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Iddaa Tahmini Nesine Com.md +++ /dev/null @@ -1,32 +0,0 @@ -
          -

          Iddaa Tahmini Nesine Com: How to Bet on Sports in Turkey

          -

          If you are looking for a reliable and easy way to bet on sports in Turkey, you might want to check out Iddaa Tahmini Nesine Com. This is a website that offers you the latest odds, predictions, tips, and analysis for various sports events, such as football, basketball, tennis, and more. You can also find live scores, statistics, and news about your favorite teams and players.

          -

          Iddaa Tahmini Nesine Com is not just a website, but also a mobile app that you can download on your smartphone or tablet. You can access all the features of the website on the app, and also enjoy some exclusive benefits, such as notifications, coupons, and bonuses. You can also create your own profile and interact with other users who share your passion for sports betting.

          -

          Iddaa Tahmini Nesine Com


          DOWNLOAD ••• https://urlgoal.com/2uI9V7



          -

          One of the best things about Iddaa Tahmini Nesine Com is that it is licensed and regulated by the Turkish government. This means that you can trust that your personal information and money are safe and secure. You can also enjoy fast and easy transactions, using various payment methods such as credit cards, bank transfers, e-wallets, and more.

          -

          Whether you are a beginner or an expert in sports betting, you can find something that suits your needs and preferences on Iddaa Tahmini Nesine Com. You can choose from different types of bets, such as single bets, multiple bets, system bets, live bets, and more. You can also follow the advice of professional tipsters who have years of experience and knowledge in sports betting.
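For readers wondering how a multiple (accumulator) bet pays out, the arithmetic is simple: the combined decimal odds of a coupon are the product of the individual odds, and the potential return is the stake multiplied by that product. The small C sketch below illustrates this with made-up odds and a made-up stake; it is purely illustrative and has nothing to do with Nesine's actual systems or API.

```c
#include <stdio.h>

int main(void)
{
    /* made-up decimal odds for three selections on one coupon */
    const double odds[] = {1.85, 2.10, 1.60};
    const int n = sizeof odds / sizeof odds[0];
    const double stake = 50.0;          /* hypothetical stake */

    double combined = 1.0;
    for (int i = 0; i < n; i++)
        combined *= odds[i];            /* accumulator odds = product of the legs */

    printf("Combined odds: %.2f\n", combined);
    printf("Potential return on a stake of %.2f: %.2f\n", stake, stake * combined);
    return 0;
}
```

This is also why a multiple coupon can raise the potential winnings so sharply: every added leg multiplies, rather than adds to, the combined odds.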

          -

          Iddaa Tahmini Nesine Com is more than just a website or an app. It is a community of sports lovers who want to have fun and win money by betting on their favorite sports. If you want to join them, all you need to do is register for free and start betting today. You will not regret it!

          - -

          How to Use Iddaa Tahmini Nesine Com

          -

          Using Iddaa Tahmini Nesine Com is very simple and convenient. You just need to follow these steps:

          -
            -
          1. Register for free on the website or the app. You will need to provide some basic information, such as your name, email, phone number, and password.
          2. -
          3. Make a deposit using your preferred payment method. You can choose from various options, such as credit cards, bank transfers, e-wallets, and more. You can also claim a welcome bonus if you are a new user.
          4. -
          5. Browse the sports events that are available for betting. You can filter them by date, sport, league, country, and more. You can also use the search function to find a specific event or team.
          6. -
          7. Select the event that you want to bet on and click on the odds that you want to place your bet on. You can also check the predictions, tips, and analysis that are provided by Iddaa Tahmini Nesine Com or other users.
          8. -
          9. Enter the amount that you want to bet and confirm your bet. You can also combine multiple bets into one coupon to increase your potential winnings.
          10. -
          11. Wait for the event to finish and check the results. If you win, you will receive your winnings automatically in your account. You can then withdraw them or use them for more bets.
          12. -
          -

          Why Choose Iddaa Tahmini Nesine Com

          -

          There are many reasons why you should choose Iddaa Tahmini Nesine Com for your sports betting needs. Here are some of them:

          -

          -
            -
          • Iddaa Tahmini Nesine Com is licensed and regulated by the Turkish government. This means that you can trust that your personal information and money are safe and secure.
          • -
          • Iddaa Tahmini Nesine Com offers you the latest odds, predictions, tips, and analysis for various sports events. You can also find live scores, statistics, and news about your favorite teams and players.
          • -
          • Iddaa Tahmini Nesine Com has a user-friendly website and a mobile app that you can access anytime and anywhere. You can also enjoy some exclusive benefits, such as notifications, coupons, and bonuses.
          • -
          • Iddaa Tahmini Nesine Com has a variety of payment methods that you can use for fast and easy transactions. You can also withdraw your winnings without any hassle or delay.
          • -
          • Iddaa Tahmini Nesine Com has a customer support team that is available 24/7 to help you with any issues or questions that you might have. You can contact them via phone, email, or live chat.
          • -
          • Iddaa Tahmini Nesine Com has a community of sports lovers who want to have fun and win money by betting on their favorite sports. You can interact with them and share your opinions and tips.
          • -

          7b8c122e87
          -
          -
          \ No newline at end of file diff --git a/spaces/stratussox/yolov5_inference/export.py b/spaces/stratussox/yolov5_inference/export.py deleted file mode 100644 index e43d9b730fc6a2ef65416d5bcf1690a0fc50f162..0000000000000000000000000000000000000000 --- a/spaces/stratussox/yolov5_inference/export.py +++ /dev/null @@ -1,652 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ -PaddlePaddle | `paddle` | yolov5s_paddle_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - -Usage: - $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... - -Inference: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s_openvino_model # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle - -TensorFlow.js: - $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model - $ npm start -""" - -import argparse -import contextlib -import json -import os -import platform -import re -import subprocess -import sys -import time -import warnings -from pathlib import Path - -import pandas as pd -import torch -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.experimental import attempt_load -from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel -from utils.dataloaders import LoadImages -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) -from utils.torch_utils import select_device, smart_inference_mode - -MACOS = platform.system() == 'Darwin' # macOS environment - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def try_export(inner_func): - # YOLOv5 export decorator, i..e @try_export - inner_args = get_default_args(inner_func) - - def outer_func(*args, **kwargs): - prefix = inner_args['prefix'] - try: - with Profile() as dt: - f, model = inner_func(*args, **kwargs) - LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') - return f, model - except Exception as e: - LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') - return None, None - - return outer_func - - -@try_export -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): - # YOLOv5 TorchScript model export - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) - else: - ts.save(str(f), _extra_files=extra_files) - return f, None - - -@try_export -def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): - # YOLOv5 ONNX export - check_requirements('onnx') - import onnx - - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = file.with_suffix('.onnx') - - output_names = ['output0', 
'output1'] if isinstance(model, SegmentationModel) else ['output0'] - if dynamic: - dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) - if isinstance(model, SegmentationModel): - dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) - elif isinstance(model, DetectionModel): - dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - do_constant_folding=True, - input_names=['images'], - output_names=output_names, - dynamic_axes=dynamic or None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) - import onnxsim - - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') - return f, model_onnx - - -@try_export -def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export - check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): - # YOLOv5 Paddle export - check_requirements(('paddlepaddle', 'x2paddle')) - import x2paddle - from x2paddle.convert import pytorch2paddle - - LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') - f = str(file).replace('.pt', f'_paddle_model{os.sep}') - - pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): - # YOLOv5 CoreML export - check_requirements('coremltools') - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if MACOS: # quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", 
category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - return f, ct_model - - -@try_export -def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - for inp in inputs: - LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - return f, None - - -@try_export -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv5 TensorFlow SavedModel export - try: - import tensorflow as tf - except Exception: - check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' 
if MACOS else '-cpu'}") - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( - tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - return f, keras_model - - -@try_export -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): - # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') - - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - return f, None - - -@try_export -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): - # YOLOv5 TensorFlow Lite export - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if 
nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, "wb").write(tflite_model) - return f, None - - -@try_export -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): - # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - return f, None - - -@try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): - # YOLOv5 TensorFlow.js export - check_requirements('tensorflowjs') - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - json = Path(f_json).read_text() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - r'"Identity_1": {"name": "Identity_1"}, ' - r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - return f, None - - -def add_tflite_metadata(file, metadata, num_outputs): - # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata - with contextlib.suppress(ImportError): - # check_requirements('tflite_support') - from tflite_support import flatbuffers - from tflite_support import metadata as _metadata - from tflite_support import metadata_schema_py_generated as _metadata_fb - - tmp_file = Path('/tmp/meta.txt') - with open(tmp_file, 'w') as meta_f: - meta_f.write(str(metadata)) - - model_meta = _metadata_fb.ModelMetadataT() - label_file = _metadata_fb.AssociatedFileT() - label_file.name = tmp_file.name - model_meta.associatedFiles = [label_file] - - subgraph = _metadata_fb.SubGraphMetadataT() - subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()] - 
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs - model_meta.subgraphMetadata = [subgraph] - - b = flatbuffers.Builder(0) - b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) - metadata_buf = b.Output() - - populator = _metadata.MetadataPopulator.with_model_file(file) - populator.load_metadata_buffer(metadata_buf) - populator.load_associated_files([str(tmp_file)]) - populator.populate() - tmp_file.unlink() - - -@smart_inference_mode() -def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF/TensorRT: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold -): - t = time.time() - include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights - - # Load PyTorch model - device = select_device(device) - if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' - model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model - - # Checks - imgsz *= 2 if len(imgsz) == 1 else 1 # expand - if optimize: - assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' - - # Input - gs = int(max(model.stride)) # grid size (max stride) - imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection - - # Update model - model.eval() - for k, m in model.named_modules(): - if isinstance(m, Detect): - m.inplace = inplace - m.dynamic = dynamic - m.export = True - - for _ in range(2): - y = model(im) # dry runs - if half and not coreml: - im, model = im.half(), model.half() # to FP16 - shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape - metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") - - # Exports - f = [''] * len(fmts) # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: # TorchScript - f[0], _ = export_torchscript(model, im, file, optimize) - if engine: # TensorRT required before ONNX - f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) - if onnx or xml: # OpenVINO requires ONNX - f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) - if xml: # OpenVINO - f[3], _ = export_openvino(file, metadata, half) - if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) - if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' - f[5], s_model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) - if pb or tfjs: # pb prerequisite to tfjs - f[6], _ = export_pb(s_model, file) - if tflite or edgetpu: - f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) - add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) - if tfjs: - f[9], _ = export_tfjs(file) - if paddle: # PaddlePaddle - f[10], _ = export_paddle(model, im, file, metadata) - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type - dir = Path('segment' if seg else 'classify' if cls else '') - h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" - f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") - return f # return list of exported files/dirs - - -def parse_opt(): - parser = argparse.ArgumentParser() - 
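For reference, the run() function above can also be driven directly from Python instead of through the CLI flags defined here; a minimal sketch, assuming export.py is importable from the YOLOv5 root and that a yolov5s.pt checkpoint is available (both are placeholder assumptions):

# Sketch only, not part of the original file: programmatic ONNX export via run().
from export import run

run(
    weights='yolov5s.pt',   # placeholder checkpoint path
    include=('onnx',),      # any subset of the --include formats listed in the module header
    imgsz=(640, 640),       # export resolution (height, width)
    opset=12,               # matches the --opset default below
    device='cpu',
)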
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument( - '--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') - opt = parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/subhajitmaji/MusicGen/Makefile b/spaces/subhajitmaji/MusicGen/Makefile deleted file mode 100644 index 5bfd89dd833d7448b21073eb6ee7cfac1d5157dd..0000000000000000000000000000000000000000 --- a/spaces/subhajitmaji/MusicGen/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -default: linter tests - -install: - pip install -U pip - pip install -U -e '.[dev]' - -linter: - flake8 audiocraft && mypy audiocraft - flake8 tests && mypy tests - -tests: - coverage run -m pytest tests - coverage report --include 'audiocraft/*' - -docs: - pdoc3 --html -o docs -f audiocraft - -dist: - python setup.py sdist - -.PHONY: linter tests docs dist diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Junooniyat Full Movie Download In 720p 1080p _TOP_.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Junooniyat Full Movie Download In 720p 1080p _TOP_.md deleted file mode 100644 index 935f2eed4ac87945053b17e05384f51a6286f79d..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Junooniyat Full Movie Download In 720p 1080p 
_TOP_.md +++ /dev/null @@ -1,15 +0,0 @@ -

          Junooniyat full movie download in 720p 1080p


          Download Zip >>>>> https://cinurl.com/2uEXey



          - -Download Junooniyat 2016 movie in Hindi WebRip 480p | 720r. by kashif. Info: Type: All Bollywood Movies, Bollywood Movies 2016, Drama, Romance... Watch Movie Junooniyat 2016 in Hindi WebRip 480p | 720r. -Title: Junooniyat -Genre: Melodrama, Bollywood, Drama -Directed by: Sharad Kalpakkar -Cast: Deepak Malhotra, Kajol, Sharad Khan, Kamala Hassa, Purab Kohli -Country: India -Duration: 2:15:20 -Release year: -Cast: Sharad Kalpakkar, Deepak Malhotra, Kajol, Shirinje Kaul, Kamala Hassa, Purab Kohli, Sanjay Dutt -The film is about two lovers (Sirince and Dipak), who will have to go through difficulties ... 8a78ff9644
          -
          -
          -

          diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Terminator 3 The Redemption Pc _HOT_ Download Torrent Game.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Terminator 3 The Redemption Pc _HOT_ Download Torrent Game.md deleted file mode 100644 index 5197fdf8814018395781576f7a7b799443e58f25..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Terminator 3 The Redemption Pc _HOT_ Download Torrent Game.md +++ /dev/null @@ -1,18 +0,0 @@ -

          terminator 3 the redemption pc download torrent game


Download File: https://cinurl.com/2uEXQM



          -
          -It was published by Interplay Entertainment. In North America, the game was released on August 28, 2004, for Microsoft Windows, PlayStation 2, GameCube, Xbox, and Game Boy Advance. It was released in Japan on July 13, 2005, for PlayStation 2, Xbox, PlayStation Portable, and PlayStation 3, and for Game Boy Advance. On September 20, 2006, the Xbox version of the game was released as a download-only title. - -The Redemption received mixed reviews upon release. Critics criticized the artificial intelligence and the controls, but praised the graphics and the gameplay. Some reviewers also praised the voice cast. The PlayStation 2 version received the highest score of any version of the game, garnering a positive reaction due to its strong multiplayer support and original voice cast. - -Gameplay - -Terminator 3: The Redemption is an action-adventure game played in third-person. The game is linear and does not allow the player to freely explore environments. The player must follow a mission structure and accomplish a variety of objectives. The gameplay focuses on moving the player from room to room, while battling enemies. The player can view an overhead map at the game's start. The gameplay is similar to that of the film. - -The game features six playable characters: Kyle Reese, John Connor, Sarah Connor, the Terminator, the Assassin and the T-1000. The game features online multiplayer through PlayStation Network and Xbox Live. The multiplayer modes include "Team Deathmatch", "Defend Your Base", "One Flag Capture", "One Flag Defensive", "Two Flag Capture", "Two Flag Defensive", "Three Flag Capture", and "Three Flag Defense". It also features a "Survival" mode in which the player has to survive as long as possible and rack up points by killing the enemy and other players. Each character has different abilities. - -Plot - -In 2029, the Cyberdyne Systems Corporation scientists are continuing to research and improve upon the T-1000 Terminator, hoping to use it in the T-X Terminator model. The research is overseen by J.D. Pleckter, a scientist in Cyberdyne's Transhumanist division who believes that the human race can make a better society with a machine running the world. Pleckter's assistant, Mark Weston, is investigating the causes of an explosion at a military research facility which led to the death of many people. He is contacted by T-X, a model of the Terminator that he is developing. He is ordered to upload Ple 4fefd39f24
          -
          -
          -

          diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Transaction Pro Importer 40 ((NEW)) Crack.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Transaction Pro Importer 40 ((NEW)) Crack.md deleted file mode 100644 index 1ef471d9ad8f4850a9912f8714371f08dc55c306..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Transaction Pro Importer 40 ((NEW)) Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

          transaction pro importer 40 crack


          Download File ⚹⚹⚹ https://cinurl.com/2uEXDP



          - --I am an accredited jewelry professional (GIA) with a degree in Geology and ... from that of good alexandrite) going way back--I think to the 40s or even the 30s. ... distinguishing fake from real gemstones since I've become a potential buyer. ... it to expand and crack (although I don't know whether this is really the case or not ... 1fdad05405
          -
          -
          -

          diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/__init__.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/__init__.py deleted file mode 100644 index 52e4b48d383a84a055dcd7f6236f6e8e58eab924..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_module import BaseModule, ModuleList, Sequential -from .base_runner import BaseRunner -from .builder import RUNNERS, build_runner -from .checkpoint import (CheckpointLoader, _load_checkpoint, - _load_checkpoint_with_prefix, load_checkpoint, - load_state_dict, save_checkpoint, weights_to_cpu) -from .default_constructor import DefaultRunnerConstructor -from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info, - init_dist, master_only) -from .epoch_based_runner import EpochBasedRunner, Runner -from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model -from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook, - DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook, - Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, - GradientCumulativeOptimizerHook, Hook, IterTimerHook, - LoggerHook, LrUpdaterHook, MlflowLoggerHook, - NeptuneLoggerHook, OptimizerHook, PaviLoggerHook, - SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook, - WandbLoggerHook) -from .iter_based_runner import IterBasedRunner, IterLoader -from .log_buffer import LogBuffer -from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS, - DefaultOptimizerConstructor, build_optimizer, - build_optimizer_constructor) -from .priority import Priority, get_priority -from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed - -__all__ = [ - 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer', - 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', - 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook', - 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', - 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook', - 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict', - 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority', - 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict', - 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS', - 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer', - 'build_optimizer_constructor', 'IterLoader', 'set_random_seed', - 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook', - 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads', - 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule', - '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential', - 'ModuleList', 'GradientCumulativeOptimizerHook', - 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor' -] diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/gc_head.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/gc_head.py deleted file mode 100644 index 70741245af975800840709911bd18d72247e3e04..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/gc_head.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import ContextBlock - -from 
..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class GCHead(FCNHead): - """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. - - This head is the implementation of `GCNet - `_. - - Args: - ratio (float): Multiplier of channels ratio. Default: 1/4. - pooling_type (str): The pooling type of context aggregation. - Options are 'att', 'avg'. Default: 'avg'. - fusion_types (tuple[str]): The fusion type for feature fusion. - Options are 'channel_add', 'channel_mul'. Default: ('channel_add',) - """ - - def __init__(self, - ratio=1 / 4., - pooling_type='att', - fusion_types=('channel_add', ), - **kwargs): - super(GCHead, self).__init__(num_convs=2, **kwargs) - self.ratio = ratio - self.pooling_type = pooling_type - self.fusion_types = fusion_types - self.gc_block = ContextBlock( - in_channels=self.channels, - ratio=self.ratio, - pooling_type=self.pooling_type, - fusion_types=self.fusion_types) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.gc_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/t110-ai-admin/InspectLens/video_llama/datasets/datasets/webvid_datasets.py b/spaces/t110-ai-admin/InspectLens/video_llama/datasets/datasets/webvid_datasets.py deleted file mode 100644 index aaf6b9d6dff0d96b04d40a40c0051527f7d01842..0000000000000000000000000000000000000000 --- a/spaces/t110-ai-admin/InspectLens/video_llama/datasets/datasets/webvid_datasets.py +++ /dev/null @@ -1,122 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import os -from video_llama.datasets.datasets.base_dataset import BaseDataset -from video_llama.datasets.datasets.caption_datasets import CaptionDataset -import pandas as pd -import decord -from decord import VideoReader -import random -import torch -from torch.utils.data.dataloader import default_collate -class WebvidDataset(BaseDataset): - def __init__(self, vis_processor, text_processor, vis_root, ann_root): - """ - vis_root (string): Root directory of video (e.g. webvid_eval/video/) - ann_root (string): Root directory of video (e.g. 
webvid_eval/annotations/) - split (string): val or test - """ - super().__init__(vis_processor=vis_processor, text_processor=text_processor) - - - # 读取一个路径下所有的 - - ts_df = [] - for file_name in os.listdir(ann_root): - if file_name.endswith('.csv'): - df = pd.read_csv(os.path.join(ann_root, file_name)) - ts_df.append(df) - - merged_df = pd.concat(ts_df) - self.annotation = merged_df - self.vis_root = vis_root - self.resize_size = 224 - self.num_frm = 8 - self.frm_sampling_strategy = 'headtail' - - def _get_video_path(self, sample): - rel_video_fp = os.path.join(sample['page_dir'], str(sample['videoid']) + '.mp4') - full_video_fp = os.path.join(self.vis_root, rel_video_fp) - return full_video_fp - - def __getitem__(self, index): - num_retries = 10 # skip error videos - for _ in range(num_retries): - sample = self.annotation.iloc[index] - sample_dict = sample.to_dict() - video_id = sample_dict['videoid'] - - if 'name' in sample_dict.keys(): - text = sample_dict['name'].strip() - else: - raise NotImplementedError("Un-supported text annotation format.") - - # fetch video - video_path = self._get_video_path(sample_dict) - # if os.path.exists(video_path): - try: - video = self.vis_processor(video_path) - except: - print(f"Failed to load examples with video: {video_path}. " - f"Will randomly sample an example as a replacement.") - index = random.randint(0, len(self) - 1) - continue - caption = self.text_processor(text) - - # print(video.size()) - if video is None or caption is None \ - or video.size()!=torch.Size([3,self.vis_processor.n_frms,224,224]): - print(f"Failed to load examples with video: {video_path}. " - f"Will randomly sample an example as a replacement.") - index = random.randint(0, len(self) - 1) - continue - else: - break - else: - raise RuntimeError(f"Failed to fetch video after {num_retries} retries.") - # "image_id" is kept to stay compatible with the COCO evaluation format - return { - "image": video, - "text_input": caption, - "type":'video', - } - - def __len__(self): - return len(self.annotation) - - # def collater(self, samples): - # new_result = {} - # new_result['image'] = default_collate( [sample["image"] for sample in samples]) - # new_result['text_input'] = default_collate( [sample["text_input"] for sample in samples]) - # return new_result - -class WebvidDatasetEvalDataset(BaseDataset): - def __init__(self, vis_processor, text_processor, vis_root, ann_paths): - """ - vis_root (string): Root directory of images (e.g. coco/images/) - ann_root (string): directory to store the annotation file - split (string): val or test - """ - super().__init__(vis_processor, text_processor, vis_root, ann_paths) - - def __getitem__(self, index): - - ann = self.annotation[index] - - vname = ann["video"] - video_path = os.path.join(self.vis_root, vname) - - video = self.vis_processor(video_path) - - return { - "video": video, - "image_id": ann["image_id"], - "instance_id": ann["instance_id"], - } - - diff --git a/spaces/teamnassim/Fictionista/torch_utils/misc.py b/spaces/teamnassim/Fictionista/torch_utils/misc.py deleted file mode 100644 index 335397dd1662d8f5bfd44e17899a00549867f4bc..0000000000000000000000000000000000000000 --- a/spaces/teamnassim/Fictionista/torch_utils/misc.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. 
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import re -import contextlib -import numpy as np -import torch -import warnings -import dnnlib - -#---------------------------------------------------------------------------- -# Cached construction of constant tensors. Avoids CPU=>GPU copy when the -# same constant is used multiple times. - -_constant_cache = dict() - -def constant(value, shape=None, dtype=None, device=None, memory_format=None): - value = np.asarray(value) - if shape is not None: - shape = tuple(shape) - if dtype is None: - dtype = torch.get_default_dtype() - if device is None: - device = torch.device('cpu') - if memory_format is None: - memory_format = torch.contiguous_format - - key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) - tensor = _constant_cache.get(key, None) - if tensor is None: - tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) - if shape is not None: - tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) - tensor = tensor.contiguous(memory_format=memory_format) - _constant_cache[key] = tensor - return tensor - -#---------------------------------------------------------------------------- -# Replace NaN/Inf with specified numerical values. - -try: - nan_to_num = torch.nan_to_num # 1.8.0a0 -except AttributeError: - def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin - assert isinstance(input, torch.Tensor) - if posinf is None: - posinf = torch.finfo(input.dtype).max - if neginf is None: - neginf = torch.finfo(input.dtype).min - assert nan == 0 - return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out) - -#---------------------------------------------------------------------------- -# Symbolic assert. - -try: - symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access -except AttributeError: - symbolic_assert = torch.Assert # 1.7.0 - -#---------------------------------------------------------------------------- -# Context manager to temporarily suppress known warnings in torch.jit.trace(). -# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672 - -@contextlib.contextmanager -def suppress_tracer_warnings(): - flt = ('ignore', None, torch.jit.TracerWarning, None, 0) - warnings.filters.insert(0, flt) - yield - warnings.filters.remove(flt) - -#---------------------------------------------------------------------------- -# Assert that the shape of a tensor matches the given list of integers. -# None indicates that the size of a dimension is allowed to vary. -# Performs symbolic assertion when used in torch.jit.trace(). 
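A minimal usage sketch for assert_shape (defined just below); the tensor and shapes are illustrative only:

x = torch.zeros(4, 3, 256, 256)
assert_shape(x, [None, 3, 256, 256])   # passes: None lets the batch dimension vary
assert_shape(x, [4, 3, 128, 128])      # raises AssertionError for dimension 2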
- -def assert_shape(tensor, ref_shape): - if tensor.ndim != len(ref_shape): - raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}') - for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)): - if ref_size is None: - pass - elif isinstance(ref_size, torch.Tensor): - with suppress_tracer_warnings(): # as_tensor results are registered as constants - symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}') - elif isinstance(size, torch.Tensor): - with suppress_tracer_warnings(): # as_tensor results are registered as constants - symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}') - elif size != ref_size: - raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}') - -#---------------------------------------------------------------------------- -# Function decorator that calls torch.autograd.profiler.record_function(). - -def profiled_function(fn): - def decorator(*args, **kwargs): - with torch.autograd.profiler.record_function(fn.__name__): - return fn(*args, **kwargs) - decorator.__name__ = fn.__name__ - return decorator - -#---------------------------------------------------------------------------- -# Sampler for torch.utils.data.DataLoader that loops over the dataset -# indefinitely, shuffling items as it goes. - -class InfiniteSampler(torch.utils.data.Sampler): - def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5): - assert len(dataset) > 0 - assert num_replicas > 0 - assert 0 <= rank < num_replicas - assert 0 <= window_size <= 1 - super().__init__(dataset) - self.dataset = dataset - self.rank = rank - self.num_replicas = num_replicas - self.shuffle = shuffle - self.seed = seed - self.window_size = window_size - - def __iter__(self): - order = np.arange(len(self.dataset)) - rnd = None - window = 0 - if self.shuffle: - rnd = np.random.RandomState(self.seed) - rnd.shuffle(order) - window = int(np.rint(order.size * self.window_size)) - - idx = 0 - while True: - i = idx % order.size - if idx % self.num_replicas == self.rank: - yield order[i] - if window >= 2: - j = (i - rnd.randint(window)) % order.size - order[i], order[j] = order[j], order[i] - idx += 1 - -#---------------------------------------------------------------------------- -# Utilities for operating with torch.nn.Module parameters and buffers. - -def params_and_buffers(module): - assert isinstance(module, torch.nn.Module) - return list(module.parameters()) + list(module.buffers()) - -def named_params_and_buffers(module): - assert isinstance(module, torch.nn.Module) - return list(module.named_parameters()) + list(module.named_buffers()) - -def copy_params_and_buffers(src_module, dst_module, require_all=False): - assert isinstance(src_module, torch.nn.Module) - assert isinstance(dst_module, torch.nn.Module) - src_tensors = dict(named_params_and_buffers(src_module)) - for name, tensor in named_params_and_buffers(dst_module): - assert (name in src_tensors) or (not require_all) - if name in src_tensors: - tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad) - -#---------------------------------------------------------------------------- -# Context manager for easily enabling/disabling DistributedDataParallel -# synchronization. 
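For context, a minimal sketch of how the InfiniteSampler defined above is typically paired with a DataLoader; my_dataset is a placeholder torch Dataset and the batch size is arbitrary:

sampler = InfiniteSampler(my_dataset, rank=0, num_replicas=1, shuffle=True, seed=0)
loader = iter(torch.utils.data.DataLoader(my_dataset, batch_size=4, sampler=sampler))
batch = next(loader)   # never exhausts: the sampler yields shuffled indices indefinitely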
- -@contextlib.contextmanager -def ddp_sync(module, sync): - assert isinstance(module, torch.nn.Module) - if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel): - yield - else: - with module.no_sync(): - yield - -#---------------------------------------------------------------------------- -# Check DistributedDataParallel consistency across processes. - -def check_ddp_consistency(module, ignore_regex=None): - assert isinstance(module, torch.nn.Module) - for name, tensor in named_params_and_buffers(module): - fullname = type(module).__name__ + '.' + name - if ignore_regex is not None and re.fullmatch(ignore_regex, fullname): - continue - tensor = tensor.detach() - if tensor.is_floating_point(): - tensor = nan_to_num(tensor) - other = tensor.clone() - torch.distributed.broadcast(tensor=other, src=0) - assert (tensor == other).all(), fullname - -#---------------------------------------------------------------------------- -# Print summary table of module hierarchy. - -def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True): - assert isinstance(module, torch.nn.Module) - assert not isinstance(module, torch.jit.ScriptModule) - assert isinstance(inputs, (tuple, list)) - - # Register hooks. - entries = [] - nesting = [0] - def pre_hook(_mod, _inputs): - nesting[0] += 1 - def post_hook(mod, _inputs, outputs): - nesting[0] -= 1 - if nesting[0] <= max_nesting: - outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs] - outputs = [t for t in outputs if isinstance(t, torch.Tensor)] - entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs)) - hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()] - hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()] - - # Run module. - outputs = module(*inputs) - for hook in hooks: - hook.remove() - - # Identify unique outputs, parameters, and buffers. - tensors_seen = set() - for e in entries: - e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen] - e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen] - e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen] - tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs} - - # Filter out redundant entries. - if skip_redundant: - entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)] - - # Construct table. - rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']] - rows += [['---'] * len(rows[0])] - param_total = 0 - buffer_total = 0 - submodule_names = {mod: name for name, mod in module.named_modules()} - for e in entries: - name = '' if e.mod is module else submodule_names[e.mod] - param_size = sum(t.numel() for t in e.unique_params) - buffer_size = sum(t.numel() for t in e.unique_buffers) - output_shapes = [str(list(t.shape)) for t in e.outputs] - output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs] - rows += [[ - name + (':0' if len(e.outputs) >= 2 else ''), - str(param_size) if param_size else '-', - str(buffer_size) if buffer_size else '-', - (output_shapes + ['-'])[0], - (output_dtypes + ['-'])[0], - ]] - for idx in range(1, len(e.outputs)): - rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]] - param_total += param_size - buffer_total += buffer_size - rows += [['---'] * len(rows[0])] - rows += [['Total', str(param_total), str(buffer_total), '-', '-']] - - # Print table. 
- widths = [max(len(cell) for cell in column) for column in zip(*rows)] - print() - for row in rows: - print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths))) - print() - return outputs - -#---------------------------------------------------------------------------- diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/train/mplug_owl2_trainer.py b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/train/mplug_owl2_trainer.py deleted file mode 100644 index 293dcdf21c82c715e663af11b90a19543244af18..0000000000000000000000000000000000000000 --- a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/train/mplug_owl2_trainer.py +++ /dev/null @@ -1,243 +0,0 @@ -import os -import torch - -from torch.utils.data import Sampler - -from transformers import Trainer -from transformers.trainer import ( - is_sagemaker_mp_enabled, - get_parameter_names, - has_length, - ALL_LAYERNORM_LAYERS, - ShardedDDPOption, - logger, -) -from typing import List, Optional -from icecream import ic - -def maybe_zero_3(param, ignore_status=False, name=None): - from deepspeed import zero - from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus - if hasattr(param, "ds_id"): - if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: - if not ignore_status: - print(name, 'no ignore status') - with zero.GatheredParameters([param]): - param = param.data.detach().cpu().clone() - else: - param = param.detach().cpu().clone() - return param - - -def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match): - to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)} - to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()} - return to_return - - -def split_to_even_chunks(indices, lengths, num_chunks): - """ - Split a list of indices into `chunks` chunks of roughly equal lengths. - """ - - if len(indices) % num_chunks != 0: - return [indices[i::num_chunks] for i in range(num_chunks)] - - num_indices_per_chunk = len(indices) // num_chunks - - chunks = [[] for _ in range(num_chunks)] - chunks_lengths = [0 for _ in range(num_chunks)] - for index in indices: - shortest_chunk = chunks_lengths.index(min(chunks_lengths)) - chunks[shortest_chunk].append(index) - chunks_lengths[shortest_chunk] += lengths[index] - if len(chunks[shortest_chunk]) == num_indices_per_chunk: - chunks_lengths[shortest_chunk] = float("inf") - - return chunks - - -def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None): - # We need to use torch for the random part as a distributed sampler will set the random seed for torch. - assert all(l != 0 for l in lengths), "Should not have zero length." 
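As a worked illustration (not from the original source) of split_to_even_chunks above: when the incoming indices are already sorted by descending length, as the calling code arranges, the greedy shortest-chunk assignment balances the total length per chunk:

indices = [0, 1, 2, 3]
lengths = [9, 7, 5, 3]                     # lengths[i] is the length of sample i
split_to_even_chunks(indices, lengths, 2)  # -> [[0, 3], [1, 2]], totals 12 and 12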
- if all(l > 0 for l in lengths) or all(l < 0 for l in lengths): - # all samples are in the same modality - return get_length_grouped_indices(lengths, batch_size, world_size, generator=generator) - mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0]) - lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0]) - - mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=None)] - lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices(lang_lengths, batch_size, world_size, generator=None)] - megabatch_size = world_size * batch_size - mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)] - lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)] - - last_mm = mm_megabatches[-1] - last_lang = lang_megabatches[-1] - additional_batch = last_mm + last_lang - megabatches = mm_megabatches[:-1] + lang_megabatches[:-1] - megabatch_indices = torch.randperm(len(megabatches), generator=generator) - megabatches = [megabatches[i] for i in megabatch_indices] - - if len(additional_batch) > 0: - megabatches.append(sorted(additional_batch)) - - return [i for megabatch in megabatches for i in megabatch] - - -def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True): - # We need to use torch for the random part as a distributed sampler will set the random seed for torch. - indices = torch.randperm(len(lengths), generator=generator) - megabatch_size = world_size * batch_size - megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)] - megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] - megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches] - - return [i for megabatch in megabatches for batch in megabatch for i in batch] - - -class LengthGroupedSampler(Sampler): - r""" - Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while - keeping a bit of randomness. 
- """ - - def __init__( - self, - batch_size: int, - world_size: int, - lengths: Optional[List[int]] = None, - generator=None, - group_by_modality: bool = False, - ): - if lengths is None: - raise ValueError("Lengths must be provided.") - - self.batch_size = batch_size - self.world_size = world_size - self.lengths = lengths - self.generator = generator - self.group_by_modality = group_by_modality - - def __len__(self): - return len(self.lengths) - - def __iter__(self): - if self.group_by_modality: - indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) - else: - indices = get_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) - return iter(indices) - - -class MPLUGOwl2Trainer(Trainer): - - def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: - if self.train_dataset is None or not has_length(self.train_dataset): - return None - - if self.args.group_by_modality_length: - lengths = self.train_dataset.modality_lengths - return LengthGroupedSampler( - self.args.train_batch_size, - world_size=self.args.world_size * self.args.gradient_accumulation_steps, - lengths=lengths, - group_by_modality=True, - ) - else: - return super()._get_train_sampler() - - def create_optimizer(self): - """ - Setup the optimizer. - - We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the - Trainer's init through `optimizers`, or subclass and override this method in a subclass. - """ - if is_sagemaker_mp_enabled(): - return super().create_optimizer() - if self.sharded_ddp == ShardedDDPOption.SIMPLE: - return super().create_optimizer() - - opt_model = self.model - - if self.optimizer is None: - decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS) - decay_parameters = [name for name in decay_parameters if "bias" not in name] - if self.args.visual_abstractor_lr is not None: - projector_parameters = [name for name, _ in opt_model.named_parameters() if "visual_abstractor_lr" in name] - optimizer_grouped_parameters = [ - { - "params": [ - p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in projector_parameters and p.requires_grad) - ], - "weight_decay": self.args.weight_decay, - }, - { - "params": [ - p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in projector_parameters and p.requires_grad) - ], - "weight_decay": 0.0, - }, - { - "params": [ - p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in projector_parameters and p.requires_grad) - ], - "weight_decay": self.args.weight_decay, - "lr": self.args.visual_abstractor_lr, - }, - { - "params": [ - p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in projector_parameters and p.requires_grad) - ], - "weight_decay": 0.0, - "lr": self.args.visual_abstractor_lr, - }, - ] - else: - optimizer_grouped_parameters = [ - { - "params": [ - p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) - ], - "weight_decay": self.args.weight_decay, - }, - { - "params": [ - p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) - ], - "weight_decay": 0.0, - }, - ] - ic(len(optimizer_grouped_parameters[0]['params']),len(optimizer_grouped_parameters[1]['params'])) - optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) - - if self.sharded_ddp == ShardedDDPOption.SIMPLE: - 
self.optimizer = OSS( - params=optimizer_grouped_parameters, - optim=optimizer_cls, - **optimizer_kwargs, - ) - else: - self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) - if optimizer_cls.__name__ == "Adam8bit": - import bitsandbytes - - manager = bitsandbytes.optim.GlobalOptimManager.get_instance() - - skipped = 0 - for module in opt_model.modules(): - if isinstance(module, nn.Embedding): - skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) - logger.info(f"skipped {module}: {skipped/2**20}M params") - manager.register_module_override(module, "weight", {"optim_bits": 32}) - logger.debug(f"bitsandbytes: will optimize {module} in fp32") - logger.info(f"skipped: {skipped/2**20}M params") - - return self.optimizer - - def _save_checkpoint(self, model, trial, metrics=None): - super(MPLUGOwl2Trainer, self)._save_checkpoint(model, trial, metrics) - - def _save(self, output_dir: Optional[str] = None, state_dict=None): - super(MPLUGOwl2Trainer, self)._save(output_dir, state_dict) \ No newline at end of file diff --git a/spaces/teragron/docuchat-webui/document_chatbot.py b/spaces/teragron/docuchat-webui/document_chatbot.py deleted file mode 100644 index 7e5eebda238aed12dcc87e223b01ee70355020ec..0000000000000000000000000000000000000000 --- a/spaces/teragron/docuchat-webui/document_chatbot.py +++ /dev/null @@ -1,56 +0,0 @@ -import random -import time -import requests -from langchain.chains.question_answering import load_qa_chain -from langchain.text_splitter import CharacterTextSplitter -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.docstore.document import Document -from langchain.vectorstores import FAISS -from langchain import HuggingFaceHub - - -class DocumentChatbot: - - def __init__(self): - self.llm = None - self.chain = None - self.embeddings = None - self.metadata = {"source": "internet"} - self.init_mes = ["According to the document, ", "Based on the text, ", "I think, ", "According to the text, ", "Based on the document you provided, "] - - - def respond(self, text_input, question, chat_history, model_name): - self.llm = HuggingFaceHub(repo_id=model_name, model_kwargs={"temperature":0, "max_length":512}) - self.chain = load_qa_chain(self.llm, chain_type="stuff") - self.embeddings = HuggingFaceEmbeddings() - if not question or question.isspace(): - return "Please enter a valid question.", chat_history - if text_input.startswith("http"): - response = requests.get(text_input) - text_var = response.text - if text_var is None: - raise ValueError("No document is given") - else: - text_var = text_input - time.sleep(0.5) - - documents = [Document(page_content=text_var, metadata=self.metadata)] - text_splitter = CharacterTextSplitter(chunk_size=750, chunk_overlap=0) - docs = text_splitter.split_documents(documents) - - if self.llm is None: - raise ValueError("Model not loaded") - db = FAISS.from_documents(docs, self.embeddings) - query = question - - try: - docs = db.similarity_search(query) - answer = self.chain.run(input_documents=docs, question=query) - bot_message = random.choice(self.init_mes) + answer + "." 
- except ValueError as e: - bot_message = f"An error occurred: {str(e)}" - chat_history.append((question, bot_message)) - - time.sleep(1) - - return "", chat_history \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Active Partition Recovery Version 6 Full Version.rar ((FREE)).md b/spaces/terfces0erbo/CollegeProjectV2/Active Partition Recovery Version 6 Full Version.rar ((FREE)).md deleted file mode 100644 index cfadfcd9a255d1ca01e7355f578185a4ab248d50..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Active Partition Recovery Version 6 Full Version.rar ((FREE)).md +++ /dev/null @@ -1,6 +0,0 @@ -

          active partition recovery version 6 full version.rar


          Download Filehttps://bytlly.com/2uGjuF



          -
          -Data recovery software and undelete tool supports FAT12/16/32, NTFS, ... It includes tools to recover data from corrupt or damaged Zip or SFX archives, RAR archives, ... Tool to create a USB recovery drive that you can use to reinstall the version of ... Active@ Partition Recovery 19.0.3.1 ... May 6, 2014; Freeware; Windows. 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/terfces0erbo/CollegeProjectV2/Grim Fandango - Portuguese Brazil - Portugu S Brasil - PT- !!HOT!! Download.md b/spaces/terfces0erbo/CollegeProjectV2/Grim Fandango - Portuguese Brazil - Portugu S Brasil - PT- !!HOT!! Download.md deleted file mode 100644 index 9eae2ae36ce25d4d70a27acd4807fec4a8fa8b84..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Grim Fandango - Portuguese Brazil - Portugu S Brasil - PT- !!HOT!! Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

Grim Fandango - Portuguese Brazil - Português Brasil - PT- download


          Download Zip »»» https://bytlly.com/2uGkhC



          - -... Grim Fandango by LucasArts for PC * Worldwide shipping. $14.75. Free shipping. GRIM FANDANGO LucasArts Game PC (1998) Box - New (PORTUGUES ... 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/thealphhamerc/audio-to-text/app.py b/spaces/thealphhamerc/audio-to-text/app.py deleted file mode 100644 index e459e38eca9234ab4784a285de5fccf948258ba3..0000000000000000000000000000000000000000 --- a/spaces/thealphhamerc/audio-to-text/app.py +++ /dev/null @@ -1,255 +0,0 @@ -import os -os.system("pip install git+https://github.com/openai/whisper.git") -import pysrt -import pandas as pd -from pytube import YouTube -from datetime import timedelta -import whisper -from subprocess import call -import gradio as gr -import logging -# from transformers.pipelines.audio_utils import ffmpeg_read - - -logger = logging.getLogger("whisper-jax-app") -logger.setLevel(logging.INFO) -ch = logging.StreamHandler() -ch.setLevel(logging.INFO) -formatter = logging.Formatter( - "%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S") -ch.setFormatter(formatter) -logger.addHandler(ch) - - -FILE_LIMIT_MB = 1000 - - -def run_cmd(command): - try: - print(command) - call(command) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) - - -def inference(text): - cmd = ['tts', '--text', text] - run_cmd(cmd) - return 'tts_output.wav' - - -baseModel = whisper.load_model("base") - - -df_init = pd.DataFrame(columns=['start', 'end', 'text']) -transcription_df = gr.DataFrame(value=df_init, label="Transcription dataframe", row_count=( - 0, "dynamic"), max_rows=30, wrap=True, overflow_row_behaviour='paginate') - - -inputs = [gr.components.Audio(type="filepath", label="Add audio file"), gr.inputs.Audio(source="microphone", - optional=True, type="filepath"),] -outputs = [gr.components.Textbox(), transcription_df] -title = "Transcribe multi-lingual audio clips" -description = "An example of using OpenAi whisper to generate transcriptions for audio clips." -article = "" -audio_examples = [ - ["input/example-1.wav"], - ["input/example-2.wav"], -] - - -def transcribe(inputs, microphone): - if (microphone is not None): - inputs = microphone - - if inputs is None: - logger.warning("No audio file") - return [f"File size exceeds file size limit. Got file of size {file_size_mb:.2f}MB for a limit of {FILE_LIMIT_MB}MB.", df_init] - file_size_mb = os.stat(inputs).st_size / (1024 * 1024) - - # --------------------------------------------------- Check the file size --------------------------------------------------- - if file_size_mb > FILE_LIMIT_MB: - logger.warning("Max file size exceeded") - df = pd.DataFrame(columns=['start', 'end', 'text']) - return [f"File size exceeds file size limit. 
Got file of size {file_size_mb:.2f}MB for a limit of {FILE_LIMIT_MB}MB.", df_init] - - # --------------------------------------------------- Transcribe the audio --------------------------------------------------- - result = baseModel.transcribe(audio=inputs, language='english', - verbose=False) - srtFilename = os.path.join("output/SrtFiles", inputs.split( - '/')[-1].split('.')[0]+'.srt') - - # --------------------------------------------------- Clear the file if exists --------------------------------------------------- - if os.path.exists(srtFilename): - os.remove(srtFilename) - with open(srtFilename, 'w', encoding='utf-8') as srtFile: - srtFile.write('') - - # --------------------------------------------------- Write the file --------------------------------------------------- - segments = result['segments'] - for segment in segments: - startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000' - endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000' - text = segment['text'] - segmentId = segment['id']+1 - segment = f"{segmentId}\n{startTime} --> {endTime}\n{text[1:] if text[0] is ' ' else text}\n\n" - - with open(srtFilename, 'a', encoding='utf-8') as srtFile: - srtFile.write(segment) - - # ------------------------------------------- Read the file and Prepare to display --------------------------------------- - try: - srt_path = srtFilename - df = pd.DataFrame(columns=['start', 'end', 'text']) - subs = pysrt.open(srt_path) - - objects = [] - for sub in subs: - start_hours = str(str(sub.start.hours) + "00")[0:2] if len( - str(sub.start.hours)) == 2 else str("0" + str(sub.start.hours) + "00")[0:2] - end_hours = str(str(sub.end.hours) + "00")[0:2] if len( - str(sub.end.hours)) == 2 else str("0" + str(sub.end.hours) + "00")[0:2] - - start_minutes = str(str(sub.start.minutes) + "00")[0:2] if len( - str(sub.start.minutes)) == 2 else str("0" + str(sub.start.minutes) + "00")[0:2] - end_minutes = str(str(sub.end.minutes) + "00")[0:2] if len( - str(sub.end.minutes)) == 2 else str("0" + str(sub.end.minutes) + "00")[0:2] - - start_seconds = str(str(sub.start.seconds) + "00")[0:2] if len( - str(sub.start.seconds)) == 2 else str("0" + str(sub.start.seconds) + "00")[0:2] - end_seconds = str(str(sub.end.seconds) + "00")[0:2] if len( - str(sub.end.seconds)) == 2 else str("0" + str(sub.end.seconds) + "00")[0:2] - - start = start_hours + ":" + start_minutes + ":" + start_seconds + ",000" - end = end_hours + ":" + end_minutes + ":" + end_seconds + ",000" - text = sub.text - objects.append([start, end, text]) - - df = pd.DataFrame(objects, columns=['start', 'end', 'text']) - except Exception as e: - print('Error: ', e) - df = df_init - - return [result["text"], df] - - -# Transcribe youtube video -# define function for transcription -def youtube_transcript(url): - try: - if url: - yt = YouTube(url, use_oauth=True) - source = yt.streams.filter(progressive=True, file_extension='mp4').order_by( - 'resolution').desc().first().download('output/youtube') - - transcript = baseModel.transcribe(source) - return transcript["text"] - except Exception as e: - print('Error: ', e) - return 'Error: ' + str(e) - - -def displaySrtFile(srtFilename): - with open(srtFilename, 'r', encoding='utf-8') as srtFile: - srtContent = srtFile.read() - - try: - - df = pd.DataFrame(columns=['start', 'end', 'text']) - srt_path = srtFilename - subs = pysrt.open(srt_path) - - objects = [] - for sub in subs: - - start_hours = str(str(sub.start.hours) + "00")[0:2] if len( - str(sub.start.hours)) == 2 else str("0" + 
str(sub.start.hours) + "00")[0:2] - end_hours = str(str(sub.end.hours) + "00")[0:2] if len( - str(sub.end.hours)) == 2 else str("0" + str(sub.end.hours) + "00")[0:2] - - start_minutes = str(str(sub.start.minutes) + "00")[0:2] if len( - str(sub.start.minutes)) == 2 else str("0" + str(sub.start.minutes) + "00")[0:2] - end_minutes = str(str(sub.end.minutes) + "00")[0:2] if len( - str(sub.end.minutes)) == 2 else str("0" + str(sub.end.minutes) + "00")[0:2] - - start_seconds = str(str(sub.start.seconds) + "00")[0:2] if len( - str(sub.start.seconds)) == 2 else str("0" + str(sub.start.seconds) + "00")[0:2] - end_seconds = str(str(sub.end.seconds) + "00")[0:2] if len( - str(sub.end.seconds)) == 2 else str("0" + str(sub.end.seconds) + "00")[0:2] - - start_millis = str(str(sub.start.milliseconds) + "000")[0:3] - end_millis = str(str(sub.end.milliseconds) + "000")[0:3] - objects.append([sub.text, f'{start_hours}:{start_minutes}:{start_seconds}.{start_millis}', - f'{end_hours}:{end_minutes}:{end_seconds}.{end_millis}']) - - for object in objects: - srt_to_df = { - 'start': [object[1]], - 'end': [object[2]], - 'text': [object[0]] - } - - df = pd.concat([df, pd.DataFrame(srt_to_df)]) - except Exception as e: - print("Error creating srt df") - - return srtContent - - -audio_chunked = gr.Interface( - fn=transcribe, - inputs=inputs, - outputs=outputs, - allow_flagging="never", - title=title, - description=description, - article=article, - examples=audio_examples, -) - -# microphone_chunked = gr.Interface( -# fn=transcribe, -# inputs=[ -# gr.inputs.Audio(source="microphone", -# optional=True, type="filepath"), -# ], -# outputs=[ -# gr.outputs.Textbox(label="Transcription").style( -# show_copy_button=True), -# ], -# allow_flagging="never", -# title=title, -# description=description, -# article=article, -# ) -youtube_chunked = gr.Interface( - fn=youtube_transcript, - inputs=[ - gr.inputs.Textbox(label="Youtube URL", type="text"), - ], - outputs=[ - gr.outputs.Textbox(label="Transcription").style( - show_copy_button=True), - ], - allow_flagging="never", - title=title, - - description=description, - article=article, - examples=[ - ["https://www.youtube.com/watch?v=nlMuHtV82q8&ab_channel=NothingforSale24",], - ["https://www.youtube.com/watch?v=JzPfMbG1vrE&ab_channel=ExplainerVideosByLauren",], - ["https://www.youtube.com/watch?v=S68vvV0kod8&ab_channel=Pearl-CohnTelevision"] - - ], - -) - - -demo = gr.Blocks() -with demo: - gr.TabbedInterface([audio_chunked, youtube_chunked], [ - "Audio File", "Youtube"]) -demo.queue(concurrency_count=1, max_size=5) -demo.launch(show_api=False) diff --git a/spaces/thejagstudio/procom/amazon/migrations/__init__.py b/spaces/thejagstudio/procom/amazon/migrations/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Abntnbr15512.md b/spaces/tialenAdioni/chat-gpt-api/logs/Abntnbr15512.md deleted file mode 100644 index 608669203df9edcd2ae19eef494962d01bda2a35..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Abntnbr15512.md +++ /dev/null @@ -1,45 +0,0 @@ - -Here is a possible title and article with HTML formatting for the keyword "abntnbr15512": - -

          What is ABNT NBR 15512 and why is it important?

          -

          ABNT NBR 15512 is a Brazilian technical standard that specifies the requirements for the practice of environmental management in organizations. It was published by the Brazilian Association of Technical Standards (ABNT) in 2018, as a revision of the previous version from 2004.

          -

          The standard aims to help organizations to improve their environmental performance, comply with legal and other obligations, prevent pollution, and achieve their environmental objectives. It is based on the principles of continuous improvement, stakeholder engagement, risk management, and life cycle thinking.

          -

          abntnbr15512


          DOWNLOAD 🗸🗸🗸 https://urlcod.com/2uK9Yk



          -

          ABNT NBR 15512 is compatible with other management system standards, such as ISO 9001 (quality management) and ISO 45001 (occupational health and safety management). It follows the same structure and terminology as these standards, making it easier to integrate them into a single management system.

          -

          The standard applies to any organization, regardless of its size, type, or sector. It can be used for certification purposes or for self-declaration of conformity. The benefits of implementing ABNT NBR 15512 include:

          -
            -
• Enhancing the organization's reputation and credibility
• Reducing costs and waste
• Increasing efficiency and productivity
• Improving customer satisfaction and loyalty
• Strengthening stakeholder relationships
• Minimizing environmental risks and impacts
• Contributing to sustainable development
          -

          To comply with ABNT NBR 15512, an organization needs to establish, implement, maintain, and improve an environmental management system that covers the following elements:

          -
            -
1. Context of the organization
2. Leadership
3. Planning
4. Support
5. Operation
6. Performance evaluation
7. Improvement
          -

          The standard provides guidance and examples for each element, but does not prescribe specific methods or criteria. The organization has the flexibility to determine how to meet the requirements according to its own context and needs.

          -

          ABNT NBR 15512 is a voluntary standard that reflects the best practices and expectations of the Brazilian society regarding environmental management. By adopting it, an organization can demonstrate its commitment to environmental responsibility and excellence.


          How to implement ABNT NBR 15512 in your organization?

          -

          If you want to implement ABNT NBR 15512 in your organization, you need to follow some steps that will help you to plan, execute, monitor, and improve your environmental management system. Here are some suggestions:

          -
            -
1. Conduct a gap analysis to identify your current situation and your needs regarding environmental management. You can use the standard as a reference to check what requirements you already meet and what areas you need to improve.
2. Define your environmental policy and objectives, taking into account your context, stakeholders, legal and other obligations, risks and opportunities, and life cycle aspects. Communicate them to your employees and other relevant parties.
3. Establish roles and responsibilities for environmental management within your organization. Provide adequate resources, training, and awareness to ensure competence and commitment.
4. Develop and document your environmental management system procedures and processes, covering all the elements of the standard. Define how you will control your environmental aspects, prevent pollution, comply with regulations, manage emergencies, and achieve your objectives.
5. Implement your environmental management system according to your plans. Ensure that you follow your procedures and processes consistently and effectively. Use appropriate tools and techniques to measure and monitor your environmental performance.
6. Evaluate your environmental management system periodically. Conduct internal audits and management reviews to verify compliance, effectiveness, and suitability. Identify strengths and weaknesses, as well as opportunities for improvement.
7. Take corrective and preventive actions to address any nonconformities or potential problems. Implement improvement actions based on audit findings, management review results, feedback from stakeholders, or best practices. Continually seek ways to enhance your environmental performance.
          -

          By following these steps, you can implement ABNT NBR 15512 in your organization and enjoy its benefits. However, if you need further guidance or assistance, you can consult experts or seek certification from an accredited body.

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Sisqo Unleash the Dragon Album Zip Where to Find the Best Deals and Discounts.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Sisqo Unleash the Dragon Album Zip Where to Find the Best Deals and Discounts.md deleted file mode 100644 index 78be9779b846e403ecb181a95e483477b70026c9..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Sisqo Unleash the Dragon Album Zip Where to Find the Best Deals and Discounts.md +++ /dev/null @@ -1,110 +0,0 @@ - -

          Download Sisqo Unleash the Dragon Album Zip

          -

          If you are looking for a fun, catchy, and classic R&B album to listen to, you might want to check out Unleash the Dragon by Sisqo. This is the debut solo album by the American singer, who was previously the lead vocalist of the group Dru Hill. Released in 1999, the album features some of Sisqo's most popular songs, such as "Thong Song", "Incomplete", and "Got to Get It". In this article, we will tell you more about Sisqo, his album, and how you can download it in a zip file format.

          -

          download sisqo unleash the dragon album zip


          Download File ✦✦✦ https://urlcod.com/2uK6A1



          -

          Who is Sisqo?

          -

Sisqo, whose real name is Mark Andrews, is an American singer, songwriter, dancer, and actor. He was born in Baltimore, Maryland, in 1978. He started his musical career as a member of Dru Hill, an R&B group that formed in 1992. Dru Hill had several hits in the late 1990s, such as "In My Bed", "How Deep Is Your Love", and "These Are The Times". Sisqo decided to pursue a solo career in 1999, while Dru Hill was on a hiatus. He signed with Def Soul, a subsidiary of Def Jam Recordings.

          -

          What is Unleash the Dragon?

          -

          Unleash the Dragon is Sisqo's first solo album. It was released on November 30, 1999. The album's title refers to Sisqo's alter ego, "The Dragon", which he uses to express his creative and energetic side. The album's genre is mainly R&B, but it also incorporates elements of hip hop, pop, soul, and funk. The album was a huge success, selling over six million copies worldwide. It also received positive reviews from critics, who praised Sisqo's vocals, charisma, and versatility. The album earned Sisqo four Grammy nominations in 2001.

          -

          Tracklist and Songs

          -

          The album consists of 13 tracks, with a total length of 47 minutes. Here are the songs in the album and their brief descriptions:

          -
            -
• Unleash the Dragon (feat. Beanie Sigel): The opening track and title song of the album. It is a rap song that introduces Sisqo's alter ego and his mission to dominate the music industry.
• Got to Get It (feat. Make It Hot): The second track and first single of the album. It is an upbeat R&B song that talks about Sisqo's desire for a woman.
• Is Love Enough: The third track and fourth single of the album. It is a slow jam that questions whether love is enough to sustain a relationship.
• 2nite (Interlude): The fourth track and an interlude that sets up the mood for the next song.
• How Can I Love U 2nite: The fifth track and third single of the album. It is a ballad that expresses Sisqo's feelings for a woman who is already taken.
• Your Love Is Incredible: The sixth track and second single of the album. It is a mid-tempo R&B song that compliments a woman's love.
• So Sexual: The seventh track of the album. It is a sensual R&B song that describes Sisqo's sexual fantasies.
• Thong Song: The eighth track and fifth single of the album. It is a pop song that celebrates women who wear thongs.
• Incomplete: The ninth track and sixth single of the album. It is a ballad that confesses Sisqo's loneliness without his lover.
• Addicted: The tenth track of the album. It is a funky R&B song that compares love to a drug addiction.
• Dru World Order (Interlude) (Dedicated to Dru Hill): The eleventh track and an interlude that pays tribute to Sisqo's group Dru Hill.
• Enchantment Passing Through: The twelfth track of the album. It is a soulful song that features Sisqo's vocals from the Broadway musical Aida.
• You Are Everything (Remix): The thirteenth track and final song of the album. It is a remix of Dru Hill's hit song "You Are Everything", featuring rap verses from Ja Rule.
          -

          How to Download the Album Zip File

          -

          If you want to download Unleash the Dragon in a zip file format, you can follow these simple steps:

          -

          download sisqo unleash the dragon full album free
          -download sisqo unleash the dragon mp3 songs
          -download sisqo unleash the dragon album rar
          -download sisqo unleash the dragon deluxe edition zip
          -download sisqo unleash the dragon album 320kbps
          -download sisqo unleash the dragon album tracklist
          -download sisqo unleash the dragon album online
          -download sisqo unleash the dragon album torrent
          -download sisqo unleash the dragon album zip file
          -download sisqo unleash the dragon album mega
          -download sisqo unleash the dragon album google drive
          -download sisqo unleash the dragon album zip mediafire
          -download sisqo unleash the dragon album zip sharebeast
          -download sisqo unleash the dragon album zip zippyshare
          -download sisqo unleash the dragon album zip 4shared
          -download sisqo unleash the dragon flac album zip
          -download sisqo unleash the dragon m4a album zip
          -download sisqo unleash the dragon aac album zip
          -download sisqo unleash the dragon wav album zip
          -download sisqo unleash the dragon ogg album zip
          -download sisqo unleash the dragon wma album zip
          -download sisqo unleash the dragon alac album zip
          -download sisqo unleash the dragon ape album zip
          -download sisqo unleash the dragon dts album zip
          -download sisqo unleash the dragon ac3 album zip
          -download sisqo unleash the dragon instrumental album zip
          -download sisqo unleash the dragon remix album zip
          -download sisqo unleash the dragon live album zip
          -download sisqo unleash the dragon acoustic album zip
          -download sisqo unleash the dragon unplugged album zip
          -download sisqo unleash the dragon karaoke album zip
          -download sisqo unleash the dragon cover album zip
          -download sisqo unleash the dragon tribute album zip
          -download sisqo unleash the dragon clean version album zip
          -download sisqo unleash the dragon explicit version album zip
          -download sisqo unleash the dragon bonus tracks album zip
          -download sisqo unleash the dragon special edition album zip
          -download sisqo unleash the dragon collector's edition album zip
          -download sisqo unleash the dragon anniversary edition album zip
          -download sisqo unleash the dragon reissue edition album zip
          -download sisqo unleash the dragon expanded edition album zip
          -download sisqo unleash the dragon deluxe reissue edition album zip
          -download sisqo unleash the dragon vinyl rip album zip
          -download sisqo unleash the dragon cd rip album zip
          -download sisqo unleash the dragon cassette rip album zip
          -download sisqo unleash the dragon dvd rip audio album zip
          -how to download sisqo unleash the dragon full free mp3 songs online

          -
            -
1. Go to one of these websites that offer free downloads of albums: Mphiphop, Eastnaija, or Qobuz.
2. Search for Sisqo Unleash the Dragon or click on one of these links: Mphiphop link, Eastnaija link, or Qobuz link.
3. Select the option to download or stream the album.
4. If you choose to download the album, you will need to unzip the file after it finishes downloading.
5. You can use any software that can open zip files, such as WinZip or 7-Zip.
6. After unzipping the file, you will see the folder containing the mp3 files of the songs in the album.
7. You can then transfer the files to your device or play them on your computer.
          -

          Why You Should Listen to Unleash the Dragon

          -

          You might be wondering why you should listen to Unleash the Dragon, especially if you are not familiar with Sisqo or his music. Well, there are many reasons why this album is worth your time and attention. Here are some of them:

          -

          The Album is Fun and Catchy

          -

One of the main reasons why you should listen to Unleash the Dragon is that it is fun and catchy. The songs are designed to make you feel good and dance. Sisqo's vocals are energetic and charismatic, and he knows how to deliver catchy hooks and melodies. The album also features some rap verses from guests like Beanie Sigel, Make It Hot, and Ja Rule, who add some spice and variety to the songs. Some of the most fun and catchy songs in the album are "Unleash the Dragon", "Got to Get It", "Your Love Is Incredible", and of course, "Thong Song".

          -

          The Album is Versatile and Diverse

          -

          Another reason why you should listen to Unleash the Dragon is because it is versatile and diverse. The album showcases Sisqo's ability to sing different styles and genres of music, from R&B to pop to soul to funk. The album also explores different themes and moods, from love to sex to loneliness to empowerment. Sisqo proves that he is not a one-trick pony, but a versatile and diverse artist who can appeal to different audiences and tastes. Some of the most versatile and diverse songs in the album are "Is Love Enough", "How Can I Love U 2nite", "Incomplete", and "Enchantment Passing Through".

          -

          The Album is Classic and Timeless

          -

          The final reason why you should listen to Unleash the Dragon is because it is classic and timeless. The album was a huge success when it was released in 1999, selling over six million copies worldwide and earning four Grammy nominations. The album also received positive reviews from critics, who praised Sisqo's vocals, charisma, and versatility. The album's popularity and quality have endured over time, as it still sounds fresh and relevant today. The album has influenced many artists who came after Sisqo, such as Usher, Chris Brown, Jason Derulo, and Bruno Mars. The album has also become a part of pop culture, especially with its iconic song "Thong Song", which has been referenced, parodied, sampled, and covered by many artists and media outlets. Some of the most classic and timeless songs in the album are "Thong Song", "Incomplete", "You Are Everything (Remix)", and "Got to Get It".

          -

          Conclusion

          -

          In conclusion, Unleash the Dragon is a fun, catchy, versatile, diverse, classic, and timeless R&B album that you should listen to. It showcases Sisqo's talent as a singer, songwriter, dancer, and actor. It also features some of his best songs that will make you feel good and dance. If you want to download the album in a zip file format, you can follow the steps we provided above. We hope you enjoy listening to Unleash the Dragon as much as we did.

          -

          FAQs

          -

          Here are some frequently asked questions and their answers about Unleash the Dragon:

          -
            -
• Q: When was Unleash the Dragon released?
• A: The album was released on November 30, 1999.
• Q: Who produced Unleash the Dragon?
• A: The album was produced by Sisqo himself, along with other producers such as Tim & Bob, Babyface, Warryn Campbell, Shep Crawford, and Montell Jordan.
• Q: How many singles were released from Unleash the Dragon?
• A: Six singles were released from the album: "Got to Get It", "Your Love Is Incredible", "How Can I Love U 2nite", "Thong Song", "Incomplete", and "You Are Everything (Remix)".
• Q: What awards did Unleash the Dragon win or get nominated for?
• A: The album won two American Music Awards for Favorite Soul/R&B Male Artist and Favorite Soul/R&B New Artist in 2001. It also got nominated for four Grammy Awards for Best New Artist, Best R&B Male Vocal Performance ("Thong Song"), Best R&B Song ("Thong Song"), and Best R&B Album in 2001.
• Q: Where can I stream or buy Unleash the Dragon?
• A: You can stream or buy the album on various platforms such as Spotify, Apple Music, Amazon Music, YouTube Music, and Tidal.
          -

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download Windows 10 Professional Full Version Crack for Free (and Why You Shouldnt).md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Download Windows 10 Professional Full Version Crack for Free (and Why You Shouldnt).md deleted file mode 100644 index 724f5bb90c57019b28f6c6b36e01cf6d4b76da32..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download Windows 10 Professional Full Version Crack for Free (and Why You Shouldnt).md +++ /dev/null @@ -1,34 +0,0 @@ -

          How to Get Windows 10 Professional Free Download Full Version Crack

          -

          If you are looking for a way to get Windows 10 Professional free download full version crack, you are not alone. Many people want to enjoy the benefits of this operating system without paying the hefty price tag. However, downloading and installing a cracked version of Windows 10 Professional is not only illegal but also risky. You may end up with malware, viruses, or other problems that can harm your computer and compromise your data. In this article, we will show you a better and safer alternative to get Windows 10 Professional for free.

          -

          windows 10 professional free download full version crack


          Downloadhttps://urlcod.com/2uKaUQ



          -

          What is Windows 10 Professional?

          -

          Windows 10 Professional is one of the editions of Windows 10, the latest and most popular operating system from Microsoft. It offers advanced features and functionalities that are suitable for business and power users. Some of the features of Windows 10 Professional include:

          -
            -
• Domain join and group policy management
• BitLocker encryption and secure boot
• Remote desktop and remote access
• Hyper-V virtualization and sandbox
• Windows Update for Business and Windows Defender Antivirus
• Cortana voice assistant and Microsoft Edge browser
• Microsoft Store and Xbox app
          -

          Windows 10 Professional also supports the latest hardware and software technologies, such as touchscreens, stylus pens, facial recognition, fingerprint scanners, 4K displays, DirectX 12, HDR, Dolby Atmos, and more.

          -

          How to Get Windows 10 Professional for Free?

          -

          The official way to get Windows 10 Professional is to buy a license from Microsoft or an authorized retailer. The price of a Windows 10 Professional license varies depending on the region and the seller, but it usually costs around $200 USD. However, there are some ways to get Windows 10 Professional for free or at a lower cost legally. Here are some of them:

          -

          -
            -
• If you have a valid license of Windows 7 or Windows 8.1 Professional, you can upgrade to Windows 10 Professional for free using the Media Creation Tool or the Update Assistant from Microsoft. This offer is still available as of May 2023, but it may end anytime soon.
• If you are a student or an educator, you may be eligible to get Windows 10 Education for free from your school or institution. Windows 10 Education is similar to Windows 10 Professional but with some additional features for learning and teaching. You can check your eligibility and claim your free copy of Windows 10 Education from the Microsoft Education website.
• If you are a developer or a tester, you can join the Windows Insider Program and get access to the latest preview builds of Windows 10 for free. You can choose between the Dev Channel, the Beta Channel, or the Release Preview Channel depending on your preference and risk tolerance. However, keep in mind that these builds may be unstable, buggy, or incomplete, and they may expire after a certain period. You also need to provide feedback and report issues to Microsoft regularly.
• If you just want to try out Windows 10 Professional for a short time, you can download and install a free trial version from the Microsoft Evaluation Center. The trial version is valid for 90 days and has all the features of Windows 10 Professional. However, you cannot activate or customize it, and it will stop working after the trial period ends.
          -

          Why You Should Avoid Windows 10 Professional Free Download Full Version Crack?

          -

          Some people may be tempted to download and install a cracked version of Windows 10 Professional from dubious sources on the internet. However, this is a bad idea for several reasons:

          -
            -
• It is illegal. Downloading and using a cracked version of Windows 10 Professional violates the terms and conditions of Microsoft and infringes their intellectual property rights. You may face legal consequences if you are caught doing so.
• It is unsafe. Cracked versions of Windows 10 Professional may contain malware, viruses, spyware, ransomware, or other malicious programs that can infect your computer and steal your personal information, damage your files, lock your system, or perform other harmful actions.
• It is unreliable. Cracked versions of Windows 10 Professional may not work properly or at all. They may have missing features, corrupted files, compatibility issues, performance problems, activation errors, update failures, or other glitches that can ruin your experience.

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/BOMBitUP APK The Best Tool for SMS Marketing and Promotion.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/BOMBitUP APK The Best Tool for SMS Marketing and Promotion.md deleted file mode 100644 index d36b72be4a638bef1c580b377627b919ef40fd99..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/BOMBitUP APK The Best Tool for SMS Marketing and Promotion.md +++ /dev/null @@ -1,119 +0,0 @@ - -

            Bombitup APK Download 2021: The Ultimate Prank App for Android

            -

            Do you love pranking your friends and family with funny messages? Do you want to annoy them with endless SMS that they can't stop or block? If yes, then you need to try Bombitup APK, the best SMS bomber app for Android devices. In this article, we will tell you everything you need to know about Bombitup APK, including its features, how to download and install it, how to use it, and whether it is safe and legal to use.

            -

            What is Bombitup APK?

            -

            Bombitup APK is a fun and entertaining lifestyle application developed by Sanchit Gera for Android users. Its main purpose is to help you prank your friends by sending unlimited text messages to their phone numbers. For instance, you can send them fake OTP or verification spam messages that look real but are actually not. It is completely free to use however, do take note that this can be used for fun prank purposes only, not for revenge or harassment.

            -

            bombitup apk download 2021


            Download File ✓✓✓ https://bltlly.com/2uOpKA



            -

            Features of Bombitup APK

            -

            Bombitup APK has a lot of features that make it stand out from other SMS bomber apps. Here are some of them:

            -

            Send unlimited SMS to any number

            -

            You can send as many SMS as you want to any number you choose. There is no limit on how many messages you can bomb or how long you can bomb them. You can also choose whether to send SMS only or SMS and calls.

            -

            Choose from different countries and APIs

            -

            Bombitup APK works in most countries around the world. You can select the country code of the target number from a list of options. You can also choose which API to use for sending the SMS. An API is a service that provides the functionality of sending SMS. Bombitup APK has a lot of APIs that are more powerful than other SMS bomber apps.

            -

            Protect your own number from being bombed

            -

            If you are worried that someone might bomb your own number with Bombitup APK, you can protect yourself by adding your number to the protection list. This will prevent anyone from bombing your number with this app.

            -

            Customize the speed and frequency of SMS

            -

            You can also adjust the speed and frequency of sending SMS with Bombitup APK. You can choose how fast you want to send the messages and how many messages you want to send per second

            Use it for SMS marketing as well

            -

            Bombitup APK is not only for pranking your friends, but also for SMS marketing. You can use it to promote your products or services to potential customers by sending them bulk SMS. You can also use it to send reminders, notifications, alerts, or updates to your existing customers. However, you should be careful not to spam or violate any laws or regulations regarding SMS marketing.

            -

            How to download and install Bombitup APK?

            -

            If you want to try Bombitup APK on your Android device, you need to follow these simple steps:

            -

            Download the latest version of Bombitup APK from a trusted source

            -

            The first thing you need to do is to download the latest version of Bombitup APK from a trusted source. You can find the official link on the developer's website or on other reputable websites. You should avoid downloading the app from unknown or suspicious sources as they may contain malware or viruses that can harm your device.

            -

            bombitup apk download latest version 2021
            -bombitup apk download for android free
            -bombitup apk download link 2021
            -bombitup apk download prank sms bomber
            -bombitup apk download official website
            -bombitup apk download updated version 2021
            -bombitup apk download for ios devices
            -bombitup apk download no ads 2021
            -bombitup apk download unlimited sms
            -bombitup apk download modded version 2021
            -bombitup apk download from apkpure
            -bombitup apk download for pc windows 10
            -bombitup apk download hack version 2021
            -bombitup apk download with otp bypass
            -bombitup apk download safe and secure
            -bombitup apk download new features 2021
            -bombitup apk download without root
            -bombitup apk download for india users
            -bombitup apk download pro version 2021
            -bombitup apk download with custom sender id
            -bombitup apk download from google drive
            -bombitup apk download for mac os
            -bombitup apk download cracked version 2021
            -bombitup apk download with call bomber
            -bombitup apk download malware free
            -bombitup apk download best sms bomber 2021
            -bombitup apk download without verification
            -bombitup apk download for usa users
            -bombitup apk download premium version 2021
            -bombitup apk download with email bomber
            -bombitup apk download virus free
            -bombitup apk download fastest sms bomber 2021
            -bombitup apk download easy to use
            -bombitup apk download for uk users
            -bombitup apk download full version 2021
            -bombitup apk download with whatsapp bomber
            -bombitup apk download clean and clear
            -bombitup apk download most reliable sms bomber 2021
            -bombitup apk download fun and entertaining
            -bombitup apk download for canada users
            -bombitup apk download latest update 2021
            -bombitup apk download with telegram bomber
            -bombitup apk download smooth and stable
            -bombitup apk download ultimate sms bomber 2021
            -bombitup apk download prank your friends

            -

            Enable unknown sources on your Android device

            -

            The next thing you need to do is to enable unknown sources on your Android device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device, but you can ignore it as long as you trust the source of the app.

            -

            Install the APK file and grant the required permissions

            -

            The last thing you need to do is to install the APK file and grant the required permissions. To do this, locate the downloaded APK file on your device and tap on it. You will see a pop-up window that asks you to confirm the installation. Tap on Install and wait for the process to finish. You may also see another pop-up window that asks you to grant certain permissions to the app, such as access to contacts, phone, SMS, storage, etc. Tap on Allow and proceed with the installation.

            -

            Launch the app and enjoy pranking your friends

            -

            Once the installation is done, you can launch the app and enjoy pranking your friends. You will see a simple and user-friendly interface that lets you choose from different options and settings. You can also access the protection list, the update checker, and the terms and conditions from the menu.

            -

            How to use Bombitup APK?

            -

            Using Bombitup APK is very easy and fun. Here are the steps you need to follow:

            -

            Select the country code of the target number

            -

            The first step is to select the country code of the target number. You can do this by tapping on the flag icon on the top right corner of the screen. You will see a list of countries and their codes. Choose the one that matches the target number.

            -

            Enter the target number and the number of SMS to send

            -

            The next step is to enter the target number and the number of SMS to send. You can do this by typing in the fields below the flag icon. Make sure you enter a valid number without any spaces or symbols. You can also enter a custom message if you want, or leave it blank for a random message. You can also choose how many SMS you want to send by using the slider below.

            -

            Tap on BOMBit and wait for the magic to happen

            -

            The final step is to tap on BOMBit and wait for the magic to happen. You will see a progress bar that shows how many SMS have been sent and how many are left. You can also see a log of all the messages that have been sent and their status. You can stop the bombing at any time by tapping on Stop.

            -

            Is Bombitup APK safe and legal?

            -

            Bombitup APK is a fun and entertaining app that can be used for pranking your friends. However, you may have some questions about its safety and legality. Here are some answers:

            -

            Bombitup APK is safe to use as it does not contain any malware or viruses

            -

            Bombitup APK is safe to use as it does not contain any malware or viruses that can harm your device or data. It has been tested by various antivirus programs and has been found clean and secure. However, you should always download it from a trusted source and scan it before installing it.

            -

            Bombitup APK is legal to use as long as you do not misuse it for malicious purposes

            -

            Bombitup APK is legal to use as long as you do not misuse it for malicious purposes such as harassment

            Bombitup APK is legal to use as long as you do not misuse it for malicious purposes such as harassment, blackmail, fraud, or cybercrime. You should also respect the privacy and rights of the recipients and not send them any offensive or inappropriate messages. You should also be aware of the laws and regulations of your country and the country of the target number regarding SMS bombing. You should use Bombitup APK at your own risk and responsibility and not hold the developer liable for any consequences.

            -

            Bombitup APK is not affiliated with any official SMS service providers or networks. It uses third-party APIs that are publicly available and free to use. However, these APIs may change or stop working at any time without notice. Therefore, Bombitup APK may not work properly or at all in some cases. You should also check the charges and fees of your network provider before using Bombitup APK as they may vary depending on the number of SMS you send and the destination.

            -

            Conclusion

            -

            Bombitup APK is a great app for pranking your friends with unlimited SMS. It has a lot of features that make it fun and easy to use. You can download and install it on your Android device by following the steps above. However, you should use it responsibly and ethically and not abuse it for any harmful purposes. You should also be careful about the safety and legality of using Bombitup APK and follow the terms and conditions of the app.

            -

            FAQs

            -

            Here are some frequently asked questions about Bombitup APK:

            - - - - - - - - - - - - - - - - - - - - - - - - - -
Q: Can I use Bombitup APK on iOS devices? A: No, Bombitup APK is only available for Android devices. There is no official version of Bombitup APK for iOS devices.
Q: Can I bomb email addresses with Bombitup APK? A: Yes, Bombitup APK also supports email bombing. You can enter an email address instead of a phone number and send unlimited emails to it.
Q: Can I bomb WhatsApp numbers with Bombitup APK? A: No, Bombitup APK does not support WhatsApp bombing. It only works with regular phone numbers that can receive SMS.
Q: Can I bomb multiple numbers at once with Bombitup APK? A: No, Bombitup APK only allows you to bomb one number at a time. You have to stop the current bombing before starting a new one.
Q: Can I get in trouble for using Bombitup APK? A: Possibly, if you misuse it for illegal or unethical purposes. You should always use Bombitup APK for fun and entertainment only and not harm anyone with it. You should also respect the laws and regulations of your country and the country of the target number regarding SMS bombing.

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bus Simulator Indonesia APK - The Most Realistic and Feature-Rich Bus Driving Game on Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bus Simulator Indonesia APK - The Most Realistic and Feature-Rich Bus Driving Game on Android.md deleted file mode 100644 index 9f51af538283cd528c830e23fc31a56a363fc0e8..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bus Simulator Indonesia APK - The Most Realistic and Feature-Rich Bus Driving Game on Android.md +++ /dev/null @@ -1,107 +0,0 @@ -
            -

            Bus Simulator Indonesia APK Download 2022: Experience the Fun and Authenticity of Driving a Bus in Indonesia

            -

            Do you love driving games? Do you want to experience what it's like to be a bus driver in Indonesia? If yes, then you should try Bus Simulator Indonesia, one of the most popular and realistic bus simulator games on Android. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, and why you should play it.

            -

            bus simulator indonesia apk download 2022


            DOWNLOAD ……… https://bltlly.com/2uOjjr



            -

            What is Bus Simulator Indonesia?

            -

            Bus Simulator Indonesia (aka BUSSID) is a game developed by Maleo, an Indonesian game studio. It was released in 2017 and has been updated regularly ever since. It is one of the only bus simulator games with the most features and the most authentic Indonesian environment. You can drive various types of buses, from city buses to intercity buses, across different regions of Indonesia. You can also customize your bus with your own design and colors, and interact with other players online.

            -

            Features of Bus Simulator Indonesia

            -

            Bus Simulator Indonesia has many features that make it stand out from other bus simulator games. Here are some of them:

            -

            Design your own livery

            -

            You can create your own livery for your bus using the built-in editor. You can choose from different colors, patterns, stickers, logos, and more. You can also share your livery with other players or download their liveries from the online gallery.

            -

            * Bus Simulator Indonesia 2022 APK free download
            -* How to install Bus Simulator Indonesia on Android
            -* Bus Simulator Indonesia mod APK unlimited money 2022
            -* Bus Simulator Indonesia online multiplayer convoy mode
            -* Bus Simulator Indonesia game review and features
            -* Best bus simulator games for Android 2022
            -* Bus Simulator Indonesia latest version update 2022
            -* Bus Simulator Indonesia vehicle mod system tutorial
            -* Bus Simulator Indonesia livery design tips and tricks
            -* Bus Simulator Indonesia authentic Indonesian environment and cities
            -* Bus Simulator Indonesia fun and cool honks sound effects
            -* Bus Simulator Indonesia offline mode gameplay
            -* Bus Simulator Indonesia data saved online and data privacy
            -* Bus Simulator Indonesia leaderboard and achievements
            -* Bus Simulator Indonesia no ads while driving option
            -* Bus Simulator Indonesia realistic and intuitive control settings
            -* Bus Simulator Indonesia high quality and detailed 3D graphics
            -* Bus Simulator Indonesia maleo developer information and contact
            -* Bus Simulator Indonesia emoji icons provided by emojione.com
            -* Bus Simulator Indonesia om telolet om phenomenon and history
            -* Download Bus Simulator Indonesia for PC Windows 10/8/7
            -* Download Bus Simulator Indonesia for Mac OS X
            -* Download Bus Simulator Indonesia for iOS iPhone/iPad
            -* Download Bus Simulator Indonesia for Linux Ubuntu/Debian
            -* Download Bus Simulator Indonesia for Chrome OS Chromebook
            -* Download Bus Simulator Indonesia APK file from APKPure.com
            -* Download Bus Simulator Indonesia APK file from APKMirror.com
            -* Download Bus Simulator Indonesia APK file from APKMonk.com
            -* Download Bus Simulator Indonesia APK file from Uptodown.com
            -* Download Bus Simulator Indonesia APK file from APKFab.com
            -* Compare Bus Simulator Indonesia with 2022 Indonesia Bus Simulator
            -* Compare Bus Simulator Indonesia with IDBS Studio simulator games
            -* Compare Bus Simulator Indonesia with Heavy Bus Simulator by Dynamic Games Ltda
            -* Compare Bus Simulator Indonesia with World Bus Driving Simulator by Dynamic Games Ltda
            -* Compare Bus Simulator Indonesia with Coach Bus Simulator by Ovidiu Pop
            -* Compare Bus Simulator Indonesia with Mobile Bus Simulator by LOCOS
            -* Compare Bus Simulator Indonesia with Proton Bus Simulator Road by MEP PBSR
            -* Compare Bus Simulator Indonesia with Euro Truck Driver 2018 by Ovidiu Pop
            -* Compare Bus Simulator Indonesia with Indian Train Simulator by Highbrow Interactive
            -* Compare Bus Simulator Indonesia with Indian Truck Offroad Cargo Drive Sim by Gaming Legends
            -* How to play Bus Simulator Indonesia on Bluestacks emulator
            -* How to play Bus Simulator Indonesia on NoxPlayer emulator
            -* How to play Bus Simulator Indonesia on LDPlayer emulator
            -* How to play Bus Simulator Indonesia on MEmu emulator
            -* How to play Bus Simulator Indonesia on Genymotion emulator
            -* How to fix common errors and issues in Bus Simulator Indonesia
            -* How to get more coins and gems in Bus Simulator Indonesia
            -* How to unlock all buses and routes in Bus Simulator Indonesia
            -* How to customize your bus and driver in Bus Simulator Indonesia

            -

            Easy and intuitive control

            -

            You can control your bus using various options, such as tilt, steering wheel, buttons, or slider. You can also adjust the sensitivity and feedback of the control according to your preference. You can also use the automatic or manual transmission, depending on your skill level.

            -

            Authentic Indonesian cities and places

            -

            You can drive your bus across different cities and places in Indonesia, such as Jakarta, Surabaya, Bali, Sumatra, Java, and more. You can see the landmarks, buildings, roads, traffic, weather, and culture of each region. You can also follow the realistic routes and schedules of the buses in Indonesia.

            -

            Indonesian buses

            -

            You can choose from different types of buses that are commonly used in Indonesia, such as city buses, intercity buses, double-decker buses, mini buses, school buses, and more. You can also see the details and specifications of each bus, such as the engine, speed, capacity, fuel consumption, etc.

            -

            Cool and fun honks

            -

            You can use various honks to communicate with other drivers and pedestrians on the road. You can also hear the famous "Om Telolet Om!" (Uncle, honk your horn, uncle!) phrase that became a viral sensation in Indonesia and around the world.

            -

            High quality and detailed 3D graphics

            -

            You can enjoy the stunning 3D graphics of the game that show the realistic and immersive scenery of Indonesia. You can also adjust the graphics quality and performance according to your device's capability.

            -

            No obstructive ads while driving

            -

            You can play the game without being interrupted by annoying ads while driving. The only ads you will see are on the billboards along the road, which add to the realism of the game.

            -

            Leaderboard and online data

            -

            You can compete with other players on the leaderboard based on your driving skills and achievements. You can also save your data online so you don't lose your progress if you change your device or uninstall the game.

            -

            Vehicle mod system

            -

            You can modify your bus with various parts and accessories, such as wheels, bumpers, spoilers, lights, mirrors, etc. You can also download and install mods from other players or create your own mods using the mod tools provided by the game developer.

            -

            Online multiplayer convoy

            -

            You can join or create a convoy with other players online and drive together on the same road. You can chat with them using the voice or text chat feature. You can also cooperate or compete with them in various missions and events.

            -

            How to download and install Bus Simulator Indonesia APK?

            -

            If you want to play Bus Simulator Indonesia on your Android device, you need to download and install the APK file of the game. Here are the steps to do so; a small command-line sketch for sideloading the APK from a PC follows the list:

            -
              -
            1. Go to the official website of Bus Simulator Indonesia at https://bussimulator.id/ or search for "Bus Simulator Indonesia APK" on Google.
            2. Click on the download button and choose the version of the game you want to download. The latest version is 3.6.1 as of June 2022.
            3. Wait for the download to finish and locate the APK file on your device's storage.
            4. Tap on the APK file and allow the installation from unknown sources if prompted.
            5. Follow the instructions on the screen and wait for the installation to complete.
            6. Launch the game and enjoy driving a bus in Indonesia!
            -
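            If you prefer to install from a computer instead of tapping the file on the phone, the APK can also be sideloaded over USB with adb. The snippet below is only a rough sketch, not part of the official instructions: it assumes adb is installed and on your PATH, that USB debugging is enabled on the device, and the file name `bussid.apk` is a hypothetical placeholder for whatever file you downloaded.

```python
# Rough sketch: sideload the downloaded APK over USB with adb.
# Assumptions (not from the article): adb is installed and on PATH,
# USB debugging is enabled, and "bussid.apk" is a placeholder file name.
import subprocess
import sys

APK_PATH = "bussid.apk"  # hypothetical name; point this at the file you downloaded


def sideload(apk_path: str) -> None:
    # List connected devices; an authorized device shows up as "<serial>\tdevice".
    devices = subprocess.run(
        ["adb", "devices"], capture_output=True, text=True, check=True
    ).stdout
    if "\tdevice" not in devices:
        sys.exit("No authorized device found; enable USB debugging and retry.")
    # -r reinstalls (updates) the app while keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)
    print(f"Installed {apk_path}")


if __name__ == "__main__":
    sideload(APK_PATH)
```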

            Why should you play Bus Simulator Indonesia?

            -

            Bus Simulator Indonesia is a game that will give you a lot of fun and satisfaction as a bus driver. You will be able to experience the beauty and diversity of Indonesia along with the challenges and rewards of driving a bus, express your creativity and personality by designing your own bus and livery, and interact with other players online in a community of bus enthusiasts. It is a game that will make you feel like you are really in Indonesia, driving a bus.

            -

            If you are looking for a realistic, immersive, and enjoyable bus simulator game, then you should definitely try Bus Simulator Indonesia. It is free to download and play, and it has many features that will keep you entertained for hours. Download it now and see for yourself why it is one of the best bus simulator games on Android!

            -

            Conclusion

            -

            Bus Simulator Indonesia is a game that lets you experience the fun and authenticity of driving a bus in Indonesia. You can choose from different types of buses, customize them with your own design, drive across various regions of Indonesia, use cool and fun honks, compete with other players online, and more. It is a game that will make you feel like you are really in Indonesia, driving a bus.

            -

            If you want to play Bus Simulator Indonesia on your Android device, you need to download and install the APK file of the game from its official website or Google. It is easy and simple to do, and it will only take a few minutes. Once you have installed the game, you can launch it and start your journey as a bus driver in Indonesia.

            -

            Bus Simulator Indonesia is a game that will give you a lot of enjoyment and satisfaction as a bus driver. It is one of the most popular and realistic bus simulator games on Android, and it has many features that will make you love it. Download it now and see for yourself why it is one of the best bus simulator games on Android!

            -

            Frequently Asked Questions

            -

            Q: Is Bus Simulator Indonesia safe to download and play?

            -

            A: Yes, Bus Simulator Indonesia is safe to download and play. It does not contain any viruses or malware, and it does not require any special permissions or access to your device. It is also regularly updated by its developer to fix any bugs or issues.

            -

            Q: How much space does Bus Simulator Indonesia need on my device?

            -

            A: Bus Simulator Indonesia needs about 300 MB of space on your device's storage. However, this may vary depending on the version of the game and the mods you install.

            -

            Q: Can I play Bus Simulator Indonesia offline?

            -

            A: Yes, you can play Bus Simulator Indonesia offline. However, some features of the game, such as online multiplayer convoy, leaderboard, online data, online gallery, etc., will not be available offline. You will need an internet connection to access these features.

            -

            Q: Can I play Bus Simulator Indonesia on PC?

            -


            A: Yes, you can play Bus Simulator Indonesia on PC using an Android emulator. An Android emulator is software that allows you to run Android apps and games on your PC. Some popular Android emulators are BlueStacks, NoxPlayer, and LDPlayer. You can download and install any of these emulators on your PC, then download and install the Bus Simulator Indonesia APK from the same sources mentioned above. You can then launch the game and enjoy it on a bigger screen.

            -

            Q: How can I contact the developer of Bus Simulator Indonesia?

            -

            A: If you have any questions, feedback, suggestions, or issues regarding Bus Simulator Indonesia, you can contact the developer of the game by sending an email to support@maleo.id. You can also visit their official website at https://bussimulator.id/ or follow their social media accounts on Facebook, Instagram, YouTube, and Twitter.

            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Analog Design Essentials Willy Sansen.PDF ((BETTER)).md b/spaces/tioseFevbu/cartoon-converter/scripts/Analog Design Essentials Willy Sansen.PDF ((BETTER)).md deleted file mode 100644 index dad2807395872e2e7d3dbd8bbd14b9cc0b21b2c4..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Analog Design Essentials Willy Sansen.PDF ((BETTER)).md +++ /dev/null @@ -1,27 +0,0 @@ -
            -

            Analog Design Essentials: A Comprehensive Textbook by Willy Sansen

            -

            Analog Design Essentials is a textbook that covers all topics of importance to the analog designer, from basic transistor concepts and amplifier design to advanced topics such as filter design, oscillators, and analog and digital conversion. The author, Willy Sansen, is a professor at the Katholieke Universiteit Leuven in Belgium and a renowned expert in the field of analog design. He has also taught at universities and companies worldwide.

            -

            The book is based on the extensive teaching material that the author has developed over the years. It consists of 24 chapters of color slides, with each slide explaining one additional aspect. The slides are also available as a PDF file that can be downloaded from the publisher's website. The book aims to provide a thorough and systematic understanding of analog design, with an emphasis on low power consumption and high-performance circuits. It also includes many examples, exercises, and references for further reading.

            -

            Analog Design Essentials Willy Sansen.PDF


            Download Zip: https://urlcod.com/2uHvFE



            -

            Analog Design Essentials is suitable for both self-study and lectures. It is intended for students, researchers, and engineers who want to learn or improve their skills in analog design. It is also a valuable reference for anyone who is interested in the principles and applications of analog circuits.

            -

            The book was published by Springer in 2006 as part of the International Series in Engineering and Computer Science. It has received positive reviews from readers and reviewers alike, who praised its clarity, depth, and breadth of coverage. It has also been cited by many other publications in the field of analog design.

            -

            If you want to learn more about Analog Design Essentials by Willy Sansen, you can visit the following links:

            -
              -
            • The publisher's website, where you can find more information about the book, including its table of contents, abstracts, reviews, and sample pages.
            • The PDF file of the color slides that accompany the book.
            • A summary of the book's main topics and contributions.
            -

            Some of the topics that the book covers in detail are:

            -
              -
            • The comparison of MOST and bipolar transistor models, including their advantages and disadvantages, their small-signal and large-signal behavior, and their applications in analog circuits.
            • The design of amplifiers, source followers, and cascodes, with an emphasis on low-noise, low-distortion, and high-gain performance. The book also explains how to optimize the biasing, compensation, and feedback of these circuits.
            • The design of differential voltage and current amplifiers, which are essential for achieving a high common-mode rejection ratio (CMRR), high input impedance, and low output impedance. The book also discusses the effects of mismatch, offset, and noise on these amplifiers.
            • The noise performance of elementary transistor stages and passive components, such as resistors, capacitors, diodes, transistors, and current sources. The book introduces the concepts of noise sources, noise models, noise figure, noise factor, and noise bandwidth, and shows how to calculate and minimize the noise contribution of each stage (the standard definitions are recalled in the short worked relations after this list).
            • The stability of operational amplifiers (opamps), which are widely used in analog circuits for amplification, buffering, filtering, and signal processing. The book explains the criteria for stability, such as phase margin and gain margin, and how to design opamps that are stable for any load condition.
            -
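            For orientation, here are the standard textbook relations behind the noise and stability terms mentioned above. These are the usual definitions, not notation quoted from Sansen's slides: F is the noise factor, NF the noise figure in dB, G_i the available power gain of stage i, T(jω) the loop gain, and ω_c its 0 dB crossover frequency.

```latex
% Noise factor and noise figure: SNR degradation through a stage
F = \frac{\mathrm{SNR}_{\mathrm{in}}}{\mathrm{SNR}_{\mathrm{out}}},
\qquad
\mathrm{NF} = 10 \log_{10} F \;\;[\mathrm{dB}]

% Friis formula: total noise factor of cascaded stages with gains G_i
F_{\mathrm{tot}} = F_1 + \frac{F_2 - 1}{G_1} + \frac{F_3 - 1}{G_1 G_2} + \cdots

% Phase margin: distance of the loop-gain phase from -180 degrees at the
% 0 dB crossover frequency w_c (|T(jw_c)| = 1); PM > 0 is required for
% stability, and 60 degrees or more is a common design target.
\mathrm{PM} = 180^{\circ} + \angle T(j\omega_c)
```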

            The book also covers many other topics that are important for analog design, such as systematic design of opamps, important opamp configurations, fully-differential amplifiers, multistage opamps, current-input opamps, rail-to-rail input and output amplifiers, class AB and driver amplifiers, feedback voltage and transconductance amplifiers, feedback transimpedance and current amplifiers, offset and CMRR: random and systematic, bandgap and current reference circuits, switched-capacitor filters, distortion in elementary transistor circuits, continuous-time filters.

            -

            Analog Design Essentials is a comprehensive textbook that provides a solid foundation for anyone who wants to learn or master analog design. It is also a useful reference for anyone who wants to refresh or update their knowledge on analog circuits. It is written in a clear and concise style that makes it easy to follow and understand. It is also rich in examples and exercises that help reinforce the concepts and skills learned. It is a valuable resource for students, researchers, and engineers who are interested in analog design.

            -

            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/AnyDesk 5.4.0 Crack With Serial Key Free Download 2020 NEW!.md b/spaces/tioseFevbu/cartoon-converter/scripts/AnyDesk 5.4.0 Crack With Serial Key Free Download 2020 NEW!.md deleted file mode 100644 index 4e33c90883407c3f3aa438dd3a09c94dd8b31e9a..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/AnyDesk 5.4.0 Crack With Serial Key Free Download 2020 NEW!.md +++ /dev/null @@ -1,16 +0,0 @@ -
            -

            AnyDesk 5.4.0 Crack With Serial Key Free Download 2020

            -

            AnyDesk is a fast and reliable remote desktop software that allows you to access and control any computer from anywhere. Whether you need to work from home, provide technical support, collaborate with your team, or access your personal files, AnyDesk lets you do it securely and easily.

            -

            However, if you want to use AnyDesk for commercial purposes, you need to purchase a license that suits your needs. The license gives you access to advanced features such as unlimited concurrent sessions, file transfer, remote printing, session recording, and more.

            -

            AnyDesk 5.4.0 Crack With Serial Key Free Download 2020


            Download: https://urlcod.com/2uHxDG



            -

            But what if you don't have the budget to buy a license? Is there a way to get AnyDesk for free with all the features unlocked? The answer is yes, but it comes with a risk. Some websites claim to offer AnyDesk 5.4.0 crack with serial key free download 2020, which is supposed to activate the full version of AnyDesk without paying anything.

            -

            However, downloading and installing AnyDesk crack is not only illegal but also dangerous. You may end up with malware, viruses, or spyware on your computer that can compromise your security and privacy. You may also face legal consequences for violating the terms and conditions of AnyDesk.

            -

            -

            Therefore, we strongly advise you to avoid AnyDesk crack and serial key free download 2020 and instead use the official version of AnyDesk from its website[^2^]. You can download AnyDesk v5.4.0 (freeware) from AfterDawn[^1^], which is an old version of this software but still works well for personal use. If you need more features or want to support the development of AnyDesk, you can buy a license that fits your budget and requirements.

            -

            AnyDesk is a great tool for remote desktop access and control, but it should be used legally and safely. Don't risk your computer and data by using AnyDesk crack and serial key free download 2020. Download AnyDesk from its official website and enjoy its benefits without any hassle.

            - -

            How does AnyDesk work? AnyDesk uses a proprietary technology called DeskRT, which is a video codec that compresses and transfers the image of your desktop in real time. This allows AnyDesk to deliver high-quality graphics and low latency even on low-bandwidth connections. AnyDesk also uses TLS 1.2 encryption and RSA 2048 asymmetric key exchange to ensure the security of your data and sessions.

            -

            How to use AnyDesk? To use AnyDesk, you need to download and install the software on both the host and the client computers. You can also use AnyDesk without installation by running it as a portable application. Once you launch AnyDesk, you will see an address that is unique to your computer. You can share this address with the person you want to connect with or enter their address to initiate the connection. You can then accept or reject the incoming connection request and start using AnyDesk.

            -

            What are the benefits of AnyDesk? AnyDesk offers many benefits for different use cases. For example, if you need to work from home, you can use AnyDesk to access your office computer and use all the applications and files you need. If you need to provide technical support, you can use AnyDesk to remotely troubleshoot and fix any issues on your clients' computers. If you need to collaborate with your team, you can use AnyDesk to share your screen and communicate with them via chat or voice. If you need to access your personal files, you can use AnyDesk to connect to your home computer from anywhere.

            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/DVDFab 11.0.5.5 Crack With Serial Key Free Download 2019.md b/spaces/tioseFevbu/cartoon-converter/scripts/DVDFab 11.0.5.5 Crack With Serial Key Free Download 2019.md deleted file mode 100644 index f6399c174a96d42e4975cb9663c5e5f1f52459f9..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/DVDFab 11.0.5.5 Crack With Serial Key Free Download 2019.md +++ /dev/null @@ -1,47 +0,0 @@ -
            -

            DVDFab 11.0.5.5 Crack: A Powerful and Versatile DVD/Blu-ray/Video Tool

            -

            DVDFab 11.0.5.5 Crack is the latest version of the popular software that allows you to copy, rip, convert and create DVD/Blu-ray/Video discs with ease. Whether you want to backup your favorite movies, compress large files, edit video clips, or burn discs with custom menus, DVDFab 11.0.5.5 Crack can do it all.

            -

            DVDFab 11.0.5.5 Crack With Serial Key Free Download 2019


            Download File ->>> https://urlcod.com/2uHxoM



            -

            In this article, we will show you how to download and install DVDFab 11.0.5.5 Crack for free, and what features and benefits it offers. Read on to find out more.

            -

            How to Download and Install DVDFab 11.0.5.5 Crack for Free

            -

            If you want to enjoy the full functionality of DVDFab 11.0.5.5 Crack without paying a dime, you can follow these simple steps:

            -
              -
            1. Download the DVDFab 11.0.5.5 Crack setup file from the link below.
            2. Extract the zip file and run the setup.exe file as administrator.
            3. Follow the installation wizard and choose the modules you want to install.
            4. Copy the crack file from the crack folder and paste it into the installation directory.
            5. Launch DVDFab 11.0.5.5 Crack and enjoy!
            -

            Note: You may need to disable your antivirus software or firewall before installing or running DVDFab 11.0.5.5 Crack, as it may be detected as a false positive.

            -

            What Features and Benefits Does DVDFab 11.0.5.5 Crack Offer?

            -

            DVDFab 11.0.5.5 Crack is a comprehensive and versatile software that can handle various tasks related to DVD/Blu-ray/Video discs. Here are some of the main features and benefits that DVDFab 11.0.5.5 Crack offers:

            -
              -
            • DVD Copy: You can copy any DVD disc or folder to a blank disc or hard drive with various modes, such as full disc, main movie, split, merge, clone/burn, customize, etc.
            • DVD Ripper: You can rip any DVD disc or folder to any video/audio format or device, such as MP4, MKV, AVI, MP3, iPhone, iPad, Android, etc.
            • DVD Creator: You can create your own DVD disc or folder from any video file or source, such as camcorder videos, downloaded videos, etc.
            • DVD Cinavia Removal: You can remove the Cinavia watermark from any DVD disc or folder that has been infected by this protection technology.
            • Blu-ray Copy: You can copy any Blu-ray disc or folder to a blank disc or hard drive with various modes, such as full disc, main movie, clone/burn, etc.
            • Blu-ray Ripper: You can rip any Blu-ray disc or folder to any video/audio format or device, such as MP4, MKV, AVI, MP3, iPhone, iPad, Android, etc.
            • Blu-ray Creator: You can create your own Blu-ray disc or folder from any video file or source, such as camcorder videos, downloaded videos, etc.
            • Blu-ray Cinavia Removal: You can remove the Cinavia watermark from any Blu-ray disc or folder that has been infected by this protection technology.
            • Video Converter: You can convert any video file or source to any video/audio format or device, such as MP4, MKV, AVI, MP3, iPhone, iPad, Android, etc.
            • Video Editor: You can edit any video file or source with various tools, such as trim, crop, rotate, merge, split, watermark, subtitle, etc.
            • DVD/Blu-ray/Video Toolbox: You can perform various tasks related to DVD/Blu-ray/Video discs.

              -

              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download ((EXCLUSIVE)) Kitab Syamsul Maarif Pdf.md b/spaces/tioseFevbu/cartoon-converter/scripts/Download ((EXCLUSIVE)) Kitab Syamsul Maarif Pdf.md deleted file mode 100644 index f4fd995e6e3eede26c9d64cb38b1593f0c3d2809..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Download ((EXCLUSIVE)) Kitab Syamsul Maarif Pdf.md +++ /dev/null @@ -1,24 +0,0 @@ -
              -

              Download Kitab Syamsul Maarif Pdf: A Guide to the Book of Knowledge and Wisdom

              - -

              Kitab Syamsul Maarif is a famous book of Islamic mysticism and occultism written by Imam Abi Al Abbas Ahmad bin Ali Al Buni, a 13th century scholar and Sufi master. The book contains various topics related to the knowledge and practice of esoteric sciences, such as numerology, astrology, magic, talismans, amulets, invocations, prayers, and secrets of the Quran and the names of Allah.

              - -

              If you are interested in learning more about this book and its contents, you can download Kitab Syamsul Maarif Pdf from various sources online. However, you should be careful and cautious when reading and applying the teachings of this book, as some of them may be considered controversial or even forbidden by some Islamic scholars and authorities. Therefore, you should always consult with reliable and trustworthy experts before delving into the mysteries of this book.

              -

              Download Kitab Syamsul Maarif Pdf


              Download Zip ✓✓✓ https://urlcod.com/2uHvIp



              - -

              In this article, we will provide you with some information about Kitab Syamsul Maarif Pdf, such as its history, its contents, its benefits, and its risks. We will also give you some tips on how to download Kitab Syamsul Maarif Pdf safely and legally from reputable websites. We hope that this article will help you gain a better understanding of this book and its value for your spiritual journey.

              - -

              The History of Kitab Syamsul Maarif

              - -

            Kitab Syamsul Maarif, which means "The Book of the Sun of Knowledge" or "The Book of the Splendor of Knowledge", is one of the most influential works of Islamic mysticism and occultism. It was written by Imam Abi Al Abbas Ahmad bin Ali Al Buni, who was born in Annaba (formerly Bona), Algeria, and died in Cairo, Egypt, in 622 AH (1225 CE).

              - -

              Imam Al Buni was a renowned scholar and Sufi master who belonged to the Shadhiliyya order. He was also a prolific writer who authored more than 100 books on various subjects, such as theology, jurisprudence, grammar, poetry, history, and ethics. However, he is best known for his works on esoteric sciences, especially those related to the science of letters (ilm al-huruf) and the science of squares (ilm al-awfaq).

              - -

              Kitab Syamsul Maarif is considered to be his magnum opus and his most comprehensive work on esoteric sciences. It is divided into four parts: the first part deals with the principles and foundations of esoteric sciences; the second part deals with the secrets of the Quran and the names of Allah; the third part deals with numerology, astrology, magic, talismans, amulets, invocations, prayers, and other practices; and the fourth part deals with miscellaneous topics.

              - -

            Kitab Syamsul Maarif was widely circulated and studied by many scholars and practitioners of esoteric sciences throughout history. It is often read alongside and compared with many other works on esoteric and devotional topics, such as Kitab Shams al-Ma'arif al-Kubra (the expanded recension attributed to Al Buni himself), Kitab al-Bahar al-Mawrud by Ibn Khaldun (d. 1406 CE), Kitab al-Futuhat al-Makkiyya by Ibn Arabi (d. 1240 CE), Kitab al-Shifa' al-Sajjadiyya by Imam Zayn al-Abidin (d. 713 CE), Kitab al-Tawasin by Mansur al-Hallaj (d. 922 CE), Kitab al-Hikam by Ibn Ata Allah (d. 1309 CE), Kitab al-Ibriz by Abd al-Aziz al-Dabbagh (d. 1719 CE), Kitab Jawahir al-Khamsa by Muhammad Ghawth Gwaliyari (d. 1563 CE), Kitab Dalail al-Khayrat by Muhammad al-Jazuli (d. 1465 CE), Kitab Miftah al-Falah by Ibn Ata Allah (d. 1309 CE), Kitab Manba' al-Anwar by Ahmad Zarr

              -

              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Heroes Of Might And Magic 6 For Mac Download _TOP_.md b/spaces/tioseFevbu/cartoon-converter/scripts/Heroes Of Might And Magic 6 For Mac Download _TOP_.md deleted file mode 100644 index 2f1710f4c7f5c208f10b72312c2155f862047045..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Heroes Of Might And Magic 6 For Mac Download _TOP_.md +++ /dev/null @@ -1,23 +0,0 @@ -
              -

              How to Download and Play Heroes Of Might And Magic 6 on Mac

              -

              Heroes Of Might And Magic 6 is a strategy RPG game that was released in 2011 by Ubisoft. The game features an epic story where Angels plot to end an unfinished war with their ancient rivals, the Faceless. The game also includes two original Adventure Packs and a Standalone Expansion featuring the Dark-Elf Dungeon faction.

              -

              If you are a fan of the Might and Magic series and want to play Heroes Of Might And Magic 6 on your Mac, you might be wondering how to do it. Unfortunately, the game is not officially supported for Mac OS, but there are some ways to run it on your device. Here are some options you can try:

              -

              Heroes Of Might And Magic 6 For Mac Download


            Download File: https://urlcod.com/2uHwkP



              -
              -
            • Use a virtual machine. A virtual machine is software that allows you to run another operating system inside your Mac OS. You can use a virtual machine such as Parallels Desktop or VMware Fusion to install Windows on your Mac and then run Heroes Of Might And Magic 6 from there. However, this method might require a lot of disk space and memory, and it might affect the performance of the game.
            • Use a wine wrapper. Wine is software that allows you to run Windows applications on Mac OS without installing Windows. You can use a wine wrapper such as Porting Kit or Wineskin Winery to create a custom app that can run Heroes Of Might And Magic 6 on your Mac. However, this method might not work for all versions of the game, and it might cause some compatibility issues or bugs.
            • Use a cloud gaming service. A cloud gaming service is a platform that allows you to stream games from a remote server to your device. You can use a cloud gaming service such as GeForce Now or Shadow to play Heroes Of Might And Magic 6 on your Mac without installing anything. However, this method might require a stable and fast internet connection, and it might incur some subscription fees or latency.
            -

              As you can see, there are some ways to play Heroes Of Might And Magic 6 on your Mac, but none of them are perfect. You might have to compromise on some aspects such as quality, performance, or cost. If you really want to enjoy the game on your Mac, you might have to wait for an official port from Ubisoft or hope for a remastered version in the future.

              - -

              Gameplay Tips for Heroes Of Might And Magic 6

              -

              Heroes Of Might And Magic 6 is a game that requires strategic thinking and careful planning. You will have to manage your resources, explore the map, build your cities, recruit your units, and lead your heroes into battle. Here are some tips that can help you improve your gameplay and enjoy the game more:

              -
              -
            • Choose your faction and hero wisely. There are six factions in the game: Haven, Inferno, Necropolis, Sanctuary, Stronghold, and Dungeon. Each faction has its own unique units, abilities, and playstyle. You should choose a faction that suits your preferences and strategy. You should also choose a hero that complements your faction and has the skills and spells you need.
            • Develop your cities and heroes efficiently. You should build your cities according to your needs and goals. You should prioritize the buildings that provide you with resources, units, and bonuses. You should also convert enemy cities to your faction if possible, to avoid maintenance costs and gain more benefits. You should level up your heroes by completing quests and fighting enemies. You should choose the abilities and spells that match your hero's class and role. You should also decide whether to follow the blood or tear path, which will affect your reputation and alignment.
            • Explore the map and collect resources. You should explore the map as much as you can, to find hidden treasures, artifacts, mines, and other useful objects. You should collect resources whenever you see them, as they are essential for building your cities and recruiting your units. You should also loot and sabotage enemy mines if you can, to gain an advantage over them. You should also be aware of the control zones, which determine who can access certain objects on the map.
            • Battle smartly and tactically. You should prepare for battle by choosing the right units and formations. You should use the terrain and obstacles to your advantage. You should also use your hero's abilities and spells wisely, to buff your units or debuff your enemies. You should also pay attention to the initiative and morale of your units, as they affect their turn order and performance. You should also know when to retreat or surrender if the battle is not going well.
            -

              These are some basic tips for playing Heroes Of Might And Magic 6 on Mac. If you want to learn more about the game, you can check out some guides and videos online. Have fun!

              -
              -
              \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/dep_util.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/dep_util.py deleted file mode 100644 index 521eb716a5ebbcbc2c59654c4e71c3f0ff1abf26..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/dep_util.py +++ /dev/null @@ -1,25 +0,0 @@ -from distutils.dep_util import newer_group - - -# yes, this is was almost entirely copy-pasted from -# 'newer_pairwise()', this is just another convenience -# function. -def newer_pairwise_group(sources_groups, targets): - """Walk both arguments in parallel, testing if each source group is newer - than its corresponding target. Returns a pair of lists (sources_groups, - targets) where sources is newer than target, according to the semantics - of 'newer_group()'. - """ - if len(sources_groups) != len(targets): - raise ValueError( - "'sources_group' and 'targets' must be the same length") - - # build a pair of lists (sources_groups, targets) where source is newer - n_sources = [] - n_targets = [] - for i in range(len(sources_groups)): - if newer_group(sources_groups[i], targets[i]): - n_sources.append(sources_groups[i]) - n_targets.append(targets[i]) - - return n_sources, n_targets diff --git a/spaces/tomofi/MMOCR/mmocr/models/ner/losses/masked_cross_entropy_loss.py b/spaces/tomofi/MMOCR/mmocr/models/ner/losses/masked_cross_entropy_loss.py deleted file mode 100644 index 034fb29590b9e8d420a2b0537a38c4e92b3d4acd..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/ner/losses/masked_cross_entropy_loss.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch import nn -from torch.nn import CrossEntropyLoss - -from mmocr.models.builder import LOSSES - - -@LOSSES.register_module() -class MaskedCrossEntropyLoss(nn.Module): - """The implementation of masked cross entropy loss. - - The mask has 1 for real tokens and 0 for padding tokens, - which only keep active parts of the cross entropy loss. - Args: - num_labels (int): Number of classes in labels. - ignore_index (int): Specifies a target value that is ignored - and does not contribute to the input gradient. - """ - - def __init__(self, num_labels=None, ignore_index=0): - super().__init__() - self.num_labels = num_labels - self.criterion = CrossEntropyLoss(ignore_index=ignore_index) - - def forward(self, logits, img_metas): - '''Loss forword. - Args: - logits: Model output with shape [N, C]. - img_metas (dict): A dict containing the following keys: - - img (list]): This parameter is reserved. - - labels (list[int]): The labels for each word - of the sequence. - - texts (list): The words of the sequence. - - input_ids (list): The ids for each word of - the sequence. - - attention_mask (list): The mask for each word - of the sequence. The mask has 1 for real tokens - and 0 for padding tokens. Only real tokens are - attended to. - - token_type_ids (list): The tokens for each word - of the sequence. 
- ''' - - labels = img_metas['labels'] - attention_masks = img_metas['attention_masks'] - - # Only keep active parts of the loss - if attention_masks is not None: - active_loss = attention_masks.view(-1) == 1 - active_logits = logits.view(-1, self.num_labels)[active_loss] - active_labels = labels.view(-1)[active_loss] - loss = self.criterion(active_logits, active_labels) - else: - loss = self.criterion( - logits.view(-1, self.num_labels), labels.view(-1)) - return {'loss_cls': loss} diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py deleted file mode 100644 index 1e7bfd6384af2c9eb4894147aaa9a0e6caed3630..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch -from torch import nn -from torch.nn import functional as F - -from maskrcnn_benchmark.modeling.backbone import resnet -from maskrcnn_benchmark.modeling.poolers import Pooler -from maskrcnn_benchmark.modeling.utils import cat -from maskrcnn_benchmark.layers import Conv2d - -def conv3x3(in_planes, out_planes, stride=1, has_bias=False): - "3x3 convolution with padding" - return nn.Conv2d( - in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias - ) - - -def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False): - return nn.Sequential( - conv3x3(in_planes, out_planes, stride), - nn.BatchNorm2d(out_planes), - nn.ReLU(inplace=True), - ) - -class ResNet50Conv5ROIFeatureExtractor(nn.Module): - def __init__(self, config): - super(ResNet50Conv5ROIFeatureExtractor, self).__init__() - - resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES - sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - pooler = Pooler( - output_size=(resolution, resolution), - scales=scales, - sampling_ratio=sampling_ratio, - ) - - stage = resnet.StageSpec(index=4, block_count=3, return_features=False) - head = resnet.ResNetHead( - block_module=config.MODEL.RESNETS.TRANS_FUNC, - stages=(stage,), - num_groups=config.MODEL.RESNETS.NUM_GROUPS, - width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP, - stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1, - stride_init=None, - res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS, - ) - - self.pooler = pooler - self.head = head - - def forward(self, x, proposals): - x = self.pooler(x, proposals) - x = self.head(x) - return x - - -class FPN2MLPFeatureExtractor(nn.Module): - """ - Heads for FPN for classification - """ - - def __init__(self, cfg): - super(FPN2MLPFeatureExtractor, self).__init__() - self.cfg = cfg - resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - pooler = Pooler( - output_size=(resolution, resolution), - scales=scales, - sampling_ratio=sampling_ratio, - ) - if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT': - input_size = (cfg.MODEL.BACKBONE.OUT_CHANNELS + 1) * resolution ** 2 - else: - input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS * resolution ** 2 - representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM - self.pooler = pooler - self.fc6 = nn.Linear(input_size, 
representation_size) - self.fc7 = nn.Linear(representation_size, representation_size) - # if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION': - # self.attention = nn.Sequential( - # conv3x3_bn_relu(cfg.MODEL.BACKBONE.OUT_CHANNELS + 1, 32), - # conv3x3(32, 1), - # nn.Sigmoid() - # ) - # self.attention.apply(self.weights_init) - # if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION': - # self.attention = nn.Sequential( - # Conv2d(cfg.MODEL.BACKBONE.OUT_CHANNELS + 1, 1, 1, 1, 0), - # nn.Sigmoid() - # ) - # for name, param in self.named_parameters(): - # if "bias" in name: - # nn.init.constant_(param, 0) - # elif "weight" in name: - # # Caffe2 implementation uses MSRAFill, which in fact - # # corresponds to kaiming_normal_ in PyTorch - # nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") - - for l in [self.fc6, self.fc7]: - # Caffe2 implementation uses XavierFill, which in fact - # corresponds to kaiming_uniform_ in PyTorch - nn.init.kaiming_uniform_(l.weight, a=1) - nn.init.constant_(l.bias, 0) - - def weights_init(self, m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - nn.init.kaiming_normal_(m.weight.data) - elif classname.find("BatchNorm") != -1: - m.weight.data.fill_(1.0) - m.bias.data.fill_(1e-4) - - def feature_mask(self, x, proposals): - masks = [] - for proposal in proposals: - segmentation_masks = proposal.get_field("masks") - boxes = proposal.bbox.to(torch.device("cpu")) - for segmentation_mask, box in zip(segmentation_masks, boxes): - cropped_mask = segmentation_mask.crop(box) - scaled_mask = cropped_mask.resize((self.cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION, self.cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION)) - mask = scaled_mask.convert(mode="mask") - masks.append(mask) - if len(masks) == 0: - if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT': - x = cat([x, torch.ones((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)], dim=1) - return x - masks = torch.stack(masks, dim=0).to(x.device, dtype=torch.float32) - if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT': - x = cat([x, masks.unsqueeze(1)], dim=1) - return x - if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION': - # x_cat = cat([x, masks.unsqueeze(1)], dim=1) - # attention = self.attention(x_cat) - # x = x * attention - return x - soft_ratio = self.cfg.MODEL.ROI_BOX_HEAD.SOFT_MASKED_FEATURE_RATIO - if soft_ratio > 0: - if soft_ratio < 1.0: - x = x * (soft_ratio + (1 - soft_ratio) * masks.unsqueeze(1)) - else: - x = x * (1.0 + soft_ratio * masks.unsqueeze(1)) - else: - x = x * masks.unsqueeze(1) - return x - - def forward(self, x, proposals): - x = self.pooler(x, proposals) - if self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE: - x = self.feature_mask(x, proposals) - x = x.view(x.size(0), -1) - - x = F.relu(self.fc6(x)) - x = F.relu(self.fc7(x)) - - return x - - -_ROI_BOX_FEATURE_EXTRACTORS = { - "ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor, - "FPN2MLPFeatureExtractor": FPN2MLPFeatureExtractor, -} - - -def make_roi_box_feature_extractor(cfg): - func = _ROI_BOX_FEATURE_EXTRACTORS[cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR] - return func(cfg) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/.dev_scripts/batch_test.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/.dev_scripts/batch_test.py deleted file mode 100644 index c746c7c566e6486dfd6cdf226f2b8e3742a2b0ad..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/.dev_scripts/batch_test.py +++ /dev/null @@ -1,212 +0,0 @@ -""" -some 
instructions -1. Fill the models that needs to be checked in the modelzoo_dict -2. Arange the structure of the directory as follows, the script will find the - corresponding config itself: - model_dir/model_family/checkpoints - e.g.: models/faster_rcnn/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth - models/faster_rcnn/faster_rcnn_r101_fpn_1x_coco_20200130-047c8118.pth -3. Excute the batch_test.sh -""" - -import argparse -import json -import os -import subprocess - -import mmcv -import torch -from mmcv import Config, get_logger -from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, - wrap_fp16_model) - -from mmdet.apis import multi_gpu_test, single_gpu_test -from mmdet.datasets import (build_dataloader, build_dataset, - replace_ImageToTensor) -from mmdet.models import build_detector - -modelzoo_dict = { - 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py': { - 'bbox': 0.374 - }, - 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py': { - 'bbox': 0.382, - 'segm': 0.347 - }, - 'configs/rpn/rpn_r50_fpn_1x_coco.py': { - 'AR@1000': 0.582 - } -} - - -def parse_args(): - parser = argparse.ArgumentParser( - description='The script used for checking the correctness \ - of batch inference') - parser.add_argument('model_dir', help='directory of models') - parser.add_argument( - 'json_out', help='the output json records test information like mAP') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - return args - - -def check_finish(all_model_dict, result_file): - # check if all models are checked - tested_cfgs = [] - with open(result_file, 'r+') as f: - for line in f: - line = json.loads(line) - tested_cfgs.append(line['cfg']) - is_finish = True - for cfg in sorted(all_model_dict.keys()): - if cfg not in tested_cfgs: - return cfg - if is_finish: - with open(result_file, 'a+') as f: - f.write('finished\n') - - -def dump_dict(record_dict, json_out): - # dump result json dict - with open(json_out, 'a+') as f: - mmcv.dump(record_dict, f, file_format='json') - f.write('\n') - - -def main(): - args = parse_args() - # touch the output json if not exist - with open(args.json_out, 'a+'): - pass - # init distributed env first, since logger depends on the dist - # info. 
- if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, backend='nccl') - rank, world_size = get_dist_info() - - logger = get_logger('root') - - # read info of checkpoints and config - result_dict = dict() - for model_family_dir in os.listdir(args.model_dir): - for model in os.listdir( - os.path.join(args.model_dir, model_family_dir)): - # cpt: rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth - # cfg: rpn_r50_fpn_1x_coco.py - cfg = model.split('.')[0][:-18] + '.py' - cfg_path = os.path.join('configs', model_family_dir, cfg) - assert os.path.isfile( - cfg_path), f'{cfg_path} is not valid config path' - cpt_path = os.path.join(args.model_dir, model_family_dir, model) - result_dict[cfg_path] = cpt_path - assert cfg_path in modelzoo_dict, f'please fill the ' \ - f'performance of cfg: {cfg_path}' - cfg = check_finish(result_dict, args.json_out) - cpt = result_dict[cfg] - try: - cfg_name = cfg - logger.info(f'evaluate {cfg}') - record = dict(cfg=cfg, cpt=cpt) - cfg = Config.fromfile(cfg) - # cfg.data.test.ann_file = 'data/val_0_10.json' - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - cfg.model.pretrained = None - if cfg.model.get('neck'): - if isinstance(cfg.model.neck, list): - for neck_cfg in cfg.model.neck: - if neck_cfg.get('rfp_backbone'): - if neck_cfg.rfp_backbone.get('pretrained'): - neck_cfg.rfp_backbone.pretrained = None - elif cfg.model.neck.get('rfp_backbone'): - if cfg.model.neck.rfp_backbone.get('pretrained'): - cfg.model.neck.rfp_backbone.pretrained = None - - # in case the test dataset is concatenated - if isinstance(cfg.data.test, dict): - cfg.data.test.test_mode = True - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - ds_cfg.test_mode = True - - # build the dataloader - samples_per_gpu = 2 # hack test with 2 image per gpu - if samples_per_gpu > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.test.pipeline = replace_ImageToTensor( - cfg.data.test.pipeline) - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader( - dataset, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=distributed, - shuffle=False) - - # build the model and load checkpoint - cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) - if fp16_cfg is not None: - wrap_fp16_model(model) - - checkpoint = load_checkpoint(model, cpt, map_location='cpu') - # old versions did not save class info in checkpoints, - # this walkaround is for backward compatibility - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] - else: - model.CLASSES = dataset.CLASSES - - if not distributed: - model = MMDataParallel(model, device_ids=[0]) - outputs = single_gpu_test(model, data_loader) - else: - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False) - outputs = multi_gpu_test(model, data_loader, 'tmp') - if rank == 0: - ref_mAP_dict = modelzoo_dict[cfg_name] - metrics = list(ref_mAP_dict.keys()) - metrics = [ - m if m != 'AR@1000' else 'proposal_fast' for m in metrics - ] - eval_results = dataset.evaluate(outputs, metrics) - print(eval_results) - for metric in metrics: - if metric == 'proposal_fast': - ref_metric = modelzoo_dict[cfg_name]['AR@1000'] - eval_metric = eval_results['AR@1000'] - else: - ref_metric = modelzoo_dict[cfg_name][metric] - eval_metric = 
eval_results[f'{metric}_mAP'] - if abs(ref_metric - eval_metric) > 0.003: - record['is_normal'] = False - dump_dict(record, args.json_out) - check_finish(result_dict, args.json_out) - except Exception as e: - logger.error(f'rank: {rank} test fail with error: {e}') - record['terminate'] = True - dump_dict(record, args.json_out) - check_finish(result_dict, args.json_out) - # hack there to throw some error to prevent hang out - subprocess.call('xxx') - - -if __name__ == '__main__': - main() diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/_base_/datasets/coco_detection.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/_base_/datasets/coco_detection.py deleted file mode 100644 index 149f590bb45fa65c29fd4c005e4a237d7dd2e117..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/_base_/datasets/coco_detection.py +++ /dev/null @@ -1,49 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/changelog.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/changelog.md deleted file mode 100644 index bf8bf3c1abb91bdf075c9a3d754ec5b31a33fd8c..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/changelog.md +++ /dev/null @@ -1,795 +0,0 @@ -## Changelog - -### v2.11. 
(01/4/2021) - -**Highlights** - -- Support new method: [Localization Distillation for Object Detection](https://arxiv.org/pdf/2102.12252.pdf) -- Support Pytorch2ONNX with batch inference and dynamic shape - -**New Features** - -- Support localization distillation for object detection (#4758) -- Support Pytorch2ONNX with batch inference and dynamic shape for Faster-RCNN and mainstream one-stage detectors (#4796) -- Add Deformable Conv2d TensorRT plugin (#858) - -**Improvements** - -- Support batch inference in head of RetinaNet (#4699) -- Add batch dimension in second stage of Faster-RCNN (#4785) -- Support batch inference in bbox coder (#4721) -- Add check for `ann_ids` in `COCODataset` to ensure it is unique (#4789) -- support for showing the FPN results (#4716) -- support dynamic shape for grid_anchor (#4684) -- Support automatic statistical evaluation results and export them to EXCEL (#4693) -- Move pycocotools version check to when it is used (#4880) - -**Bug Fixes** - -- Fix a bug of TridentNet when doing the batch inference (#4717) -- Fix a bug of Pytorch2ONNX in FASF (#4735) -- Fix a bug when show the image with float type (#4732) - -### v2.10.0 (01/03/2021) - -#### Highlights - -- Support new methods: [FPG](https://arxiv.org/abs/2004.03580) -- Support ONNX2TensorRT for SSD, FSAF, FCOS, YOLOv3, and Faster R-CNN. - -#### New Features - -- Support ONNX2TensorRT for SSD, FSAF, FCOS, YOLOv3, and Faster R-CNN (#4569) -- Support [Feature Pyramid Grids (FPG)](https://arxiv.org/abs/2004.03580) (#4645) -- Support video demo (#4420) -- Add seed option for sampler (#4665) -- Support to customize type of runner (#4570, #4669) -- Support synchronizing BN buffer in `EvalHook` (#4582) -- Add script for GIF demo (#4573) - -#### Bug Fixes - -- Fix ConfigDict AttributeError and add Colab link (#4643) -- Avoid crash in empty gt training of GFL head (#4631) -- Fix `iou_thrs` bug in RPN evaluation (#4581) -- Fix syntax error of config when upgrading model version (#4584) - -#### Improvements - -- Refactor unit test file structures (#4600) -- Refactor nms config (#4636) -- Get loading pipeline by checking the class directly rather than through config strings (#4619) -- Add doctests for mask target generation and mask structures (#4614) -- Use deep copy when copying pipeline arguments (#4621) -- Update documentations (#4642, #4650, #4620, #4630) -- Remove redundant code calling `import_modules_from_strings` (#4601) -- Clean deprecated FP16 API (#4571) -- Check whether `CLASSES` is correctly initialized in the intialization of `XMLDataset` (#4555) -- Support batch inference in the inference API (#4462, #4526) -- Clean deprecated warning and fix 'meta' error (#4695) - -### v2.9.0 (01/02/2021) - -#### Highlights - -- Support new methods: [SCNet](https://arxiv.org/abs/2012.10150), [Sparse R-CNN](https://arxiv.org/abs/2011.12450) -- Move `train_cfg` and `test_cfg` into model in configs -- Support to visualize results based on prediction quality - -#### New Features - -- Support [SCNet](https://arxiv.org/abs/2012.10150) (#4356) -- Support [Sparse R-CNN](https://arxiv.org/abs/2011.12450) (#4219) -- Support evaluate mAP by multiple IoUs (#4398) -- Support concatenate dataset for testing (#4452) -- Support to visualize results based on prediction quality (#4441) -- Add ONNX simplify option to Pytorch2ONNX script (#4468) -- Add hook for checking compatibility of class numbers in heads and datasets (#4508) - -#### Bug Fixes - -- Fix CPU inference bug of Cascade RPN (#4410) -- Fix NMS error of CornerNet when there is 
no prediction box (#4409) -- Fix TypeError in CornerNet inference (#4411) -- Fix bug of PAA when training with background images (#4391) -- Fix the error that the window data is not destroyed when `out_file is not None` and `show==False` (#4442) -- Fix order of NMS `score_factor` that will decrease the performance of YOLOv3 (#4473) -- Fix bug in HTC TTA when the number of detection boxes is 0 (#4516) -- Fix resize error in mask data structures (#4520) - -#### Improvements - -- Allow to customize classes in LVIS dataset (#4382) -- Add tutorials for building new models with existing datasets (#4396) -- Add CPU compatibility information in documentation (#4405) -- Add documentation of deprecated `ImageToTensor` for batch inference (#4408) -- Add more details in documentation for customizing dataset (#4430) -- Switch `imshow_det_bboxes` visualization backend from OpenCV to Matplotlib (#4389) -- Deprecate `ImageToTensor` in `image_demo.py` (#4400) -- Move train_cfg/test_cfg into model (#4347, #4489) -- Update docstring for `reg_decoded_bbox` option in bbox heads (#4467) -- Update dataset information in documentation (#4525) -- Release pre-trained R50 and R101 PAA detectors with multi-scale 3x training schedules (#4495) -- Add guidance for speed benchmark (#4537) - -### v2.8.0 (04/01/2021) - -#### Highlights - -- Support new methods: [Cascade RPN](https://arxiv.org/abs/1909.06720), [TridentNet](https://arxiv.org/abs/1901.01892) - -#### New Features - -- Support [Cascade RPN](https://arxiv.org/abs/1909.06720) (#1900) -- Support [TridentNet](https://arxiv.org/abs/1901.01892) (#3313) - -#### Bug Fixes - -- Fix bug of show result in async_benchmark (#4367) -- Fix scale factor in MaskTestMixin (#4366) -- Fix but when returning indices in `multiclass_nms` (#4362) -- Fix bug of empirical attention in resnext backbone error (#4300) -- Fix bug of `img_norm_cfg` in FCOS-HRNet models with updated performance and models (#4250) -- Fix invalid checkpoint and log in Mask R-CNN models on Cityscapes dataset (#4287) -- Fix bug in distributed sampler when dataset is too small (#4257) -- Fix bug of 'PAFPN has no attribute extra_convs_on_inputs' (#4235) - -#### Improvements - -- Update model url from aws to aliyun (#4349) -- Update ATSS for PyTorch 1.6+ (#4359) -- Update script to install ruby in pre-commit installation (#4360) -- Delete deprecated `mmdet.ops` (#4325) -- Refactor hungarian assigner for more general usage in Sparse R-CNN (#4259) -- Handle scipy import in DETR to reduce package dependencies (#4339) -- Update documentation of usages for config options after MMCV (1.2.3) supports overriding list in config (#4326) -- Update pre-train models of faster rcnn trained on COCO subsets (#4307) -- Avoid zero or too small value for beta in Dynamic R-CNN (#4303) -- Add doccumentation for Pytorch2ONNX (#4271) -- Add deprecated warning FPN arguments (#4264) -- Support returning indices of kept bboxes when using nms (#4251) -- Update type and device requirements when creating tensors `GFLHead` (#4210) -- Update device requirements when creating tensors in `CrossEntropyLoss` (#4224) - -### v2.7.0 (30/11/2020) - -- Support new method: [DETR](https://arxiv.org/abs/2005.12872), [ResNest](https://arxiv.org/abs/2004.08955), Faster R-CNN DC5. -- Support YOLO, Mask R-CNN, and Cascade R-CNN models exportable to ONNX. 
- -#### New Features - -- Support [DETR](https://arxiv.org/abs/2005.12872) (#4201, #4206) -- Support to link the best checkpoint in training (#3773) -- Support to override config through options in inference.py (#4175) -- Support YOLO, Mask R-CNN, and Cascade R-CNN models exportable to ONNX (#4087, #4083) -- Support [ResNeSt](https://arxiv.org/abs/2004.08955) backbone (#2959) -- Support unclip border bbox regression (#4076) -- Add tpfp func in evaluating AP (#4069) -- Support mixed precision training of SSD detector with other backbones (#4081) -- Add Faster R-CNN DC5 models (#4043) - -#### Bug Fixes - -- Fix bug of `gpu_id` in distributed training mode (#4163) -- Support Albumentations with version higher than 0.5 (#4032) -- Fix num_classes bug in faster rcnn config (#4088) -- Update code in docs/2_new_data_model.md (#4041) - -#### Improvements - -- Ensure DCN offset to have similar type as features in VFNet (#4198) -- Add config links in README files of models (#4190) -- Add tutorials for loss conventions (#3818) -- Add solution to installation issues in 30-series GPUs (#4176) -- Update docker version in get_started.md (#4145) -- Add model statistics and polish some titles in configs README (#4140) -- Clamp neg probability in FreeAnchor (#4082) -- Speed up expanding large images (#4089) -- Fix Pytorch 1.7 incompatibility issues (#4103) -- Update trouble shooting page to resolve segmentation fault (#4055) -- Update aLRP-Loss in project page (#4078) -- Clean duplicated `reduce_mean` function (#4056) -- Refactor Q&A (#4045) - -### v2.6.0 (1/11/2020) - -- Support new method: [VarifocalNet](https://arxiv.org/abs/2008.13367). -- Refactored documentation with more tutorials. - -#### New Features - -- Support GIoU calculation in `BboxOverlaps2D`, and re-implement `giou_loss` using `bbox_overlaps` (#3936) -- Support random sampling in CPU mode (#3948) -- Support VarifocalNet (#3666, #4024) - -#### Bug Fixes - -- Fix SABL validating bug in Cascade R-CNN (#3913) -- Avoid division by zero in PAA head when num_pos=0 (#3938) -- Fix temporary directory bug of multi-node testing error (#4034, #4017) -- Fix `--show-dir` option in test script (#4025) -- Fix GA-RetinaNet r50 model url (#3983) -- Update code in docs and fix broken urls (#3947) - -#### Improvements - -- Refactor pytorch2onnx API into `mmdet.core.export` and use `generate_inputs_and_wrap_model` for pytorch2onnx (#3857, #3912) -- Update RPN upgrade scripts for v2.5.0 compatibility (#3986) -- Use mmcv `tensor2imgs` (#4010) -- Update test robustness (#4000) -- Update trouble shooting page (#3994) -- Accelerate PAA training speed (#3985) -- Support batch_size > 1 in validation (#3966) -- Use RoIAlign implemented in MMCV for inference in CPU mode (#3930) -- Documentation refactoring (#4031) - -### v2.5.0 (5/10/2020) - -#### Highlights - -- Support new methods: [YOLACT](https://arxiv.org/abs/1904.02689), [CentripetalNet](https://arxiv.org/abs/2003.09119). -- Add more documentations for easier and more clear usage. - -#### Backwards Incompatible Changes - -**FP16 related methods are imported from mmcv instead of mmdet. (#3766, #3822)** -Mixed precision training utils in `mmdet.core.fp16` are moved to `mmcv.runner`, including `force_fp32`, `auto_fp16`, `wrap_fp16_model`, and `Fp16OptimizerHook`. A deprecation warning will be raised if users attempt to import those methods from `mmdet.core.fp16`, and will be finally removed in V2.10.0. - -**[0, N-1] represents foreground classes and N indicates background classes for all models. 
(#3221)** -Before v2.5.0, the background label for RPN is 0, and N for other heads. Now the behavior is consistent for all models. Thus `self.background_labels` in `dense_heads` is removed and all heads use `self.num_classes` to indicate the class index of background labels. -This change has no effect on the pre-trained models in the v2.x model zoo, but will affect the training of all models with RPN heads. Two-stage detectors whose RPN head uses softmax will be affected because the order of categories is changed. - -**Only call `get_subset_by_classes` when `test_mode=True` and `self.filter_empty_gt=True` (#3695)** -Function `get_subset_by_classes` in dataset is refactored and only filters out images when `test_mode=True` and `self.filter_empty_gt=True`. - In the original implementation, `get_subset_by_classes` is not related to the flag `self.filter_empty_gt` and will only be called when the classes is set during initialization no matter `test_mode` is `True` or `False`. This brings ambiguous behavior and potential bugs in many cases. After v2.5.0, if `filter_empty_gt=False`, no matter whether the classes are specified in a dataset, the dataset will use all the images in the annotations. If `filter_empty_gt=True` and `test_mode=True`, no matter whether the classes are specified, the dataset will call ``get_subset_by_classes` to check the images and filter out images containing no GT boxes. Therefore, the users should be responsible for the data filtering/cleaning process for the test dataset. - -#### New Features - -- Test time augmentation for single stage detectors (#3844, #3638) -- Support to show the name of experiments during training (#3764) -- Add `Shear`, `Rotate`, `Translate` Augmentation (#3656, #3619, #3687) -- Add image-only transformations including `Constrast`, `Equalize`, `Color`, and `Brightness`. 
(#3643) -- Support [YOLACT](https://arxiv.org/abs/1904.02689) (#3456) -- Support [CentripetalNet](https://arxiv.org/abs/2003.09119) (#3390) -- Support PyTorch 1.6 in docker (#3905) - -#### Bug Fixes - -- Fix the bug of training ATSS when there is no ground truth boxes (#3702) -- Fix the bug of using Focal Loss when there is `num_pos` is 0 (#3702) -- Fix the label index mapping in dataset browser (#3708) -- Fix Mask R-CNN training stuck problem when ther is no positive rois (#3713) -- Fix the bug of `self.rpn_head.test_cfg` in `RPNTestMixin` by using `self.rpn_head` in rpn head (#3808) -- Fix deprecated `Conv2d` from mmcv.ops (#3791) -- Fix device bug in RepPoints (#3836) -- Fix SABL validating bug (#3849) -- Use `https://download.openmmlab.com/mmcv/dist/index.html` for installing MMCV (#3840) -- Fix nonzero in NMS for PyTorch 1.6.0 (#3867) -- Fix the API change bug of PAA (#3883) -- Fix typo in bbox_flip (#3886) -- Fix cv2 import error of ligGL.so.1 in Dockerfile (#3891) - -#### Improvements - -- Change to use `mmcv.utils.collect_env` for collecting environment information to avoid duplicate codes (#3779) -- Update checkpoint file names to v2.0 models in documentation (#3795) -- Update tutorials for changing runtime settings (#3778), modifing loss (#3777) -- Improve the function of `simple_test_bboxes` in SABL (#3853) -- Convert mask to bool before using it as img's index for robustness and speedup (#3870) -- Improve documentation of modules and dataset customization (#3821) - -### v2.4.0 (5/9/2020) - -**Highlights** - -- Fix lots of issues/bugs and reorganize the trouble shooting page -- Support new methods [SABL](https://arxiv.org/abs/1912.04260), [YOLOv3](https://arxiv.org/abs/1804.02767), and [PAA Assign](https://arxiv.org/abs/2007.08103) -- Support Batch Inference -- Start to publish `mmdet` package to PyPI since v2.3.0 -- Switch model zoo to download.openmmlab.com - -**Backwards Incompatible Changes** - -- Support Batch Inference (#3564, #3686, #3705): Since v2.4.0, MMDetection could inference model with multiple images in a single GPU. - This change influences all the test APIs in MMDetection and downstream codebases. To help the users migrate their code, we use `replace_ImageToTensor` (#3686) to convert legacy test data pipelines during dataset initialization. -- Support RandomFlip with horizontal/vertical/diagonal direction (#3608): Since v2.4.0, MMDetection supports horizontal/vertical/diagonal flip in the data augmentation. This influences bounding box, mask, and image transformations in data augmentation process and the process that will map those data back to the original format. -- Migrate to use `mmlvis` and `mmpycocotools` for COCO and LVIS dataset (#3727). The APIs are fully compatible with the original `lvis` and `pycocotools`. Users need to uninstall the existing pycocotools and lvis packages in their environment first and install `mmlvis` & `mmpycocotools`. 
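To make the RandomFlip change above concrete, the sketch below shows how a training pipeline entry might request a flip direction after v2.4.0. It follows the usual MMDetection config style, but treat the exact field names as assumptions and check the transform's docstring for your version.

```python
# Sketch of a data-augmentation pipeline entry after the v2.4.0 flip change.
# Field names mirror the usual MMDetection config style; verify against your version.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # Before v2.4.0 only horizontal flipping was available; now a direction
    # (or a list of directions) can be requested explicitly.
    dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'),
    # dict(type='RandomFlip', flip_ratio=0.5, direction='vertical'),
    # dict(type='RandomFlip', flip_ratio=0.5, direction='diagonal'),
]
```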
- -**Bug Fixes** - -- Fix default mean/std for onnx (#3491) -- Fix coco evaluation and add metric items (#3497) -- Fix typo for install.md (#3516) -- Fix atss when sampler per gpu is 1 (#3528) -- Fix import of fuse_conv_bn (#3529) -- Fix bug of gaussian_target, update unittest of heatmap (#3543) -- Fixed VOC2012 evaluate (#3553) -- Fix scale factor bug of rescale (#3566) -- Fix with_xxx_attributes in base detector (#3567) -- Fix boxes scaling when number is 0 (#3575) -- Fix rfp check when neck config is a list (#3591) -- Fix import of fuse conv bn in benchmark.py (#3606) -- Fix webcam demo (#3634) -- Fix typo and itemize issues in tutorial (#3658) -- Fix error in distributed training when some levels of FPN are not assigned with bounding boxes (#3670) -- Fix the width and height orders of stride in valid flag generation (#3685) -- Fix weight initialization bug in Res2Net DCN (#3714) -- Fix bug in OHEMSampler (#3677) - -**New Features** - -- Support Cutout augmentation (#3521) -- Support evaluation on multiple datasets through ConcatDataset (#3522) -- Support [PAA assign](https://arxiv.org/abs/2007.08103) #(3547) -- Support eval metric with pickle results (#3607) -- Support [YOLOv3](https://arxiv.org/abs/1804.02767) (#3083) -- Support [SABL](https://arxiv.org/abs/1912.04260) (#3603) -- Support to publish to Pypi in github-action (#3510) -- Support custom imports (#3641) - -**Improvements** - -- Refactor common issues in documentation (#3530) -- Add pytorch 1.6 to CI config (#3532) -- Add config to runner meta (#3534) -- Add eval-option flag for testing (#3537) -- Add init_eval to evaluation hook (#3550) -- Add include_bkg in ClassBalancedDataset (#3577) -- Using config's loading in inference_detector (#3611) -- Add ATSS ResNet-101 models in model zoo (#3639) -- Update urls to download.openmmlab.com (#3665) -- Support non-mask training for CocoDataset (#3711) - -### v2.3.0 (5/8/2020) - -**Highlights** - -- The CUDA/C++ operators have been moved to `mmcv.ops`. For backward compatibility `mmdet.ops` is kept as warppers of `mmcv.ops`. -- Support new methods [CornerNet](https://arxiv.org/abs/1808.01244), [DIOU](https://arxiv.org/abs/1911.08287)/[CIOU](https://arxiv.org/abs/2005.03572) loss, and new dataset: [LVIS V1](https://arxiv.org/abs/1908.03195) -- Provide more detailed colab training tutorials and more complete documentation. -- Support to convert RetinaNet from Pytorch to ONNX. 
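Because the CUDA/C++ operators now live in `mmcv.ops` (with `mmdet.ops` kept only as backward-compatible wrappers), new code should import them from MMCV directly. A minimal sketch, assuming the `mmcv.ops.nms` interface of that era (boxes, scores, IoU threshold):

```python
# Sketch: call NMS from mmcv.ops instead of the deprecated mmdet.ops wrappers.
import torch
from mmcv.ops import nms  # requires mmcv-full with compiled ops

boxes = torch.tensor([[0, 0, 10, 10],
                      [1, 1, 11, 11],
                      [50, 50, 60, 60]], dtype=torch.float32)
scores = torch.tensor([0.9, 0.8, 0.7])

# Returns the kept detections (boxes with scores appended) and their indices.
dets, keep_inds = nms(boxes, scores, iou_threshold=0.5)
print(dets.shape, keep_inds)
```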
- -**Bug Fixes** - -- Fix the model initialization bug of DetectoRS (#3187) -- Fix the bug of module names in NASFCOSHead (#3205) -- Fix the filename bug in publish_model.py (#3237) -- Fix the dimensionality bug when `inside_flags.any()` is `False` in dense heads (#3242) -- Fix the bug of forgetting to pass flip directions in `MultiScaleFlipAug` (#3262) -- Fixed the bug caused by default value of `stem_channels` (#3333) -- Fix the bug of model checkpoint loading for CPU inference (#3318, #3316) -- Fix topk bug when box number is smaller than the expected topk number in ATSSAssigner (#3361) -- Fix the gt priority bug in center_region_assigner.py (#3208) -- Fix NaN issue of iou calculation in iou_loss.py (#3394) -- Fix the bug that `iou_thrs` is not actually used during evaluation in coco.py (#3407) -- Fix test-time augmentation of RepPoints (#3435) -- Fix runtimeError caused by incontiguous tensor in Res2Net+DCN (#3412) - -**New Features** - -- Support [CornerNet](https://arxiv.org/abs/1808.01244) (#3036) -- Support [DIOU](https://arxiv.org/abs/1911.08287)/[CIOU](https://arxiv.org/abs/2005.03572) loss (#3151) -- Support [LVIS V1](https://arxiv.org/abs/1908.03195) dataset (#) -- Support customized hooks in training (#3395) -- Support fp16 training of generalized focal loss (#3410) -- Support to convert RetinaNet from Pytorch to ONNX (#3075) - -**Improvements** - -- Support to process ignore boxes in ATSS assigner (#3082) -- Allow to crop images without ground truth in `RandomCrop` (#3153) -- Enable the the `Accuracy` module to set threshold (#3155) -- Refactoring unit tests (#3206) -- Unify the training settings of `to_float32` and `norm_cfg` in RegNets configs (#3210) -- Add colab training tutorials for beginners (#3213, #3273) -- Move CUDA/C++ operators into `mmcv.ops` and keep `mmdet.ops` as warppers for backward compatibility (#3232)(#3457) -- Update installation scripts in documentation (#3290) and dockerfile (#3320) -- Support to set image resize backend (#3392) -- Remove git hash in version file (#3466) -- Check mmcv version to force version compatibility (#3460) - -### v2.2.0 (1/7/2020) - -**Highlights** - -- Support new methods: [DetectoRS](https://arxiv.org/abs/2006.02334), [PointRend](https://arxiv.org/abs/1912.08193), [Generalized Focal Loss](https://arxiv.org/abs/2006.04388), [Dynamic R-CNN](https://arxiv.org/abs/2004.06002) - -**Bug Fixes** - -- Fix FreeAnchor when no gt in image (#3176) -- Clean up deprecated usage of `register_module()` (#3092, #3161) -- Fix pretrain bug in NAS FCOS (#3145) -- Fix `num_classes` in SSD (#3142) -- Fix FCOS warmup (#3119) -- Fix `rstrip` in `tools/publish_model.py` -- Fix `flip_ratio` default value in RandomFLip pipeline (#3106) -- Fix cityscapes eval with ms_rcnn (#3112) -- Fix RPN softmax (#3056) -- Fix filename of LVIS@v0.5 (#2998) -- Fix nan loss by filtering out-of-frame gt_bboxes in COCO (#2999) -- Fix bug in FSAF (#3018) -- Add FocalLoss `num_classes` check (#2964) -- Fix PISA Loss when there are no gts (#2992) -- Avoid nan in `iou_calculator` (#2975) -- Prevent possible bugs in loading and transforms caused by shallow copy (#2967) - -**New Features** - -- Add DetectoRS (#3064) -- Support Generalize Focal Loss (#3097) -- Support PointRend (#2752) -- Support Dynamic R-CNN (#3040) -- Add DeepFashion dataset (#2968) -- Implement FCOS training tricks (#2935) -- Use BaseDenseHead as base class for anchor-base heads (#2963) -- Add `with_cp` for BasicBlock (#2891) -- Add `stem_channels` argument for ResNet (#2954) - -**Improvements** - -- Add 
anchor free base head (#2867) -- Migrate to github action (#3137) -- Add docstring for datasets, pipelines, core modules and methods (#3130, #3125, #3120) -- Add VOC benchmark (#3060) -- Add `concat` mode in GRoI (#3098) -- Remove cmd arg `autorescale-lr` (#3080) -- Use `len(data['img_metas'])` to indicate `num_samples` (#3073, #3053) -- Switch to EpochBasedRunner (#2976) - -### v2.1.0 (8/6/2020) - -**Highlights** - -- Support new backbones: [RegNetX](https://arxiv.org/abs/2003.13678), [Res2Net](https://arxiv.org/abs/1904.01169) -- Support new methods: [NASFCOS](https://arxiv.org/abs/1906.04423), [PISA](https://arxiv.org/abs/1904.04821), [GRoIE](https://arxiv.org/abs/2004.13665) -- Support new dataset: [LVIS](https://arxiv.org/abs/1908.03195) - -**Bug Fixes** - -- Change the CLI argument `--validate` to `--no-validate` to enable validation after training epochs by default. (#2651) -- Add missing cython to docker file (#2713) -- Fix bug in nms cpu implementation (#2754) -- Fix bug when showing mask results (#2763) -- Fix gcc requirement (#2806) -- Fix bug in async test (#2820) -- Fix mask encoding-decoding bugs in test API (#2824) -- Fix bug in test time augmentation (#2858, #2921, #2944) -- Fix a typo in comment of apis/train (#2877) -- Fix the bug of returning None when no gt bboxes are in the original image in `RandomCrop`. Fix the bug that misses to handle `gt_bboxes_ignore`, `gt_label_ignore`, and `gt_masks_ignore` in `RandomCrop`, `MinIoURandomCrop` and `Expand` modules. (#2810) -- Fix bug of `base_channels` of regnet (#2917) -- Fix the bug of logger when loading pre-trained weights in base detector (#2936) - -**New Features** - -- Add IoU models (#2666) -- Add colab demo for inference -- Support class agnostic nms (#2553) -- Add benchmark gathering scripts for development only (#2676) -- Add mmdet-based project links (#2736, #2767, #2895) -- Add config dump in training (#2779) -- Add ClassBalancedDataset (#2721) -- Add res2net backbone (#2237) -- Support RegNetX models (#2710) -- Use `mmcv.FileClient` to support different storage backends (#2712) -- Add ClassBalancedDataset (#2721) -- Code Release: Prime Sample Attention in Object Detection (CVPR 2020) (#2626) -- Implement NASFCOS (#2682) -- Add class weight in CrossEntropyLoss (#2797) -- Support LVIS dataset (#2088) -- Support GRoIE (#2584) - -**Improvements** - -- Allow different x and y strides in anchor heads. (#2629) -- Make FSAF loss more robust to no gt (#2680) -- Compute pure inference time instead (#2657) and update inference speed (#2730) -- Avoided the possibility that a patch with 0 area is cropped. (#2704) -- Add warnings when deprecated `imgs_per_gpu` is used. (#2700) -- Add a mask rcnn example for config (#2645) -- Update model zoo (#2762, #2866, #2876, #2879, #2831) -- Add `ori_filename` to img_metas and use it in test show-dir (#2612) -- Use `img_fields` to handle multiple images during image transform (#2800) -- Add upsample_cfg support in FPN (#2787) -- Add `['img']` as default `img_fields` for back compatibility (#2809) -- Rename the pretrained model from `open-mmlab://resnet50_caffe` and `open-mmlab://resnet50_caffe_bgr` to `open-mmlab://detectron/resnet50_caffe` and `open-mmlab://detectron2/resnet50_caffe`. 
(#2832) -- Added sleep(2) in test.py to reduce hanging problem (#2847) -- Support `c10::half` in CARAFE (#2890) -- Improve documentations (#2918, #2714) -- Use optimizer constructor in mmcv and clean the original implementation in `mmdet.core.optimizer` (#2947) - -### v2.0.0 (6/5/2020) - -In this release, we made lots of major refactoring and modifications. - -1. **Faster speed**. We optimize the training and inference speed for common models, achieving up to 30% speedup for training and 25% for inference. Please refer to [model zoo](model_zoo.md#comparison-with-detectron2) for details. - -2. **Higher performance**. We change some default hyperparameters with no additional cost, which leads to a gain of performance for most models. Please refer to [compatibility](compatibility.md#training-hyperparameters) for details. - -3. **More documentation and tutorials**. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmdetection.readthedocs.io/en/latest/). - -4. **Support PyTorch 1.5**. The support for 1.1 and 1.2 is dropped, and we switch to some new APIs. - -5. **Better configuration system**. Inheritance is supported to reduce the redundancy of configs. - -6. **Better modular design**. Towards the goal of simplicity and flexibility, we simplify some encapsulation while add more other configurable modules like BBoxCoder, IoUCalculator, OptimizerConstructor, RoIHead. Target computation is also included in heads and the call hierarchy is simpler. - -7. Support new methods: [FSAF](https://arxiv.org/abs/1903.00621) and PAFPN (part of [PAFPN](https://arxiv.org/abs/1803.01534)). - -**Breaking Changes** -Models training with MMDetection 1.x are not fully compatible with 2.0, please refer to the [compatibility doc](compatibility.md) for the details and how to migrate to the new version. - -**Improvements** - -- Unify cuda and cpp API for custom ops. (#2277) -- New config files with inheritance. (#2216) -- Encapsulate the second stage into RoI heads. (#1999) -- Refactor GCNet/EmpericalAttention into plugins. (#2345) -- Set low quality match as an option in IoU-based bbox assigners. (#2375) -- Change the codebase's coordinate system. (#2380) -- Refactor the category order in heads. 0 means the first positive class instead of background now. (#2374) -- Add bbox sampler and assigner registry. (#2419) -- Speed up the inference of RPN. (#2420) -- Add `train_cfg` and `test_cfg` as class members in all anchor heads. (#2422) -- Merge target computation methods into heads. (#2429) -- Add bbox coder to support different bbox encoding and losses. (#2480) -- Unify the API for regression loss. (#2156) -- Refactor Anchor Generator. (#2474) -- Make `lr` an optional argument for optimizers. (#2509) -- Migrate to modules and methods in MMCV. (#2502, #2511, #2569, #2572) -- Support PyTorch 1.5. (#2524) -- Drop the support for Python 3.5 and use F-string in the codebase. (#2531) - -**Bug Fixes** - -- Fix the scale factors for resized images without keep the aspect ratio. (#2039) -- Check if max_num > 0 before slicing in NMS. (#2486) -- Fix Deformable RoIPool when there is no instance. (#2490) -- Fix the default value of assigned labels. (#2536) -- Fix the evaluation of Cityscapes. (#2578) - -**New Features** - -- Add deep_stem and avg_down option to ResNet, i.e., support ResNetV1d. (#2252) -- Add L1 loss. (#2376) -- Support both polygon and bitmap for instance masks. (#2353, #2540) -- Support CPU mode for inference. 
(#2385) -- Add optimizer constructor for complicated configuration of optimizers. (#2397, #2488) -- Implement PAFPN. (#2392) -- Support empty tensor input for some modules. (#2280) -- Support for custom dataset classes without overriding it. (#2408, #2443) -- Support to train subsets of coco dataset. (#2340) -- Add iou_calculator to potentially support more IoU calculation methods. (2405) -- Support class wise mean AP (was removed in the last version). (#2459) -- Add option to save the testing result images. (#2414) -- Support MomentumUpdaterHook. (#2571) -- Add a demo to inference a single image. (#2605) - -### v1.1.0 (24/2/2020) - -**Highlights** - -- Dataset evaluation is rewritten with a unified api, which is used by both evaluation hooks and test scripts. -- Support new methods: [CARAFE](https://arxiv.org/abs/1905.02188). - -**Breaking Changes** - -- The new MMDDP inherits from the official DDP, thus the `__init__` api is changed to be the same as official DDP. -- The `mask_head` field in HTC config files is modified. -- The evaluation and testing script is updated. -- In all transforms, instance masks are stored as a numpy array shaped (n, h, w) instead of a list of (h, w) arrays, where n is the number of instances. - -**Bug Fixes** - -- Fix IOU assigners when ignore_iof_thr > 0 and there is no pred boxes. (#2135) -- Fix mAP evaluation when there are no ignored boxes. (#2116) -- Fix the empty RoI input for Deformable RoI Pooling. (#2099) -- Fix the dataset settings for multiple workflows. (#2103) -- Fix the warning related to `torch.uint8` in PyTorch 1.4. (#2105) -- Fix the inference demo on devices other than gpu:0. (#2098) -- Fix Dockerfile. (#2097) -- Fix the bug that `pad_val` is unused in Pad transform. (#2093) -- Fix the albumentation transform when there is no ground truth bbox. (#2032) - -**Improvements** - -- Use torch instead of numpy for random sampling. (#2094) -- Migrate to the new MMDDP implementation in MMCV v0.3. (#2090) -- Add meta information in logs. (#2086) -- Rewrite Soft NMS with pytorch extension and remove cython as a dependency. (#2056) -- Rewrite dataset evaluation. (#2042, #2087, #2114, #2128) -- Use numpy array for masks in transforms. (#2030) - -**New Features** - -- Implement "CARAFE: Content-Aware ReAssembly of FEatures". (#1583) -- Add `worker_init_fn()` in data_loader when seed is set. (#2066, #2111) -- Add logging utils. (#2035) - -### v1.0.0 (30/1/2020) - -This release mainly improves the code quality and add more docstrings. - -**Highlights** - -- Documentation is online now: https://mmdetection.readthedocs.io. -- Support new models: [ATSS](https://arxiv.org/abs/1912.02424). -- DCN is now available with the api `build_conv_layer` and `ConvModule` like the normal conv layer. -- A tool to collect environment information is available for trouble shooting. - -**Bug Fixes** - -- Fix the incompatibility of the latest numpy and pycocotools. (#2024) -- Fix the case when distributed package is unavailable, e.g., on Windows. (#1985) -- Fix the dimension issue for `refine_bboxes()`. (#1962) -- Fix the typo when `seg_prefix` is a list. (#1906) -- Add segmentation map cropping to RandomCrop. (#1880) -- Fix the return value of `ga_shape_target_single()`. (#1853) -- Fix the loaded shape of empty proposals. (#1819) -- Fix the mask data type when using albumentation. (#1818) - -**Improvements** - -- Enhance AssignResult and SamplingResult. (#1995) -- Add ability to overwrite existing module in Registry. 
(#1982) -- Reorganize requirements and make albumentations and imagecorruptions optional. (#1969) -- Check NaN in `SSDHead`. (#1935) -- Encapsulate the DCN in ResNe(X)t into a ConvModule & Conv_layers. (#1894) -- Refactoring for mAP evaluation and support multiprocessing and logging. (#1889) -- Init the root logger before constructing Runner to log more information. (#1865) -- Split `SegResizeFlipPadRescale` into different existing transforms. (#1852) -- Move `init_dist()` to MMCV. (#1851) -- Documentation and docstring improvements. (#1971, #1938, #1869, #1838) -- Fix the color of the same class for mask visualization. (#1834) -- Remove the option `keep_all_stages` in HTC and Cascade R-CNN. (#1806) - -**New Features** - -- Add two test-time options `crop_mask` and `rle_mask_encode` for mask heads. (#2013) -- Support loading grayscale images as single channel. (#1975) -- Implement "Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection". (#1872) -- Add sphinx generated docs. (#1859, #1864) -- Add GN support for flops computation. (#1850) -- Collect env info for trouble shooting. (#1812) - -### v1.0rc1 (13/12/2019) - -The RC1 release mainly focuses on improving the user experience, and fixing bugs. - -**Highlights** - -- Support new models: [FoveaBox](https://arxiv.org/abs/1904.03797), [RepPoints](https://arxiv.org/abs/1904.11490) and [FreeAnchor](https://arxiv.org/abs/1909.02466). -- Add a Dockerfile. -- Add a jupyter notebook demo and a webcam demo. -- Setup the code style and CI. -- Add lots of docstrings and unit tests. -- Fix lots of bugs. - -**Breaking Changes** - -- There was a bug for computing COCO-style mAP w.r.t different scales (AP_s, AP_m, AP_l), introduced by #621. (#1679) - -**Bug Fixes** - -- Fix a sampling interval bug in Libra R-CNN. (#1800) -- Fix the learning rate in SSD300 WIDER FACE. (#1781) -- Fix the scaling issue when `keep_ratio=False`. (#1730) -- Fix typos. (#1721, #1492, #1242, #1108, #1107) -- Fix the shuffle argument in `build_dataloader`. (#1693) -- Clip the proposal when computing mask targets. (#1688) -- Fix the "index out of range" bug for samplers in some corner cases. (#1610, #1404) -- Fix the NMS issue on devices other than GPU:0. (#1603) -- Fix SSD Head and GHM Loss on CPU. (#1578) -- Fix the OOM error when there are too many gt bboxes. (#1575) -- Fix the wrong keyword argument `nms_cfg` in HTC. (#1573) -- Process masks and semantic segmentation in Expand and MinIoUCrop transforms. (#1550, #1361) -- Fix a scale bug in the Non Local op. (#1528) -- Fix a bug in transforms when `gt_bboxes_ignore` is None. (#1498) -- Fix a bug when `img_prefix` is None. (#1497) -- Pass the device argument to `grid_anchors` and `valid_flags`. (#1478) -- Fix the data pipeline for test_robustness. (#1476) -- Fix the argument type of deformable pooling. (#1390) -- Fix the coco_eval when there are only two classes. (#1376) -- Fix a bug in Modulated DeformableConv when deformable_group>1. (#1359) -- Fix the mask cropping in RandomCrop. (#1333) -- Fix zero outputs in DeformConv when not running on cuda:0. (#1326) -- Fix the type issue in Expand. (#1288) -- Fix the inference API. (#1255) -- Fix the inplace operation in Expand. (#1249) -- Fix the from-scratch training config. (#1196) -- Fix inplace add in RoIExtractor which cause an error in PyTorch 1.2. (#1160) -- Fix FCOS when input images has no positive sample. (#1136) -- Fix recursive imports. (#1099) - -**Improvements** - -- Print the config file and mmdet version in the log. 
(#1721) -- Lint the code before compiling in travis CI. (#1715) -- Add a probability argument for the `Expand` transform. (#1651) -- Update the PyTorch and CUDA version in the docker file. (#1615) -- Raise a warning when specifying `--validate` in non-distributed training. (#1624, #1651) -- Beautify the mAP printing. (#1614) -- Add pre-commit hook. (#1536) -- Add the argument `in_channels` to backbones. (#1475) -- Add lots of docstrings and unit tests, thanks to [@Erotemic](https://github.com/Erotemic). (#1603, #1517, #1506, #1505, #1491, #1479, #1477, #1475, #1474) -- Add support for multi-node distributed test when there is no shared storage. (#1399) -- Optimize Dockerfile to reduce the image size. (#1306) -- Update new results of HRNet. (#1284, #1182) -- Add an argument `no_norm_on_lateral` in FPN. (#1240) -- Test the compiling in CI. (#1235) -- Move docs to a separate folder. (#1233) -- Add a jupyter notebook demo. (#1158) -- Support different type of dataset for training. (#1133) -- Use int64_t instead of long in cuda kernels. (#1131) -- Support unsquare RoIs for bbox and mask heads. (#1128) -- Manually add type promotion to make compatible to PyTorch 1.2. (#1114) -- Allowing validation dataset for computing validation loss. (#1093) -- Use `.scalar_type()` instead of `.type()` to suppress some warnings. (#1070) - -**New Features** - -- Add an option `--with_ap` to compute the AP for each class. (#1549) -- Implement "FreeAnchor: Learning to Match Anchors for Visual Object Detection". (#1391) -- Support [Albumentations](https://github.com/albumentations-team/albumentations) for augmentations in the data pipeline. (#1354) -- Implement "FoveaBox: Beyond Anchor-based Object Detector". (#1339) -- Support horizontal and vertical flipping. (#1273, #1115) -- Implement "RepPoints: Point Set Representation for Object Detection". (#1265) -- Add test-time augmentation to HTC and Cascade R-CNN. (#1251) -- Add a COCO result analysis tool. (#1228) -- Add Dockerfile. (#1168) -- Add a webcam demo. (#1155, #1150) -- Add FLOPs counter. (#1127) -- Allow arbitrary layer order for ConvModule. (#1078) - -### v1.0rc0 (27/07/2019) - -- Implement lots of new methods and components (Mixed Precision Training, HTC, Libra R-CNN, Guided Anchoring, Empirical Attention, Mask Scoring R-CNN, Grid R-CNN (Plus), GHM, GCNet, FCOS, HRNet, Weight Standardization, etc.). Thank all collaborators! -- Support two additional datasets: WIDER FACE and Cityscapes. -- Refactoring for loss APIs and make it more flexible to adopt different losses and related hyper-parameters. -- Speed up multi-gpu testing. -- Integrate all compiling and installing in a single script. - -### v0.6.0 (14/04/2019) - -- Up to 30% speedup compared to the model zoo. -- Support both PyTorch stable and nightly version. -- Replace NMS and SigmoidFocalLoss with Pytorch CUDA extensions. - -### v0.6rc0(06/02/2019) - -- Migrate to PyTorch 1.0. - -### v0.5.7 (06/02/2019) - -- Add support for Deformable ConvNet v2. (Many thanks to the authors and [@chengdazhi](https://github.com/chengdazhi)) -- This is the last release based on PyTorch 0.4.1. - -### v0.5.6 (17/01/2019) - -- Add support for Group Normalization. -- Unify RPNHead and single stage heads (RetinaHead, SSDHead) with AnchorHead. - -### v0.5.5 (22/12/2018) - -- Add SSD for COCO and PASCAL VOC. -- Add ResNeXt backbones and detection models. -- Refactoring for Samplers/Assigners and add OHEM. -- Add VOC dataset and evaluation scripts. - -### v0.5.4 (27/11/2018) - -- Add SingleStageDetector and RetinaNet. 
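Several early releases above touch the focal loss path (for example, v0.6.0 replaces `SigmoidFocalLoss` with a PyTorch CUDA extension). For reference, here is a minimal pure-PyTorch sketch of the quantity that loss computes, with the usual `gamma`/`alpha` hyper-parameters; it illustrates the formula only and is not the project's optimized implementation.

```python
# Reference sketch of the sigmoid focal loss (Lin et al., 2017):
#   FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t)
# Not the CUDA-extension implementation mentioned in the changelog.
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    """logits, targets: same shape; targets are 0/1 labels per class."""
    prob = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = prob * targets + (1 - prob) * (1 - targets)        # p if y=1 else 1-p
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()

logits = torch.randn(8, 80)     # e.g. 8 anchors, 80 classes
targets = torch.zeros(8, 80)
targets[0, 3] = 1.0             # one positive label
print(sigmoid_focal_loss(logits, targets))
```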
- -### v0.5.3 (26/11/2018) - -- Add Cascade R-CNN and Cascade Mask R-CNN. -- Add support for Soft-NMS in config files. - -### v0.5.2 (21/10/2018) - -- Add support for custom datasets. -- Add a script to convert PASCAL VOC annotations to the expected format. - -### v0.5.1 (20/10/2018) - -- Add BBoxAssigner and BBoxSampler, the `train_cfg` field in config files are restructured. -- `ConvFCRoIHead` / `SharedFCRoIHead` are renamed to `ConvFCBBoxHead` / `SharedFCBBoxHead` for consistency. diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/analysis_tools/analyze_logs.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/analysis_tools/analyze_logs.py deleted file mode 100644 index 83464f76ef3155be80289431188492c911f5b482..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/analysis_tools/analyze_logs.py +++ /dev/null @@ -1,179 +0,0 @@ -import argparse -import json -from collections import defaultdict - -import matplotlib.pyplot as plt -import numpy as np -import seaborn as sns - - -def cal_train_time(log_dicts, args): - for i, log_dict in enumerate(log_dicts): - print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') - all_times = [] - for epoch in log_dict.keys(): - if args.include_outliers: - all_times.append(log_dict[epoch]['time']) - else: - all_times.append(log_dict[epoch]['time'][1:]) - all_times = np.array(all_times) - epoch_ave_time = all_times.mean(-1) - slowest_epoch = epoch_ave_time.argmax() - fastest_epoch = epoch_ave_time.argmin() - std_over_epoch = epoch_ave_time.std() - print(f'slowest epoch {slowest_epoch + 1}, ' - f'average time is {epoch_ave_time[slowest_epoch]:.4f}') - print(f'fastest epoch {fastest_epoch + 1}, ' - f'average time is {epoch_ave_time[fastest_epoch]:.4f}') - print(f'time std over epochs is {std_over_epoch:.4f}') - print(f'average iter time: {np.mean(all_times):.4f} s/iter') - print() - - -def plot_curve(log_dicts, args): - if args.backend is not None: - plt.switch_backend(args.backend) - sns.set_style(args.style) - # if legend is None, use {filename}_{key} as legend - legend = args.legend - if legend is None: - legend = [] - for json_log in args.json_logs: - for metric in args.keys: - legend.append(f'{json_log}_{metric}') - assert len(legend) == (len(args.json_logs) * len(args.keys)) - metrics = args.keys - - num_metrics = len(metrics) - for i, log_dict in enumerate(log_dicts): - epochs = list(log_dict.keys()) - for j, metric in enumerate(metrics): - print(f'plot curve of {args.json_logs[i]}, metric is {metric}') - if metric not in log_dict[epochs[0]]: - raise KeyError( - f'{args.json_logs[i]} does not contain metric {metric}') - - if 'mAP' in metric: - xs = np.arange(1, max(epochs) + 1) - ys = [] - for epoch in epochs: - ys += log_dict[epoch][metric] - ax = plt.gca() - ax.set_xticks(xs) - plt.xlabel('epoch') - plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') - else: - xs = [] - ys = [] - num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1] - for epoch in epochs: - iters = log_dict[epoch]['iter'] - if log_dict[epoch]['mode'][-1] == 'val': - iters = iters[:-1] - xs.append( - np.array(iters) + (epoch - 1) * num_iters_per_epoch) - ys.append(np.array(log_dict[epoch][metric][:len(iters)])) - xs = np.concatenate(xs) - ys = np.concatenate(ys) - plt.xlabel('iter') - plt.plot( - xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) - plt.legend() - if args.title is not None: - plt.title(args.title) - if args.out is None: - plt.show() - else: - print(f'save curve to: 
{args.out}') - plt.savefig(args.out) - plt.cla() - - -def add_plot_parser(subparsers): - parser_plt = subparsers.add_parser( - 'plot_curve', help='parser for plotting curves') - parser_plt.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser_plt.add_argument( - '--keys', - type=str, - nargs='+', - default=['bbox_mAP'], - help='the metric that you want to plot') - parser_plt.add_argument('--title', type=str, help='title of figure') - parser_plt.add_argument( - '--legend', - type=str, - nargs='+', - default=None, - help='legend of each plot') - parser_plt.add_argument( - '--backend', type=str, default=None, help='backend of plt') - parser_plt.add_argument( - '--style', type=str, default='dark', help='style of plt') - parser_plt.add_argument('--out', type=str, default=None) - - -def add_time_parser(subparsers): - parser_time = subparsers.add_parser( - 'cal_train_time', - help='parser for computing the average time per training iteration') - parser_time.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser_time.add_argument( - '--include-outliers', - action='store_true', - help='include the first value of every epoch when computing ' - 'the average time') - - -def parse_args(): - parser = argparse.ArgumentParser(description='Analyze Json Log') - # currently only support plot curve and calculate average train time - subparsers = parser.add_subparsers(dest='task', help='task parser') - add_plot_parser(subparsers) - add_time_parser(subparsers) - args = parser.parse_args() - return args - - -def load_json_logs(json_logs): - # load and convert json_logs to log_dict, key is epoch, value is a sub dict - # keys of sub dict is different metrics, e.g. memory, bbox_mAP - # value of sub dict is a list of corresponding values of all iterations - log_dicts = [dict() for _ in json_logs] - for json_log, log_dict in zip(json_logs, log_dicts): - with open(json_log, 'r') as log_file: - for line in log_file: - log = json.loads(line.strip()) - # skip lines without `epoch` field - if 'epoch' not in log: - continue - epoch = log.pop('epoch') - if epoch not in log_dict: - log_dict[epoch] = defaultdict(list) - for k, v in log.items(): - log_dict[epoch][k].append(v) - return log_dicts - - -def main(): - args = parse_args() - - json_logs = args.json_logs - for json_log in json_logs: - assert json_log.endswith('.json') - - log_dicts = load_json_logs(json_logs) - - eval(args.task)(log_dicts, args) - - -if __name__ == '__main__': - main() diff --git a/spaces/tomofi/NDLOCR/src/text_recognition/deep-text-recognition-benchmark/README.md b/spaces/tomofi/NDLOCR/src/text_recognition/deep-text-recognition-benchmark/README.md deleted file mode 100644 index f512c0e7dddbd7977479662c4f73a89cef9b454a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/text_recognition/deep-text-recognition-benchmark/README.md +++ /dev/null @@ -1,201 +0,0 @@ -This software was developed by the National Diet Library under contract to Morpho AI Solutions, Inc. - -This software is largely based on the following repositories. - -The newly developed portion of this program is released by the National Diet Library under a CC BY 4.0 license. For more information, see [LICENSE](./LICENSE) -. - -# What Is Wrong With Scene Text Recognition Model Comparisons? 
Dataset and Model Analysis -| [paper](https://arxiv.org/abs/1904.01906) | [training and evaluation data](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) | [failure cases and cleansed label](https://github.com/clovaai/deep-text-recognition-benchmark#download-failure-cases-and-cleansed-label-from-here) | [pretrained model](https://www.dropbox.com/sh/j3xmli4di1zuv3s/AAArdcPgz7UFxIHUuKNOeKv_a?dl=0) | [Baidu ver(passwd:rryk)](https://pan.baidu.com/s/1KSNLv4EY3zFWHpBYlpFCBQ) | - -[original repository link](https://github.com/clovaai/deep-text-recognition-benchmark) - -Official PyTorch implementation of our four-stage STR framework, which most existing STR models fit into.
-Using this framework allows the module-wise contributions to accuracy, speed, and memory demand to be measured under one consistent set of training and evaluation datasets.
-Such analyses remove the obstacles in current comparisons to understanding the performance gain of each existing module.

              - - -## Honors -Based on this framework, we recorded the 1st place of [ICDAR2013 focused scene text](https://rrc.cvc.uab.es/?ch=2&com=evaluation&task=3), [ICDAR2019 ArT](https://rrc.cvc.uab.es/files/ICDAR2019-ArT.pdf) and 3rd place of [ICDAR2017 COCO-Text](https://rrc.cvc.uab.es/?ch=5&com=evaluation&task=2), [ICDAR2019 ReCTS (task1)](https://rrc.cvc.uab.es/files/ICDAR2019-ReCTS.pdf).
              -The difference between our paper and ICDAR challenge is summarized [here](https://github.com/clovaai/deep-text-recognition-benchmark/issues/13). - -## Updates -**Aug 3, 2020**: added [guideline to use Baidu warpctc](https://github.com/clovaai/deep-text-recognition-benchmark/pull/209) which reproduces CTC results of our paper.
              -**Dec 27, 2019**: added [FLOPS](https://github.com/clovaai/deep-text-recognition-benchmark/issues/125) in our paper, and minor updates such as log_dataset.txt and [ICDAR2019-NormalizedED](https://github.com/clovaai/deep-text-recognition-benchmark/blob/86451088248e0490ff8b5f74d33f7d014f6c249a/test.py#L139-L165).
              -**Oct 22, 2019**: added [confidence score](https://github.com/clovaai/deep-text-recognition-benchmark/issues/82), and arranged the output form of training logs.
              -**Jul 31, 2019**: The paper is accepted at International Conference on Computer Vision (ICCV), Seoul 2019, as an oral talk.
              -**Jul 25, 2019**: The code for floating-point 16 calculation, check [@YacobBY's](https://github.com/YacobBY) [pull request](https://github.com/clovaai/deep-text-recognition-benchmark/pull/36)
              -**Jul 16, 2019**: added [ST_spe.zip](https://drive.google.com/drive/folders/192UfE9agQUMNq6AgU3_E05_FcPZK4hyt) dataset, word images contain special characters in SynthText (ST) dataset, see [this issue](https://github.com/clovaai/deep-text-recognition-benchmark/issues/7#issuecomment-511727025)
              -**Jun 24, 2019**: added gt.txt of failure cases that contains path and label of each image, see [image_release_190624.zip](https://drive.google.com/open?id=1VAP9l5GL5fgptgKDLio_h3nMe7X9W0Mf)
              -**May 17, 2019**: uploaded resources in Baidu Netdisk also, added [Run demo](https://github.com/clovaai/deep-text-recognition-benchmark#run-demo-with-pretrained-model). (check [@sharavsambuu's](https://github.com/sharavsambuu) [colab demo also](https://colab.research.google.com/drive/1PHnc_QYyf9b1_KJ1r15wYXaOXkdm1Mrk))
-**May 9, 2019**: PyTorch version updated from 1.0.1 to 1.1.0, use torch.nn.CTCLoss instead of torch-baidu-ctc, and various minor updates. - -## Getting Started -### Dependency -- This work was tested with PyTorch 1.3.1, CUDA 10.1, Python 3.6 and Ubuntu 16.04.
              You may need `pip3 install torch==1.3.1`.
-In the paper, experiments were performed with **PyTorch 0.4.1, CUDA 9.0**. -- requirements: lmdb, pillow, torchvision, nltk, natsort -``` -pip3 install lmdb pillow torchvision nltk natsort -``` - -### Download lmdb dataset for training and evaluation from [here](https://www.dropbox.com/sh/i39abvnefllx2si/AAAbAYRvxzRp3cIE5HzqUw3ra?dl=0) -data_lmdb_release.zip contains the following:
              -training datasets : [MJSynth (MJ)](http://www.robots.ox.ac.uk/~vgg/data/text/)[1] and [SynthText (ST)](http://www.robots.ox.ac.uk/~vgg/data/scenetext/)[2] \ -validation datasets : the union of the training sets [IC13](http://rrc.cvc.uab.es/?ch=2)[3], [IC15](http://rrc.cvc.uab.es/?ch=4)[4], [IIIT](http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K.html)[5], and [SVT](http://www.iapr-tc11.org/mediawiki/index.php/The_Street_View_Text_Dataset)[6].\ -evaluation datasets : benchmark evaluation datasets, consist of [IIIT](http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K.html)[5], [SVT](http://www.iapr-tc11.org/mediawiki/index.php/The_Street_View_Text_Dataset)[6], [IC03](http://www.iapr-tc11.org/mediawiki/index.php/ICDAR_2003_Robust_Reading_Competitions)[7], [IC13](http://rrc.cvc.uab.es/?ch=2)[3], [IC15](http://rrc.cvc.uab.es/?ch=4)[4], [SVTP](http://openaccess.thecvf.com/content_iccv_2013/papers/Phan_Recognizing_Text_with_2013_ICCV_paper.pdf)[8], and [CUTE](http://cs-chan.com/downloads_CUTE80_dataset.html)[9]. - -### Run demo with pretrained model -1. Download pretrained model from [here](https://drive.google.com/drive/folders/15WPsuPJDCzhp2SvYZLRj8mAlT3zmoAMW) -2. Add image files to test into `demo_image/` -3. Run demo.py (add `--sensitive` option if you use case-sensitive model) -``` -CUDA_VISIBLE_DEVICES=0 python3 demo.py \ ---Transformation TPS --FeatureExtraction ResNet --SequenceModeling BiLSTM --Prediction Attn \ ---image_folder demo_image/ \ ---saved_model TPS-ResNet-BiLSTM-Attn.pth -``` - -#### prediction results - -| demo images | [TRBA (**T**PS-**R**esNet-**B**iLSTM-**A**ttn)](https://drive.google.com/open?id=1b59rXuGGmKne1AuHnkgDzoYgKeETNMv9) | [TRBA (case-sensitive version)](https://drive.google.com/open?id=1ajONZOgiG9pEYsQ-eBmgkVbMDuHgPCaY) | -| --- | --- | --- | -| | available | Available | -| | shakeshack | SHARESHACK | -| | london | Londen | -| | greenstead | Greenstead | -| | toast | TOAST | -| | merry | MERRY | -| | underground | underground | -| | ronaldo | RONALDO | -| | bally | BALLY | -| | university | UNIVERSITY | - - -### Training and evaluation -1. Train CRNN[10] model -``` -CUDA_VISIBLE_DEVICES=0 python3 train.py \ ---train_data data_lmdb_release/training --valid_data data_lmdb_release/validation \ ---select_data MJ-ST --batch_ratio 0.5-0.5 \ ---Transformation None --FeatureExtraction VGG --SequenceModeling BiLSTM --Prediction CTC -``` -2. Test CRNN[10] model. If you want to evaluate IC15-2077, check [data filtering part](https://github.com/clovaai/deep-text-recognition-benchmark/blob/c27abe6b4c681e2ee0784ad966602c056a0dd3b5/dataset.py#L148). -``` -CUDA_VISIBLE_DEVICES=0 python3 test.py \ ---eval_data data_lmdb_release/evaluation --benchmark_all_eval \ ---Transformation None --FeatureExtraction VGG --SequenceModeling BiLSTM --Prediction CTC \ ---saved_model saved_models/None-VGG-BiLSTM-CTC-Seed1111/best_accuracy.pth -``` - -3. Try to train and test our best accuracy model TRBA (**T**PS-**R**esNet-**B**iLSTM-**A**ttn) also. 
([download pretrained model](https://drive.google.com/drive/folders/15WPsuPJDCzhp2SvYZLRj8mAlT3zmoAMW)) -``` -CUDA_VISIBLE_DEVICES=0 python3 train.py \ ---train_data data_lmdb_release/training --valid_data data_lmdb_release/validation \ ---select_data MJ-ST --batch_ratio 0.5-0.5 \ ---Transformation TPS --FeatureExtraction ResNet --SequenceModeling BiLSTM --Prediction Attn -``` -``` -CUDA_VISIBLE_DEVICES=0 python3 test.py \ ---eval_data data_lmdb_release/evaluation --benchmark_all_eval \ ---Transformation TPS --FeatureExtraction ResNet --SequenceModeling BiLSTM --Prediction Attn \ ---saved_model saved_models/TPS-ResNet-BiLSTM-Attn-Seed1111/best_accuracy.pth -``` - -### Arguments -* `--train_data`: folder path to training lmdb dataset. -* `--valid_data`: folder path to validation lmdb dataset. -* `--eval_data`: folder path to evaluation (with test.py) lmdb dataset. -* `--select_data`: select training data. default is MJ-ST, which means MJ and ST used as training data. -* `--batch_ratio`: assign ratio for each selected data in the batch. default is 0.5-0.5, which means 50% of the batch is filled with MJ and the other 50% of the batch is filled ST. -* `--data_filtering_off`: skip [data filtering](https://github.com/clovaai/deep-text-recognition-benchmark/blob/f2c54ae2a4cc787a0f5859e9fdd0e399812c76a3/dataset.py#L126-L146) when creating LmdbDataset. -* `--Transformation`: select Transformation module [None | TPS]. -* `--FeatureExtraction`: select FeatureExtraction module [VGG | RCNN | ResNet]. -* `--SequenceModeling`: select SequenceModeling module [None | BiLSTM]. -* `--Prediction`: select Prediction module [CTC | Attn]. -* `--saved_model`: assign saved model to evaluation. -* `--benchmark_all_eval`: evaluate with 10 evaluation dataset versions, same with Table 1 in our paper. - -## Download failure cases and cleansed label from [here](https://www.dropbox.com/s/5knh1gb1z593fxj/image_release_190624.zip?dl=0) -image_release.zip contains failure case images and benchmark evaluation images with cleansed label. - - -## When you need to train on your own dataset or Non-Latin language datasets. -1. Create your own lmdb dataset. -``` -pip3 install fire -python3 create_lmdb_dataset.py --inputPath data/ --gtFile data/gt.txt --outputPath result/ -``` -The structure of data folder as below. -``` -data -├── gt.txt -└── test - ├── word_1.png - ├── word_2.png - ├── word_3.png - └── ... -``` -At this time, `gt.txt` should be `{imagepath}\t{label}\n`
              -For example -``` -test/word_1.png Tiredness -test/word_2.png kills -test/word_3.png A -... -``` -2. Modify `--select_data`, `--batch_ratio`, and `opt.character`, see [this issue](https://github.com/clovaai/deep-text-recognition-benchmark/issues/85). - - -## Acknowledgements -This implementation has been based on these repository [crnn.pytorch](https://github.com/meijieru/crnn.pytorch), [ocr_attention](https://github.com/marvis/ocr_attention). - -## Reference -[1] M. Jaderberg, K. Simonyan, A. Vedaldi, and A. Zisserman. Synthetic data and artificial neural networks for natural scenetext recognition. In Workshop on Deep Learning, NIPS, 2014.
              -[2] A. Gupta, A. Vedaldi, and A. Zisserman. Synthetic data fortext localisation in natural images. In CVPR, 2016.
              -[3] D. Karatzas, F. Shafait, S. Uchida, M. Iwamura, L. G. i Big-orda, S. R. Mestre, J. Mas, D. F. Mota, J. A. Almazan, andL. P. De Las Heras. ICDAR 2013 robust reading competition. In ICDAR, pages 1484–1493, 2013.
              -[4] D. Karatzas, L. Gomez-Bigorda, A. Nicolaou, S. Ghosh, A. Bagdanov, M. Iwamura, J. Matas, L. Neumann, V. R.Chandrasekhar, S. Lu, et al. ICDAR 2015 competition on ro-bust reading. In ICDAR, pages 1156–1160, 2015.
              -[5] A. Mishra, K. Alahari, and C. Jawahar. Scene text recognition using higher order language priors. In BMVC, 2012.
              -[6] K. Wang, B. Babenko, and S. Belongie. End-to-end scenetext recognition. In ICCV, pages 1457–1464, 2011.
              -[7] S. M. Lucas, A. Panaretos, L. Sosa, A. Tang, S. Wong, andR. Young. ICDAR 2003 robust reading competitions. In ICDAR, pages 682–687, 2003.
              -[8] T. Q. Phan, P. Shivakumara, S. Tian, and C. L. Tan. Recognizing text with perspective distortion in natural scenes. In ICCV, pages 569–576, 2013.
              -[9] A. Risnumawan, P. Shivakumara, C. S. Chan, and C. L. Tan. A robust arbitrary text detection system for natural scene images. In ESWA, volume 41, pages 8027–8048, 2014.
              -[10] B. Shi, X. Bai, and C. Yao. An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition. In TPAMI, volume 39, pages2298–2304. 2017. - -## Links -- WebDemo : https://demo.ocr.clova.ai/
              -Combination of Clova AI detection and recognition, additional/advanced features used for KOR/JPN. -- Repo of detection : https://github.com/clovaai/CRAFT-pytorch - -## Citation -Please consider citing this work in your publications if it helps your research. -``` -@inproceedings{baek2019STRcomparisons, - title={What Is Wrong With Scene Text Recognition Model Comparisons? Dataset and Model Analysis}, - author={Baek, Jeonghun and Kim, Geewook and Lee, Junyeop and Park, Sungrae and Han, Dongyoon and Yun, Sangdoo and Oh, Seong Joon and Lee, Hwalsuk}, - booktitle = {International Conference on Computer Vision (ICCV)}, - year={2019}, - pubstate={published}, - tppubtype={inproceedings} -} -``` - -## Contact -Feel free to contact us if there is any question:
              -for code/paper Jeonghun Baek ku21fang@gmail.com; for collaboration hwalsuk.lee@navercorp.com (our team leader). - -## License -Copyright (c) 2019-present NAVER Corp. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - diff --git a/spaces/trttung1610/musicgen/audiocraft/grids/musicgen/musicgen_clapemb_32khz.py b/spaces/trttung1610/musicgen/audiocraft/grids/musicgen/musicgen_clapemb_32khz.py deleted file mode 100644 index 64ad3f8c77afe1ab5908e407ad14d4879e1b1ad1..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/grids/musicgen/musicgen_clapemb_32khz.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from ._explorers import LMExplorer -from ...environment import AudioCraftEnvironment - - -@LMExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=32, partition=partitions) - launcher.bind_(solver='musicgen/musicgen_base_32khz') - # replace this by the desired music dataset - launcher.bind_(dset='internal/music_400k_32khz') - launcher.bind_(conditioner='clapemb2music') - - fsdp = {'autocast': False, 'fsdp.use': True} - cache_path = {'conditioners.description.clap.cache_path': - '/fsx-audio-craft-llm/jadecopet/experiments/audiocraft/caches/clap_embed_music'} - text_wav_training_opt = {'conditioners.description.clap.text_p': 0.5} - - launcher.bind_(fsdp) - - launcher.slurm_(gpus=32).bind_(label='32gpus') - with launcher.job_array(): - launcher() - launcher(text_wav_training_opt) - launcher(cache_path) - launcher(cache_path, text_wav_training_opt) diff --git a/spaces/tryolabs/blogpost-cqa/app.py b/spaces/tryolabs/blogpost-cqa/app.py deleted file mode 100644 index c558b86aba1e88ac7cbe49b5f048407f6574e0b3..0000000000000000000000000000000000000000 --- a/spaces/tryolabs/blogpost-cqa/app.py +++ /dev/null @@ -1,69 +0,0 @@ -from transformers import AutoTokenizer -import time -import gradio as gr -from optimum.onnxruntime import ORTModelForSeq2SeqLM -from optimum.utils import NormalizedConfigManager - -@classmethod -def _new_get_normalized_config_class(cls, model_type): - return cls._conf["t5"] - -NormalizedConfigManager.get_normalized_config_class = _new_get_normalized_config_class - - -N = 2 # Number of previous QA pairs to use for context -MAX_NEW_TOKENS = 128 # Maximum number of tokens for each answer - -tokenizer = AutoTokenizer.from_pretrained("tryolabs/long-t5-tglobal-base-blogpost-cqa-onnx") -model = ORTModelForSeq2SeqLM.from_pretrained("tryolabs/long-t5-tglobal-base-blogpost-cqa-onnx") - - -with open("updated_context.txt", "r") as f: - context = f.read() - -def build_input(question, state=[[],[]]): - model_input = f"{context} || " - previous = min(len(state[1][1:]), N) - for i in range(previous, 0, -1): - prev_question = state[0][-i-1] - prev_answer = state[1][-i] - model_input += f" {prev_question} 
{prev_answer} " - model_input += f" {question} " - return model_input - -def get_model_answer(question, state=[[],[]]): - start = time.perf_counter() - model_input = build_input(question, state) - end = time.perf_counter() - print(f"Build input: {end-start}") - start = time.perf_counter() - encoded_inputs = tokenizer(model_input, max_length=7000, truncation=True, return_tensors="pt") - input_ids, attention_mask = ( - encoded_inputs.input_ids, - encoded_inputs.attention_mask - ) - end = time.perf_counter() - print(f"Tokenize: {end-start}") - start = time.perf_counter() - encoded_output = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=MAX_NEW_TOKENS) - answer = tokenizer.decode(encoded_output[0], skip_special_tokens=True) - end = time.perf_counter() - print(f"Generate: {end-start}") - state[0].append(question) - state[1].append(answer) - responses = [(state[0][i], state[1][i]) for i in range(len(state[0]))] - return responses, state - -with gr.Blocks() as demo: - state = gr.State([[],[]]) - chatbot = gr.Chatbot() - text = gr.Textbox(label="Ask a question (press enter to submit)", default_value="How are you?") - gr.Examples( - ["What's the name of the dataset that was built?", "what task does it focus on?", "what is that task about?"], - text - ) - - text.submit(get_model_answer, [text, state], [chatbot, state]) - text.submit(lambda x: "", text, text) - -demo.launch() \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Autocad 2011 Activation Code Crack HOT.md b/spaces/usbethFlerru/sovits-modelsV2/example/Autocad 2011 Activation Code Crack HOT.md deleted file mode 100644 index 7b388fe6a401e02f12ca370d8842d269958259e8..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Autocad 2011 Activation Code Crack HOT.md +++ /dev/null @@ -1,25 +0,0 @@ -
              -

              How to Crack AutoCAD 2011 Activation Code

              -

AutoCAD is powerful software for designing and drafting 2D and 3D models. It is used by professionals and hobbyists alike for various purposes, such as architecture, engineering, construction, and animation. However, AutoCAD is not cheap, and it requires an activation code to run properly. If you don't have a valid license, you might be tempted to look for a crack that bypasses the activation process. But is it worth it?

              -

In this article, we will explain why cracking the AutoCAD 2011 activation code is a bad idea, and what the risks and consequences of doing so are. We will also provide some alternatives that can help you use AutoCAD legally and safely.

              -

              autocad 2011 activation code crack


              Download Zip ★★★ https://urlcod.com/2uyX9C



              -

              Why Cracking AutoCAD 2011 Activation Code is a Bad Idea

              -

Cracking the AutoCAD 2011 activation code is a form of software piracy, which is illegal and unethical. By doing so, you are violating the terms of service and the intellectual property rights of Autodesk, the developer of AutoCAD. You are also depriving the company of the revenue it deserves for creating and maintaining the software.

              -

But aside from the legal and moral issues, cracking the AutoCAD 2011 activation code also poses serious risks and consequences for you as a user. Here are some of them:

              -
                -
              • Malware infection: Many crack files are infected with viruses, trojans, worms, ransomware, spyware, or other malicious software that can harm your computer and compromise your data. Some malware can even steal your personal information, such as passwords, credit card numbers, or bank accounts.
              • Performance issues: Cracked software often has bugs, errors, glitches, or compatibility problems that can affect the functionality and stability of AutoCAD. You might experience crashes, freezes, slowdowns, corrupted files, missing features, or other issues that can ruin your work and productivity.
              • Lack of updates: Cracked software cannot receive official updates from Autodesk, which means you will miss out on the latest features, improvements, fixes, and security patches that are released for AutoCAD. You will also be unable to access online services or cloud-based features that require authentication.
• Legal troubles: Cracking the AutoCAD 2011 activation code can expose you to legal action from Autodesk or other authorities. You might face fines, lawsuits, or even criminal charges for violating the law and infringing on the rights of the software owner. You might also lose your reputation or credibility as a professional or a student.

              How to Use AutoCAD Legally and Safely

              -

              If you want to use AutoCAD without risking malware infection, performance issues, lack of updates, or legal troubles, you should avoid cracking its activation code and instead use it legally and safely. Here are some ways to do that:

              -
                -
              • Purchase a license: The best way to use AutoCAD is to buy a license from Autodesk or an authorized reseller. You can choose from different subscription plans that suit your needs and budget. You can also get discounts if you are a student or an educator.
              • Use a free trial: If you want to try AutoCAD before buying it, you can download a free trial version from Autodesk's website. The trial version gives you access to all the features of AutoCAD for 30 days. You can also extend the trial period by signing up for an Autodesk account.
• Use alternative software: If you don't want to spend money on AutoCAD or you don't need all its features, you can use alternative software that performs similar tasks. There are many free or low-cost CAD programs available online that can help you design and draft 2D and 3D models.

              Conclusion

              -

AutoCAD is great software for creating and editing 2D and 3D models. However, cracking its activation code is not a good idea because it is illegal, unethical, risky, and problematic. Instead of looking for a crack file that can harm your computer and your work, you should use AutoCAD legally and safely by purchasing a license, using the free trial version, or switching to alternative software.

              d5da3c52bf
              -
              -
              \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bittoo Boss hindi dubbed torrent Download the fun-filled wedding comedy.md b/spaces/usbethFlerru/sovits-modelsV2/example/Bittoo Boss hindi dubbed torrent Download the fun-filled wedding comedy.md deleted file mode 100644 index daa7741cf6dcd87aa2e54cb265e1fddce6d9d400..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Bittoo Boss hindi dubbed torrent Download the fun-filled wedding comedy.md +++ /dev/null @@ -1,6 +0,0 @@ -

              The Coffin Maker 2 Full Movie Online Free Download disco discos cazavam


Download Zip https://urlcod.com/2uyVFV



              -
              - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/DevExpress Dxperience 2010.2.8.rar Whats New and Improved in this Release.md b/spaces/usbethFlerru/sovits-modelsV2/example/DevExpress Dxperience 2010.2.8.rar Whats New and Improved in this Release.md deleted file mode 100644 index c33cb2ce1ed7a4a6f2c2ce81a9fd837c9c1441ff..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/DevExpress Dxperience 2010.2.8.rar Whats New and Improved in this Release.md +++ /dev/null @@ -1,6 +0,0 @@ -

              DevExpress Dxperience 2010.2.8.rar


              Download ——— https://urlcod.com/2uyX56



              - - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/wilson1/bingo/src/components/toaster.tsx b/spaces/wilson1/bingo/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/wilson1/bingo/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/text/shanghainese.py b/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/text/shanghainese.py deleted file mode 100644 index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000 --- a/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/text/shanghainese.py +++ /dev/null @@ -1,64 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ᴇ'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'ᴇf'), - ('G', 'dʑi'), - ('H', 'ᴇtɕʰ'), - ('I', 'ᴀi'), - ('J', 'dʑᴇ'), - ('K', 'kʰᴇ'), - ('L', 'ᴇl'), - ('M', 'ᴇm'), - ('N', 'ᴇn'), - ('O', 'o'), - ('P', 'pʰi'), - ('Q', 'kʰiu'), - ('R', 'ᴀl'), - ('S', 'ᴇs'), - ('T', 'tʰi'), - ('U', 'ɦiu'), - ('V', 'vi'), - ('W', 'dᴀbɤliu'), - ('X', 'ᴇks'), - ('Y', 'uᴀi'), - ('Z', 'zᴇ') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') - return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def shanghainese_to_ipa(text): - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/xdecoder/SEEM/app.py b/spaces/xdecoder/SEEM/app.py deleted file mode 100644 index d73152b3090d1ae00fbbd7cd5077d82be1d90aa9..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/SEEM/app.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -os.system("python -m pip install -r requirements.txt") -os.system("python -m pip install git+https://github.com/MaureenZOU/detectron2-xyz.git") - -github_user = os.environ.get('GITHUB_USER') -github_token = os.environ.get('GITHUB_TOKEN') - -repo = 'IX-Decoder-Demo' - -os.system("export GITHUB_USER={}".format(github_user)) -os.system("export GITHUB_TOKEN={}".format(github_token)) -os.system("git clone https://{}:{}@github.com/{}/{}".format(github_user, github_token, github_user, repo)) - -cwd0 = os.getcwd() -cwd1 = os.path.join(cwd0, 'IX-Decoder-Demo/xdecoder/body/encoder/ops') -os.chdir(cwd1) -os.system("sh make.sh") -os.chdir(cwd0) - -cwd2 = os.path.join(cwd0, 'IX-Decoder-Demo') -os.chdir(cwd2) -os.system("python app.py") \ No newline at end of file diff --git a/spaces/xianqi21/bingo/Dockerfile b/spaces/xianqi21/bingo/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/xianqi21/bingo/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start diff --git a/spaces/xiaoyeAI/clewd/clewd.js b/spaces/xiaoyeAI/clewd/clewd.js deleted file mode 100644 index b9d3c3c989c2564681f1db001006e23fe6ef0ac3..0000000000000000000000000000000000000000 --- a/spaces/xiaoyeAI/clewd/clewd.js +++ /dev/null @@ -1,863 +0,0 @@ -/* -* https://rentry.org/teralomaniac_clewd -* https://github.com/teralomaniac/clewd -*/ -'use strict'; - -const {createServer: Server, IncomingMessage, ServerResponse} = require('node:http'), {createHash: Hash, randomUUID, randomInt, randomBytes} = require('node:crypto'), {TransformStream, ReadableStream} = require('node:stream/web'), {Readable, Writable} = require('node:stream'), {Blob} = require('node:buffer'), {existsSync: exists, writeFileSync: write, createWriteStream} = require('node:fs'), {join: joinP} = require('node:path'), {ClewdSuperfetch: Superfetch, SuperfetchAvailable} = require('./lib/clewd-superfetch'), {AI, fileName, genericFixes, bytesToSize, setTitle, checkResErr, Replacements, Main} = require('./lib/clewd-utils'), ClewdStream = require('./lib/clewd-stream'); - -/******************************************************* */ -let currentIndex, Firstlogin = true, changeflag = 0, changetime = 0, totaltime, uuidOrgArray = []; - -const events = require('events'), CookieChanger = new events.EventEmitter(); -require('events').EventEmitter.defaultMaxListeners = 0; - -CookieChanger.on('ChangeCookie', () => { - Proxy && Proxy.close(); - console.log(`Changing Cookie...\n`); - Proxy.listen(Config.Port, Config.Ip, onListen); - Proxy.on('error', (err => { - console.error('Proxy error\n%o', err); - })); -}); - -const simpletokenizer = (prompt) => { - let byteLength = 0; - for (let i = 0; i < prompt.length; i++) { - let code = prompt.charCodeAt(i); - if (code <= 0xFF) { - byteLength += 0.6; - } else if (code <= 0xFFFF) { - byteLength += 1; - } else { - byteLength += 1.5; - } - } - return byteLength; -}, padtxt = (content) => { - if (Config.padtxt_placeholder.length > 0) { - var placeholder = Config.padtxt_placeholder; - } else { - const bytes = randomInt(5, 15); - var placeholder = 
randomBytes(bytes).toString('hex'); - } - let count = Math.floor((Config.Settings.padtxt - simpletokenizer(content)) / simpletokenizer(placeholder)); - - // 生成占位符字符串 - let padding = ''; - for (let i = 0; i < count; i++) { - padding += placeholder; - } - - // 在prompt前面添加占位符, 在末尾增加空行然后添加prompt - content = padding + '\n\n\n' + content; - - return content.trim(); -}, xmlPlot = (content) => { - // 检查内容中是否包含"" - if (!content.includes('')) { - content = content.replace(/(\n\n|^)xmlPlot:\s*/gm, '$1'); - content = content.replace(/(\n|\n<\/reply>)/g, ''); - return content.replace(/(.*?)<\/customname>/gm, '$1'); - } - - //群组 - content = content.replace(/(\n|\n<\/reply>)\1*/g, '$1'); - content = content.replace(/(.*?)<\/customname>:/gm, '$1:\n'); - - //role合并 - if (!content.includes('<\!-- Merge Disable -->')) { - if (!content.includes('<\!-- Merge Human Disable -->')) { - content = content.replace(/(\n\n|^)xmlPlot:/g, '$1Human:'); - content = content.replace(/(?:\n\n|^)Human:(.*?(?:\n\nAssistant:|$))/gs, function(match, p1) {return '\n\nHuman:' + p1.replace(/\n\nHuman:\s*/g, '\n\n')}); - content = content.replace(/^\s*Human:\s*/, ''); - } - if (!content.includes('<\!-- Merge Assistant Disable -->')) { - content = content.replace(/\n\nAssistant:(.*?(?:\n\nHuman:|$))/gs, function(match, p1) {return '\n\nAssistant:' + p1.replace(/\n\nAssistant:\s*/g, '\n\n')}); - } - } - content = content.replace(/(\n\n|^)xmlPlot:\s*/gm, '$1'); - content = content.replace(/<\!-- Merge.*?Disable -->/gm, ''); - - //格式顺序交换&越狱倒置 - content = content.replace(/.*?<\/Prev\1>/gs, function(match) {return match.replace(/\n\n(Assistant|Human):/g, '\n\ntemp$1:')}); - let segcontentAssistant = content.split('\n\nAssistant:'); - let processedsegAssistant = segcontentAssistant.map(seg => { - return seg.replace(/(\n\nHuman:.*?)(.*?)<\/PrevAssistant>/gs, '\n\n$2$1'); - }); - content = processedsegAssistant.join('\n\nAssistant:'); - let segcontentHuman = content.split('\n\nHuman:'); - const seglength = segcontentHuman.length; - for (let i = 1; i < seglength; i++) { - const match = segcontentHuman[i].match(/.*?<\/PrevHuman>/s); - if (match) { - segcontentHuman[i - 1] += match[0].replace(/(.*?)<\/PrevHuman>/s, '\n\n$1'); - segcontentHuman[i] = segcontentHuman[i].replace(match[0], ''); - } - } - if (/Assistant: *.$/.test(content) && seglength > 1 && !segcontentHuman[seglength - 2].includes('\n\nAssistant:')) { - segcontentHuman[seglength - 2] = segcontentHuman.splice(seglength - 1, 1, segcontentHuman[seglength - 2])[0]; - } - content = segcontentHuman.join('\n\nHuman:'); - content = content.replace(/\n\ntemp(Assistant|Human):/g, '\n\n$1:'); - - //给开头加上用于截断附加文件标识 - content.includes('') && (content = '\n\n' + content); - - // 在第一个"[Start a new"前面加上"",在最后一个"[Start a new"前面加上"\n\n\n\n" - const exampleNote = content.match(/(?<=).*(?=<\/example-note>)/) || ''; - const cardtag = content.match(/(?=\n\n<\/card>)/) || ''; - const exampletag = content.match(/(?=\n\n<\/example>)/) || ''; - const plot = content.includes('') ? 
'' : ''; - content = content.replace(/.*<\/example-note>/, ''); - const firstChatStart = content.indexOf('\n\n[Start a new'); - const lastChatStart = content.lastIndexOf('\n\n[Start a new'); - firstChatStart != -1 && firstChatStart === lastChatStart && (content = content.slice(0, firstChatStart) + `\n\n${cardtag}` + content.slice(firstChatStart)); - firstChatStart != lastChatStart && (content = content.slice(0, firstChatStart) + `\n\n${cardtag}\n\n${exampleNote}\n` + content.slice(firstChatStart, lastChatStart) + `\n\n${exampletag}\n\n${plot}` + content.slice(lastChatStart)); - - //Plain Prompt - segcontentHuman = content.split('\n\nHuman:'); - let segcontentlastIndex = segcontentHuman.length - 1; - if (segcontentlastIndex >= 2 && segcontentHuman[segcontentlastIndex].includes('') && !content.includes('\n\nPlainPrompt:')) { - content = segcontentHuman.slice(0, segcontentlastIndex).join('\n\nHuman:') + '\n\nPlainPrompt:' + segcontentHuman.slice(segcontentlastIndex).join('\n\nHuman:'); - } - content = content.replace(/<\!-- Plain Prompt Enable -->/, ''); - content = content.replace(/\n\nHuman:.*PlainPrompt:/, '\n\nPlainPrompt:'); - - //消除空XML tags或多余的\n - content = content.replace(/\n<\/(hidden|META)>\s+?<\1>\n/g, ''); - content = content.replace(/\n<(card|example|hidden|plot|META)>\s+?<\1>/g, '\n<$1>'); - content = content.replace(/(?:)?\n<(card|example|hidden|plot|META)>\s+?<\/\1>/g, ''); - content = content.replace(/(?<=(: |\n)<(card|hidden|example|plot|META)>\n)\s*/g, ''); - content = content.replace(/\s*(?=\n<\/(card|hidden|example|plot|META)>(\n|$))/g, ''); - content = content.replace(/(?<=\n)\n(?=\n)/g, ''); - - return content.trim(); -}; -/******************************************************* */ - -let ChangedSettings, UnknownSettings, Logger; - -const ConfigPath = joinP(__dirname, './config.js'), LogPath = joinP(__dirname, './log.txt'), Conversation = { - char: null, - uuid: null, - depth: 0 -}, cookies = {}; - -let uuidOrg, curPrompt = {}, prevPrompt = {}, prevMessages = [], prevImpersonated = false, Config = { - Cookie: '', - CookieArray: [], - Cookiecounter: 0, - CookieIndex: 0, - Ip: (process.env.Cookie || process.env.CookieArray) ? '0.0.0.0' : '127.0.0.1', - Port: process.env.PORT || 8444, - localtunnel: false, - BufferSize: 1, - SystemInterval: 3, - rProxy: AI.end(), - padtxt_placeholder: '', - PromptExperimentFirst: '', - PromptExperimentNext: '', - PersonalityFormat: '{{char}}\'s personality: {{personality}}', - ScenarioFormat: 'Dialogue scenario: {{scenario}}', - Settings: { - RenewAlways: true, - RetryRegenerate: false, - PromptExperiments: true, - SystemExperiments: true, - PreventImperson: false, - AllSamples: false, - NoSamples: false, - StripAssistant: false, - StripHuman: false, - PassParams: false, - ClearFlags: true, - PreserveChats: true, - LogMessages: true, - FullColon: true, - padtxt: 13500, - xmlPlot: true, - Superfetch: true - } -}; - -ServerResponse.prototype.json = async function(body, statusCode = 200, headers) { - body = body instanceof Promise ? await body : body; - this.headersSent || this.writeHead(statusCode, { - 'Content-Type': 'application/json', - ...headers && headers - }); - this.end('object' == typeof body ? JSON.stringify(body) : body); - return this; -}; - -Array.prototype.sample = function() { - return this[Math.floor(Math.random() * this.length)]; -}; - -const updateParams = res => { - updateCookies(res); -}, updateCookies = res => { - let cookieNew = ''; - res instanceof Response ? 
cookieNew = res.headers?.get('set-cookie') : res?.superfetch ? cookieNew = res.headers?.['set-cookie'] : 'string' == typeof res && (cookieNew = res.split('\n').join('')); - if (!cookieNew) { - return; - } - let cookieArr = cookieNew.split(/;\s?/gi).filter((prop => false === /^(path|expires|domain|HttpOnly|Secure|SameSite)[=;]*/i.test(prop))); - for (const cookie of cookieArr) { - const divide = cookie.split(/^(.*?)=\s*(.*)/), cookieName = divide[1], cookieVal = divide[2]; - cookies[cookieName] = cookieVal; - } -}, getCookies = () => { - const cookieNames = Object.keys(cookies); - return cookieNames.map(((name, idx) => `${name}=${cookies[name]}${idx === cookieNames.length - 1 ? '' : ';'}`)).join(' ').replace(/(\s+)$/gi, ''); -}, deleteChat = async uuid => { - if (!uuid) { - return; - } - if (uuid === Conversation.uuid) { - Conversation.uuid = null; - Conversation.depth = 0; - } - if (Config.Settings.PreserveChats) { - return; - } - const res = await fetch(`${Config.rProxy}/api/organizations/${uuidOrg}/chat_conversations/${uuid}`, { - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'DELETE' - }); - updateParams(res); -}, onListen = async () => { -/***************************** */ - if (Firstlogin) { - Firstlogin = false; - console.log(`${Main}\nhttp://${Config.Ip}:${Config.Port}/v1\n\n${Object.keys(Config.Settings).map((setting => UnknownSettings.includes(setting) ? `??? ${setting}: ${Config.Settings[setting]}` : `${setting}: ${ChangedSettings.includes(setting) ? '' : ''}${Config.Settings[setting]}`)).sort().join('\n')}\n`); - Config.Settings.Superfetch && SuperfetchAvailable(true); - if (Config.localtunnel) { - const localtunnel = require('localtunnel'); - localtunnel({ port: Config.Port }) - .then((tunnel) => { - console.log(`\nTunnel URL for outer websites: ${tunnel.url}/v1\n`); - }) - } - totaltime = Config.CookieArray.length; - } - if (Config.CookieArray?.length > 0) { - Config.Cookie = Config.CookieArray[currentIndex]; - currentIndex = (currentIndex + 1) % Config.CookieArray.length; - changetime += 1; - } - let percentage = ((changetime + Config.CookieIndex) / totaltime) * 100 - if (Config.Cookiecounter < 0 && percentage > 100) { - console.log(`\n※※※Cookie cleanup completed※※※\n\n`); - return process.exit(); - } -/***************************** */ - if ('SET YOUR COOKIE HERE' === Config.Cookie || Config.Cookie?.length < 1) { - throw Error('Set your cookie inside config.js'); - } - updateCookies(Config.Cookie); - //console.log(`${Main}\nhttp://${Config.Ip}:${Config.Port}/v1\n\n${Object.keys(Config.Settings).map((setting => UnknownSettings.includes(setting) ? `??? ${setting}: ${Config.Settings[setting]}` : `${setting}: ${ChangedSettings.includes(setting) ? 
'' : ''}${Config.Settings[setting]}`)).sort().join('\n')}\n`); - //Config.Settings.Superfetch && SuperfetchAvailable(true); - const accRes = await fetch(Config.rProxy + '/api/organizations', { - method: 'GET', - headers: { - ...AI.hdr(), - Cookie: getCookies() - } - }); -/**************************** */ - if (accRes.statusText === 'Forbidden' && Config.CookieArray?.length > 0) { - Config.CookieArray = Config.CookieArray.filter(item => item !== Config.Cookie); - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - currentIndex = (currentIndex - 1 + Config.CookieArray.length) % Config.CookieArray.length; - console.log(`Expired!`); - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}\n`); - CookieChanger.emit('ChangeCookie'); - return; - } -/**************************** */ - await checkResErr(accRes); - const accInfo = (await accRes.json())?.[0]; - if (!accInfo || accInfo.error) { - throw Error(`Couldn't get account info: "${accInfo?.error?.message || accRes.statusText}"`); - } - if (!accInfo?.uuid) { - throw Error('Invalid account id'); - } - setTitle('ok'); - updateParams(accRes); - console.log(Config.CookieArray?.length > 0 ? `(index: ${currentIndex || Config.CookieArray.length}) Logged in %o` : 'Logged in %o', { //console.log('Logged in %o', { - name: accInfo.name?.split('@')?.[0], - capabilities: accInfo.capabilities, - }); - uuidOrg = accInfo?.uuid; -/************************* */ - if (uuidOrgArray.includes(uuidOrg)) { - console.log(`Overlap!`); - currentIndex = (currentIndex - 1 + Config.CookieArray.length) % Config.CookieArray.length; - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}\n`); - Config.CookieArray = Config.CookieArray.filter(item => item !== Config.Cookie); - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - CookieChanger.emit('ChangeCookie'); - return; - } else { - uuidOrgArray.push(uuidOrg); - } -/************************* */ - if (accInfo?.active_flags.length > 0) { - const now = new Date, formattedFlags = accInfo.active_flags.map((flag => { - const days = ((new Date(flag.expires_at).getTime() - now.getTime()) / 864e5).toFixed(2); - return { - type: flag.type, - remaining_days: days - }; - })); - console.warn('Your account has warnings %o', formattedFlags); //console.warn('Your account has warnings %o', formattedFlags); - await Promise.all(accInfo.active_flags.map((flag => (async type => { - if (!Config.Settings.ClearFlags) { - return; - } - if ('consumer_restricted_mode' === type) { - return; - } - const req = await (Config.Settings.Superfetch ? Superfetch : fetch)(`${Config.rProxy}/api/organizations/${uuidOrg}/flags/${type}/dismiss`, { - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'POST' - }); - updateParams(req); - const json = await req.json(); - console.log(`${type}: ${json.error ? 
json.error.message || json.error.type || json.detail : 'OK'}`); - })(flag.type)))); -/***************************** */ - if (Config.CookieArray?.length > 0) { - console.log(`Restricted!`); - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}\n`); - CookieChanger.emit('ChangeCookie'); - return; - } - } - if (Config.CookieArray.length > 0) { - const allres = await fetch(`${Config.rProxy}`, { - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'GET' - }), accountinfo = await allres.text(); - updateParams(allres); - const Unverified = accountinfo.includes('\\"completed_verification_at\\":null'); - const Banned = accountinfo.includes('\\"gate\":\\"segment:abuse\\",\\"gateValue\\":\\"true\\",'); - const Exceededlimit = /\\"messageLimit\\":{\\"type\\":\\"(approaching_limit\\",\\"remaining\\":0|exceeded_limit)\\",/.test(accountinfo); - const Remain = /\\"messageLimit\\":{\\"type\\":\\"approaching_limit\\",\\"remaining\\":\d+\\",/.exec(accountinfo); - Remain && (changeflag = Math.max(Config.Cookiecounter - Remain[0], changeflag)); - if (Unverified || Banned) { - Config.CookieArray = Config.CookieArray.filter(item => item !== Config.Cookie); - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - currentIndex = (currentIndex - 1 + Config.CookieArray.length) % Config.CookieArray.length; - } - Unverified && console.log(`Unverified!`); - Banned && console.log(`Banned!`); - Exceededlimit && console.log(`Exceeded limit!`); - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}`); - if (Unverified || Banned || Exceededlimit || Config.Cookiecounter < 0) { - console.log(''); - CookieChanger.emit('ChangeCookie'); - return; - } -/***************************** */ - } - const convRes = await fetch(`${Config.rProxy}/api/organizations/${uuidOrg}/chat_conversations`, { - method: 'GET', - headers: { - ...AI.hdr(), - Cookie: getCookies() - } - }), conversations = await convRes.json(); - updateParams(convRes); - conversations.length > 0 && await Promise.all(conversations.map((conv => deleteChat(conv.uuid)))); -}, writeSettings = async (config, firstRun = false) => { - write(ConfigPath, `/*\n* https://rentry.org/teralomaniac_clewd\n* https://github.com/teralomaniac/clewd\n*/\n\n// SET YOUR COOKIE BELOW\n\nmodule.exports = ${JSON.stringify(config, null, 4)}\n\n/*\n BufferSize\n * How many characters will be buffered before the AI types once\n * lower = less chance of \`PreventImperson\` working properly\n\n ---\n\n SystemInterval\n * How many messages until \`SystemExperiments alternates\`\n\n ---\n\n Other settings\n * https://gitgud.io/ahsk/clewd/#defaults\n * and\n * https://gitgud.io/ahsk/clewd/-/blob/master/CHANGELOG.md\n */`.trim().replace(/((? 
{ - if ('OPTIONS' === req.method) { - return ((req, res) => { - res.writeHead(200, { - 'Access-Control-Allow-Origin': '*', - 'Access-Control-Allow-Headers': 'Authorization, Content-Type', - 'Access-Control-Allow-Methods': 'POST, GET, OPTIONS' - }).end(); - })(0, res); - } - switch (req.url) { - case '/v1/models': - res.json({ - data: [ { - id: AI.mdl() - } ] - }); - break; - - case '/v1/chat/completions': - ((req, res) => { - setTitle('recv...'); - let fetchAPI, changer; //let fetchAPI; - const abortControl = new AbortController, {signal} = abortControl; - res.socket.on('close', (async () => { - abortControl.signal.aborted || abortControl.abort(); - })); - const buffer = []; - req.on('data', (chunk => { - buffer.push(chunk); - })); - req.on('end', (async () => { - let clewdStream, titleTimer, samePrompt = false, shouldRenew = true, retryRegen = false; - try { - const body = JSON.parse(Buffer.concat(buffer).toString()), temperature = Math.max(.1, Math.min(1, body.temperature)); - let {messages} = body; - if (messages?.length < 1) { - throw Error('Select OpenAI as completion source'); - } - if (!body.stream && 1 === messages.length && JSON.stringify(messages.sort() || []) === JSON.stringify([ { - role: 'user', - content: 'Hi' - } ].sort())) { - return res.json({ - choices: [ { - message: { - content: Main - } - } ] - }); - } - res.setHeader('Access-Control-Allow-Origin', '*'); - body.stream && res.setHeader('Content-Type', 'text/event-stream'); - if (!body.stream && messages?.[0]?.content?.startsWith('From the list below, choose a word that best represents a character\'s outfit description, action, or emotion in their dialogue')) { - return res.json({ - choices: [ { - message: { - content: 'neutral' - } - } ] - }); - } - if (Config.Settings.AllSamples && Config.Settings.NoSamples) { - console.log('having AllSamples and NoSamples both set to true is not supported'); - throw Error('Only one can be used at the same time: AllSamples/NoSamples'); - } - const model = AI.mdl(); - curPrompt = { - firstUser: messages.find((message => 'user' === message.role)), - firstSystem: messages.find((message => 'system' === message.role)), - firstAssistant: messages.find((message => 'assistant' === message.role)), - lastUser: messages.findLast((message => 'user' === message.role)), - lastSystem: messages.findLast((message => 'system' === message.role && '[Start a new chat]' !== message.content)), - lastAssistant: messages.findLast((message => 'assistant' === message.role)) - }; - prevPrompt = { - ...prevMessages.length > 0 && { - firstUser: prevMessages.find((message => 'user' === message.role)), - firstSystem: prevMessages.find((message => 'system' === message.role)), - firstAssistant: prevMessages.find((message => 'assistant' === message.role)), - lastUser: prevMessages.findLast((message => 'user' === message.role)), - lastSystem: prevMessages.find((message => 'system' === message.role && '[Start a new chat]' !== message.content)), - lastAssistant: prevMessages.findLast((message => 'assistant' === message.role)) - } - }; - samePrompt = JSON.stringify(messages.filter((message => 'system' !== message.role)).sort()) === JSON.stringify(prevMessages.filter((message => 'system' !== message.role)).sort()); - const sameCharDiffChat = !samePrompt && curPrompt.firstSystem?.content === prevPrompt.firstSystem?.content && curPrompt.firstUser?.content !== prevPrompt.firstUser?.content; - shouldRenew = Config.Settings.RenewAlways || !Conversation.uuid || prevImpersonated || !Config.Settings.RenewAlways && samePrompt || 
sameCharDiffChat; - retryRegen = Config.Settings.RetryRegenerate && samePrompt && null != Conversation.uuid; - samePrompt || (prevMessages = JSON.parse(JSON.stringify(messages))); - let type = ''; - if (retryRegen) { - type = 'R'; - fetchAPI = await (async (signal, model) => { - let res; - const body = { - completion: { - prompt: '', - timezone: AI.zone(), - model: model || AI.mdl() - }, - organization_uuid: uuidOrg, - conversation_uuid: Conversation.uuid, - text: '' - }; - let headers = { - ...AI.hdr(Conversation.uuid || ''), - Accept: 'text/event-stream', - Cookie: getCookies() - }; - if (Config.Settings.Superfetch) { - const names = Object.keys(headers), values = Object.values(headers); - headers = names.map(((header, idx) => `${header}: ${values[idx]}`)); - } - res = await (Config.Settings.Superfetch ? Superfetch : fetch)(Config.rProxy + '/api/retry_message', { - stream: true, - signal, - method: 'POST', - body: JSON.stringify(body), - headers - }); - updateParams(res); - await checkResErr(res); - return res; - })(signal, model); - } else if (shouldRenew) { - Conversation.uuid && await deleteChat(Conversation.uuid); - fetchAPI = await (async signal => { - Conversation.uuid = randomUUID().toString(); - Conversation.depth = 0; - const res = await (Config.Settings.Superfetch ? Superfetch : fetch)(`${Config.rProxy}/api/organizations/${uuidOrg}/chat_conversations`, { - signal, - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'POST', - body: JSON.stringify({ - uuid: Conversation.uuid, - name: '' - }) - }); - updateParams(res); - await checkResErr(res); - return res; - })(signal); - type = 'r'; - } else if (samePrompt) {} else { - const systemExperiment = !Config.Settings.RenewAlways && Config.Settings.SystemExperiments; - if (!systemExperiment || systemExperiment && Conversation.depth >= Config.SystemInterval) { - type = 'c-r'; - Conversation.depth = 0; - } else { - type = 'c-c'; - Conversation.depth++; - } - } - let {prompt, systems} = ((messages, type) => { - const rgxScenario = /^\[Circumstances and context of the dialogue: ([\s\S]+?)\.?\]$/i, rgxPerson = /^\[([\s\S]+?)'s personality: ([\s\S]+?)\]$/i, messagesClone = JSON.parse(JSON.stringify(messages)), realLogs = messagesClone.filter((message => [ 'user', 'assistant' ].includes(message.role))), sampleLogs = messagesClone.filter((message => message.name)), mergedLogs = [ ...sampleLogs, ...realLogs ]; - mergedLogs.forEach(((message, idx) => { - const next = mergedLogs[idx + 1]; - message.customname = (message => [ 'assistant', 'user' ].includes(message.role) && null != message.name && !(message.name in Replacements))(message); - if (next && !Config.Settings.xmlPlot) { //if (next) { - if ('name' in message && 'name' in next) { - if (message.name === next.name) { - message.content += '\n\n' + next.content; //message.content += '\n' + next.content; - next.merged = true; - } - } else if ('system' !== next.role) { - if (next.role === message.role) { - message.content += '\n\n' + next.content; //message.content += '\n' + next.content; - next.merged = true; - } - } else { - message.content += '\n\n' + next.content; //message.content += '\n' + next.content; - next.merged = true; - } - } - })); - const lastAssistant = realLogs.findLast((message => !message.merged && 'assistant' === message.role)); - lastAssistant && Config.Settings.StripAssistant && (lastAssistant.strip = true); - const lastUser = realLogs.findLast((message => !message.merged && 'user' === message.role)); - lastUser && Config.Settings.StripHuman && (lastUser.strip 
= true); - const systemMessages = messagesClone.filter((message => 'system' === message.role && !('name' in message))); - systemMessages.forEach(((message, idx) => { - const scenario = message.content.match(rgxScenario)?.[1], personality = message.content.match(rgxPerson); - if (scenario) { - message.content = Config.ScenarioFormat.replace(/{{scenario}}/gim, scenario); - message.scenario = true; - } - if (3 === personality?.length) { - message.content = Config.PersonalityFormat.replace(/{{char}}/gim, personality[1]).replace(/{{personality}}/gim, personality[2]); - message.personality = true; - } - message.main = 0 === idx; - message.jailbreak = idx === systemMessages.length - 1; - ' ' === message.content && (message.discard = true); - })); - Config.Settings.AllSamples && !Config.Settings.NoSamples && realLogs.forEach((message => { - if (![ lastUser, lastAssistant ].includes(message)) { - if ('user' === message.role) { - message.name = message.customname ? message.name : 'example_user'; - message.role = 'system'; - } else if ('assistant' === message.role) { - message.name = message.customname ? message.name : 'example_assistant'; - message.role = 'system'; - } else if (!message.customname) { - throw Error('Invalid role ' + message.name); - } - } - })); - Config.Settings.NoSamples && !Config.Settings.AllSamples && sampleLogs.forEach((message => { - if ('example_user' === message.name) { - message.role = 'user'; - } else if ('example_assistant' === message.name) { - message.role = 'assistant'; - } else if (!message.customname) { - throw Error('Invalid role ' + message.name); - } - message.customname || delete message.name; - })); - let systems = []; - if (![ 'r', 'R' ].includes(type)) { - lastUser.strip = true; - systemMessages.forEach((message => message.discard = message.discard || 'c-c' === type ? !message.jailbreak : !message.jailbreak && !message.main)); - systems = systemMessages.filter((message => !message.discard)).map((message => `"${message.content.substring(0, 25).replace(/\n/g, '\\n').trim()}..."`)); - messagesClone.forEach((message => message.discard = message.discard || mergedLogs.includes(message) && ![ lastUser ].includes(message))); - } - const prompt = messagesClone.map(((message, idx) => { - if (message.merged || message.discard) { - return ''; - } - if (message.content.length < 1) { - return message.content; - } - let spacing = ''; -/****************************************************************/ - if (Config.Settings.xmlPlot) { - idx > 0 && (spacing = '\n\n'); - const prefix = message.customname ? message.role + ': ' + message.name + ': ' : 'system' !== message.role || message.name ? Replacements[message.name || message.role] + ': ' : 'xmlPlot: ' + Replacements[message.role]; - return `${spacing}${prefix}${message.customname ? '\n' + message.content.trim() + '\n' : message.content}`; - } else { -/****************************************************************/ - idx > 0 && (spacing = systemMessages.includes(message) ? '\n' : '\n\n'); - const prefix = message.customname ? message.name + ': ' : 'system' !== message.role || message.name ? Replacements[message.name || message.role] + ': ' : '' + Replacements[message.role]; - return `${spacing}${message.strip ? '' : prefix}${'system' === message.role ? message.content : message.content.trim()}`; - } // - })); - return { - prompt: genericFixes(prompt.join('')).trim(), - systems - }; - })(messages, type); - console.log(`${model} [${type}]${!retryRegen && systems.length > 0 ? 
' ' + systems.join(' / ') : ''}`); - 'R' !== type || prompt || (prompt = '...regen...'); -/****************************************************************/ - Config.Settings.xmlPlot && (prompt = xmlPlot(prompt)); - Config.Settings.FullColon && (prompt = prompt.replace(/(?<=\n\n(H(?:uman)?|A(?:ssistant)?)):[ ]?/g, ': ')); - Config.Settings.padtxt && (prompt = padtxt(prompt)); -/****************************************************************/ - Logger?.write(`\n\n-------\n[${(new Date).toLocaleString()}]\n####### PROMPT (${type}):\n${prompt}\n--\n####### REPLY:\n`); - retryRegen || (fetchAPI = await (async (signal, model, prompt, temperature, type) => { - const attachments = []; - if (Config.Settings.PromptExperiments) { -/****************************************************************/ - let splitedprompt = prompt.split('\n\nPlainPrompt:'); - prompt = splitedprompt[0]; -/****************************************************************/ - attachments.push({ - extracted_content: (prompt), - file_name: 'paste.txt', //fileName(), - file_size: Buffer.from(prompt).byteLength, - file_type: 'txt' //'text/plain' - }); - prompt = 'r' === type ? Config.PromptExperimentFirst : Config.PromptExperimentNext; -/****************************************************************/ - splitedprompt.length > 1 && (prompt = prompt + splitedprompt[1]); -/****************************************************************/ - } - let res; - const body = { - completion: { - ...Config.Settings.PassParams && { - temperature - }, - prompt: prompt || '', - timezone: AI.zone(), - model: model || AI.mdl() - }, - organization_uuid: uuidOrg, - conversation_uuid: Conversation.uuid, - text: prompt, - attachments - }; - let headers = { - ...AI.hdr(Conversation.uuid || ''), - Accept: 'text/event-stream', - Cookie: getCookies() - }; - res = await (Config.Settings.Superfetch ? Superfetch : fetch)(Config.rProxy + '/api/append_message', { - stream: true, - signal, - method: 'POST', - body: JSON.stringify(body), - headers - }); - updateParams(res); - await checkResErr(res); - return res; - })(signal, model, prompt, temperature, type)); - const response = Writable.toWeb(res); - clewdStream = new ClewdStream({ - config: Config, - version: Main, - minSize: Config.BufferSize, - model, - streaming: body.stream, - abortControl, - source: fetchAPI - }, Logger); - titleTimer = setInterval((() => setTitle('recv ' + bytesToSize(clewdStream.size))), 300); - Config.Settings.Superfetch ? await Readable.toWeb(fetchAPI.body).pipeThrough(clewdStream).pipeTo(response) : await fetchAPI.body.pipeThrough(clewdStream).pipeTo(response); - } catch (err) { - if ('AbortError' === err.name) { - res.end(); - } else { - err.planned || console.error('Clewd:\n%o', err); - res.json({ - error: { - message: 'clewd: ' + (err.message || err.name || err.type), - type: err.type || err.name || err.code, - param: null, - code: err.code || 500 - } - }); - } - } - clearInterval(titleTimer); - if (clewdStream) { - clewdStream.censored && console.warn('likely your account is hard-censored'); - prevImpersonated = clewdStream.impersonated; - setTitle('ok ' + bytesToSize(clewdStream.size)); - //console.log(`${200 == fetchAPI.status ? '' : ''}${fetchAPI.status}!\n`); -/******************************** */ - 429 == fetchAPI.status ? console.log(`Exceeded limit!\n`) : console.log(`${200 == fetchAPI.status ? 
'' : ''}${fetchAPI.status}!\n`); - changeflag += 1; - if (Config.CookieArray?.length > 0 && (429 == fetchAPI.status || (Config.Cookiecounter && changeflag >= Config.Cookiecounter))) { - changeflag = 0; - changer = true; - } -/******************************** */ - clewdStream.empty(); - } - if (prevImpersonated) { - try { - await deleteChat(Conversation.uuid); - } catch (err) {} - } -/******************************** */ - changer && CookieChanger.emit('ChangeCookie'); -/******************************** */ - })); - })(req, res); - break; - - case '/v1/complete': - res.json({ - error: { - message: 'clewd: Set "Chat Completion" to OpenAI instead of Claude. Enable "External" models aswell' - } - }); - break; - - default: - req.url !== '/' && (console.log('unknown request: ' + req.url)); //console.log('unknown request: ' + req.url); - res.json({ - error: { - message: '404 Not Found', - type: 404, - param: null, - code: 404 - } - }, 200); - } -})); - -!async function() { - await (async () => { - if (exists(ConfigPath)) { - const userConfig = require(ConfigPath), validConfigs = Object.keys(Config), parsedConfigs = Object.keys(userConfig), parsedSettings = Object.keys(userConfig.Settings), invalidConfigs = parsedConfigs.filter((config => !validConfigs.includes(config))), validSettings = Object.keys(Config.Settings); - UnknownSettings = parsedSettings.filter((setting => !validSettings.includes(setting))); - invalidConfigs.forEach((config => { - console.warn(`unknown config in config.js: ${config}`); - })); - UnknownSettings.forEach((setting => { - console.warn(`unknown setting in config.js: Settings.${setting}`); - })); - const missingConfigs = validConfigs.filter((config => !parsedConfigs.includes(config))), missingSettings = validSettings.filter((config => !parsedSettings.includes(config))); - missingConfigs.forEach((config => { - console.warn(`adding missing config in config.js: ${config}`); - userConfig[config] = Config[config]; - })); - missingSettings.forEach((setting => { - console.warn(`adding missing setting in config.js: Settings.${setting}`); - userConfig.Settings[setting] = Config.Settings[setting]; - })); - ChangedSettings = parsedSettings.filter((setting => Config.Settings[setting] !== userConfig.Settings[setting])); - (missingConfigs.length > 0 || missingSettings.length > 0) && await writeSettings(userConfig); - userConfig.Settings.LogMessages && (Logger = createWriteStream(LogPath)); - Config = { - ...Config, - ...userConfig - }; - } else { - Config.Cookie = 'SET YOUR COOKIE HERE'; - writeSettings(Config, true); - } -/***************************** */ - function convertToType(value) { - if (value === "true") return true; - if (value === "false") return false; - if (/^\d+$/.test(value)) return parseInt(value); - return value; - } - for (let key in Config) { - if (key === 'Settings') { - for (let setting in Config.Settings) { - Config.Settings[setting] = convertToType(process.env[setting]) ?? Config.Settings[setting]; - } - } else { - Config[key] = key === 'CookieArray' ? (process.env[key]?.split(',')?.map(x => x.replace(/[\[\]"\s]/g, '')) ?? Config[key]) : (convertToType(process.env[key]) ?? 
Config[key]); - } - } -/***************************** */ - })(); -/***************************** */ - !Config.rProxy && (Config.rProxy = AI.end()); - Config.rProxy.endsWith('/') && (Config.rProxy = Config.rProxy.slice(0, -1)); - let uniqueArr = [], seen = new Set(); - for (let Cookie of Config.CookieArray) { - if (!seen.has(Cookie)) { - uniqueArr.push(Cookie); - seen.add(Cookie); - } - } - Config.CookieArray = uniqueArr; - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - currentIndex = Config.CookieIndex > 0 ? Config.CookieIndex - 1 : Config.Cookiecounter >= 0 ? Math.floor(Math.random()*Config.CookieArray.length) : 0; -/***************************** */ - Proxy.listen(Config.Port, Config.Ip, onListen); - Proxy.on('error', (err => { - console.error('Proxy error\n%o', err); - })); -}(); - -const cleanup = async () => { - console.log('cleaning...'); - try { - await deleteChat(Conversation.uuid); - Logger?.close(); - } catch (err) {} - process.exit(); -}; - -process.on('SIGHUP', cleanup); - -process.on('SIGTERM', cleanup); - -process.on('SIGINT', cleanup); - -process.on('exit', (async () => { - console.log('exiting...'); -})); diff --git a/spaces/xiazi/anime-remove-background/README.md b/spaces/xiazi/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/xiazi/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yangheng/Super-Resolution-Anime-Diffusion/utils.py b/spaces/yangheng/Super-Resolution-Anime-Diffusion/utils.py deleted file mode 100644 index 740ced9943143c7a56a16273044e60d6ab3e9728..0000000000000000000000000000000000000000 --- a/spaces/yangheng/Super-Resolution-Anime-Diffusion/utils.py +++ /dev/null @@ -1,7 +0,0 @@ -def is_google_colab(): - try: - import google.colab - - return True - except: - return False diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/stores/RulerStore.ts b/spaces/yderre-aubay/midi-player-demo/src/main/stores/RulerStore.ts deleted file mode 100644 index f6ef08bb689d608d17314aea1687066c0eccc887..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/stores/RulerStore.ts +++ /dev/null @@ -1,92 +0,0 @@ -import { computed, makeObservable, observable } from "mobx" -import { filterEventsWithScroll } from "../../common/helpers/filterEvents" -import { - BeatWithX, - createBeatsWithXInRange, -} from "../../common/helpers/mapBeats" -import Quantizer from "../../common/quantizer" -import Song from "../../common/song" -import { isTimeSignatureEvent } from "../../common/track" - -interface CoordTransform { - pixelsPerTick: number -} - -interface RulerProvider { - rootStore: { song: Song } - transform: CoordTransform - scrollLeft: number - canvasWidth: number - quantizer: Quantizer -} - -export interface TimeSignature { - id: number - tick: number - numerator: number - denominator: number - isSelected: boolean -} - -export class RulerStore { - selectedTimeSignatureEventIds: number[] = [] - - constructor(readonly parent: RulerProvider) { - makeObservable(this, { - selectedTimeSignatureEventIds: observable.shallow, - beats: computed, - timeSignatures: 
computed, - quantizer: computed, - }) - } - - get beats(): BeatWithX[] { - const { scrollLeft, transform, canvasWidth, rootStore } = this.parent - - const startTick = scrollLeft / transform.pixelsPerTick - - return createBeatsWithXInRange( - rootStore.song.measures, - transform.pixelsPerTick, - rootStore.song.timebase, - startTick, - canvasWidth, - ) - } - - get timeSignatures(): TimeSignature[] { - const { transform, scrollLeft, canvasWidth, rootStore } = this.parent - const { selectedTimeSignatureEventIds } = this - const track = rootStore.song.conductorTrack - if (track === undefined) { - return [] - } - - return filterEventsWithScroll( - track.events, - transform.pixelsPerTick, - scrollLeft, - canvasWidth, - ) - .filter(isTimeSignatureEvent) - .map((e) => ({ - ...e, - isSelected: selectedTimeSignatureEventIds.includes(e.id), - })) - } - - get quantizer(): Quantizer { - return this.parent.quantizer - } - - getTick(offsetX: number) { - const { transform, scrollLeft } = this.parent - const tick = (offsetX + scrollLeft) / transform.pixelsPerTick - return tick - } - - getQuantizedTick(offsetX: number) { - const { quantizer } = this.parent - return quantizer.round(this.getTick(offsetX)) - } -} diff --git a/spaces/yerfor/SyntaSpeech/utils/audio/__init__.py b/spaces/yerfor/SyntaSpeech/utils/audio/__init__.py deleted file mode 100644 index e8cc4466b27eeda4026e945a5388dca04817e8a1..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/utils/audio/__init__.py +++ /dev/null @@ -1,82 +0,0 @@ -import librosa -import numpy as np -import pyloudnorm as pyln - -from utils.audio.vad import trim_long_silences - - -def librosa_pad_lr(x, fsize, fshift, pad_sides=1): - '''compute right padding (final frame) or both sides padding (first and final frames) - ''' - assert pad_sides in (1, 2) - # return int(fsize // 2) - pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] - if pad_sides == 1: - return 0, pad - else: - return pad // 2, pad // 2 + pad % 2 - - -def amp_to_db(x): - return 20 * np.log10(np.maximum(1e-5, x)) - - -def db_to_amp(x): - return 10.0 ** (x * 0.05) - - -def normalize(S, min_level_db): - return (S - min_level_db) / -min_level_db - - -def denormalize(D, min_level_db): - return (D * -min_level_db) + min_level_db - - -def librosa_wav2spec(wav_path, - fft_size=1024, - hop_size=256, - win_length=1024, - window="hann", - num_mels=80, - fmin=80, - fmax=-1, - eps=1e-6, - sample_rate=22050, - loud_norm=False, - trim_long_sil=False): - if isinstance(wav_path, str): - if trim_long_sil: - wav, _, _ = trim_long_silences(wav_path, sample_rate) - else: - wav, _ = librosa.core.load(wav_path, sr=sample_rate) - else: - wav = wav_path - - if loud_norm: - meter = pyln.Meter(sample_rate) # create BS.1770 meter - loudness = meter.integrated_loudness(wav) - wav = pyln.normalize.loudness(wav, loudness, -22.0) - if np.abs(wav).max() > 1: - wav = wav / np.abs(wav).max() - - # get amplitude spectrogram - x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, - win_length=win_length, window=window, pad_mode="constant") - linear_spc = np.abs(x_stft) # (n_bins, T) - - # get mel basis - fmin = 0 if fmin == -1 else fmin - fmax = sample_rate / 2 if fmax == -1 else fmax - mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax) - - # calculate mel spec - mel = mel_basis @ linear_spc - mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T) - l_pad, r_pad = librosa_pad_lr(wav, fft_size, hop_size, 1) - wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0) - wav 
= wav[:mel.shape[1] * hop_size] - - # log linear spec - linear_spc = np.log10(np.maximum(eps, linear_spc)) - return {'wav': wav, 'mel': mel.T, 'linear': linear_spc.T, 'mel_basis': mel_basis} diff --git a/spaces/ygangang/VToonify/vtoonify/model/raft/core/raft.py b/spaces/ygangang/VToonify/vtoonify/model/raft/core/raft.py deleted file mode 100644 index a25c22f78c96470e3dca4c25e81683133ae024e3..0000000000000000000000000000000000000000 --- a/spaces/ygangang/VToonify/vtoonify/model/raft/core/raft.py +++ /dev/null @@ -1,144 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from model.raft.core.update import BasicUpdateBlock, SmallUpdateBlock -from model.raft.core.extractor import BasicEncoder, SmallEncoder -from model.raft.core.corr import CorrBlock, AlternateCorrBlock -from model.raft.core.utils.utils import bilinear_sampler, coords_grid, upflow8 - -try: - autocast = torch.cuda.amp.autocast -except: - # dummy autocast for PyTorch < 1.6 - class autocast: - def __init__(self, enabled): - pass - def __enter__(self): - pass - def __exit__(self, *args): - pass - - -class RAFT(nn.Module): - def __init__(self, args): - super(RAFT, self).__init__() - self.args = args - - if args.small: - self.hidden_dim = hdim = 96 - self.context_dim = cdim = 64 - args.corr_levels = 4 - args.corr_radius = 3 - - else: - self.hidden_dim = hdim = 128 - self.context_dim = cdim = 128 - args.corr_levels = 4 - args.corr_radius = 4 - - if 'dropout' not in self.args: - self.args.dropout = 0 - - if 'alternate_corr' not in self.args: - self.args.alternate_corr = False - - # feature network, context network, and update block - if args.small: - self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout) - self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout) - self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim) - - else: - self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout) - self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout) - self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim) - - def freeze_bn(self): - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() - - def initialize_flow(self, img): - """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0""" - N, C, H, W = img.shape - coords0 = coords_grid(N, H//8, W//8, device=img.device) - coords1 = coords_grid(N, H//8, W//8, device=img.device) - - # optical flow computed as difference: flow = coords1 - coords0 - return coords0, coords1 - - def upsample_flow(self, flow, mask): - """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """ - N, _, H, W = flow.shape - mask = mask.view(N, 1, 9, 8, 8, H, W) - mask = torch.softmax(mask, dim=2) - - up_flow = F.unfold(8 * flow, [3,3], padding=1) - up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) - - up_flow = torch.sum(mask * up_flow, dim=2) - up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) - return up_flow.reshape(N, 2, 8*H, 8*W) - - - def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False): - """ Estimate optical flow between pair of frames """ - - image1 = 2 * (image1 / 255.0) - 1.0 - image2 = 2 * (image2 / 255.0) - 1.0 - - image1 = image1.contiguous() - image2 = image2.contiguous() - - hdim = self.hidden_dim - cdim = self.context_dim - - # run the feature network - with autocast(enabled=self.args.mixed_precision): - fmap1, fmap2 = 
self.fnet([image1, image2]) - - fmap1 = fmap1.float() - fmap2 = fmap2.float() - if self.args.alternate_corr: - corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) - else: - corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) - - # run the context network - with autocast(enabled=self.args.mixed_precision): - cnet = self.cnet(image1) - net, inp = torch.split(cnet, [hdim, cdim], dim=1) - net = torch.tanh(net) - inp = torch.relu(inp) - - coords0, coords1 = self.initialize_flow(image1) - - if flow_init is not None: - coords1 = coords1 + flow_init - - flow_predictions = [] - for itr in range(iters): - coords1 = coords1.detach() - corr = corr_fn(coords1) # index correlation volume - - flow = coords1 - coords0 - with autocast(enabled=self.args.mixed_precision): - net, up_mask, delta_flow = self.update_block(net, inp, corr, flow) - - # F(t+1) = F(t) + \Delta(t) - coords1 = coords1 + delta_flow - - # upsample predictions - if up_mask is None: - flow_up = upflow8(coords1 - coords0) - else: - flow_up = self.upsample_flow(coords1 - coords0, up_mask) - - flow_predictions.append(flow_up) - - if test_mode: - return coords1 - coords0, flow_up - - return flow_predictions diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/benchmark/benchmark_args_tf.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/benchmark/benchmark_args_tf.py deleted file mode 100644 index c1c2ec16ce550cfc14326aed49a175d593fdc7bb..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/benchmark/benchmark_args_tf.py +++ /dev/null @@ -1,136 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass, field -from typing import Tuple - -from ..utils import cached_property, is_tf_available, logging, requires_backends -from .benchmark_args_utils import BenchmarkArguments - - -if is_tf_available(): - import tensorflow as tf - - -logger = logging.get_logger(__name__) - - -@dataclass -class TensorFlowBenchmarkArguments(BenchmarkArguments): - deprecated_args = [ - "no_inference", - "no_cuda", - "no_tpu", - "no_speed", - "no_memory", - "no_env_print", - "no_multi_process", - ] - - def __init__(self, **kwargs): - """ - This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be - deleted - """ - for deprecated_arg in self.deprecated_args: - if deprecated_arg in kwargs: - positive_arg = deprecated_arg[3:] - kwargs[positive_arg] = not kwargs.pop(deprecated_arg) - logger.warning( - f"{deprecated_arg} is depreciated. 
Please use --no-{positive_arg} or" - f" {positive_arg}={kwargs[positive_arg]}" - ) - self.tpu_name = kwargs.pop("tpu_name", self.tpu_name) - self.device_idx = kwargs.pop("device_idx", self.device_idx) - self.eager_mode = kwargs.pop("eager_mode", self.eager_mode) - self.use_xla = kwargs.pop("use_xla", self.use_xla) - super().__init__(**kwargs) - - tpu_name: str = field( - default=None, - metadata={"help": "Name of TPU"}, - ) - device_idx: int = field( - default=0, - metadata={"help": "CPU / GPU device index. Defaults to 0."}, - ) - eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."}) - use_xla: bool = field( - default=False, - metadata={ - "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." - }, - ) - - @cached_property - def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: - requires_backends(self, ["tf"]) - tpu = None - if self.tpu: - try: - if self.tpu_name: - tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name) - else: - tpu = tf.distribute.cluster_resolver.TPUClusterResolver() - except ValueError: - tpu = None - return tpu - - @cached_property - def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: - requires_backends(self, ["tf"]) - if self.is_tpu: - tf.config.experimental_connect_to_cluster(self._setup_tpu) - tf.tpu.experimental.initialize_tpu_system(self._setup_tpu) - - strategy = tf.distribute.TPUStrategy(self._setup_tpu) - else: - # currently no multi gpu is allowed - if self.is_gpu: - # TODO: Currently only single GPU is supported - tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU") - strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}") - else: - tf.config.set_visible_devices([], "GPU") # disable GPU - strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}") - - return strategy - - @property - def is_tpu(self) -> bool: - requires_backends(self, ["tf"]) - return self._setup_tpu is not None - - @property - def strategy(self) -> "tf.distribute.Strategy": - requires_backends(self, ["tf"]) - return self._setup_strategy - - @property - def gpu_list(self): - requires_backends(self, ["tf"]) - return tf.config.list_physical_devices("GPU") - - @property - def n_gpu(self) -> int: - requires_backends(self, ["tf"]) - if self.cuda: - return len(self.gpu_list) - return 0 - - @property - def is_gpu(self) -> bool: - return self.n_gpu > 0 diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/graphormer/collating_graphormer.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/graphormer/collating_graphormer.py deleted file mode 100644 index 58ce602ea28de1a3f5f45c40a9ffb1a0e4f0fdcf..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/graphormer/collating_graphormer.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) Microsoft Corporation and HuggingFace -# Licensed under the MIT License. - -from typing import Any, Dict, List, Mapping - -import numpy as np -import torch - -from ...utils import is_cython_available, requires_backends - - -if is_cython_available(): - import pyximport - - pyximport.install(setup_args={"include_dirs": np.get_include()}) - from . 
import algos_graphormer # noqa E402 - - -def convert_to_single_emb(x, offset: int = 512): - feature_num = x.shape[1] if len(x.shape) > 1 else 1 - feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int64) - x = x + feature_offset - return x - - -def preprocess_item(item, keep_features=True): - requires_backends(preprocess_item, ["cython"]) - - if keep_features and "edge_attr" in item.keys(): # edge_attr - edge_attr = np.asarray(item["edge_attr"], dtype=np.int64) - else: - edge_attr = np.ones((len(item["edge_index"][0]), 1), dtype=np.int64) # same embedding for all - - if keep_features and "node_feat" in item.keys(): # input_nodes - node_feature = np.asarray(item["node_feat"], dtype=np.int64) - else: - node_feature = np.ones((item["num_nodes"], 1), dtype=np.int64) # same embedding for all - - edge_index = np.asarray(item["edge_index"], dtype=np.int64) - - input_nodes = convert_to_single_emb(node_feature) + 1 - num_nodes = item["num_nodes"] - - if len(edge_attr.shape) == 1: - edge_attr = edge_attr[:, None] - attn_edge_type = np.zeros([num_nodes, num_nodes, edge_attr.shape[-1]], dtype=np.int64) - attn_edge_type[edge_index[0], edge_index[1]] = convert_to_single_emb(edge_attr) + 1 - - # node adj matrix [num_nodes, num_nodes] bool - adj = np.zeros([num_nodes, num_nodes], dtype=bool) - adj[edge_index[0], edge_index[1]] = True - - shortest_path_result, path = algos_graphormer.floyd_warshall(adj) - max_dist = np.amax(shortest_path_result) - - input_edges = algos_graphormer.gen_edge_input(max_dist, path, attn_edge_type) - attn_bias = np.zeros([num_nodes + 1, num_nodes + 1], dtype=np.single) # with graph token - - # combine - item["input_nodes"] = input_nodes + 1 # we shift all indices by one for padding - item["attn_bias"] = attn_bias - item["attn_edge_type"] = attn_edge_type - item["spatial_pos"] = shortest_path_result.astype(np.int64) + 1 # we shift all indices by one for padding - item["in_degree"] = np.sum(adj, axis=1).reshape(-1) + 1 # we shift all indices by one for padding - item["out_degree"] = item["in_degree"] # for undirected graph - item["input_edges"] = input_edges + 1 # we shift all indices by one for padding - if "labels" not in item: - item["labels"] = item["y"] - - return item - - -class GraphormerDataCollator: - def __init__(self, spatial_pos_max=20, on_the_fly_processing=False): - if not is_cython_available(): - raise ImportError("Graphormer preprocessing needs Cython (pyximport)") - - self.spatial_pos_max = spatial_pos_max - self.on_the_fly_processing = on_the_fly_processing - - def __call__(self, features: List[dict]) -> Dict[str, Any]: - if self.on_the_fly_processing: - features = [preprocess_item(i) for i in features] - - if not isinstance(features[0], Mapping): - features = [vars(f) for f in features] - batch = {} - - max_node_num = max(len(i["input_nodes"]) for i in features) - node_feat_size = len(features[0]["input_nodes"][0]) - edge_feat_size = len(features[0]["attn_edge_type"][0][0]) - max_dist = max(len(i["input_edges"][0][0]) for i in features) - edge_input_size = len(features[0]["input_edges"][0][0][0]) - batch_size = len(features) - - batch["attn_bias"] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float) - batch["attn_edge_type"] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long) - batch["spatial_pos"] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long) - batch["in_degree"] = torch.zeros(batch_size, max_node_num, dtype=torch.long) - batch["input_nodes"] = 
torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long) - batch["input_edges"] = torch.zeros( - batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long - ) - - for ix, f in enumerate(features): - for k in ["attn_bias", "attn_edge_type", "spatial_pos", "in_degree", "input_nodes", "input_edges"]: - f[k] = torch.tensor(f[k]) - - if len(f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max]) > 0: - f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max] = float("-inf") - - batch["attn_bias"][ix, : f["attn_bias"].shape[0], : f["attn_bias"].shape[1]] = f["attn_bias"] - batch["attn_edge_type"][ix, : f["attn_edge_type"].shape[0], : f["attn_edge_type"].shape[1], :] = f[ - "attn_edge_type" - ] - batch["spatial_pos"][ix, : f["spatial_pos"].shape[0], : f["spatial_pos"].shape[1]] = f["spatial_pos"] - batch["in_degree"][ix, : f["in_degree"].shape[0]] = f["in_degree"] - batch["input_nodes"][ix, : f["input_nodes"].shape[0], :] = f["input_nodes"] - batch["input_edges"][ - ix, : f["input_edges"].shape[0], : f["input_edges"].shape[1], : f["input_edges"].shape[2], : - ] = f["input_edges"] - - batch["out_degree"] = batch["in_degree"] - - sample = features[0]["labels"] - if len(sample) == 1: # one task - if isinstance(sample[0], float): # regression - batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features])) - else: # binary classification - batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features])) - else: # multi task classification, left to float to keep the NaNs - batch["labels"] = torch.from_numpy(np.stack([i["labels"] for i in features], axis=0)) - - return batch diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/pretrain/meta.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/pretrain/meta.py deleted file mode 100644 index cc35dd3c0dfe8436e7d635f2db507cedca75ed49..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/pretrain/meta.py +++ /dev/null @@ -1,31 +0,0 @@ -def download_dict(): - return { - "vec768l12": { - "url": "https://ibm.ent.box.com/shared/static/z1wgl1stco8ffooyatzdwsqn2psd9lrr", - "output": "./pretrain/checkpoint_best_legacy_500.pt" - }, - "vec256l9": { - "url": "https://ibm.ent.box.com/shared/static/z1wgl1stco8ffooyatzdwsqn2psd9lrr", - "output": "./pretrain/checkpoint_best_legacy_500.pt" - }, - "hubertsoft": { - "url": "https://github.com/bshall/hubert/releases/download/v0.1/hubert-soft-0d54a1f4.pt", - "output": "./pretrain/hubert-soft-0d54a1f4.pt" - }, - "whisper-ppg": { - "url": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", - "output": "./pretrain/medium.pt" - } - } - - -def get_speech_encoder(config_path="configs/config.json"): - import json - - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - speech_encoder = config["model"]["speech_encoder"] - dict = download_dict() - - return dict[speech_encoder]["url"], dict[speech_encoder]["output"] diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h deleted file mode 100644 index b54a5dde2ca11a74d29c4d8adb7fe1634f5baf9c..0000000000000000000000000000000000000000 --- 
a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#pragma once - -#include -#include - -#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 -// Designates functions callable from the host (CPU) and the device (GPU) -#define HOST_DEVICE __host__ __device__ -#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ -#else -#include -#define HOST_DEVICE -#define HOST_DEVICE_INLINE HOST_DEVICE inline -#endif - -namespace detectron2 { - -namespace { - -template -struct RotatedBox { - T x_ctr, y_ctr, w, h, a; -}; - -template -struct Point { - T x, y; - HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} - HOST_DEVICE_INLINE Point operator+(const Point& p) const { - return Point(x + p.x, y + p.y); - } - HOST_DEVICE_INLINE Point& operator+=(const Point& p) { - x += p.x; - y += p.y; - return *this; - } - HOST_DEVICE_INLINE Point operator-(const Point& p) const { - return Point(x - p.x, y - p.y); - } - HOST_DEVICE_INLINE Point operator*(const T coeff) const { - return Point(x * coeff, y * coeff); - } -}; - -template -HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { - return A.x * B.x + A.y * B.y; -} - -// R: result type. can be different from input type -template -HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) { - return static_cast(A.x) * static_cast(B.y) - - static_cast(B.x) * static_cast(A.y); -} - -template -HOST_DEVICE_INLINE void get_rotated_vertices( - const RotatedBox& box, - Point (&pts)[4]) { - // M_PI / 180. == 0.01745329251 - double theta = box.a * 0.01745329251; - T cosTheta2 = (T)cos(theta) * 0.5f; - T sinTheta2 = (T)sin(theta) * 0.5f; - - // y: top --> down; x: left --> right - pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w; - pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; - pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w; - pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; - pts[2].x = 2 * box.x_ctr - pts[0].x; - pts[2].y = 2 * box.y_ctr - pts[0].y; - pts[3].x = 2 * box.x_ctr - pts[1].x; - pts[3].y = 2 * box.y_ctr - pts[1].y; -} - -template -HOST_DEVICE_INLINE int get_intersection_points( - const Point (&pts1)[4], - const Point (&pts2)[4], - Point (&intersections)[24]) { - // Line vector - // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] - Point vec1[4], vec2[4]; - for (int i = 0; i < 4; i++) { - vec1[i] = pts1[(i + 1) % 4] - pts1[i]; - vec2[i] = pts2[(i + 1) % 4] - pts2[i]; - } - - // When computing the intersection area, it doesn't hurt if we have - // more (duplicated/approximate) intersections/vertices than needed, - // while it can cause drastic difference if we miss an intersection/vertex. - // Therefore, we add an epsilon to relax the comparisons between - // the float point numbers that decide the intersection points. 
- double EPS = 1e-5; - - // Line test - test all line combos for intersection - int num = 0; // number of intersections - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - // Solve for 2x2 Ax=b - T det = cross_2d(vec2[j], vec1[i]); - - // This takes care of parallel lines - if (fabs(det) <= 1e-14) { - continue; - } - - auto vec12 = pts2[j] - pts1[i]; - - T t1 = cross_2d(vec2[j], vec12) / det; - T t2 = cross_2d(vec1[i], vec12) / det; - - if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) { - intersections[num++] = pts1[i] + vec1[i] * t1; - } - } - } - - // Check for vertices of rect1 inside rect2 - { - const auto& AB = vec2[0]; - const auto& DA = vec2[3]; - auto ABdotAB = dot_2d(AB, AB); - auto ADdotAD = dot_2d(DA, DA); - for (int i = 0; i < 4; i++) { - // assume ABCD is the rectangle, and P is the point to be judged - // P is inside ABCD iff. P's projection on AB lies within AB - // and P's projection on AD lies within AD - - auto AP = pts1[i] - pts2[0]; - - auto APdotAB = dot_2d(AP, AB); - auto APdotAD = -dot_2d(AP, DA); - - if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && - (APdotAD < ADdotAD + EPS)) { - intersections[num++] = pts1[i]; - } - } - } - - // Reverse the check - check for vertices of rect2 inside rect1 - { - const auto& AB = vec1[0]; - const auto& DA = vec1[3]; - auto ABdotAB = dot_2d(AB, AB); - auto ADdotAD = dot_2d(DA, DA); - for (int i = 0; i < 4; i++) { - auto AP = pts2[i] - pts1[0]; - - auto APdotAB = dot_2d(AP, AB); - auto APdotAD = -dot_2d(AP, DA); - - if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && - (APdotAD < ADdotAD + EPS)) { - intersections[num++] = pts2[i]; - } - } - } - - return num; -} - -template -HOST_DEVICE_INLINE int convex_hull_graham( - const Point (&p)[24], - const int& num_in, - Point (&q)[24], - bool shift_to_zero = false) { - assert(num_in >= 2); - - // Step 1: - // Find point with minimum y - // if more than 1 points have the same minimum y, - // pick the one with the minimum x. 
- int t = 0; - for (int i = 1; i < num_in; i++) { - if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { - t = i; - } - } - auto& start = p[t]; // starting point - - // Step 2: - // Subtract starting point from every points (for sorting in the next step) - for (int i = 0; i < num_in; i++) { - q[i] = p[i] - start; - } - - // Swap the starting point to position 0 - auto tmp = q[0]; - q[0] = q[t]; - q[t] = tmp; - - // Step 3: - // Sort point 1 ~ num_in according to their relative cross-product values - // (essentially sorting according to angles) - // If the angles are the same, sort according to their distance to origin - T dist[24]; -#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 - // compute distance to origin before sort, and sort them together with the - // points - for (int i = 0; i < num_in; i++) { - dist[i] = dot_2d(q[i], q[i]); - } - - // CUDA version - // In the future, we can potentially use thrust - // for sorting here to improve speed (though not guaranteed) - for (int i = 1; i < num_in - 1; i++) { - for (int j = i + 1; j < num_in; j++) { - T crossProduct = cross_2d(q[i], q[j]); - if ((crossProduct < -1e-6) || - (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { - auto q_tmp = q[i]; - q[i] = q[j]; - q[j] = q_tmp; - auto dist_tmp = dist[i]; - dist[i] = dist[j]; - dist[j] = dist_tmp; - } - } - } -#else - // CPU version - std::sort( - q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { - T temp = cross_2d(A, B); - if (fabs(temp) < 1e-6) { - return dot_2d(A, A) < dot_2d(B, B); - } else { - return temp > 0; - } - }); - // compute distance to origin after sort, since the points are now different. - for (int i = 0; i < num_in; i++) { - dist[i] = dot_2d(q[i], q[i]); - } -#endif - - // Step 4: - // Make sure there are at least 2 points (that don't overlap with each other) - // in the stack - int k; // index of the non-overlapped second point - for (k = 1; k < num_in; k++) { - if (dist[k] > 1e-8) { - break; - } - } - if (k == num_in) { - // We reach the end, which means the convex hull is just one point - q[0] = p[t]; - return 1; - } - q[1] = q[k]; - int m = 2; // 2 points in the stack - // Step 5: - // Finally we can start the scanning process. - // When a non-convex relationship between the 3 points is found - // (either concave shape or duplicated points), - // we pop the previous point from the stack - // until the 3-point relationship is convex again, or - // until the stack only contains two points - for (int i = k + 1; i < num_in; i++) { - while (m > 1) { - auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2]; - // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) - - // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we - // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means - // round to nearest floating point). - if (q1.x * q2.y >= q2.x * q1.y) - m--; - else - break; - } - // Using double also helps, but float can solve the issue for now. - // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) - // >= 0) { - // m--; - // } - q[m++] = q[i]; - } - - // Step 6 (Optional): - // In general sense we need the original coordinates, so we - // need to shift the points back (reverting Step 2) - // But if we're only interested in getting the area/perimeter of the shape - // We can simply return. 
- if (!shift_to_zero) { - for (int i = 0; i < m; i++) { - q[i] += start; - } - } - - return m; -} - -template -HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { - if (m <= 2) { - return 0; - } - - T area = 0; - for (int i = 1; i < m - 1; i++) { - area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); - } - - return area / 2.0; -} - -template -HOST_DEVICE_INLINE T rotated_boxes_intersection( - const RotatedBox& box1, - const RotatedBox& box2) { - // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned - // from rotated_rect_intersection_pts - Point intersectPts[24], orderedPts[24]; - - Point pts1[4]; - Point pts2[4]; - get_rotated_vertices(box1, pts1); - get_rotated_vertices(box2, pts2); - - int num = get_intersection_points(pts1, pts2, intersectPts); - - if (num <= 2) { - return 0.0; - } - - // Convex Hull to order the intersection points in clockwise order and find - // the contour area. - int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); - return polygon_area(orderedPts, num_convex); -} - -} // namespace - -template -HOST_DEVICE_INLINE T -single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { - // shift center to the middle point to achieve higher precision in result - RotatedBox box1, box2; - auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; - auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; - box1.x_ctr = box1_raw[0] - center_shift_x; - box1.y_ctr = box1_raw[1] - center_shift_y; - box1.w = box1_raw[2]; - box1.h = box1_raw[3]; - box1.a = box1_raw[4]; - box2.x_ctr = box2_raw[0] - center_shift_x; - box2.y_ctr = box2_raw[1] - center_shift_y; - box2.w = box2_raw[2]; - box2.h = box2_raw[3]; - box2.a = box2_raw[4]; - - T area1 = box1.w * box1.h; - T area2 = box2.w * box2.h; - if (area1 < 1e-14 || area2 < 1e-14) { - return 0.f; - } - - T intersection = rotated_boxes_intersection(box1, box2); - T iou = intersection / (area1 + area2 - intersection); - return iou; -} - -} // namespace detectron2 diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp deleted file mode 100644 index 0a5b7b907c06720fefc77b0dfd921b8ec3ecf2be..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#include "cocoeval.h" -#include -#include -#include -#include - -using namespace pybind11::literals; - -namespace detectron2 { - -namespace COCOeval { - -// Sort detections from highest score to lowest, such that -// detection_instances[detection_sorted_indices[t]] >= -// detection_instances[detection_sorted_indices[t+1]]. 
Use stable_sort to match -// original COCO API -void SortInstancesByDetectionScore( - const std::vector& detection_instances, - std::vector* detection_sorted_indices) { - detection_sorted_indices->resize(detection_instances.size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_instances](size_t j1, size_t j2) { - return detection_instances[j1].score > detection_instances[j2].score; - }); -} - -// Partition the ground truth objects based on whether or not to ignore them -// based on area -void SortInstancesByIgnore( - const std::array& area_range, - const std::vector& ground_truth_instances, - std::vector* ground_truth_sorted_indices, - std::vector* ignores) { - ignores->clear(); - ignores->reserve(ground_truth_instances.size()); - for (auto o : ground_truth_instances) { - ignores->push_back( - o.ignore || o.area < area_range[0] || o.area > area_range[1]); - } - - ground_truth_sorted_indices->resize(ground_truth_instances.size()); - std::iota( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - 0); - std::stable_sort( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - [&ignores](size_t j1, size_t j2) { - return (int)(*ignores)[j1] < (int)(*ignores)[j2]; - }); -} - -// For each IOU threshold, greedily match each detected instance to a ground -// truth instance (if possible) and store the results -void MatchDetectionsToGroundTruth( - const std::vector& detection_instances, - const std::vector& detection_sorted_indices, - const std::vector& ground_truth_instances, - const std::vector& ground_truth_sorted_indices, - const std::vector& ignores, - const std::vector>& ious, - const std::vector& iou_thresholds, - const std::array& area_range, - ImageEvaluation* results) { - // Initialize memory to store return data matches and ignore - const int num_iou_thresholds = iou_thresholds.size(); - const int num_ground_truth = ground_truth_sorted_indices.size(); - const int num_detections = detection_sorted_indices.size(); - std::vector ground_truth_matches( - num_iou_thresholds * num_ground_truth, 0); - std::vector& detection_matches = results->detection_matches; - std::vector& detection_ignores = results->detection_ignores; - std::vector& ground_truth_ignores = results->ground_truth_ignores; - detection_matches.resize(num_iou_thresholds * num_detections, 0); - detection_ignores.resize(num_iou_thresholds * num_detections, false); - ground_truth_ignores.resize(num_ground_truth); - for (auto g = 0; g < num_ground_truth; ++g) { - ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - for (auto d = 0; d < num_detections; ++d) { - // information about best match so far (match=-1 -> unmatched) - double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); - int match = -1; - for (auto g = 0; g < num_ground_truth; ++g) { - // if this ground truth instance is already matched and not a - // crowd, it cannot be matched to another detection - if (ground_truth_matches[t * num_ground_truth + g] > 0 && - !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { - continue; - } - - // if detected instance matched to a regular ground truth - // instance, we can break on the first ground truth instance - // tagged as ignore (because they are sorted by the ignore tag) - if (match >= 0 && !ground_truth_ignores[match] && - ground_truth_ignores[g]) 
{ - break; - } - - // if IOU overlap is the best so far, store the match appropriately - if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { - best_iou = ious[d][ground_truth_sorted_indices[g]]; - match = g; - } - } - // if match was made, store id of match for both detection and - // ground truth - if (match >= 0) { - detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; - detection_matches[t * num_detections + d] = - ground_truth_instances[ground_truth_sorted_indices[match]].id; - ground_truth_matches[t * num_ground_truth + match] = - detection_instances[detection_sorted_indices[d]].id; - } - - // set unmatched detections outside of area range to ignore - const InstanceAnnotation& detection = - detection_instances[detection_sorted_indices[d]]; - detection_ignores[t * num_detections + d] = - detection_ignores[t * num_detections + d] || - (detection_matches[t * num_detections + d] == 0 && - (detection.area < area_range[0] || detection.area > area_range[1])); - } - } - - // store detection score results - results->detection_scores.resize(detection_sorted_indices.size()); - for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { - results->detection_scores[d] = - detection_instances[detection_sorted_indices[d]].score; - } -} - -std::vector EvaluateImages( - const std::vector>& area_ranges, - int max_detections, - const std::vector& iou_thresholds, - const ImageCategoryInstances>& image_category_ious, - const ImageCategoryInstances& - image_category_ground_truth_instances, - const ImageCategoryInstances& - image_category_detection_instances) { - const int num_area_ranges = area_ranges.size(); - const int num_images = image_category_ground_truth_instances.size(); - const int num_categories = - image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; - std::vector detection_sorted_indices; - std::vector ground_truth_sorted_indices; - std::vector ignores; - std::vector results_all( - num_images * num_area_ranges * num_categories); - - // Store results for each image, category, and area range combination. 
Results - // for each IOU threshold are packed into the same ImageEvaluation object - for (auto i = 0; i < num_images; ++i) { - for (auto c = 0; c < num_categories; ++c) { - const std::vector& ground_truth_instances = - image_category_ground_truth_instances[i][c]; - const std::vector& detection_instances = - image_category_detection_instances[i][c]; - - SortInstancesByDetectionScore( - detection_instances, &detection_sorted_indices); - if ((int)detection_sorted_indices.size() > max_detections) { - detection_sorted_indices.resize(max_detections); - } - - for (size_t a = 0; a < area_ranges.size(); ++a) { - SortInstancesByIgnore( - area_ranges[a], - ground_truth_instances, - &ground_truth_sorted_indices, - &ignores); - - MatchDetectionsToGroundTruth( - detection_instances, - detection_sorted_indices, - ground_truth_instances, - ground_truth_sorted_indices, - ignores, - image_category_ious[i][c], - iou_thresholds, - area_ranges[a], - &results_all - [c * num_area_ranges * num_images + a * num_images + i]); - } - } - } - - return results_all; -} - -// Convert a python list to a vector -template -std::vector list_to_vec(const py::list& l) { - std::vector v(py::len(l)); - for (int i = 0; i < (int)py::len(l); ++i) { - v[i] = l[i].cast(); - } - return v; -} - -// Helper function to Accumulate() -// Considers the evaluation results applicable to a particular category, area -// range, and max_detections parameter setting, which begin at -// evaluations[evaluation_index]. Extracts a sorted list of length n of all -// applicable detection instances concatenated across all images in the dataset, -// which are represented by the outputs evaluation_indices, detection_scores, -// image_detection_indices, and detection_sorted_indices--all of which are -// length n. evaluation_indices[i] stores the applicable index into -// evaluations[] for instance i, which has detection score detection_score[i], -// and is the image_detection_indices[i]'th of the list of detections -// for the image containing i. 
detection_sorted_indices[] defines a sorted -// permutation of the 3 other outputs -int BuildSortedDetectionList( - const std::vector& evaluations, - const int64_t evaluation_index, - const int64_t num_images, - const int max_detections, - std::vector* evaluation_indices, - std::vector* detection_scores, - std::vector* detection_sorted_indices, - std::vector* image_detection_indices) { - assert(evaluations.size() >= evaluation_index + num_images); - - // Extract a list of object instances of the applicable category, area - // range, and max detections requirements such that they can be sorted - image_detection_indices->clear(); - evaluation_indices->clear(); - detection_scores->clear(); - image_detection_indices->reserve(num_images * max_detections); - evaluation_indices->reserve(num_images * max_detections); - detection_scores->reserve(num_images * max_detections); - int num_valid_ground_truth = 0; - for (auto i = 0; i < num_images; ++i) { - const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; - - for (int d = 0; - d < (int)evaluation.detection_scores.size() && d < max_detections; - ++d) { // detected instances - evaluation_indices->push_back(evaluation_index + i); - image_detection_indices->push_back(d); - detection_scores->push_back(evaluation.detection_scores[d]); - } - for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { - if (!ground_truth_ignore) { - ++num_valid_ground_truth; - } - } - } - - // Sort detections by decreasing score, using stable sort to match - // python implementation - detection_sorted_indices->resize(detection_scores->size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_scores](size_t j1, size_t j2) { - return (*detection_scores)[j1] > (*detection_scores)[j2]; - }); - - return num_valid_ground_truth; -} - -// Helper function to Accumulate() -// Compute a precision recall curve given a sorted list of detected instances -// encoded in evaluations, evaluation_indices, detection_scores, -// detection_sorted_indices, image_detection_indices (see -// BuildSortedDetectionList()). Using vectors precisions and recalls -// and temporary storage, output the results into precisions_out, recalls_out, -// and scores_out, which are large buffers containing many precion/recall curves -// for all possible parameter settings, with precisions_out_index and -// recalls_out_index defining the applicable indices to store results. 
-void ComputePrecisionRecallCurve( - const int64_t precisions_out_index, - const int64_t precisions_out_stride, - const int64_t recalls_out_index, - const std::vector& recall_thresholds, - const int iou_threshold_index, - const int num_iou_thresholds, - const int num_valid_ground_truth, - const std::vector& evaluations, - const std::vector& evaluation_indices, - const std::vector& detection_scores, - const std::vector& detection_sorted_indices, - const std::vector& image_detection_indices, - std::vector* precisions, - std::vector* recalls, - std::vector* precisions_out, - std::vector* scores_out, - std::vector* recalls_out) { - assert(recalls_out->size() > recalls_out_index); - - // Compute precision/recall for each instance in the sorted list of detections - int64_t true_positives_sum = 0, false_positives_sum = 0; - precisions->clear(); - recalls->clear(); - precisions->reserve(detection_sorted_indices.size()); - recalls->reserve(detection_sorted_indices.size()); - assert(!evaluations.empty() || detection_sorted_indices.empty()); - for (auto detection_sorted_index : detection_sorted_indices) { - const ImageEvaluation& evaluation = - evaluations[evaluation_indices[detection_sorted_index]]; - const auto num_detections = - evaluation.detection_matches.size() / num_iou_thresholds; - const auto detection_index = iou_threshold_index * num_detections + - image_detection_indices[detection_sorted_index]; - assert(evaluation.detection_matches.size() > detection_index); - assert(evaluation.detection_ignores.size() > detection_index); - const int64_t detection_match = - evaluation.detection_matches[detection_index]; - const bool detection_ignores = - evaluation.detection_ignores[detection_index]; - const auto true_positive = detection_match > 0 && !detection_ignores; - const auto false_positive = detection_match == 0 && !detection_ignores; - if (true_positive) { - ++true_positives_sum; - } - if (false_positive) { - ++false_positives_sum; - } - - const double recall = - static_cast(true_positives_sum) / num_valid_ground_truth; - recalls->push_back(recall); - const int64_t num_valid_detections = - true_positives_sum + false_positives_sum; - const double precision = num_valid_detections > 0 - ? static_cast(true_positives_sum) / num_valid_detections - : 0.0; - precisions->push_back(precision); - } - - (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; - - for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { - if ((*precisions)[i] > (*precisions)[i - 1]) { - (*precisions)[i - 1] = (*precisions)[i]; - } - } - - // Sample the per instance precision/recall list at each recall threshold - for (size_t r = 0; r < recall_thresholds.size(); ++r) { - // first index in recalls >= recall_thresholds[r] - std::vector::iterator low = std::lower_bound( - recalls->begin(), recalls->end(), recall_thresholds[r]); - size_t precisions_index = low - recalls->begin(); - - const auto results_ind = precisions_out_index + r * precisions_out_stride; - assert(results_ind < precisions_out->size()); - assert(results_ind < scores_out->size()); - if (precisions_index < precisions->size()) { - (*precisions_out)[results_ind] = (*precisions)[precisions_index]; - (*scores_out)[results_ind] = - detection_scores[detection_sorted_indices[precisions_index]]; - } else { - (*precisions_out)[results_ind] = 0; - (*scores_out)[results_ind] = 0; - } - } -} -py::dict Accumulate( - const py::object& params, - const std::vector& evaluations) { - const std::vector recall_thresholds = - list_to_vec(params.attr("recThrs")); - const std::vector max_detections = - list_to_vec(params.attr("maxDets")); - const int num_iou_thresholds = py::len(params.attr("iouThrs")); - const int num_recall_thresholds = py::len(params.attr("recThrs")); - const int num_categories = params.attr("useCats").cast() == 1 - ? py::len(params.attr("catIds")) - : 1; - const int num_area_ranges = py::len(params.attr("areaRng")); - const int num_max_detections = py::len(params.attr("maxDets")); - const int num_images = py::len(params.attr("imgIds")); - - std::vector precisions_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - std::vector recalls_out( - num_iou_thresholds * num_categories * num_area_ranges * - num_max_detections, - -1); - std::vector scores_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - - // Consider the list of all detected instances in the entire dataset in one - // large list. evaluation_indices, detection_scores, - // image_detection_indices, and detection_sorted_indices all have the same - // length as this list, such that each entry corresponds to one detected - // instance - std::vector evaluation_indices; // indices into evaluations[] - std::vector detection_scores; // detection scores of each instance - std::vector detection_sorted_indices; // sorted indices of all - // instances in the dataset - std::vector - image_detection_indices; // indices into the list of detected instances in - // the same image as each instance - std::vector precisions, recalls; - - for (auto c = 0; c < num_categories; ++c) { - for (auto a = 0; a < num_area_ranges; ++a) { - for (auto m = 0; m < num_max_detections; ++m) { - // The COCO PythonAPI assumes evaluations[] (the return value of - // COCOeval::EvaluateImages() is one long list storing results for each - // combination of category, area range, and image id, with categories in - // the outermost loop and images in the innermost loop. 
- const int64_t evaluations_index = - c * num_area_ranges * num_images + a * num_images; - int num_valid_ground_truth = BuildSortedDetectionList( - evaluations, - evaluations_index, - num_images, - max_detections[m], - &evaluation_indices, - &detection_scores, - &detection_sorted_indices, - &image_detection_indices); - - if (num_valid_ground_truth == 0) { - continue; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - // recalls_out is a flattened vectors representing a - // num_iou_thresholds X num_categories X num_area_ranges X - // num_max_detections matrix - const int64_t recalls_out_index = - t * num_categories * num_area_ranges * num_max_detections + - c * num_area_ranges * num_max_detections + - a * num_max_detections + m; - - // precisions_out and scores_out are flattened vectors - // representing a num_iou_thresholds X num_recall_thresholds X - // num_categories X num_area_ranges X num_max_detections matrix - const int64_t precisions_out_stride = - num_categories * num_area_ranges * num_max_detections; - const int64_t precisions_out_index = t * num_recall_thresholds * - num_categories * num_area_ranges * num_max_detections + - c * num_area_ranges * num_max_detections + - a * num_max_detections + m; - - ComputePrecisionRecallCurve( - precisions_out_index, - precisions_out_stride, - recalls_out_index, - recall_thresholds, - t, - num_iou_thresholds, - num_valid_ground_truth, - evaluations, - evaluation_indices, - detection_scores, - detection_sorted_indices, - image_detection_indices, - &precisions, - &recalls, - &precisions_out, - &scores_out, - &recalls_out); - } - } - } - } - - time_t rawtime; - struct tm local_time; - std::array buffer; - time(&rawtime); -#ifdef _WIN32 - localtime_s(&local_time, &rawtime); -#else - localtime_r(&rawtime, &local_time); -#endif - strftime( - buffer.data(), 200, "%Y-%m-%d %H:%num_max_detections:%S", &local_time); - return py::dict( - "params"_a = params, - "counts"_a = std::vector( - {num_iou_thresholds, - num_recall_thresholds, - num_categories, - num_area_ranges, - num_max_detections}), - "date"_a = buffer, - "precision"_a = precisions_out, - "recall"_a = recalls_out, - "scores"_a = scores_out); -} - -} // namespace COCOeval - -} // namespace detectron2 diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/old-value.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/old-value.js deleted file mode 100644 index 63a2643864e4f6304de24f77624cbdff24974cf9..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/old-value.js +++ /dev/null @@ -1,22 +0,0 @@ -let utils = require('./utils') - -class OldValue { - constructor(unprefixed, prefixed, string, regexp) { - this.unprefixed = unprefixed - this.prefixed = prefixed - this.string = string || prefixed - this.regexp = regexp || utils.regexp(prefixed) - } - - /** - * Check, that value contain old value - */ - check(value) { - if (value.includes(this.string)) { - return !!value.match(this.regexp) - } - return false - } -} - -module.exports = OldValue diff --git a/spaces/yuangongfdu/LTU-Compare/README.md b/spaces/yuangongfdu/LTU-Compare/README.md deleted file mode 100644 index abe6e7000a31cb61a2771a03a0a725df7a6858b3..0000000000000000000000000000000000000000 --- a/spaces/yuangongfdu/LTU-Compare/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LTU Compare -emoji: 🔥 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.45.2 -app_file: app.py -pinned: false -license: cc-by-4.0 ---- - -Check 
out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ywqisok/ysyy/modules.py b/spaces/ywqisok/ysyy/modules.py deleted file mode 100644 index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000 --- a/spaces/ywqisok/ysyy/modules.py +++ /dev/null @@ -1,388 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class 
WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = 
F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class 
ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/compare.js b/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/compare.js deleted file mode 100644 index 748b7afa514a9f356b3f180a34e9150df3777ecd..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/compare.js +++ /dev/null @@ -1,5 +0,0 @@ -const SemVer = require('../classes/semver') -const compare = (a, b, loose) => - new SemVer(a, loose).compare(new SemVer(b, loose)) - -module.exports = compare diff --git a/spaces/zhoupin30/zhoupin30/src/components/ui/icons.tsx b/spaces/zhoupin30/zhoupin30/src/components/ui/icons.tsx deleted file mode 100644 index 742b489b50437c5b64c86082f2ebc712eeb6a2b0..0000000000000000000000000000000000000000 --- a/spaces/zhoupin30/zhoupin30/src/components/ui/icons.tsx +++ /dev/null @@ -1,504 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' - -function IconNextChat({ - className, - inverted, - ...props -}: React.ComponentProps<'svg'> & { inverted?: boolean }) { - const id = React.useId() - - return ( - - - - - - - - - - - - - - - - - - - - - - ) -} - -function IconOpenAI({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - OpenAI icon - - - ) -} - -function IconGitHub({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - GitHub - - - ) -} - -function IconSeparator({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - ) -} - -function IconArrowDown({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconArrowRight({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconUser({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconPlus({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - 
- - ) -} - -function IconArrowElbow({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSpinner({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMessage({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconTrash({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMore({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconRefresh({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconStop({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSidebar({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMoon({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSun({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconCopy({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconCheck({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconDownload({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconClose({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconEdit({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconShare({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconUsers({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconExternalLink({ - className, - ...props -}: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconChevronUpDown({ - className, - ...props -}: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -export { - IconEdit, - IconNextChat, - IconOpenAI, - IconGitHub, - IconSeparator, - IconArrowDown, - IconArrowRight, - IconUser, - IconPlus, - IconArrowElbow, - IconSpinner, - IconMessage, - IconTrash, - IconMore, - IconRefresh, - IconStop, - IconSidebar, - IconMoon, - IconSun, - IconCopy, - IconCheck, - IconDownload, - IconClose, - IconShare, - IconUsers, - IconExternalLink, - IconChevronUpDown -} diff --git a/spaces/zixian/Zhenhuan-VITS/text/__init__.py b/spaces/zixian/Zhenhuan-VITS/text/__init__.py deleted file mode 100644 index 11e5586c347c3071a9d1aca0425d112f45402e85..0000000000000000000000000000000000000000 --- a/spaces/zixian/Zhenhuan-VITS/text/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - symbol_to_id = {s: i for i, s in enumerate(symbols)} - clean_text = _clean_text(text, cleaner_names) - print(clean_text) - print(f" length:{len(clean_text)}") - for symbol in clean_text: - if symbol not in symbol_to_id.keys(): - continue - symbol_id = symbol_to_id[symbol] - sequence += [symbol_id] - print(f" length:{len(sequence)}") - return sequence - - -def cleaned_text_to_sequence(cleaned_text, symbols): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [symbol_to_id[symbol] for symbol in cleaned_text if symbol in symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/zomehwh/sovits-tannhauser/cluster/__init__.py b/spaces/zomehwh/sovits-tannhauser/cluster/__init__.py deleted file mode 100644 index f1b9bde04e73e9218a5d534227caa4c25332f424..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-tannhauser/cluster/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np -import torch -from sklearn.cluster import KMeans - -def get_cluster_model(ckpt_path): - checkpoint = torch.load(ckpt_path) - kmeans_dict = {} - for spk, ckpt in checkpoint.items(): - km = KMeans(ckpt["n_features_in_"]) - km.__dict__["n_features_in_"] = ckpt["n_features_in_"] - km.__dict__["_n_threads"] = ckpt["_n_threads"] - km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"] - kmeans_dict[spk] = km - return kmeans_dict - -def get_cluster_result(model, x, speaker): - """ - x: np.array [t, 256] - return cluster class result - """ - return model[speaker].predict(x) - -def get_cluster_center_result(model, x,speaker): - """x: np.array [t, 256]""" - predict = model[speaker].predict(x) - return model[speaker].cluster_centers_[predict] - -def get_center(model, x,speaker): - return model[speaker].cluster_centers_[x]
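The deleted cluster/__init__.py above wraps per-speaker scikit-learn KMeans models behind three small helpers (get_cluster_model, get_cluster_result, get_cluster_center_result). As a hedged illustration only — the checkpoint path, speaker key, and feature shapes below are assumptions for the sketch, not part of the original repository — a minimal usage example might look like this:

# Minimal sketch of how the deleted cluster helpers might be used.
# The checkpoint path and speaker name are hypothetical placeholders.
import numpy as np
from cluster import get_cluster_model, get_cluster_center_result

# Load the per-speaker KMeans models from a checkpoint (path is assumed).
kmeans_dict = get_cluster_model("logs/44k/kmeans_10000.pt")

# Dummy content features: t frames of 256-dim vectors, as the docstrings expect.
features = np.random.randn(100, 256).astype(np.float32)

# Replace each frame with its nearest cluster center for one speaker (key assumed).
centers = get_cluster_center_result(kmeans_dict, features, "speaker0")
print(centers.shape)  # expected: (100, 256)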