diff --git a/README.md b/README.md
deleted file mode 100644
index 4126ed855606c38ab5861f8bc3ff8d6949156f77..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-configs:
-- config_name: default
-  data_files:
-  - spaces.csv
-
-license: other
-language:
-- code
-size_categories:
-- 100K 0:
-        try:
-            try:
-                a[i['id']] = {'sdk': i['sdk'], 'license': i['cardData']['license'], 'likes': i['likes']}
-            except KeyError:
-                a[i['id']] = {'sdk': i['sdk'], 'license': None, 'likes': i['likes']}
-        except:
-            a[i['id']] = {'sdk': "Configuration error", 'license': "Configuration error", 'likes': i['likes']}
-
-data_list = [{'repository': key, 'sdk': value['sdk'], 'license': value['license'], 'likes': value['likes']} for key, value in a.items()]
-
-df = pd.DataFrame(data_list)
-```
-
-3. Cloned spaces locally.
-
-```python
-from huggingface_hub import snapshot_download
-
-programming = ['.asm', '.bat', '.cmd', '.c', '.h', '.cs', '.cpp', '.hpp', '.c++', '.h++', '.cc', '.hh', '.C', '.H', '.cmake', '.css', '.dockerfile', 'Dockerfile', '.f90', '.f', '.f03', '.f08', '.f77', '.f95', '.for', '.fpp', '.go', '.hs', '.html', '.java', '.js', '.jl', '.lua', 'Makefile', '.md', '.markdown', '.php', '.php3', '.php4', '.php5', '.phps', '.phpt', '.pl', '.pm', '.pod', '.perl', '.ps1', '.psd1', '.psm1', '.py', '.rb', '.rs', '.sql', '.scala', '.sh', '.bash', '.command', '.zsh', '.ts', '.tsx', '.tex', '.vb']
-pattern = [f"*{i}" for i in programming]
-
-for i in repos:
-    snapshot_download(i, repo_type="space", local_dir=f"spaces/{i}", allow_patterns=pattern)
-```
-
-4. Processed the data to derive statistics.
\ No newline at end of file
diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/gptworldAi/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/gptworldAi/__init__.py
deleted file mode 100644
index e7f76c61209fabf224698949764155ac53cc7a6b..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/gptworldAi/__init__.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/23 13:37
-@Auth : Hp_mzx
-@File :__init__.py.py
-@IDE :PyCharm
-"""
-import json
-import uuid
-import random
-import binascii
-import requests
-import Crypto.Cipher.AES as AES
-from fake_useragent import UserAgent
-
-class ChatCompletion:
-    @staticmethod
-    def create(messages: [], proxy: str = None):
-        url = "https://chat.getgpt.world/api/chat/stream"
-        headers = {
-            "Content-Type": "application/json",
-            "Referer": "https://chat.getgpt.world/",
-            'user-agent': UserAgent().random,
-        }
-        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
-        data = json.dumps({
-            "messages": messages,
-            "frequency_penalty": 0,
-            "max_tokens": 4000,
-            "model": "gpt-3.5-turbo",
-            "presence_penalty": 0,
-            "temperature": 1,
-            "top_p": 1,
-            "stream": True,
-            "uuid": str(uuid.uuid4())
-        })
-        signature = ChatCompletion.encrypt(data)
-        res = requests.post(url, headers=headers, data=json.dumps({"signature": signature}), proxies=proxies, stream=True)
-        for chunk in res.iter_content(chunk_size=None):
-            res.raise_for_status()
-            datas = chunk.decode('utf-8').split('data: ')
-            for data in datas:
-                if not data or "[DONE]" in data:
-                    continue
-                data_json = json.loads(data)
-                content = data_json['choices'][0]['delta'].get('content')
-                if content:
-                    yield content
-
-    @staticmethod
-    def random_token(e):
-        token = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
-        n = len(token)
-        return "".join([token[random.randint(0, n - 1)] for i in range(e)])
-
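# The two helpers that follow implement the request signing used by ChatCompletion.create():
# random_token(16) supplies a 16-character alphanumeric string for both the AES key and
# the IV, __pad_data() applies PKCS#7-style padding to the JSON payload, and encrypt()
# returns hex(ciphertext) followed by the key and the IV as the "signature" sent to the API.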
-    @staticmethod
-    def encrypt(e):
-        t = ChatCompletion.random_token(16).encode('utf-8')
-        n = ChatCompletion.random_token(16).encode('utf-8')
-        r = e.encode('utf-8')
-        cipher = AES.new(t, AES.MODE_CBC, n)
-        ciphertext = cipher.encrypt(ChatCompletion.__pad_data(r))
-        return binascii.hexlify(ciphertext).decode('utf-8') + t.decode('utf-8') + n.decode('utf-8')
-
-    @staticmethod
-    def __pad_data(data: bytes) -> bytes:
-        block_size = AES.block_size
-        padding_size = block_size - len(data) % block_size
-        padding = bytes([padding_size] * padding_size)
-        return data + padding
-
-
-class Completion:
-    @staticmethod
-    def create(prompt: str, proxy: str = None):
-        return ChatCompletion.create([
-            {
-                "content": "You are ChatGPT, a large language model trained by OpenAI.\nCarefully heed the user's instructions. \nRespond using Markdown.",
-                "role": "system"
-            },
-            {"role": "user", "content": prompt}
-        ], proxy)
-
-
-if __name__ == '__main__':
-    # single completion
-    text = ""
-    for chunk in Completion.create("你是谁", "127.0.0.1:7890"):
-        text = text + chunk
-        print(chunk, end="", flush=True)
-    print()
-
-    # chat completion
-    message = []
-    while True:
-        prompt = input("请输入问题:")
-        message.append({"role": "user", "content": prompt})
-        text = ""
-        for chunk in ChatCompletion.create(message, '127.0.0.1:7890'):
-            text = text + chunk
-            print(chunk, end="", flush=True)
-        print()
-        message.append({"role": "assistant", "content": text})
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Upgrade Your Mac Yet How to Use OBS Studio on Mac OS X 10.12.6.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Upgrade Your Mac Yet How to Use OBS Studio on Mac OS X 10.12.6.md
deleted file mode 100644
index 9ba304f2ccd35a74679d4dcf3c55a28a19d70f61..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Upgrade Your Mac Yet How to Use OBS Studio on Mac OS X 10.12.6.md
+++ /dev/null
@@ -1,44 +0,0 @@
-<br />
-# How to Download and Install OBS Studio on Mac OS X 10.12.6
-
-OBS Studio is a free and open-source program that allows you to record and stream video and audio from your computer. With OBS Studio, you can create professional-looking videos for live streaming, gaming, webinars, podcasts, and more. OBS Studio supports multiple sources, scenes, transitions, filters, and plugins that give you full control over your video production.
-
-But how can you download and install OBS Studio on Mac OS X 10.12.6? Is there a compatible version for this older operating system? In this article, we will show you how to get OBS Studio up and running on your Mac in a few simple steps.
-
-## Download OBS Studio for Mac OS X 10.12.6
-
-The first thing you need to do is download the OBS Studio installer for Mac OS X 10.12.6 from the official website. The latest version of OBS Studio requires Mac OS X 10.13 or later, but there is an older version (25.0.8) that works with Mac OS X 10.12.6.
-
-To download OBS Studio for Mac OS X 10.12.6, go to the [download page](https://obsproject.com/download) and scroll down to the "Older Versions" section. Click on the "Mac OS X" tab and look for version 25.0.8. Click on the "Download Installer" button and save the file to your computer.
-
-## Install OBS Studio on Mac OS X 10.12.6
-
-Once you have downloaded the OBS Studio installer for Mac OS X 10.12.6, you can proceed to install it on your computer. To install OBS Studio on Mac OS X 10.12.6, follow these steps:
-
-- Double-click on the downloaded file (obs-mac-25.0.8-installer.pkg) to launch the installer.
-- Click on "Continue" and agree to the license agreement.
-- Choose the destination folder for OBS Studio and click on "Install".
-- Enter your administrator password if prompted and click on "Install Software".
-- Wait for the installation to complete and click on "Close".
-
-## Launch OBS Studio on Mac OS X 10.12.6
-
-Now that you have installed OBS Studio on your Mac, you can launch it and start using it for your video recording and streaming needs. To launch OBS Studio on Mac OS X 10.12.6, follow these steps:
-
-- Go to the Applications folder and look for the OBS icon.
-- Double-click on the OBS icon to open the application.
-- If you see a warning message saying that "OBS" can't be opened because it is from an unidentified developer, click on "Open Anyway".
-- If you see a dialog box asking for permission to access your microphone or camera, click on "OK".
-- You will see the main window of OBS Studio with a preview of your video source and some buttons and menus.
-- You can now configure your settings, add sources and scenes, apply filters and transitions, and start recording or streaming.
-
-## Conclusion
-
-OBS Studio is a powerful and versatile program that can help you create high-quality videos for various purposes. Whether you want to stream live events, record gameplay, or make tutorials, OBS Studio can handle it all.
-
-However, if you have an older Mac with Mac OS X 10.12.6, you may encounter some compatibility issues with the latest version of OBS Studio. Fortunately, there is an older version of OBS Studio that works with Mac OS X 10.12.6 and can be downloaded and installed easily.
-
-By following this guide, you can download and install OBS Studio on Mac OS X 10.12.6 and start using it without any problems.</p>

-

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow Download and Install Guide.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow Download and Install Guide.md deleted file mode 100644 index 616dbdbf2ff0d82b4a0321b85e72e16309ccee4e..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow Download and Install Guide.md +++ /dev/null @@ -1,146 +0,0 @@ -
-

Football Manager 2012 Patch 12.2.2 Update Skidrow

-

If you are a fan of football management games, you probably have heard of Football Manager 2012, one of the most popular and realistic games in the genre. But did you know that there is a patch 12.2.2 update that adds new features and fixes bugs to the game? And did you know that you can download and install it for free from Skidrow, a group of hackers who crack and release games online? In this article, we will tell you everything you need to know about Football Manager 2012 patch 12.2.2 update Skidrow, including how to download and install it, what's new in it, and why you should try it.

-

Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow





-

Introduction

-

What is Football Manager 2012?

-

Football Manager 2012 is a football management simulation game developed by Sports Interactive and published by Sega in October 2011. It is the eighth game in the Football Manager series, and it allows you to take control of any club from over 50 countries across the world, as well as create your own custom club. You can manage every aspect of your club, from tactics and training to transfers and finances, as well as interact with players, staff, media, and fans. You can also compete with other managers online or offline in various modes and challenges.

-

What is the Patch 12.2.2 Update?

-

The patch 12.2.2 update is an official update released by Sports Interactive in March 2012 that fixes some bugs and errors in the game, as well as adds some new features and content. Some of the main changes include:

- -

What is Skidrow?

-

Skidrow is a group of hackers who crack and release games online for free. They are known for cracking games that have DRM (digital rights management) protection, such as Steam or Origin, which prevent users from playing games without buying them or having an internet connection. Skidrow has cracked many popular games, such as Assassin's Creed, Call of Duty, FIFA, Grand Theft Auto, and more. They usually release their cracks along with updates or patches for the games.

-

How to Download and Install the Patch 12.2.2 Update Skidrow

-

Requirements

-

Before you download and install the patch 12.2.2 update Skidrow, you need to meet a few requirements:</p>

- -

Steps

-

Once you have all the requirements ready, you can follow these steps to download and install the patch 12.2.2 update Skidrow:</p>

-

Download the Patch 12.2.2 Update Skidrow from a trusted source</h4>

The first step is to download the patch 12.2.2 update Skidrow file from a trusted source online. You can use a torrent site (such as The Pirate Bay or Kickass Torrents) or a direct download site (such as Mega or Mediafire) to find and download the file.</p>

-

The file size should be around 1 GB, and it should have a name like "Football.Manager.2012.Patch.v12.2.2-UPDATE-SKIDROW.rar" or something similar.</p>

-

Extract the files to your Football Manager 2012 folder</h4>

-

The second step is to extract the files from the patch 12.2.2 update Skidrow file to your Football Manager 2012 folder on your PC.</p>

-

You can use a file extractor (such as WinRAR or 7-Zip) to open the file and extract its contents.

-


-

You should see a folder named "Football.Manager.2012.Patch.v12.2.2-UPDATE-SKIDROW" or something similar inside.</p>

-

You need to copy this folder to your Football Manager 2012 folder on your PC.</p>

-

You can find your Football Manager 2012 folder by following this path: C:\Program Files (x86)\Steam\steamapps\common\Football Manager 2012\ (or wherever you installed your game).</p>

-

Run the installer and follow the instructions

-

The third step is to run the installer inside the "Football.Manager.2012.Patch.v12.2.2-UPDATE-SKIDROW" folder and follow the instructions on the screen.</p>

-

You should see a file named "setup.exe" or something similar inside.

-

You need to double-click on this file and allow it to run on your PC.

-

You should see a window that asks you to select the language and agree to the terms and conditions.

-

You need to choose your preferred language and click on "I Agree".

-

You should then see another window that asks you to select the destination folder for the patch installation.

-

You need to browse and select your Football Manager 2012 folder on your PC (the same one where you copied the "Football.Manager.2012.Patch.v12.2.2-UPDATE-SKIDROW" folder).</p>

-

You should then see another window that shows the progress of the installation.

-

You need to wait until the installation is complete.

-

Copy the crack files to your Football Manager 2012 folder</h4>

-

The fourth step is to copy the crack files from the "Football.Manager.2012.Patch.v12.2.2-UPDATE-SKIDROW" folder to your Football Manager 2012 folder on your PC.</p>

-

You should see a folder named "SKIDROW" inside.

-

You need to open this folder and copy all its contents.

-

You then need to paste them into your Football Manager 2012 folder on your PC (the same one where you installed the patch).</p>

-

Enjoy the game with the latest updates and features

-

The final step is to enjoy the game with the latest updates and features.

-

You can launch the game from Steam or from your desktop shortcut.

-

You should see a message that says "Football Manager is now running version 12.2.2".</p>

-
Congratulations! You have successfully downloaded and installed the patch 12.2.2 update Skidrow for Football Manager 2012!</p>
-
Note: If you encounter any problems or errors while playing the game, you can check the official website of Sports Interactive (https://www.sigames.com/) or the Skidrow website (https://www.skidrowreloaded.com/) for solutions or support.
-

What's New in the Patch 12.2.2 Update Skidrow

-

Bug Fixes and Improvements

-

The patch 12.2.2 update Skidrow fixes some bugs and errors that were present in the previous versions of the game, such as:

- -

New Transfers and Contracts

-

The patch 12.2.2 update Skidrow also adds some new transfers and contracts that were made during the winter transfer window of 2012, such as:

| Player | From | To | Fee |
| --- | --- | --- | --- |
| Carlos Tevez | Manchester City | AC Milan | £25m |
| Thierry Henry | New York Red Bulls | Arsenal | Loan |
| Gary Cahill | Bolton Wanderers | Chelsea | £7m |
| Papiss Cisse | Freiburg | Newcastle United | £9m |
| Alex | Chelsea | Paris Saint-Germain | £4m |
| Paul Scholes | Retired | Manchester United | Free |
| David Beckham | LA Galaxy | Paris Saint-Germain | Free |
| Tim Cahill | Everton | New York Red Bulls | £1m |
| Robbie Keane | LA Galaxy | Aston Villa | Loan |
| Nicolas Anelka | Chelsea | Shanghai Shenhua | Free |

New Leagues and Competitions

-

The patch 12.2.2 update Skidrow also adds some new leagues and competitions that were not available in the previous versions of the game, such as:

| League/Competition | Country/Region | Level/Format |
| --- | --- | --- |
| Australian A-League | Australia/New Zealand | Top division/10 teams |
| Indian Super League | India | Top division/8 teams |
| UEFA Europa Conference League | Europe | Third-tier continental competition/184 teams |
| FIFA Club World Cup Expanded Edition | Worldwide | Intercontinental competition/24 teams |
| UEFA Nations League Finals | Europe | International competition/4 teams |

New Graphics and Sounds

-

The patch 12.2.2 update Skidrow also adds some new graphics and sounds that enhance the visual and audio quality of the game, such as:

| Graphic/Sound | Description |
| --- | --- |
| New player faces | More realistic and updated faces for over 500 players |
| New kits | More authentic and updated kits for over 100 clubs and national teams |
| New logos | More accurate and updated logos for over 200 clubs and competitions |
| New trophies | More detailed and realistic trophies for over 50 competitions |
| New crowd chants | More diverse and realistic crowd chants for over 50 clubs and national teams |

Conclusion

-

Summary of the main points

-

In conclusion, Football Manager 2012 patch 12.2.2 update Skidrow is an amazing update that improves the game in many ways. It fixes some bugs and errors, adds some new features and content, and enhances the visual and audio quality of the game. It is easy to download and install, and it is free of charge. It is compatible with Windows 10 and DirectX 11, and it works with Steam or other platforms. It is a must-have update for any fan of football management games.

-

Call to action for the readers

-

If you are interested in trying out Football Manager 2012 patch 12.2.2 update Skidrow, you can follow the steps we have provided in this article to download and install it on your PC. You can also check out our other articles on how to play Football Manager 2012 better, how to find hidden gems in Football Manager 2012, how to create custom tactics in Football Manager 2012, and more. You can also share your feedback, opinions, questions, or suggestions with us in the comments section below. We would love to hear from you!

-

Frequently Asked Questions (FAQs)

-
  1. Do I need to have Football Manager 2012 installed on my PC before I download and install the patch 12.2.2 update Skidrow?
-

Yes, you need to have Football Manager 2012 installed on your PC before you download and install the patch 12.2.2 update Skidrow. You can buy it from Steam or other platforms.

  2. Do I need to have an internet connection to play Football Manager 2012 after I download and install the patch 12.2.2 update Skidrow?</li>

No, you do not need to have an internet connection to play Football Manager 2012 after you download and install the patch 12.2.2 update Skidrow. You can play it offline or online as you wish.

-
  3. Is the patch 12.2.2 update Skidrow safe to download and install on my PC?</li>
-

Yes, the patch 12.2.2 update Skidrow is safe to download and install on your PC. However, you should always scan the files for viruses or malware before you open them, and you should always download them from trusted sources online.

-
  4. Will the patch 12.2.2 update Skidrow affect my saved games or achievements in Football Manager 2012?</li>
-

No, the patch 12.2.2 update Skidrow will not affect your saved games or achievements in Football Manager 2012. You can continue playing your saved games or earning your achievements as usual.

-
  5. Can I uninstall the patch 12.2.2 update Skidrow if I do not like it or if it causes problems on my PC?</li>
-

Yes, you can uninstall the patch 12.2.2 update Skidrow if you do not like it or if it causes problems on your PC. You can use the uninstaller inside the "Football.Manager.2012.Patch.v12.2.2-UPDATE-SKIDROW" folder to remove it from your PC.</p>

-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/COOKING MAMA Apk Mod Unlock All Learn Cooking Techniques and Create Your Own Recipes with Unlimited Coins and Levels.md b/spaces/1gistliPinn/ChatGPT4/Examples/COOKING MAMA Apk Mod Unlock All Learn Cooking Techniques and Create Your Own Recipes with Unlimited Coins and Levels.md deleted file mode 100644 index 453ffbcb02deb2836ff00b7d9079641999605cda..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/COOKING MAMA Apk Mod Unlock All Learn Cooking Techniques and Create Your Own Recipes with Unlimited Coins and Levels.md +++ /dev/null @@ -1,33 +0,0 @@ - -

If you want to create a big and wonderful restaurant, then serve your cooking as often as possible. This way people can get the best experience from their meal by eating at an establishment run by someone who cares deeply about food quality!

-

With its intuitive controls, both children and adults can enjoy the game. Also, even if you make mistakes there are no game overs, so everyone can complete dishes. Furthermore, children who play may develop an interest in cooking.

-

COOKING MAMA Apk Mod Unlock All





-

[Game Features]
With its intuitive controls, both children and adults can enjoy the game. Also, even if you make mistakes there are no game-overs, so everyone can complete dishes. Furthermore, children who play may develop an interest in cooking.

[Recommended Setup]
Android OS 4.1 or later.
**Game may not be playable on certain devices even if the above conditions are met.

-

In the game, Android gamers will find themselves having access to their own interesting cooking adventures with Cooking Mama and Papa. Our two characters will stay with you from the beginning of the game as your cooking mentors and testers. Join them as you discover your own exciting journeys into the world of delicious foods and the fun of cooking them.</p>

-

To start with, Android gamers in Cooking Mama will find themselves having access to the simple, intuitive, and extremely fun gameplay of cooking, which you can dive into and enjoy to the fullest. Have fun as you create delicious foods from multiple ingredients and follow amazing recipes. Have them tested by Papa and serve your foods to other villagers. Make delicious dishes in varied categories with the help of the intuitive touch controls. Try out the unique gameplay as you create yummy food and find yourself getting hungry really fast. Fans of the famous Cooking Fever will certainly find themselves having access to yet another amazing cooking game on their mobile devices.</p>

-

Gamers in Cooking Mama will immediately find themselves having access to the friendly and inviting graphics as they dive into the exciting cooking gameplay. The cartoony and adorable cooking tools, ingredients, and animations will also allow gamers to quickly immerse themselves in the gameplay. And most importantly, the undemanding graphics will also guarantee a smooth and satisfying experience with the game.</p>

-

Do you love cooking unique dishes and searching for a game that helps you to cook various delicious dishes in a unique way? If you think so, then Cooking Mama MOD Apk is the perfect choice for you. There are a number of bonus features available in this cooking game, which can be unlocked as you continue playing the game. These include unlocking new dishes and other items as well as receiving extra points.

-

Cooking Mama is considered the most engaging cooking game for various platforms including android. There is a built-in automatic recipe guide available in the app that will help you to determine the type of dish that you can make for a particular level. Moreover, the recipe guide is very accurate and reliable, so you can be sure that the dish you are making will turn out perfect.

-

-

You can choose your level of difficulty while cooking in this game. If you are a beginner, then start with the easy levels before moving on to the medium ones. The hard levels are very challenging so be prepared for them when you are done with the easy levels.

-

The most important thing in cooking is to be accurate with the timing. If you leave your dish in the oven for too long, it might burn and ruin the whole dish. You also have to wait for the right time before taking it out of the oven or other cooking utensils so that all your dishes achieve perfection.</p>

-

This cooking game features awesome sound effects which make everything feel more fun and enjoyable especially when you are slicing or dicing up ingredients with your knife. You will be able to immerse yourself in the experience of cooking delicious dishes.

-

The best thing about this cooking game is that the controls are very easy to understand and use. You will be able to start cooking right from the next moment as you install the game on your smartphone.

-

There are various mini-games involved in this cooking game which keep things interesting as you keep playing the game over time. Some of these mini-games require fast reflexes while some simply require patience and persistence, but all of them are equally fun to play.

-

You can also challenge your friends and online players to a cook-off to see who can make the best dish. This adds an extra layer of competition to the cooking game and makes it more enjoyable to play. Also, the online players are from all around the world, so you can learn new recipes and cooking tips from them.

-

The best way to grab lots of points in this game is by sharing your recipes with the Cooking Mama community. You can share your own recipes with other players and earn points as a thank you for sharing. These points can be used to upgrade your appliances or unlock new dishes.

-

At the end of this article, I would like to say that Cooking Mama is the best cooking game for android devices ever made for smartphones. From intuitive designs and interesting gameplay, this game has everything that a user can expect from an ideal cooking game.

-

There is no cost associated with playing this cooking game, which makes it even more popular among fans. There are in-app purchases available for players who want to boost their character level while playing or unlocking content faster. You can purchase them or use this Cooking Mama MOD version.

-

Android gamers will have access to their own entertaining cooking adventures with Cooking Mama and Papa in the game. Our two characters will be your cooking tutors and testers from the beginning of the game. Join them as they embark on their own thrilling travels into the world of delectable meals and the fun that comes with preparing them.

-

Cooking with a Twist Mama is a terrible cooking game in which you must assist the renowned cooking mama in preparing and cooking a turkey. However, cooking mama has begun to exhibit her really evil and twisted side, so if you are squeamish, you should avoid playing this game. To prepare and cook the dish, follow the directions step by step.

-

Cooking mama: Let's cook puzzle - make tasty dishes using foods on the screen and match 3 and more same ones. Improve your culinary skills in this fun game for Android. Make delicious meals easy. To do this just match same ingredients.

-

Join the game, you will play as a young girl trying to learn to cook with Mama and Papa. You will be guided through every small step to complete delicious dishes and please your parents. Cooking is easy as you just tap on the screen to select ingredients, then swipe or rotate to cook, and finally cook with kitchen tools and appliances. Delicious dishes will attract all your eyes with eye-catching colors. Besides, the cooking process is always accompanied by upbeat music, helping you both cook and relax. So, are you ready to cook with your parents? Show your talent to become the most talented kid chef in the house.

-

Your delicious dishes will be judged by your parents and diners. They will give you the number of stars and points corresponding to the quality of the dish. You can use your scores and stars to unlock new ingredients and recipes. If you play hard, you can add dozens of new items to the restaurant menu every day. Cooking is not as difficult as you think. Besides the video tutorial, you just need to use your fingers to cook. As soon as you make a mistake, you can also finish the dish. So this game is really suitable for kids and amateur chefs who love to cook on the phone.

-

You can play minigames to train your brain and relax after hours of cooking in the kitchen. It can be jigsaw puzzles, memorization, number filling, hidden objects, and more. The mini-games are built with lovely, playful pictures and music. You can also compete with your friends on the leaderboards of online minigames. Through these games, the game also gives you many attractive gifts to unlock unique items. Feel free to design your character with impressive clothing items, hair colors, and accessories.

-

With a game for children, this game is designed with a super cute and friendly graphic style. The characters and objects in the game are depicted in a chibi cartoon style. Cooking operations in the first person create a sense of authenticity and fun. The restaurant scene is always filled with bright colors, stimulating the concentration and creativity of all young chefs. And relaxing music will also make you happy all day with the cooking experience here.

-

Cooking Mama: Let's cook! MOD Coins - A game that will teach you how to cook from scratch! Slice, knead, fry, boil and bake, create a real culinary masterpiece! During the game, uncontrolled urges to eat are possible! Better to play on a full stomach! Cook - playfully, the entire cooking process will be accompanied by cute mini-games, try to do everything perfectly to serve a really tasty and right dish.

-

In the game, you will learn more than 30 recipes for delicious and healthy food, and also try to open your own restaurant, where you will sell your culinary masterpieces. In addition, all food is completely sustainable, go fishing, do gardening and serve customers only your own food. And if you want to take a break from the usual cooking, you can always play interesting mini-games.

-

These recipes are either obtained by default or by purchases. Other options include playing daily (7 days for each recipe) or completing Requests from Papa, labeled under as Bonus. There are 4 recipes that can be obtained through Facebook by inviting your friends to download a copy of the game. All subsequent lists after Starter Recipes require purchases before further unlocking requirements. Purchasing packs will build up a counter that lets you earn more Game Coins.

-

Early builds of the game require you to obtain these recipes via Facebook invitations. Each recipe would be unlocked sequentially as you invite more friends. However, as of December 2016, they can be purchased as a pack without interacting with Facebook. Please note that you will still have to pay the total amount for the pack even if some are already obtained.

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Bullett Raja Movies 1080p Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Bullett Raja Movies 1080p Torrent.md deleted file mode 100644 index 7f6079504349f78bd91945103c9c7d977ba50d9a..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Bullett Raja Movies 1080p Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

Download Bullett Raja Movies 1080p Torrent





-
-Download dubbed torrent full movies in HD in a fast and safe way. ... Bullet Raja Hindi movie trailer download in HD mp4, 3Gp, 720p. Bullett Raja ...
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Un juego de burbujas adictivo y gratuito - APK Descargar.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Un juego de burbujas adictivo y gratuito - APK Descargar.md deleted file mode 100644 index e3b5c52df1250b57dad6c050f1eab4bfb6090f0f..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Un juego de burbujas adictivo y gratuito - APK Descargar.md +++ /dev/null @@ -1,130 +0,0 @@ - -

Bubble Shooter APK: How to Download and Play this Fun Game

-

If you are looking for a casual puzzle game that is easy to play, addictive, and enjoyable, then you should try Bubble Shooter APK. This is a classic game that has been around for decades, but it never gets old. In this article, we will tell you what Bubble Shooter APK is, how to download and install it on your Android device, how to play it and enjoy its benefits, and some tips and tricks to help you master it.

-

What is Bubble Shooter APK?

-

A brief introduction to the game and its features

-

Bubble Shooter APK is a free game that you can download from the Google Play Store or from other websites. It is inspired by Puzzle Bobble, a popular arcade game from the 90s. The goal of Bubble Shooter APK is to clear the screen by matching three or more bubbles of the same color. You can use your finger or mouse to aim and shoot bubbles at the rows above your shooter. You can see the next bubble to come in the bottom right corner of the screen. You can also change the color of your bubble by tapping on it.</p>

-

bubble shooter apk para descargar





-

Bubble Shooter APK has many features that make it fun and challenging. Some of them are:

- -

How to download and install Bubble Shooter APK on your Android device

-

To download and install Bubble Shooter APK on your Android device, you need to follow these simple steps:

-
    -
  1. Go to the Google Play Store or any other website that offers Bubble Shooter APK. Make sure that the source is reliable and safe.
  2. Tap on the download button or scan the QR code to start downloading the file.
  3. Once the download is complete, open the file manager on your device and locate the file.
  4. Tap on the file and allow the installation from unknown sources if prompted.
  5. Follow the instructions on the screen to complete the installation.
  6. Launch the game and enjoy!
-

How to play Bubble Shooter APK and enjoy its benefits

-

To play Bubble Shooter APK, you need to have a basic understanding of how the game works. Here are some guidelines to help you get started:

- -

Tips and Tricks for Bubble Shooter APK

-

How to aim and shoot bubbles effectively

-

Aiming and shooting bubbles is the most important skill in Bubble Shooter APK. Here are some tips to help you improve your accuracy and efficiency:

- -

How to use the walls and the next bubble indicator

-

Using the walls and the next bubble indicator can give you an edge in Bubble Shooter APK. Here are some ways to use them effectively:

- -

How to clear the board and score high points

-

Clearing the board and scoring high points are the main objectives of Bubble Shooter APK. Here are some strategies to help you achieve them:

- -

Benefits of Playing Bubble Shooter APK

-

It is a free, fun, and relaxing game

-

Bubble Shooter APK is a game that you can play for free anytime and anywhere. You don't need an internet connection or a subscription to enjoy it. You can play it on your phone, tablet, or computer. It is a game that is suitable for all ages and preferences. It is a game that is fun and relaxing, as it does not require too much thinking or stress. You can play it at your own pace and mood.

-

It improves your brain skills and concentration

-

Bubble Shooter APK is a game that can also improve your brain skills and concentration. It is a game that requires you to use your logic, strategy, and observation skills. You have to think fast and smart to clear the levels and score high points. You have to pay attention to the colors, patterns, and movements of the bubbles. You have to focus on your aim and timing. Playing Bubble Shooter APK can help you sharpen your mind and enhance your mental abilities.

-


-

It offers thousands of levels and challenges

-

Bubble Shooter APK is a game that offers thousands of levels and challenges for you to enjoy. It has three modes: classic, arcade, and puzzle, each with different objectives and difficulties. It has new elements and prizes that you can unlock as you progress. It has a leaderboard and achievements that you can compete with your friends and other players. It has a colorblind mode that makes it accessible for everyone. Playing Bubble Shooter APK can keep you entertained and satisfied for hours.

-

Conclusion

-

Bubble Shooter APK is worth trying if you are looking for a casual puzzle game that is easy to play, addictive, and enjoyable. It has many features that make it fun and challenging, it can improve your brain skills and concentration, and it offers thousands of levels and challenges while staying free, fun, and relaxing. You can download and install it on your Android device easily and safely, play it anytime and anywhere, and use the tips and tricks above to help you master it. If you are ready to pop some bubbles and have some fun, download Bubble Shooter APK today and start playing!</p>

-

FAQs

-

Here are some frequently asked questions about Bubble Shooter APK:

-

Q: Is Bubble Shooter APK safe to download and install?

-

A: Yes, Bubble Shooter APK is safe to download and install, as long as you get it from a reliable and secure source, such as the Google Play Store or the official website . You should avoid downloading it from unknown or suspicious sources, as they might contain malware or viruses that can harm your device.

-

Q: How can I get more coins in Bubble Shooter APK?

-

A: You can get more coins in Bubble Shooter APK by completing levels, watching ads, or buying them with real money. You can use coins to buy power-ups, lives, or unlock new elements and prizes.

-

Q: How can I play Bubble Shooter APK with my friends?

-

A: You can play Bubble Shooter APK with your friends by connecting your game to Facebook or Google Play Games. You can then see your friends' scores and achievements on the leaderboard and challenge them to beat your records. You can also invite them to play with you or send them gifts.

-

Q: What are the power-ups in Bubble Shooter APK?

-

A: The power-ups in Bubble Shooter APK are special bubbles that can help you clear the levels faster and easier. Some of the power-ups are:

- -

Q: How can I contact the developers of Bubble Shooter APK?

-

A: You can contact the developers of Bubble Shooter APK by sending them an email at support@bubbleshooter.com or by visiting their website. You can also follow them on Facebook, Twitter, or Instagram for updates, news, and tips.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Animal Voice How to Record and Edit Animal Sounds for Your Projects.md b/spaces/1phancelerku/anime-remove-background/Animal Voice How to Record and Edit Animal Sounds for Your Projects.md deleted file mode 100644 index f25d55e176e6d197e4fbaa80af860f7a35cd436b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Animal Voice How to Record and Edit Animal Sounds for Your Projects.md +++ /dev/null @@ -1,192 +0,0 @@ -
-

Animal Voice: How Animals Communicate and How You Can Train Your Pet to Talk

-

Introduction

-

Have you ever wondered what your pet is trying to tell you when they bark, meow, or chirp? Have you ever wished you could teach your pet to talk and understand what they are thinking and feeling? If so, you are not alone. Many animal lovers are fascinated by the idea of animal voice and communication.

-

Animal voice is the term used to describe the sounds, gestures, and other signals that animals use to communicate with each other and with humans. Animal communication is a complex and diverse phenomenon that involves various modes, functions, and contexts. Animal communication is also an important source of information that influences the behavior and decision making of both senders and receivers.

-

animal voice





-

In this article, you will learn more about animal voice and communication, such as what types of signals animals use, how they vary across species and situations, and what benefits and challenges they entail. You will also learn how you can train your pet to talk using recordable dog training buttons, which are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words. By the end of this article, you will have a better understanding of animal voice and communication, as well as some practical tips and tricks on how to train your pet to talk using buttons.

-

Types of animal voice and communication

-

Visual signals: gestures, facial expressions, colors, patterns, etc.

-

One of the most common types of animal voice and communication is visual signals. Visual signals are actions or anatomical structures that provide information to another animal through sight. Visual signals can include gestures, facial expressions, body postures, movements, colors, patterns, displays, etc.

-


-

Examples of visual signals in different animals

-

Some examples of visual signals in different animals are:

- -

Advantages and disadvantages of visual signals

-

Visual signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:

- -

Some of the disadvantages are:

- -

Auditory signals: sounds, calls, songs, etc.

-

Another common type of animal voice and communication is auditory signals. Auditory signals are sounds that animals produce and perceive through hearing. Auditory signals can include calls, songs, cries, whistles, clicks, etc.

-

Examples of auditory signals in different animals

-

Some examples of auditory signals in different animals are:

- -

Advantages and disadvantages of auditory signals

-

Auditory signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:

- -

Some of the disadvantages are:

- -

Chemical signals: pheromones, scents, tastes, etc.

-

A less obvious but equally important type of animal voice and communication is chemical signals. Chemical signals are substances that animals secrete or release into their environment that affect the behavior or physiology of another animal through smell or taste. Chemical signals can include pheromones, scents, tastes, etc.

-

Examples of chemical signals in different animals

-

Some examples of chemical signals in different animals are:

- -

Advantages and disadvantages of chemical signals

-

Chemical signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:

- -

Some of the disadvantages are:

- -

Tactile signals: touch, vibration, electric fields, etc.

-

The last type of animal voice and communication that we will discuss is tactile signals. Tactile signals are physical stimuli that animals apply or receive through touch or other forms of contact. Tactile signals can include touch, vibration, electric fields, etc.

-

Examples of tactile signals in different animals

-

Some examples of tactile signals in different animals are:

- -

Advantages and disadvantages of tactile signals

-

Tactile signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:

- -

Some of the disadvantages are:

- -

How to train your pet to talk using buttons

-

What are recordable dog training buttons and how do they work?

-

If you want to train your pet to talk using buttons, you will need some recordable dog training buttons. These are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words. For example, you can record words like "outside", "play", "water", "treat", etc., on different buttons and place them on a mat or a board. Then you can teach your pet to associate each button with its corresponding word and action. When your pet wants something or wants to communicate something to you, they can press the appropriate button and hear the word spoken out loud.

-

Recordable dog training buttons are based on the idea that animals can learn to use symbols or words to communicate with humans. This idea has been tested and proven by many studies and experiments involving animals like chimpanzees, dolphins, parrots, etc. Recordable dog training buttons are also inspired by augmentative and alternative communication (AAC) devices that are used by humans who have speech impairments or disabilities. AAC devices help these humans communicate with others using pictures, symbols, gestures, sounds, etc.

-

Recordable dog training buttons are easy to use and affordable

Recordable dog training buttons are easy to use and affordable, and you can find them online or in pet stores. For example, you can check out the PawTalk Recordable Dog Buttons, the Hunger for Words Talking Pet Starter Set, the Talking Products Talking Tiles, or the Decdeal Recordable Talking Button With LED Function. These are some of the popular and recommended products that you can use to train your pet to talk using buttons.

-

How to teach your dog to speak on command using buttons

-

One of the simplest and most fun ways to train your pet to talk using buttons is to teach them to speak on command. This means that you will teach your dog to bark when you ask them to, and then associate that bark with a word on a button. For example, you can teach your dog to say "hello" by barking when you say "hello" and then pressing a button that says "hello". This way, your dog will learn that barking and pressing the button are both ways of saying "hello". Here are the steps to teach your dog to speak on command using buttons:

-

Step 1: Have your reward ready

-

The first step is to have a reward ready for your dog. This can be a treat, a toy, or praise, depending on what your dog likes best. You will use this reward to reinforce your dog's behavior and make them more likely to repeat it. Make sure you have enough rewards for multiple repetitions and sessions.

-

Step 2: Get your dog to speak naturally

-

The next step is to get your dog to speak naturally. This means that you will wait for your dog to bark on their own, without prompting them. You can do this by observing your dog and noticing what triggers them to bark, such as a doorbell, a squirrel, or another dog. You can also try to make your dog excited or curious by playing with them, showing them something interesting, or hiding behind something. When your dog barks, mark the behavior with a clicker or a word like "yes" or "good". Then give them the reward immediately.

-

Step 3: Mark the bark with a cue word and a reward

-

The third step is to mark the bark with a cue word and a reward. This means that you will say a word that you want your dog to associate with barking, such as "speak", "talk", or "bark", right before or as your dog barks. Then give them the reward as usual. For example, if you want your dog to say "hello", you can say "hello" when they bark and then give them the reward. Repeat this several times until your dog learns that barking when you say "hello" earns them a reward.

-

Step 4: Add a hand signal if desired

-

The fourth step is optional, but it can help your dog learn faster and more reliably. You can add a hand signal that matches the cue word, such as waving your hand or pointing at your mouth, when you say the word and wait for your dog to bark. Then give them the reward as usual. For example, if you want your dog to say "hello", you can wave your hand and say "hello" when they bark and then give them the reward. Repeat this several times until your dog learns that barking when you wave your hand and say "hello" earns them a reward.

-

Step 5: Practice and reinforce the behavior consistently

-

The final step is to practice and reinforce the behavior consistently. This means that you will ask your dog to speak on command using the cue word and/or the hand signal, and then reward them for barking. You can also introduce a button that says the word that you want your dog to say, such as "hello", and place it near your dog. When your dog barks on command, press the button for them so they can hear the word spoken out loud. Then give them the reward as usual. Repeat this several times until your dog learns that barking on command and pressing the button are both ways of saying the word.

-

Tips and tricks for training your dog to speak using buttons

-

Training your pet to talk using buttons can be a fun and rewarding experience for both of you, but it also requires some patience and consistency. Here are some tips and tricks that can help you train your pet more effectively:

-

Be patient and consistent

-

Don't expect your pet to learn overnight or without mistakes. It may take some time and practice for your pet to understand what you want them to do and how to do it correctly. Be patient and consistent with

Be patient and consistent with your training sessions, and don't give up or get frustrated if your pet doesn't get it right away. Keep the sessions short, fun, and positive, and end on a high note. Reward your pet for every correct response, and ignore or redirect any incorrect or unwanted behavior. Gradually increase the difficulty and complexity of the commands and the buttons as your pet progresses.

-

Reward only barking on command and not nuisance barking

-

One of the potential drawbacks of teaching your pet to speak using buttons is that they may start to bark excessively or inappropriately, such as when they are bored, anxious, or attention-seeking. This can be annoying and disruptive for you and your neighbors. To prevent this, you should only reward your pet for barking on command and not for nuisance barking. You should also teach your pet a "quiet" command that tells them to stop barking, and reward them for obeying it. You can also provide your pet with enough mental and physical stimulation, such as toys, games, walks, etc., to keep them happy and occupied.

-

Capture and mark only a single bark or a desired number of barks

-

Another challenge of teaching your pet to speak using buttons is that they may bark too much or too little when you ask them to. For example, they may bark multiple times when you want them to say "hello", or they may not bark at all when you want them to say "yes". To avoid this, you should capture and mark only a single bark or a desired number of barks when you train your pet. You can do this by using a clicker or a word like "yes" or "good" to mark the exact moment when your pet barks the way you want them to. Then give them the reward immediately. This will help your pet learn to control their barking and match it with the word on the button.

-

Be mindful of your neighbors and the noise level of your dog's barking

-

The last tip for training your pet to speak using buttons is to be mindful of your neighbors and the noise level of your dog's barking. Some people may not appreciate hearing your dog talk all day long, especially if they are loud or frequent. You should respect your neighbors' privacy and comfort, and try to limit your training sessions to reasonable hours and durations. You should also choose words that are not too loud or harsh, such as "hi", "ok", "yay", etc., instead of words that are louder or more aggressive, such as "no", "stop", "bad", etc. You can also use volume control buttons that allow you to adjust the loudness of the words on the buttons.

-

Conclusion

-

Summary of the main points of the article

-

In conclusion, animal voice and communication are fascinating and diverse phenomena that involve various types of signals, such as visual, auditory, chemical, and tactile signals. Animals use these signals to communicate with each other and with humans for various purposes, such as survival, reproduction, socialization, etc. Animal voice and communication have some advantages and disadvantages depending on the mode, function, and context of the communication.

-

You can also train your pet to talk using recordable dog training buttons, which are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words. You can teach your pet to speak on command using buttons by following some simple steps, such as having a reward ready, getting your pet to speak naturally, marking the bark with a cue word and a reward, adding a hand signal if desired, and practicing and reinforcing the behavior consistently. You can also use some tips and tricks to train your pet more effectively, such as being patient and consistent, rewarding only barking on command and not nuisance barking, capturing and marking only a single bark or a desired number of barks, and being mindful of your neighbors and the noise level of your dog's barking.

-

By training your pet to talk using buttons, you can enhance your bond with your pet, enrich your pet's mental and physical well-being, and have fun and meaningful conversations with your pet. You can also learn more about your pet's personality, preferences, and emotions, and appreciate the diversity and complexity of animal voice and communication.

-

Call to action for the readers to try training their pet to talk using buttons

-

If you are interested in training your pet to talk using buttons, why not give it a try? You can start by getting some recordable dog training buttons online or in pet stores, and following the steps and tips that we have shared in this article. You can also watch some videos or read some stories of other pet owners who have successfully trained their pets to talk using buttons, such as Bunny the talking dog, Stella the talking dog, or Billi Speaks. These are some of the amazing and inspiring examples of pets who have learned to communicate with their humans using buttons.

-

Training your pet to talk using buttons can be a rewarding and enjoyable experience for both of you, and you may be surprised by how much your pet has to say. So don't hesitate and start training your pet to talk using buttons today!

-

FAQs

-

What is animal voice and communication?

-

Animal voice and communication are the sounds, gestures, and other signals that animals use to communicate with each other and with humans.

-

What types of signals do animals use to communicate?

-

Animals use various types of signals to communicate, such as visual, auditory, chemical, and tactile signals.

-

What are recordable dog training buttons?

-

Recordable dog training buttons are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words.

-

How can I train my pet to speak on command using buttons?

-

You can train your pet to speak on command using buttons by following some simple steps, such as having a reward ready, getting your pet to speak naturally, marking the bark with a cue word and a reward, adding a hand signal if desired, and practicing and reinforcing the behavior consistently.

-

What are some tips and tricks for training my pet to speak using buttons?

-

Some tips and tricks for training your pet to speak using buttons are being patient and consistent, rewarding only barking on command and not nuisance barking, capturing and marking only a single bark or a desired number of barks, and being mindful of your neighbors and the noise level of your dog's barking.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Descarga Chicken Gun Mod Apk con Dinero Infinito y Menu de Mods.md b/spaces/1phancelerku/anime-remove-background/Descarga Chicken Gun Mod Apk con Dinero Infinito y Menu de Mods.md deleted file mode 100644 index 19488a651ff3d834e8efd1fced266c19b75e395c..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Descarga Chicken Gun Mod Apk con Dinero Infinito y Menu de Mods.md +++ /dev/null @@ -1,102 +0,0 @@ -
-

Chicken Gun Dinero Infinito APK: How to Download and Play the Ultimate Shooting Game

-

If you are looking for a fun and hilarious multiplayer shooter game, you should try Chicken Gun. This game lets you play as a chicken with a gun, and your goal is to shoot other chickens in various maps and modes. You can also customize your chicken with different weapons, accessories, and skins. But what if you want to have more money and health in the game? That's where Chicken Gun Dinero Infinito APK comes in. This is a modified version of the game that gives you unlimited resources and advantages. In this article, we will tell you what Chicken Gun is, what Chicken Gun Dinero Infinito APK is, and how to download and install it on your device.

-




-

What is Chicken Gun?

-

A hilarious and addictive multiplayer shooter game

-

Chicken Gun is a game developed by ChaloApps, a studio based in Argentina. It was released in 2020 for Android and iOS devices. The game is a 3D shooter game that features chickens as the main characters. You can play as a chicken with a gun, and your objective is to shoot other chickens in different maps and modes. You can play solo or with your friends online, and compete with other players from around the world. The game has a cartoonish and colorful graphics style, and a funny sound effects and music. The game is suitable for all ages, as it does not contain any gore or violence.

-

Features of Chicken Gun

-

Play as shooting chickens

-

The game lets you choose from different types of chickens, such as white, brown, black, or rainbow. Each chicken has its own stats, such as speed, health, damage, and accuracy. You can also upgrade your chicken's skills with coins that you earn from playing the game.

-

Choose from various weapons

-

The game offers a variety of weapons that you can use to shoot other chickens, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. Each weapon has its own characteristics, such as range, fire rate, reload time, and ammo capacity. You can also switch between different weapons during the game.

-


-

Defeat other players online

-

The game has several modes that you can play online with other players, such as deathmatch, team deathmatch, capture the flag, zombie mode, and more. You can join or create a room with up to 10 players per team, and chat with them using voice or text messages. You can also view your stats and rank on the leaderboard.

-

Customize your cute chickens

-

The game allows you to personalize your chicken with different accessories, such as hats, glasses, masks, helmets, backpacks, wings, tails, and more. You can also change your chicken's skin color and pattern. You can mix and match different items to create your own unique style. You can also preview how your chicken looks before entering the game.

-

Be the best shooting chicken

-

The game challenges you to be the best shooting chicken in the world. You can earn coins and gems by playing the game, and use them to buy more weapons and accessories. You can also unlock achievements and trophies by completing various tasks and missions. You can also share your gameplay videos and screenshots with your friends on social media.

-

What is Chicken Gun Dinero Infinito APK?

-

A modified version of Chicken Gun with unlimited money and health

-

Chicken Gun Dinero Infinito APK is a modified version of Chicken Gun that gives you unlimited money and health in the game. This means that you can buy all the weapons and accessories that you want, and never run out of health or ammo. You can also play the game without any ads or in-app purchases. This way, you can enjoy the game without any limitations or interruptions.

-

Benefits of Chicken Gun Dinero Infinito APK

-

Unlock all the weapons and accessories

-

With Chicken Gun Dinero Infinito APK, you can unlock all the weapons and accessories that are available in the game. You can choose from over 50 weapons, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. You can also customize your chicken with over 100 accessories, such as hats, glasses, masks, helmets, backpacks, wings, tails, and more. You can create your own unique chicken with different combinations of items.

-

Survive longer and win more matches

-

With Chicken Gun Dinero Infinito APK, you can survive longer and win more matches in the game. You can have unlimited health and ammo in the game, which means that you can withstand any damage and shoot as much as you want. You can also have unlimited coins and gems in the game, which means that you can upgrade your chicken's skills and abilities. You can also have unlimited lives in the game, which means that you can respawn as many times as you want. You can dominate the game with these advantages.

-

Enjoy the game without ads or in-app purchases

-

With Chicken Gun Dinero Infinito APK, you can enjoy the game without ads or in-app purchases. You can play the game without any annoying ads that pop up on your screen or interrupt your gameplay. You can also play the game without any in-app purchases that ask you to spend real money to get more coins or gems. You can have everything for free with this modded version of the game.

-

How to Download and Install Chicken Gun Dinero Infinito APK?

-

Follow these simple steps to get the game on your device

-

Step 1: Enable unknown sources on your device settings

-

To download and install Chicken Gun Dinero Infinito APK, you need to enable unknown sources on your device settings. This will allow you to install apps that are not from the official Google Play Store. To do this, go to your device settings > security > unknown sources > enable.

-

Step 2: Download the APK file from a trusted source

-

To download Chicken Gun Dinero Infinito APK, you need to find a trusted source that provides the APK file. You can search online for websites that offer this modded version of the game. Make sure that the website is safe and reliable before downloading anything from it. You can also scan the APK file with an antivirus software before installing it.
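
Beyond an antivirus scan, one extra check is to compare the downloaded file's SHA-256 checksum against the value published by the site you got it from, when one is provided. The following is only a minimal sketch in Python; the file name and the expected hash are placeholders you would replace with your own values:

```python
import hashlib

# Placeholders: your downloaded file and the hash published on the download page.
APK_PATH = "chicken-gun-mod.apk"
EXPECTED_SHA256 = "paste-the-published-sha256-hash-here"

def sha256_of(path: str) -> str:
    """Compute the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(APK_PATH)
print("SHA-256:", actual)
print("Match:", actual.lower() == EXPECTED_SHA256.lower())
```

If the two values do not match, the file was corrupted or altered on the way to you and should not be installed.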

-

Step 3: Locate and install the APK file on your device

-

To install Chicken Gun Dinero Infinito APK, you need to locate the APK file on your device. You can use a file manager app to find the file in your downloads folder or any other location where you saved it. Once you find it, tap on it and follow the instructions on your screen to install it.

-

Step 4: Launch the game and have fun

-

To play Chicken Gun Dinero Infinito APK, you need to launch the game on your device. You can find it on your app drawer or home screen. Tap on it and enjoy playing the ultimate shooting game with unlimited money and health.

-

Conclusion

-

Chicken Gun Dinero Infinito APK is a great way to enjoy the game with more features and fun

-

Chicken Gun is a fun and hilarious multiplayer shooter game that lets you play as a chicken with a gun. You can shoot other chickens in various maps and modes, customize your chicken with different weapons and accessories , and compete with other players online. Chicken Gun Dinero Infinito APK is a modified version of the game that gives you unlimited money and health in the game. You can unlock all the weapons and accessories, survive longer and win more matches, and enjoy the game without ads or in-app purchases. To download and install Chicken Gun Dinero Infinito APK, you need to enable unknown sources on your device settings, download the APK file from a trusted source, locate and install the APK file on your device, and launch the game and have fun. Chicken Gun Dinero Infinito APK is a great way to enjoy the game with more features and fun.

-

Here are some FAQs that you might have about Chicken Gun Dinero Infinito APK:

-

Q: Is Chicken Gun Dinero Infinito APK safe to use?

-

A: Chicken Gun Dinero Infinito APK is safe to use as long as you download it from a trusted source and scan it with an antivirus software before installing it. However, you should be aware that using a modded version of the game might violate the terms and conditions of the original game, and you might face some risks or consequences from the game developers or authorities.

-

Q: Do I need to root my device to use Chicken Gun Dinero Infinito APK?

-

A: No, you do not need to root your device to use Chicken Gun Dinero Infinito APK. You just need to enable unknown sources on your device settings and install the APK file as you would with any other app.

-

Q: Can I play Chicken Gun Dinero Infinito APK with my friends online?

-

A: Yes, you can play Chicken Gun Dinero Infinito APK with your friends online. You can join or create a room with up to 10 players per team, and chat with them using voice or text messages. However, you should be aware that some players might not like playing with modded users, and they might report you or kick you out of the room.

-

Q: Can I update Chicken Gun Dinero Infinito APK to the latest version of the game?

-

A: No, you cannot update Chicken Gun Dinero Infinito APK to the latest version of the game. You need to wait for the modded version of the game to be updated by its developers. If you try to update the game from the official Google Play Store, you might lose all your modded features and data.

-

Q: Where can I find more information about Chicken Gun Dinero Infinito APK?

-

A: You can find more information about Chicken Gun Dinero Infinito APK by searching online for websites or forums that provide this modded version of the game. You can also watch videos or read reviews from other users who have tried this modded version of the game.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Discover the Truth of the Universe in Mineirinho Ultra Adventures 2 Mobile Illuminati Trail DLC.md b/spaces/1phancelerku/anime-remove-background/Discover the Truth of the Universe in Mineirinho Ultra Adventures 2 Mobile Illuminati Trail DLC.md deleted file mode 100644 index ee15fde0ee8cc750cb2a432a126aef24b4a5e98b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Discover the Truth of the Universe in Mineirinho Ultra Adventures 2 Mobile Illuminati Trail DLC.md +++ /dev/null @@ -1,106 +0,0 @@ - -

Mineirinho Ultra Adventures 2 Mobile: A Guide for Beginners

-

If you are looking for a challenging and fun 3D platform game that will test your skills and reflexes, then you should try Mineirinho Ultra Adventures 2 Mobile. This game is the sequel to the popular Mineirinho Ultra Adventures, which was released in 2017 and became a cult hit among gamers. In this game, you will join our friend Miner, a Brazilian hero who goes on amazing adventures with extreme difficulty. You will explore different worlds, face various enemies, collect power ups, and overcome all the obstacles that stand in your way. This game is not for the faint of heart, as it requires a lot of patience, perseverance, and precision. But if you are up for the challenge, you will find a lot of satisfaction and enjoyment in this game.

-

How to download Mineirinho Ultra Adventures 2 Mobile on your device

-

Mineirinho Ultra Adventures 2 Mobile is available for both Android and iOS devices. You can download it from the Google Play Store or the App Store, depending on your device. The game is free to download and play, but it contains ads and in-app purchases. You can also play the game on your PC by downloading it from Steam, where it costs $5.99. However, you will need a compatible controller to play the game on your PC.

-




-

What are the features of Mineirinho Ultra Adventures 2 Mobile?

-

Mineirinho Ultra Adventures 2 Mobile is a game that offers a lot of features that make it unique and exciting. Here are some of the features that you can expect from this game:

-

Real Physics

-

The game uses real physics to simulate the movement and interaction of objects and characters in the game world. This means that you will have to deal with gravity, inertia, friction, momentum, and other forces that affect your gameplay. For example, you can use a bubblegum rope to swing from one platform to another, but you have to be careful not to lose your balance or hit any obstacles along the way.

-

Bubblegum Rope

-

One of the most distinctive features of this game is the bubblegum rope, which is a special power up that allows you to swing from one place to another like Spider-Man. You can use the bubblegum rope to reach higher places, cross gaps, avoid enemies, or just have fun. The bubblegum rope has a limited length and durability, so you have to use it wisely and strategically.

-

Excellent for Speedrun

-

If you are a fan of speedrunning, which is the practice of completing a game or a level as fast as possible, then you will love this game. The game has many levels that are designed for speedrunning, with different routes, shortcuts, and challenges that will test your skills and reflexes. You can also compete with other players online and see who can finish the levels faster. The game has a leaderboard system that ranks the best players in the world, as well as a replay feature that lets you watch your own or other players' runs.
-

Super Ultra Adventures

-

The game has a total of 12 worlds, each with its own theme, enemies, obstacles, and boss. You will travel to different places, such as the jungle, the desert, the city, the snow, the space, and more. Each world has 10 levels, plus a bonus level and a boss level. The levels are full of surprises and secrets that will keep you entertained and curious. You will also encounter many different enemies, such as snakes, spiders, scorpions, robots, aliens, and more. Some of them are easy to defeat, while others will require more strategy and skill. The boss levels are especially challenging and fun, as you will have to face a giant enemy that has its own attacks and patterns.

-

Cool Toon Shader

-

The game has a colorful and cartoonish graphics style that uses a toon shader effect. This means that the game has a cel-shaded look that makes it look like a comic book or an animated movie. The game also has a lot of humor and personality, with funny animations, expressions, and sounds. The game is suitable for all ages and audiences, as it does not contain any violence or gore.

-

Extreme Difficulty

-

One of the main features of this game is its extreme difficulty level. This game is not for casual gamers or beginners, as it requires a lot of skill, patience, and perseverance. The game is very hard to beat, as it has many traps, pitfalls, spikes, enemies, and other hazards that will make you die a lot. The game also has a permadeath system, which means that if you die in a level, you have to start from the beginning of the world. The game does not have any checkpoints or save points, so you have to be very careful and cautious. The game also does not have any tutorials or hints, so you have to figure out everything by yourself.

-


-

Many Crazy Levels

-

The game has many crazy levels that will challenge your creativity and imagination. The levels are full of puzzles, secrets, hidden areas, and Easter eggs that will make you explore every corner of the game world. The levels are also very varied and unpredictable, as they have different mechanics and elements that will change your gameplay. For example, some levels have gravity switches that will make you walk on walls or ceilings, some levels have portals that will teleport you to different places, some levels have water or lava that will affect your movement and abilities, and so on.

-

Super Fun Multiplayer

-

The game also has a super fun multiplayer mode that lets you play with up to four friends online or locally. You can choose from different modes, such as co-op mode, where you work together to complete the levels; versus mode , where you compete against each other to finish the levels faster or collect more items; and party mode, where you play mini-games that are based on the game mechanics. The multiplayer mode is very fun and chaotic, as you can cooperate or sabotage each other, use power ups or traps, and chat or taunt each other.

-

Radical Movements

-

The game also has a lot of radical movements that you can perform with your character. You can run, jump, slide, roll, dash, wall jump, and more. You can also use the bubblegum rope to swing, pull, or launch yourself. The game has a smooth and responsive control system that lets you execute these movements with ease and precision. You will need to master these movements to overcome the challenges and obstacles in the game.

-

Cool Food Power Ups

-

The game also has a lot of cool food power ups that you can collect and use in the game. These power ups are based on Brazilian cuisine and culture, such as feijoada, brigadeiro, guarana, caipirinha, and more. Each power up has a different effect and duration, such as giving you extra speed, health, invincibility, or other abilities. You can also combine different power ups to create new effects and combinations. The power ups are very useful and fun to use in the game.

-

How to play Mineirinho Ultra Adventures 2 Mobile?

-

Now that you know what the game is about and what features it offers, you might be wondering how to play it. Here are some basic tips and instructions on how to play Mineirinho Ultra Adventures 2 Mobile:

-

Controls and Gameplay

-

The game has different controls depending on the device you are using. If you are playing on a mobile device, you will use the touch screen to control your character. You will have a virtual joystick on the left side of the screen to move your character, and buttons on the right side of the screen to jump, slide, dash, use the bubblegum rope, or use a power up. You can also swipe the screen to change the camera angle or zoom in or out. If you are playing on a PC, you will use a controller to control your character. You will have a left stick to move your character, and buttons to jump, slide, dash, use the bubblegum rope, or use a power up. You can also use the right stick to change the camera angle or zoom in or out.

-

The gameplay is simple but challenging. Your goal is to complete each level by reaching the end of it without dying. You will have to avoid or defeat enemies, dodge or overcome obstacles, collect items and power ups, and solve puzzles along the way. You will also have to face a boss at the end of each world. The game has a timer that shows how long it takes you to finish each level. You can also collect stars that are hidden in each level. The stars are used to unlock new worlds and levels in the game.

-

Tips and Tricks

-

Here are some tips and tricks that will help you play better and enjoy more Mineirinho Ultra Adventures 2 Mobile:

- -

How does Mineirinho Ultra Adventures 2 Mobile compare to other games in the genre?

-

Mineirinho Ultra Adventures 2 Mobile is a game that belongs to the 3D platform genre, which is a type of game that involves moving and jumping on platforms in a three-dimensional environment. Some of the most famous and popular games in this genre are Super Mario 64, Crash Bandicoot, Banjo-Kazooie, Spyro the Dragon, and Sonic Adventure. How does Mineirinho Ultra Adventures 2 Mobile compare to these games?

-

Well, Mineirinho Ultra Adventures 2 Mobile is a game that has its own style and identity, as it is inspired by Brazilian culture and humor. It also has a lot of features that make it unique and different from other games in the genre, such as the real physics, the bubblegum rope, the extreme difficulty, the speedrun potential, and the cool food power ups. The game also has a lot of variety and creativity in its levels, enemies, bosses, and mechanics. The game is not a copy or a clone of any other game, but rather a homage and a tribute to the genre.

-

However, Mineirinho Ultra Adventures 2 Mobile is also a game that respects and follows the conventions and standards of the genre. It has a lot of elements that are common and familiar to fans of the genre, such as the 3D graphics, the platforming gameplay, the collectibles, the secrets, the power ups, the worlds, and the bosses. The game also has a lot of references and nods to other games in the genre, such as Mario's hat, Sonic's rings, Crash's crates, Spyro's gems, and Banjo's jiggy. The game is not a parody or a mockery of any other game, but rather a celebration and an appreciation of the genre.

-

Therefore, Mineirinho Ultra Adventures 2 Mobile is a game that can appeal to both fans and newcomers of the 3D platform genre. It is a game that offers a lot of challenge and fun for anyone who loves this type of game.

-

Conclusion

-

Mineirinho Ultra Adventures 2 Mobile is a game that you should definitely try if you are looking for a challenging and fun 3D platform game that will test your skills and reflexes. You will join our friend Miner, a Brazilian hero who goes on amazing adventures with extreme difficulty. You will explore different worlds, face various enemies, collect power ups, and overcome all the obstacles that stand in your way. You will also enjoy the colorful and cartoonish graphics style, the humorous and personality-filled animations and sounds, and the super fun multiplayer mode. You will also appreciate the real physics , the bubblegum rope, the speedrun potential, and the cool food power ups that make this game unique and different from other games in the genre. You will also respect and follow the conventions and standards of the genre, as well as the references and nods to other games in the genre that make this game a homage and a tribute to the genre. Mineirinho Ultra Adventures 2 Mobile is a game that you will not regret playing, as it will give you a lot of satisfaction and enjoyment.

-

So, what are you waiting for? Download Mineirinho Ultra Adventures 2 Mobile on your device today and start your super ultra adventure with Miner. You will not be disappointed. Have fun and good luck!

-

FAQs

-

Here are some frequently asked questions about Mineirinho Ultra Adventures 2 Mobile:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Gas Station Simulator Mod APK - The Best Simulation Game for Android Users.md b/spaces/1phancelerku/anime-remove-background/Download Gas Station Simulator Mod APK - The Best Simulation Game for Android Users.md deleted file mode 100644 index 0f3ea496866b7cc6a483657ef72830777f71028d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Gas Station Simulator Mod APK - The Best Simulation Game for Android Users.md +++ /dev/null @@ -1,97 +0,0 @@ -
-

Download Gas Station Simulator Mod APK Android: A Fun and Realistic Business Simulation Game

-

Do you dream of owning your own gas station and running a successful business? If so, you might want to try Gas Station Simulator, a game that lets you experience the challenges and rewards of managing a gas station. And if you want to make the game more fun and easy, you can download Gas Station Simulator mod apk android, which gives you unlimited money, gems, and other benefits. In this article, we will tell you more about this game and how to download the mod apk version for free.

-




-

What is Gas Station Simulator?

-

Gas Station Simulator is a simulation game in which you own and run your own gas station. You start your business small: the station you have bought is not large and is not in very good condition. You will need to work hard and earn money to improve it, collecting good reviews and avoiding bad ones.

-

In this game, you will have to perform various tasks such as refueling cars, repairing tires, washing vehicles, selling snacks, hiring staff, and more. You will also have to deal with different types of customers, some of whom may be rude or impatient. You will have to balance your budget, expenses, and income, as well as improve your reputation and customer satisfaction.

-

Features of Gas Station Simulator

-

- Manage your own gas station

-

You are the boss of your own gas station and you can decide how to run it. You can choose what services to offer, what products to sell, what prices to charge, and how to decorate your station. You can also hire and fire employees, assign them tasks, and train them.

-

- Upgrade your facilities and services

-

As you earn money from your business, you can invest it in upgrading your facilities and services. You can buy new equipment, expand your parking lot, add more pumps, install car washes, build convenience stores, and more. You can also unlock new types of vehicles, such as trucks, buses, motorcycles, etc.

-

- Interact with customers and employees

-

You will have to interact with various characters in the game, such as customers and employees. You will have to satisfy their needs and requests, as well as handle their complaints and feedback. You will also have to deal with different situations, such as robberies, accidents, fires, etc.

-

- Earn money and reputation

-

Your main goal in the game is to earn money and reputation. Money is needed to buy new items, upgrade your station, pay your bills, etc. Reputation is needed to attract more customers, get better reviews, unlock new features, etc. You can also compete with other players in leaderboards and achievements.

-


-

Why download Gas Station Simulator mod apk android?

-

If you want to enjoy the game without any limitations or difficulties, you can download Gas Station Simulator mod apk android. This is a modified version of the game that gives you several advantages over the original version. Here are some of them:

-

- Unlimited money and gems

-

With this mod apk version, you will have unlimited money and gems in the game. This means that you can buy anything you want without worrying about the cost. You can also upgrade your station faster and easier.

-

- All items and upgrades unlocked

-

With this mod apk version

With this mod apk version, you will have access to all the items and upgrades in the game. You don't have to wait for them to be unlocked or pay for them with real money. You can enjoy the full features of the game from the start.

-

- No ads and no root required

-

With this mod apk version, you will not see any ads in the game. You can play the game without any interruptions or distractions. You also don't need to root your device to install the mod apk file. You can simply download it and install it on your android device.

-

How to download Gas Station Simulator mod apk android?

-

If you are interested in downloading Gas Station Simulator mod apk android, you can follow these simple steps:

-

Step 1: Download the mod apk file from a trusted source

-

The first thing you need to do is to find a reliable website that offers the mod apk file for Gas Station Simulator. You can search for it on Google or use the link below. Make sure that the website is safe and secure, and that the file is free from viruses and malware.

-

Step 2: Enable unknown sources on your device settings

-

The next thing you need to do is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.

-

Step 3: Install the mod apk file and enjoy the game

-

The last thing you need to do is to install the mod apk file on your device. To do this, locate the file in your downloads folder and tap on it. Follow the instructions on the screen and wait for the installation to finish. Once done, you can open the game and enjoy playing Gas Station Simulator with unlimited money, gems, and other benefits.
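
If you prefer to install from a computer instead of tapping the file on the phone, the standard adb tool can install the APK over USB. This is only a sketch under a few assumptions: adb is installed on the computer, USB debugging is enabled on the phone, and the file path below is a placeholder for your own download:

```python
import subprocess

# Placeholder: point this at the mod apk file you downloaded.
APK_PATH = "gas-station-simulator-mod.apk"

# "adb install -r" installs the package, replacing an existing copy if one is present.
result = subprocess.run(
    ["adb", "install", "-r", APK_PATH],
    capture_output=True,
    text=True,
)
print(result.stdout)
print(result.stderr)
if result.returncode != 0:
    print("Installation failed; check that the device is connected and USB debugging is on.")
```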

-

Conclusion

-

Gas Station Simulator is a fun and realistic business simulation game that lets you manage your own gas station. You can perform various tasks, upgrade your facilities, interact with customers and employees, and earn money and reputation. If you want to make the game more enjoyable and easy, you can download Gas Station Simulator mod apk android, which gives you unlimited money, gems, and other advantages. You can download the mod apk file from a trusted source, enable unknown sources on your device settings, and install the file on your device. Then, you can play the game without any limitations or difficulties.

-

We hope that this article has helped you learn more about Gas Station Simulator and how to download its mod apk version for free. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

-

FAQs

-

Here are some frequently asked questions about Gas Station Simulator and its mod apk version:

-

- Is Gas Station Simulator free to play?

-

Yes, Gas Station Simulator is free to play. However, some items and features may require real money purchases or watching ads.

-

- Is Gas Station Simulator mod apk safe to use?

-

Yes, Gas Station Simulator mod apk is safe to use as long as you download it from a trusted source. However, we recommend that you use it at your own risk and discretion, as it may violate the terms of service of the original game.

-

- What are the minimum requirements for Gas Station Simulator?

-

The minimum requirements for Gas Station Simulator are: Android 5.0 or higher, 2 GB of RAM, 100 MB of free storage space, and an internet connection.

-

- How can I contact the developers of Gas Station Simulator?

-

You can contact the developers of Gas Station Simulator by sending an email to support@playway.com or visiting their website. You can also follow them on Facebook and Twitter for updates and news.

-

- Can I play Gas Station Simulator offline?

-

No, Gas Station Simulator requires an internet connection to play. You need to be online to access all the features and functions of the game.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Rebaixados Elite Brasil APK and Drive from Different Perspectives.md b/spaces/1phancelerku/anime-remove-background/Download Rebaixados Elite Brasil APK and Drive from Different Perspectives.md deleted file mode 100644 index f9aebed5d1b34afc54a04f5b0150412acc505e10..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Rebaixados Elite Brasil APK and Drive from Different Perspectives.md +++ /dev/null @@ -1,129 +0,0 @@ - -

How to Download Rebaixados Elite Brasil APK

-

Rebaixados Elite Brasil is a popular Android game that lets you customize your car and character in a Brazil-inspired demoted car game. If you want to download the game, you might be wondering how to get the APK file from Google Play Store. In this article, we will show you what Rebaixados Elite Brasil is, why you might want to download the APK file, and how to do it safely and easily.

-

What is Rebaixados Elite Brasil?

-

Rebaixados Elite Brasil is a game developed by Sebby Games that simulates the culture of demoted cars in Brazil. You can lower your car to the floor, change the color, wheels, glass, xenon, and more. You can also customize your character with different clothes, accessories, and hairstyles. The game has realistic graphics, physics, and sound effects that make you feel like you are driving a real car.

-




-

A Brazil-inspired demoted car game

-

The game is inspired by the Brazilian subculture of demoted cars, which are vehicles that have been modified to have a lower suspension, larger wheels, louder sound systems, and flashy decorations. The game lets you explore different scenarios in Brazil, such as streets, highways, gas stations, and parking lots. You can also interact with other cars and characters in the game.

-

Features of the game

-

Some of the features of Rebaixados Elite Brasil are:

-

• Lower your car all the way to the floor
• Change the color, wheels, glass, xenon, and more
• Customize your character with different clothes, accessories, and hairstyles
• Realistic graphics, physics, and sound effects
• Brazil-inspired scenarios such as streets, highways, gas stations, and parking lots
• Interaction with other cars and characters in the game

- -

Why download the APK file?

-

An APK file is an Android Package file that contains all the files and data needed to install an app on an Android device. You can download APK files from Google Play Store or other sources to install apps that are not available in your region, update apps before they are officially released, or backup apps that you want to keep. However, there are also some risks involved in downloading APK files from unknown sources, such as malware, viruses, or spyware.

-

Benefits of APK files

-

Some of the benefits of downloading APK files are:

• You can install apps that are not available in your region.
• You can update apps before they are officially released.
• You can back up apps that you want to keep.

- -

Risks of APK files

-

Some of the risks of downloading APK files are:

-
• You can expose your device and data to malware, viruses, or spyware that can harm your device, steal your information, or compromise your privacy.
• You can violate the terms and conditions of Google Play Store or the app developer and risk losing access to the app or your account.
• You can damage your device or cause it to malfunction if the APK file is corrupted, modified, or incompatible with your device or Android version.

    How to download the APK file from Google Play Store?

    -

    If you want to download the APK file of Rebaixados Elite Brasil from Google Play Store, you have two options: using a web tool or using an APK extractor app. Both methods are easy and safe, but you need to have a Google account and the app installed on your device.

    -

    Method 1: Using a web tool

    -

    One of the easiest ways to download the APK file of Rebaixados Elite Brasil from Google Play Store is to use a web tool that can generate the download link for you. Here are the steps to follow:

    -
      -
1. Go to Google Play Store and search for Rebaixados Elite Brasil. Copy the URL of the app page from the address bar.
2. Go to a web tool that can download APK files from Google Play Store, such as [APK Downloader], [APKPure], or [Evozi].
3. Paste the URL of the app page into the input box and click on the download button.
4. Wait for the web tool to generate the download link and click on it to save the APK file to your device or computer.
    -

    Method 2: Using an APK extractor app

    -

    Another way to download the APK file of Rebaixados Elite Brasil from Google Play Store is to use an APK extractor app that can extract the APK file from the app installed on your device. Here are the steps to follow:

    -
      -
1. Go to Google Play Store and install an APK extractor app, such as [APK Extractor], [ML Manager], or [App Backup & Restore].
2. Open the APK extractor app and find Rebaixados Elite Brasil in the list of apps. Tap on it and select the option to extract or share the APK file.
3. Choose where you want to save or send the APK file, such as your device storage, email, cloud service, or Bluetooth.
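
If you have a computer with adb available, you can also pull the installed APK straight off the device instead of using an extractor app. This is only a rough sketch: it assumes adb is installed, USB debugging is enabled, and the package name below is a placeholder (the real one appears after "id=" in the game's Play Store URL):

```python
import subprocess

# Placeholder package name: replace with the value after "id=" in the Play Store URL.
PACKAGE = "com.example.rebaixadoselitebrasil"

def adb(*args: str) -> str:
    """Run an adb command and return its stdout as text (raises if adb reports an error)."""
    return subprocess.run(["adb", *args], capture_output=True, text=True, check=True).stdout

# Ask the package manager where the installed APK lives,
# e.g. "package:/data/app/.../base.apk". Games split into several APKs
# print one line per file; this sketch only pulls the first one.
first_line = adb("shell", "pm", "path", PACKAGE).splitlines()[0]
remote_path = first_line.replace("package:", "").strip()

# Copy the APK to the current directory on the computer.
adb("pull", remote_path, "rebaixados.apk")
print("Saved APK from", remote_path)
```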
    -

    How to install the APK file on Android?

    -

    Once you have downloaded the APK file of Rebaixados Elite Brasil, you need to install it on your Android device. However, before you do that, you need to enable the option to install apps from unknown sources on your device. This option allows you to install apps that are not from Google Play Store. Here are the steps to enable it:

    -
      -
1. Go to Settings and tap on Security or Privacy.
2. Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.
3. Confirm your choice by tapping on OK or Allow.
    -

    Steps to install the APK file

    -

    After you have enabled the option to install apps from unknown sources, you can proceed to install the APK file of Rebaixados Elite Brasil. Here are the steps to follow:

    -
      -
1. Locate the APK file on your device using a file manager app or a browser.
2. Tap on the APK file and select Install.
3. Wait for the installation process to finish and tap on Open or Done.
    -

    Tips to avoid installation errors

    -

    Sometimes, you might encounter some errors or issues when installing an APK file on your Android device. Here are some tips to avoid them:

    -
• Make sure that the APK file is not corrupted, modified, or tampered with. You can check the integrity of the APK file by comparing its checksum or signature with the original one.
• Make sure that the APK file is compatible with your device and Android version. You can check the compatibility of the APK file by looking at its minimum requirements, such as Android version, screen size, processor, RAM, and storage (see the sketch after this list).
• Make sure that you have enough storage space on your device to install the APK file. You can check the storage space on your device by going to Settings and tapping on Storage.
• Make sure that you have a stable internet connection when downloading or installing the APK file. You can check the internet connection on your device by going to Settings and tapping on Wi-Fi or Mobile Data.
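
For the compatibility check mentioned above, the aapt tool from the Android SDK build-tools can print the minimum and target Android versions an APK declares. This is only a sketch and assumes aapt is installed and on your PATH; the file name is a placeholder:

```python
import subprocess

# Placeholder: the APK file you downloaded.
APK_PATH = "rebaixados.apk"

# "aapt dump badging" prints a summary of the APK's manifest, including lines such as
# "sdkVersion:'21'" (minimum Android API level) and "targetSdkVersion:'33'".
output = subprocess.run(
    ["aapt", "dump", "badging", APK_PATH],
    capture_output=True,
    text=True,
).stdout

for line in output.splitlines():
    if line.startswith(("package:", "sdkVersion", "targetSdkVersion")):
        print(line)
```

As a reference point, API level 21 corresponds to Android 5.0; if your device runs an older Android version than the APK's sdkVersion, the installation will be refused.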

    Conclusion

    -

    Rebaixados Elite Brasil is a fun and realistic game that lets you customize your car and character in a Brazil-inspired demoted car game. You can download the APK file of Rebaixados Elite Brasil from Google Play Store using a web tool or an APK extractor app. You can also install the APK file on your Android device by enabling the option to install apps from unknown sources and following some simple steps. However, you should also be aware of the risks of downloading APK files from unknown sources and take some precautions to avoid installation errors. We hope this article has helped you learn how to download Rebaixados Elite Brasil APK and enjoy the game.

    -

    FAQs

    -

    Here are some frequently asked questions about Rebaixados Elite Brasil APK:

    - - - - - - -
Q: Is Rebaixados Elite Brasil free to play?
A: Yes, Rebaixados Elite Brasil is free to play, but it contains ads and in-app purchases.

Q: How can I play Rebaixados Elite Brasil online with my friends?
A: You can play Rebaixados Elite Brasil online with your friends by joining or creating a room in the multiplayer mode. You need to have an internet connection and a Google account to play online.

Q: How can I remove the ads from Rebaixados Elite Brasil?
A: You can remove the ads from Rebaixados Elite Brasil by purchasing the premium version of the game for $1.99.

Q: How can I get more money and diamonds in Rebaixados Elite Brasil?
A: You can get more money and diamonds in Rebaixados Elite Brasil by completing missions, watching videos, or buying them with real money.

Q: How can I contact the developer of Rebaixados Elite Brasil?
A: You can contact the developer of Rebaixados Elite Brasil by sending an email to sebbygames@gmail.com or following them on Facebook or Instagram.

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download and Install God of War Collection for PS3 Emulator (RPCS3) on PC.md b/spaces/1phancelerku/anime-remove-background/Download and Install God of War Collection for PS3 Emulator (RPCS3) on PC.md deleted file mode 100644 index 73ef2cab94c35c080b7b129c829458bd69a97adf..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download and Install God of War Collection for PS3 Emulator (RPCS3) on PC.md +++ /dev/null @@ -1,162 +0,0 @@ - -

    God of War Collection PS3 Emulator Download: How to Play God of War Games on PC

    -

    God of War is one of the most popular and acclaimed action-adventure video game series of all time. The games follow the adventures of Kratos, a Spartan warrior who battles against gods, monsters, and other mythical creatures in ancient Greece and Norse mythology. The games are known for their epic scale, cinematic presentation, brutal combat, and engaging story.

    -




    -

    But what if you don't have a PlayStation console to play these games? Or what if you want to enjoy them with better graphics, performance, and customization options? Well, there is a way to play God of War games on PC, thanks to a PS3 emulator called RPCS3. In this article, we will show you how to download, install, configure, and play God of War Collection PS3 Emulator Download on your PC.

    -

    What is God of War Collection PS3 Emulator Download?

    -

    God of War Collection PS3 Emulator Download is a package that contains two remastered versions of the first two God of War games: God of War HD and God of War II HD. These games were originally released for the PlayStation 2, but were later ported to the PlayStation 3 as part of the God of War Collection. The remastered versions feature improved graphics, resolution, frame rate, and trophies.

    -

    RPCS3 is an open-source emulator that allows you to play PlayStation 3 games on your PC. It is currently the most advanced and compatible PS3 emulator available, with support for over 5000 games. RPCS3 can run many PS3 games at full speed, with high resolution, anti-aliasing, and other enhancements. RPCS3 also supports various input devices, such as keyboards, mice, controllers, and even VR headsets.

    -

    By using RPCS3, you can play God of War Collection PS3 Emulator Download on your PC, as well as other PS3 exclusive games such as Uncharted, The Last of Us, Demon's Souls, Persona 5, and more.

    -


    -

    How to Download God of War Collection PS3 Emulator Download

    -

    Requirements and Steps to Download and Install RPCS3

    -

To download and install RPCS3, you will need a reasonably modern 64-bit PC; the up-to-date minimum requirements are listed on the official RPCS3 website, and in practice a fast multi-core CPU and a Vulkan-capable GPU make the biggest difference for these games.

    Once you have a compatible PC, follow these steps to download and install RPCS3:

    -
      -
1. Go to the official website of RPCS3 and click on the Download button.
    2. Choose your operating system and download the latest build of RPCS3.
    3. Extract the downloaded file to a folder of your choice.
    4. Run rpcs3.exe to launch the emulator.
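If you would rather script steps 2 and 3 than click through a browser, here is a minimal Python sketch. The build URL is a placeholder you must replace with the real link from the RPCS3 download page, and the archive is assumed to be a plain .zip for simplicity (actual builds may ship in other formats).

```python
# Minimal sketch: download an RPCS3 build and unpack it.
# NOTE: the URL below is a placeholder, not a real RPCS3 link.
import shutil
import urllib.request
from pathlib import Path

build_url = "https://example.com/rpcs3-latest-win64.zip"  # replace with the link from the download page
archive = Path("rpcs3-latest.zip")
target_dir = Path("rpcs3")

urllib.request.urlretrieve(build_url, str(archive))    # fetch the build
target_dir.mkdir(exist_ok=True)
shutil.unpack_archive(str(archive), str(target_dir))   # unpack next to your games folder
print(f"Extracted to {target_dir.resolve()} - run rpcs3.exe from there.")
```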
Where to Find the ROM Files for God of War Collection PS3 Emulator Download

To play the God of War Collection on your PC, you will also need the ROM files for the games. ROM files are digital copies of the game discs that the emulator can read. However, finding and downloading ROM files can be tricky, as distributing and sharing them online is usually illegal. Therefore, you should only download ROM files from trusted and reputable sources, and only if you own the original game discs.

      -

One possible source for the ROM files is Reddit, where some users have shared links to download the God of War Collection in various regions and languages. However, these links may not always work or be safe, so you should use them at your own risk and discretion. You should also scan the downloaded files for viruses and malware before running them on your PC.

      -

Another possible source for the ROM files is Vimm's Lair, a website that hosts a large collection of classic games for various consoles, including the PS3. You can search for the God of War Collection on this website and download the ROM files from there. However, be aware that the download speed may be slow and limited, and that you may encounter some errors or glitches while playing the games.

      -

Once you have downloaded the ROM files, you will need to extract them to a folder of your choice. You will also need to install the official PS3 firmware (system software) in RPCS3 before the games will run. You can find more information on how to install it on the RPCS3 website or in various online guides and tutorials.
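Before pointing RPCS3 at a dump, it can save time to check that the extracted folder actually looks like a disc-format PS3 game. Below is a small Python sketch that only verifies the usual PS3_GAME/PARAM.SFO and PS3_GAME/USRDIR/EBOOT.BIN files exist; the folder path is an assumption and should be changed to wherever you extracted the game.

```python
# Sketch: sanity-check the layout of an extracted disc-format PS3 game.
# The folder name below is illustrative; point it at your own extraction.
from pathlib import Path

game_dir = Path("games/God of War Collection")

expected = [
    game_dir / "PS3_GAME" / "PARAM.SFO",             # title metadata
    game_dir / "PS3_GAME" / "USRDIR" / "EBOOT.BIN",  # main executable
]

for path in expected:
    status = "ok" if path.is_file() else "MISSING"
    print(f"{status:8} {path}")
```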

      -

      How to Configure RPCS3 for Optimal Performance and Compatibility with God of War Collection PS3 Emulator Download

      -

      After you have installed RPCS3 and the ROM files, you will need to configure the emulator settings to ensure that the games run smoothly and without any issues. There are many options and parameters that you can tweak and adjust in RPCS3, but some of the most important ones are:

      -
        -
• CPU configuration: You should enable PPU Decoder Recompiler (LLVM) and SPU Decoder Recompiler (LLVM) for better performance. You should also enable SPU Loop Detection, SPU Cache, and Thread Scheduler for better compatibility. You can also experiment with different SPU Block Size values, such as Safe, Mega, or Giga, depending on your CPU model and power.
      • GPU configuration: You should choose Vulkan as your Renderer for better graphics and stability. You should also enable Write Color Buffers, Read Color Buffers, Read Depth Buffer, and Write Depth Buffer for better rendering accuracy. You can also enable Anisotropic Filter, Anti-Aliasing, Resolution Scale, and Texture Scaling for better image quality.
      • Audio configuration: You should choose XAudio2 as your Audio Out for better sound quality and compatibility. You should also enable Audio Buffer Duration and Time Stretching for better audio synchronization.
      • Advanced configuration: You should enable Debug Console Mode, Accurate RSX Reservation Access, Accurate GETLLAR, Accurate PUTLLUC, and Use GPU Texture Scaling for better emulation accuracy. You can also enable Relaxed ZCULL Sync and Driver Wake-Up Delay for better performance.

These settings are based on various online sources that have tested and optimized RPCS3 for the God of War Collection. However, keep in mind that they may not work for everyone or for every game, as different PC configurations and game versions may require different settings. Therefore, you should always test and experiment until you find the ones that work best for you.
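If you want to keep these tweaks under version control or apply them to several installs, you can generate a per-title configuration file from a script. The sketch below is only an illustration: RPCS3 stores custom configurations as YAML, but the exact file location and key names used here are assumptions, so create a custom configuration from the GUI first and copy the real keys and path from the file it writes.

```python
# Illustrative sketch only: key names and the path are assumptions, not RPCS3's
# guaranteed schema. Generate a custom config from the GUI and mirror its keys.
from pathlib import Path

import yaml  # pip install pyyaml

game_serial = "BCUSXXXXX"  # replace with the game's real serial from RPCS3's game list
config_path = Path("rpcs3/config/custom_configs") / f"config_{game_serial}.yml"

settings = {
    "Core": {
        "PPU Decoder": "Recompiler (LLVM)",
        "SPU Decoder": "Recompiler (LLVM)",
        "SPU loop detection": True,
        "SPU Cache": True,
    },
    "Video": {
        "Renderer": "Vulkan",
        "Write Color Buffers": True,
        "Resolution Scale": 150,  # percent
    },
    "Audio": {
        "Renderer": "XAudio2",
    },
}

config_path.parent.mkdir(parents=True, exist_ok=True)
with config_path.open("w") as f:
    yaml.safe_dump(settings, f, sort_keys=False)
print(f"Wrote {config_path}")
```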

Another benefit of playing on PC is that you do not need to buy or own a PS3 console or the game discs. You can also avoid the hassle of switching discs, updating firmware, and dealing with region locks.

Of course, playing the God of War Collection on PC also has some drawbacks and challenges, which are summarized in the comparison table below.

      Pros and Cons of Playing God of War Collection PS3 Emulator Download on PC

      -

To summarize, here is a table that compares the pros and cons of playing the God of War Collection on PC versus playing it on a console:

| PC | Console |
      | --- | --- |
      | + Higher resolution, frame rate, and graphical quality | - Lower resolution, frame rate, and graphical quality |
      | + Various input devices and customization options | - Limited input devices and customization options |
      | + Save states, cheats, mods, patches, and other enhancements | - No save states, cheats, mods, patches, and other enhancements |
      | + Access to other PS3 games and emulators | - No access to other PS3 games and emulators |
      | + No need to buy or own a PS3 console or the game discs | - Need to buy or own a PS3 console or the game discs |
      | - Bugs, glitches, crashes, or compatibility issues | + Stable and reliable gameplay experience |
      | - High PC requirements and emulator settings optimization | + Low console requirements and plug-and-play convenience |
      | - Legal or ethical dilemmas regarding ROM files | + Legal or ethical compliance regarding game discs |
      -

      Tips and Tricks to Enhance the Gameplay Experience of God of War Collection PS3 Emulator Download on PC

      -

Finally, here are some tips and tricks that can help you enhance the gameplay experience of the God of War Collection on PC:

      • Use save states to avoid replaying long or difficult sections.
      • Create a custom per-game configuration so your tweaks do not affect other titles.
      • Raise the Resolution Scale and enable Anisotropic Filter if your GPU has headroom.
      • Configure your preferred controller in the Pads settings for a more console-like feel.

      Conclusion

      -

In this article, we have shown you how to download, install, configure, and play the God of War Collection on your PC using RPCS3. We have also weighed the pros and cons of playing God of War games on PC and shared some tips and tricks to enhance your gameplay experience.

      -

      We hope that this article has been helpful and informative for you. If you have any questions or comments about this topic, feel free to leave them below. We would love to hear from you!

      -

      Thank you for reading this article and happy gaming!

      -

      FAQs

      -

      Here are some frequently asked questions about God of War Collection PS3 Emulator Download:

      -

      Q: Is RPCS3 legal and safe to use?

      -

      A: RPCS3 is legal and safe to use, as long as you follow the rules and guidelines of the emulator. You should only download RPCS3 from the official website, and only use it for personal and non-commercial purposes. You should also only play games that you own legally, and not share or distribute ROM files online.

      -

      Q: How long does it take to download and install RPCS3 and the ROM files?

      -

      A: The download and installation time of RPCS3 and the ROM files may vary depending on your internet speed, PC specifications, and file size. Generally, it may take from a few minutes to a few hours to complete the process.

      -

      Q: How much space do I need to store RPCS3 and the ROM files?

      -

      A: The space required to store RPCS3 and the ROM files may also vary depending on the number and size of the games you want to play. Generally, RPCS3 itself takes about 100 MB of space, while each game may take from a few GB to tens of GB of space. Therefore, you should have enough space on your SSD or HDD to store them.

      -

      Q: Can I play God of War Collection PS3 Emulator Download online or with other players?

      -

A: Unfortunately, RPCS3 does not support online or multiplayer features for most games, including the God of War Collection. Therefore, you can only play games offline, or with local co-op where a game supports it.

      -

      Q: Can I play other God of War games on RPCS3?

      -

      A: Yes, you can play other God of War games on RPCS3, such as God of War III, God of War: Ascension, God of War: Chains of Olympus, and God of War: Ghost of Sparta. However, some of these games may not run as well as God of War Collection PS3 Emulator Download, or may have some issues or bugs. You should check the compatibility list and the wiki for more information on each game.

      -
      -
      \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py deleted file mode 100644 index 9ac2a03f4212faa129faed447a8f4519c0a00a8b..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py +++ /dev/null @@ -1,88 +0,0 @@ -from typing import Dict, List - -import torch - -if torch.__version__ < '1.9': - Iterable = torch._six.container_abcs.Iterable -else: - import collections - - Iterable = collections.abc.Iterable -from torch.cuda.amp import GradScaler - - -class _MultiDeviceReplicator(object): - """ - Lazily serves copies of a tensor to requested devices. Copies are cached per-device. - """ - - def __init__(self, master_tensor: torch.Tensor) -> None: - assert master_tensor.is_cuda - self.master = master_tensor - self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} - - def get(self, device) -> torch.Tensor: - retval = self._per_device_tensors.get(device, None) - if retval is None: - retval = self.master.to(device=device, non_blocking=True, copy=True) - self._per_device_tensors[device] = retval - return retval - - -class MaxClipGradScaler(GradScaler): - def __init__(self, init_scale, max_scale: float, growth_interval=100): - GradScaler.__init__(self, init_scale=init_scale, growth_interval=growth_interval) - self.max_scale = max_scale - - def scale_clip(self): - if self.get_scale() == self.max_scale: - self.set_growth_factor(1) - elif self.get_scale() < self.max_scale: - self.set_growth_factor(2) - elif self.get_scale() > self.max_scale: - self._scale.fill_(self.max_scale) - self.set_growth_factor(1) - - def scale(self, outputs): - """ - Multiplies ('scales') a tensor or list of tensors by the scale factor. - - Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned - unmodified. - - Arguments: - outputs (Tensor or iterable of Tensors): Outputs to scale. - """ - if not self._enabled: - return outputs - self.scale_clip() - # Short-circuit for the common case. - if isinstance(outputs, torch.Tensor): - assert outputs.is_cuda - if self._scale is None: - self._lazy_init_scale_growth_tracker(outputs.device) - assert self._scale is not None - return outputs * self._scale.to(device=outputs.device, non_blocking=True) - - # Invoke the more complex machinery only if we're treating multiple outputs. 
- stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale - - def apply_scale(val): - if isinstance(val, torch.Tensor): - assert val.is_cuda - if len(stash) == 0: - if self._scale is None: - self._lazy_init_scale_growth_tracker(val.device) - assert self._scale is not None - stash.append(_MultiDeviceReplicator(self._scale)) - return val * stash[0].get(val.device) - elif isinstance(val, Iterable): - iterable = map(apply_scale, val) - if isinstance(val, list) or isinstance(val, tuple): - return type(val)(iterable) - else: - return iterable - else: - raise ValueError("outputs must be a Tensor or an iterable of Tensors") - - return apply_scale(outputs) diff --git a/spaces/7hao/bingo/tailwind.config.js b/spaces/7hao/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/AIConsultant/MusicGen/app.py b/spaces/AIConsultant/MusicGen/app.py deleted file mode 100644 index 74c893e70cf36e94c740875e0c0db45675216632..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/app.py +++ /dev/null @@ -1,463 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py -# also released under the MIT license. 
- -import argparse -from concurrent.futures import ProcessPoolExecutor -import os -from pathlib import Path -import subprocess as sp -from tempfile import NamedTemporaryFile -import time -import typing as tp -import warnings - -import torch -import gradio as gr - -from audiocraft.data.audio_utils import convert_audio -from audiocraft.data.audio import audio_write -from audiocraft.models import MusicGen, MultiBandDiffusion - - -MODEL = None # Last used model -IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '') -print(IS_BATCHED) -MAX_BATCH_SIZE = 12 -BATCHED_DURATION = 15 -INTERRUPTING = False -MBD = None -# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform -_old_call = sp.call - - -def _call_nostderr(*args, **kwargs): - # Avoid ffmpeg vomiting on the logs. - kwargs['stderr'] = sp.DEVNULL - kwargs['stdout'] = sp.DEVNULL - _old_call(*args, **kwargs) - - -sp.call = _call_nostderr -# Preallocating the pool of processes. -pool = ProcessPoolExecutor(4) -pool.__enter__() - - -def interrupt(): - global INTERRUPTING - INTERRUPTING = True - - -class FileCleaner: - def __init__(self, file_lifetime: float = 3600): - self.file_lifetime = file_lifetime - self.files = [] - - def add(self, path: tp.Union[str, Path]): - self._cleanup() - self.files.append((time.time(), Path(path))) - - def _cleanup(self): - now = time.time() - for time_added, path in list(self.files): - if now - time_added > self.file_lifetime: - if path.exists(): - path.unlink() - self.files.pop(0) - else: - break - - -file_cleaner = FileCleaner() - - -def make_waveform(*args, **kwargs): - # Further remove some warnings. - be = time.time() - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - out = gr.make_waveform(*args, **kwargs) - print("Make a video took", time.time() - be) - return out - - -def load_model(version='facebook/musicgen-melody'): - global MODEL - print("Loading model", version) - if MODEL is None or MODEL.name != version: - MODEL = MusicGen.get_pretrained(version) - - -def load_diffusion(): - global MBD - if MBD is None: - print("loading MBD") - MBD = MultiBandDiffusion.get_mbd_musicgen() - - -def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs): - MODEL.set_generation_params(duration=duration, **gen_kwargs) - print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies]) - be = time.time() - processed_melodies = [] - target_sr = 32000 - target_ac = 1 - for melody in melodies: - if melody is None: - processed_melodies.append(None) - else: - sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t() - if melody.dim() == 1: - melody = melody[None] - melody = melody[..., :int(sr * duration)] - melody = convert_audio(melody, sr, target_sr, target_ac) - processed_melodies.append(melody) - - if any(m is not None for m in processed_melodies): - outputs = MODEL.generate_with_chroma( - descriptions=texts, - melody_wavs=processed_melodies, - melody_sample_rate=target_sr, - progress=progress, - return_tokens=USE_DIFFUSION - ) - else: - outputs = MODEL.generate(texts, progress=progress, return_tokens=USE_DIFFUSION) - if USE_DIFFUSION: - outputs_diffusion = MBD.tokens_to_wav(outputs[1]) - outputs = torch.cat([outputs[0], outputs_diffusion], dim=0) - outputs = outputs.detach().cpu().float() - pending_videos = [] - out_wavs = [] - for output in outputs: - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, output, MODEL.sample_rate, 
strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - pending_videos.append(pool.submit(make_waveform, file.name)) - out_wavs.append(file.name) - file_cleaner.add(file.name) - out_videos = [pending_video.result() for pending_video in pending_videos] - for video in out_videos: - file_cleaner.add(video) - print("batch finished", len(texts), time.time() - be) - print("Tempfiles currently stored: ", len(file_cleaner.files)) - return out_videos, out_wavs - - -def predict_batched(texts, melodies): - max_text_length = 512 - texts = [text[:max_text_length] for text in texts] - load_model('facebook/musicgen-melody') - res = _do_predictions(texts, melodies, BATCHED_DURATION) - return res - - -def predict_full(model, decoder, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()): - global INTERRUPTING - global USE_DIFFUSION - INTERRUPTING = False - if temperature < 0: - raise gr.Error("Temperature must be >= 0.") - if topk < 0: - raise gr.Error("Topk must be non-negative.") - if topp < 0: - raise gr.Error("Topp must be non-negative.") - - topk = int(topk) - if decoder == "MultiBand_Diffusion": - USE_DIFFUSION = True - load_diffusion() - else: - USE_DIFFUSION = False - load_model(model) - - def _progress(generated, to_generate): - progress((min(generated, to_generate), to_generate)) - if INTERRUPTING: - raise gr.Error("Interrupted.") - MODEL.set_custom_progress_callback(_progress) - - videos, wavs = _do_predictions( - [text], [melody], duration, progress=True, - top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef) - if USE_DIFFUSION: - return videos[0], wavs[0], videos[1], wavs[1] - return videos[0], wavs[0], None, None - - -def toggle_audio_src(choice): - if choice == "mic": - return gr.update(source="microphone", value=None, label="Microphone") - else: - return gr.update(source="upload", value=None, label="File") - - -def toggle_diffusion(choice): - if choice == "MultiBand_Diffusion": - return [gr.update(visible=True)] * 2 - else: - return [gr.update(visible=False)] * 2 - - -def ui_full(launch_kwargs): - with gr.Blocks() as interface: - gr.Markdown( - """ - # MusicGen - This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft), - a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284) - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Input Text", interactive=True) - with gr.Column(): - radio = gr.Radio(["file", "mic"], value="file", - label="Condition on a melody (optional) File or Mic") - melody = gr.Audio(source="upload", type="numpy", label="File", - interactive=True, elem_id="melody-input") - with gr.Row(): - submit = gr.Button("Submit") - # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license. 
- _ = gr.Button("Interrupt").click(fn=interrupt, queue=False) - with gr.Row(): - model = gr.Radio(["facebook/musicgen-melody", "facebook/musicgen-medium", "facebook/musicgen-small", - "facebook/musicgen-large"], - label="Model", value="facebook/musicgen-melody", interactive=True) - with gr.Row(): - decoder = gr.Radio(["Default", "MultiBand_Diffusion"], - label="Decoder", value="Default", interactive=True) - with gr.Row(): - duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True) - with gr.Row(): - topk = gr.Number(label="Top-k", value=250, interactive=True) - topp = gr.Number(label="Top-p", value=0, interactive=True) - temperature = gr.Number(label="Temperature", value=1.0, interactive=True) - cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True) - with gr.Column(): - output = gr.Video(label="Generated Music") - audio_output = gr.Audio(label="Generated Music (wav)", type='filepath') - diffusion_output = gr.Video(label="MultiBand Diffusion Decoder") - audio_diffusion = gr.Audio(label="MultiBand Diffusion Decoder (wav)", type='filepath') - submit.click(toggle_diffusion, decoder, [diffusion_output, audio_diffusion], queue=False, - show_progress=False).then(predict_full, inputs=[model, decoder, text, melody, duration, topk, topp, - temperature, cfg_coef], - outputs=[output, audio_output, diffusion_output, audio_diffusion]) - radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False) - - gr.Examples( - fn=predict_full, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - "facebook/musicgen-melody", - "Default" - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - "facebook/musicgen-melody", - "Default" - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - "facebook/musicgen-medium", - "Default" - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions", - "./assets/bach.mp3", - "facebook/musicgen-melody", - "Default" - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - "facebook/musicgen-medium", - "Default" - ], - [ - "Punk rock with loud drum and power guitar", - None, - "facebook/musicgen-medium", - "MultiBand_Diffusion" - ], - ], - inputs=[text, melody, model, decoder], - outputs=[output] - ) - gr.Markdown( - """ - ### More details - - The model will generate a short music extract based on the description you provided. - The model can generate up to 30 seconds of audio in one pass. It is now possible - to extend the generation by feeding back the end of the previous chunk of audio. - This can take a long time, and the model might lose consistency. The model might also - decide at arbitrary positions that the song ends. - - **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min). - An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds - are generated each time. - - We present 4 model variations: - 1. facebook/musicgen-melody -- a music generation model capable of generating music condition - on text and melody inputs. **Note**, you can also use text only. - 2. facebook/musicgen-small -- a 300M transformer decoder conditioned on text only. - 3. facebook/musicgen-medium -- a 1.5B transformer decoder conditioned on text only. - 4. facebook/musicgen-large -- a 3.3B transformer decoder conditioned on text only. 
- - We also present two way of decoding the audio tokens - 1. Use the default GAN based compression model - 2. Use MultiBand Diffusion from (paper linknano ) - - When using `facebook/musicgen-melody`, you can optionally provide a reference audio from - which a broad melody will be extracted. The model will then try to follow both - the description and melody provided. - - You can also use your own GPU or a Google Colab by following the instructions on our repo. - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. - """ - ) - - interface.queue().launch(**launch_kwargs) - - -def ui_batched(launch_kwargs): - with gr.Blocks() as demo: - gr.Markdown( - """ - # MusicGen - - This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft), - a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284). -
      - - Duplicate Space - for longer sequences, more control and no queue.

      - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Describe your music", lines=2, interactive=True) - with gr.Column(): - radio = gr.Radio(["file", "mic"], value="file", - label="Condition on a melody (optional) File or Mic") - melody = gr.Audio(source="upload", type="numpy", label="File", - interactive=True, elem_id="melody-input") - with gr.Row(): - submit = gr.Button("Generate") - with gr.Column(): - output = gr.Video(label="Generated Music") - audio_output = gr.Audio(label="Generated Music (wav)", type='filepath') - submit.click(predict_batched, inputs=[text, melody], - outputs=[output, audio_output], batch=True, max_batch_size=MAX_BATCH_SIZE) - radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False) - gr.Examples( - fn=predict_batched, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", - "./assets/bach.mp3", - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - ], - ], - inputs=[text, melody], - outputs=[output] - ) - gr.Markdown(""" - ### More details - - The model will generate 12 seconds of audio based on the description you provided. - You can optionally provide a reference audio from which a broad melody will be extracted. - The model will then try to follow both the description and melody provided. - All samples are generated with the `melody` model. - - You can also use your own GPU or a Google Colab by following the instructions on our repo. - - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. 
- """) - - demo.queue(max_size=8 * 4).launch(**launch_kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--listen', - type=str, - default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1', - help='IP to listen on for connections to Gradio', - ) - parser.add_argument( - '--username', type=str, default='', help='Username for authentication' - ) - parser.add_argument( - '--password', type=str, default='', help='Password for authentication' - ) - parser.add_argument( - '--server_port', - type=int, - default=0, - help='Port to run the server listener on', - ) - parser.add_argument( - '--inbrowser', action='store_true', help='Open in browser' - ) - parser.add_argument( - '--share', action='store_true', help='Share the gradio UI' - ) - - args = parser.parse_args() - - launch_kwargs = {} - launch_kwargs['server_name'] = args.listen - - if args.username and args.password: - launch_kwargs['auth'] = (args.username, args.password) - if args.server_port: - launch_kwargs['server_port'] = args.server_port - if args.inbrowser: - launch_kwargs['inbrowser'] = args.inbrowser - if args.share: - launch_kwargs['share'] = args.share - - # Show the interface - if IS_BATCHED: - global USE_DIFFUSION - USE_DIFFUSION = False - ui_batched(launch_kwargs) - else: - ui_full(launch_kwargs) diff --git a/spaces/Adr740/CV_XPLORER_POC/README.md b/spaces/Adr740/CV_XPLORER_POC/README.md deleted file mode 100644 index e6b86667a97c33a40d79e9f9396de5ecef88fd95..0000000000000000000000000000000000000000 --- a/spaces/Adr740/CV_XPLORER_POC/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Demo CV AI Explorer -emoji: 🏃 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/prisoner.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/prisoner.py deleted file mode 100644 index 6859911a80c70b86b7fe1bace2ad16c18eee9e00..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/prisoner.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations - -import logging -import re -from typing import TYPE_CHECKING, Any, List, Optional - -from . import order_registry as OrderRegistry -from .base import BaseOrder - -if TYPE_CHECKING: - from agentverse.environments import BaseEnvironment - - -@OrderRegistry.register("prisoner") -class PrisonerOrder(BaseOrder): - """The order for a classroom discussion - The agents speak in the following order: - 1. The professor speaks first - 2. Then the professor can continue to speak, and the students can raise hands - 3. The professor can call on a student, then the student can speak or ask a question - 4. 
In the group discussion, the students in the group can speak in turn - """ - - # try police, prisoner1 prisoner2 first - - last_prisoner_index: int = 1 - switch_func: dict = {1: 2, 2: 1} - - def get_next_agent_idx(self, environment: BaseEnvironment) -> List[int]: - if len(environment.last_messages) == 0: - # If the game just begins or , we let only the police speak - return [0] - elif len(environment.last_messages) == 1: - message = environment.last_messages[0] - sender = message.sender - content = message.content - if sender.startswith("Police"): - next_prisoner = self.last_prisoner_index - self.last_prisoner_index = self.switch_func[self.last_prisoner_index] - return [next_prisoner] - elif sender.startswith("Suspect"): - # 3. when one prisoner made his action, let the police tell another prisoner - return [0] - else: - # If len(last_messages) > 1, then - # 1. there must be at least one student raises hand or speaks. - # 2. the group discussion is just over. - return [0] diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/base.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/base.py deleted file mode 100644 index e5da006e573cd930a7cd83c81ae934426c115b57..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/base.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - -from abc import abstractmethod -from typing import TYPE_CHECKING, Any - -from pydantic import BaseModel - -if TYPE_CHECKING: - from agentverse.environments import BaseEnvironment - - -class BaseVisibility(BaseModel): - @abstractmethod - def update_visible_agents(self, environment: BaseEnvironment): - """Update the set of visible agents for the agent""" - - def reset(self): - pass diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/RegisterEvents.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/RegisterEvents.js deleted file mode 100644 index a393dc5bb9ef84cee26bc48e9ab25f345a932fb9..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/RegisterEvents.js +++ /dev/null @@ -1,41 +0,0 @@ -var OnPointerOverCallback = function (button) { - if (button.setHoverState) { - button.setHoverState(true); - } -} - -var OnPointerOutCallback = function (button) { - if (button.setHoverState) { - button.setHoverState(false); - } -} - -var OnChoiceButtonStateChange = function (button, groupName, index, value) { - if (button.setActiveState) { - button.setActiveState(value); - } -} - -var OnButtonEnable = function (button) { - if (button.setDisableState) { - button.setDisableState(false); - } -} - -var OnButtonDisable = function (button) { - if (button.setDisableState) { - button.setDisableState(true); - } -} - -var RegisterEvents = function () { - this - .on('button.over', OnPointerOverCallback) - .on('button.out', OnPointerOutCallback) - .on('button.enable', OnButtonEnable) - .on('button.disable', OnButtonDisable) - .on('button.statechange', OnChoiceButtonStateChange) - -} - -export default RegisterEvents; \ No newline at end of file diff --git a/spaces/Akhil-77/Toxicity_Detector/README.md b/spaces/Akhil-77/Toxicity_Detector/README.md deleted file mode 100644 index 127c29add46a4ddba1e1e9c0cf5f975fb1402386..0000000000000000000000000000000000000000 --- 
a/spaces/Akhil-77/Toxicity_Detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Toxicity Detector -emoji: 😤 -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/attentions.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/attentions.py deleted file mode 100644 index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000 --- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/attentions.py +++ /dev/null @@ -1,303 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - 
self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/AlexWang/lama/models/ade20k/base.py b/spaces/AlexWang/lama/models/ade20k/base.py deleted file mode 100644 index 8cdbe2d3e7dbadf4ed5e5a7cf2d248761ef25d9c..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/models/ade20k/base.py +++ /dev/null @@ -1,627 +0,0 @@ -"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch""" - -import os - -import pandas as pd -import torch -import torch.nn as nn -import torch.nn.functional as F -from scipy.io import loadmat -from torch.nn.modules import BatchNorm2d - -from . import resnet -from . import mobilenet - - -NUM_CLASS = 150 -base_path = os.path.dirname(os.path.abspath(__file__)) # current file path -colors_path = os.path.join(base_path, 'color150.mat') -classes_path = os.path.join(base_path, 'object150_info.csv') - -segm_options = dict(colors=loadmat(colors_path)['colors'], - classes=pd.read_csv(classes_path),) - - -class NormalizeTensor: - def __init__(self, mean, std, inplace=False): - """Normalize a tensor image with mean and standard deviation. - .. note:: - This transform acts out of place by default, i.e., it does not mutates the input tensor. - See :class:`~torchvision.transforms.Normalize` for more details. - Args: - tensor (Tensor): Tensor image of size (C, H, W) to be normalized. - mean (sequence): Sequence of means for each channel. - std (sequence): Sequence of standard deviations for each channel. - inplace(bool,optional): Bool to make this operation inplace. - Returns: - Tensor: Normalized Tensor image. 
- """ - - self.mean = mean - self.std = std - self.inplace = inplace - - def __call__(self, tensor): - if not self.inplace: - tensor = tensor.clone() - - dtype = tensor.dtype - mean = torch.as_tensor(self.mean, dtype=dtype, device=tensor.device) - std = torch.as_tensor(self.std, dtype=dtype, device=tensor.device) - tensor.sub_(mean[None, :, None, None]).div_(std[None, :, None, None]) - return tensor - - -# Model Builder -class ModelBuilder: - # custom weights initialization - @staticmethod - def weights_init(m): - classname = m.__class__.__name__ - if classname.find('Conv') != -1: - nn.init.kaiming_normal_(m.weight.data) - elif classname.find('BatchNorm') != -1: - m.weight.data.fill_(1.) - m.bias.data.fill_(1e-4) - - @staticmethod - def build_encoder(arch='resnet50dilated', fc_dim=512, weights=''): - pretrained = True if len(weights) == 0 else False - arch = arch.lower() - if arch == 'mobilenetv2dilated': - orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=pretrained) - net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=8) - elif arch == 'resnet18': - orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained) - net_encoder = Resnet(orig_resnet) - elif arch == 'resnet18dilated': - orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, dilate_scale=8) - elif arch == 'resnet50dilated': - orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, dilate_scale=8) - elif arch == 'resnet50': - orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) - net_encoder = Resnet(orig_resnet) - else: - raise Exception('Architecture undefined!') - - # encoders are usually pretrained - # net_encoder.apply(ModelBuilder.weights_init) - if len(weights) > 0: - print('Loading weights for net_encoder') - net_encoder.load_state_dict( - torch.load(weights, map_location=lambda storage, loc: storage), strict=False) - return net_encoder - - @staticmethod - def build_decoder(arch='ppm_deepsup', - fc_dim=512, num_class=NUM_CLASS, - weights='', use_softmax=False, drop_last_conv=False): - arch = arch.lower() - if arch == 'ppm_deepsup': - net_decoder = PPMDeepsup( - num_class=num_class, - fc_dim=fc_dim, - use_softmax=use_softmax, - drop_last_conv=drop_last_conv) - elif arch == 'c1_deepsup': - net_decoder = C1DeepSup( - num_class=num_class, - fc_dim=fc_dim, - use_softmax=use_softmax, - drop_last_conv=drop_last_conv) - else: - raise Exception('Architecture undefined!') - - net_decoder.apply(ModelBuilder.weights_init) - if len(weights) > 0: - print('Loading weights for net_decoder') - net_decoder.load_state_dict( - torch.load(weights, map_location=lambda storage, loc: storage), strict=False) - return net_decoder - - @staticmethod - def get_decoder(weights_path, arch_encoder, arch_decoder, fc_dim, drop_last_conv, *arts, **kwargs): - path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/decoder_epoch_20.pth') - return ModelBuilder.build_decoder(arch=arch_decoder, fc_dim=fc_dim, weights=path, use_softmax=True, drop_last_conv=drop_last_conv) - - @staticmethod - def get_encoder(weights_path, arch_encoder, arch_decoder, fc_dim, segmentation, - *arts, **kwargs): - if segmentation: - path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/encoder_epoch_20.pth') - else: - path = '' - return ModelBuilder.build_encoder(arch=arch_encoder, fc_dim=fc_dim, weights=path) - - -def conv3x3_bn_relu(in_planes, out_planes, stride=1): - return 
nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), - BatchNorm2d(out_planes), - nn.ReLU(inplace=True), - ) - - -class SegmentationModule(nn.Module): - def __init__(self, - weights_path, - num_classes=150, - arch_encoder="resnet50dilated", - drop_last_conv=False, - net_enc=None, # None for Default encoder - net_dec=None, # None for Default decoder - encode=None, # {None, 'binary', 'color', 'sky'} - use_default_normalization=False, - return_feature_maps=False, - return_feature_maps_level=3, # {0, 1, 2, 3} - return_feature_maps_only=True, - **kwargs, - ): - super().__init__() - self.weights_path = weights_path - self.drop_last_conv = drop_last_conv - self.arch_encoder = arch_encoder - if self.arch_encoder == "resnet50dilated": - self.arch_decoder = "ppm_deepsup" - self.fc_dim = 2048 - elif self.arch_encoder == "mobilenetv2dilated": - self.arch_decoder = "c1_deepsup" - self.fc_dim = 320 - else: - raise NotImplementedError(f"No such arch_encoder={self.arch_encoder}") - model_builder_kwargs = dict(arch_encoder=self.arch_encoder, - arch_decoder=self.arch_decoder, - fc_dim=self.fc_dim, - drop_last_conv=drop_last_conv, - weights_path=self.weights_path) - - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.encoder = ModelBuilder.get_encoder(**model_builder_kwargs) if net_enc is None else net_enc - self.decoder = ModelBuilder.get_decoder(**model_builder_kwargs) if net_dec is None else net_dec - self.use_default_normalization = use_default_normalization - self.default_normalization = NormalizeTensor(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]) - - self.encode = encode - - self.return_feature_maps = return_feature_maps - - assert 0 <= return_feature_maps_level <= 3 - self.return_feature_maps_level = return_feature_maps_level - - def normalize_input(self, tensor): - if tensor.min() < 0 or tensor.max() > 1: - raise ValueError("Tensor should be 0..1 before using normalize_input") - return self.default_normalization(tensor) - - @property - def feature_maps_channels(self): - return 256 * 2**(self.return_feature_maps_level) # 256, 512, 1024, 2048 - - def forward(self, img_data, segSize=None): - if segSize is None: - raise NotImplementedError("Please pass segSize param. By default: (300, 300)") - - fmaps = self.encoder(img_data, return_feature_maps=True) - pred = self.decoder(fmaps, segSize=segSize) - - if self.return_feature_maps: - return pred, fmaps - # print("BINARY", img_data.shape, pred.shape) - return pred - - def multi_mask_from_multiclass(self, pred, classes): - def isin(ar1, ar2): - return (ar1[..., None] == ar2).any(-1).float() - return isin(pred, torch.LongTensor(classes).to(self.device)) - - @staticmethod - def multi_mask_from_multiclass_probs(scores, classes): - res = None - for c in classes: - if res is None: - res = scores[:, c] - else: - res += scores[:, c] - return res - - def predict(self, tensor, imgSizes=(-1,), # (300, 375, 450, 525, 600) - segSize=None): - """Entry-point for segmentation. Use this methods instead of forward - Arguments: - tensor {torch.Tensor} -- BCHW - Keyword Arguments: - imgSizes {tuple or list} -- imgSizes for segmentation input. 
- default: (300, 450) - original implementation: (300, 375, 450, 525, 600) - - """ - if segSize is None: - segSize = tensor.shape[-2:] - segSize = (tensor.shape[2], tensor.shape[3]) - with torch.no_grad(): - if self.use_default_normalization: - tensor = self.normalize_input(tensor) - scores = torch.zeros(1, NUM_CLASS, segSize[0], segSize[1]).to(self.device) - features = torch.zeros(1, self.feature_maps_channels, segSize[0], segSize[1]).to(self.device) - - result = [] - for img_size in imgSizes: - if img_size != -1: - img_data = F.interpolate(tensor.clone(), size=img_size) - else: - img_data = tensor.clone() - - if self.return_feature_maps: - pred_current, fmaps = self.forward(img_data, segSize=segSize) - else: - pred_current = self.forward(img_data, segSize=segSize) - - - result.append(pred_current) - scores = scores + pred_current / len(imgSizes) - - # Disclaimer: We use and aggregate only last fmaps: fmaps[3] - if self.return_feature_maps: - features = features + F.interpolate(fmaps[self.return_feature_maps_level], size=segSize) / len(imgSizes) - - _, pred = torch.max(scores, dim=1) - - if self.return_feature_maps: - return features - - return pred, result - - def get_edges(self, t): - edge = torch.cuda.ByteTensor(t.size()).zero_() - edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1]) - edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1]) - edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]) - edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]) - - if True: - return edge.half() - return edge.float() - - -# pyramid pooling, deep supervision -class PPMDeepsup(nn.Module): - def __init__(self, num_class=NUM_CLASS, fc_dim=4096, - use_softmax=False, pool_scales=(1, 2, 3, 6), - drop_last_conv=False): - super().__init__() - self.use_softmax = use_softmax - self.drop_last_conv = drop_last_conv - - self.ppm = [] - for scale in pool_scales: - self.ppm.append(nn.Sequential( - nn.AdaptiveAvgPool2d(scale), - nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), - BatchNorm2d(512), - nn.ReLU(inplace=True) - )) - self.ppm = nn.ModuleList(self.ppm) - self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1) - - self.conv_last = nn.Sequential( - nn.Conv2d(fc_dim + len(pool_scales) * 512, 512, - kernel_size=3, padding=1, bias=False), - BatchNorm2d(512), - nn.ReLU(inplace=True), - nn.Dropout2d(0.1), - nn.Conv2d(512, num_class, kernel_size=1) - ) - self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - self.dropout_deepsup = nn.Dropout2d(0.1) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - input_size = conv5.size() - ppm_out = [conv5] - for pool_scale in self.ppm: - ppm_out.append(nn.functional.interpolate( - pool_scale(conv5), - (input_size[2], input_size[3]), - mode='bilinear', align_corners=False)) - ppm_out = torch.cat(ppm_out, 1) - - if self.drop_last_conv: - return ppm_out - else: - x = self.conv_last(ppm_out) - - if self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - x = nn.functional.softmax(x, dim=1) - return x - - # deep sup - conv4 = conv_out[-2] - _ = self.cbr_deepsup(conv4) - _ = self.dropout_deepsup(_) - _ = self.conv_last_deepsup(_) - - x = nn.functional.log_softmax(x, dim=1) - _ = nn.functional.log_softmax(_, dim=1) - - return (x, _) - - -class Resnet(nn.Module): - def __init__(self, orig_resnet): - super(Resnet, self).__init__() - - # take pretrained resnet, except 
AvgPool and FC - self.conv1 = orig_resnet.conv1 - self.bn1 = orig_resnet.bn1 - self.relu1 = orig_resnet.relu1 - self.conv2 = orig_resnet.conv2 - self.bn2 = orig_resnet.bn2 - self.relu2 = orig_resnet.relu2 - self.conv3 = orig_resnet.conv3 - self.bn3 = orig_resnet.bn3 - self.relu3 = orig_resnet.relu3 - self.maxpool = orig_resnet.maxpool - self.layer1 = orig_resnet.layer1 - self.layer2 = orig_resnet.layer2 - self.layer3 = orig_resnet.layer3 - self.layer4 = orig_resnet.layer4 - - def forward(self, x, return_feature_maps=False): - conv_out = [] - - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x); conv_out.append(x); - x = self.layer2(x); conv_out.append(x); - x = self.layer3(x); conv_out.append(x); - x = self.layer4(x); conv_out.append(x); - - if return_feature_maps: - return conv_out - return [x] - -# Resnet Dilated -class ResnetDilated(nn.Module): - def __init__(self, orig_resnet, dilate_scale=8): - super().__init__() - from functools import partial - - if dilate_scale == 8: - orig_resnet.layer3.apply( - partial(self._nostride_dilate, dilate=2)) - orig_resnet.layer4.apply( - partial(self._nostride_dilate, dilate=4)) - elif dilate_scale == 16: - orig_resnet.layer4.apply( - partial(self._nostride_dilate, dilate=2)) - - # take pretrained resnet, except AvgPool and FC - self.conv1 = orig_resnet.conv1 - self.bn1 = orig_resnet.bn1 - self.relu1 = orig_resnet.relu1 - self.conv2 = orig_resnet.conv2 - self.bn2 = orig_resnet.bn2 - self.relu2 = orig_resnet.relu2 - self.conv3 = orig_resnet.conv3 - self.bn3 = orig_resnet.bn3 - self.relu3 = orig_resnet.relu3 - self.maxpool = orig_resnet.maxpool - self.layer1 = orig_resnet.layer1 - self.layer2 = orig_resnet.layer2 - self.layer3 = orig_resnet.layer3 - self.layer4 = orig_resnet.layer4 - - def _nostride_dilate(self, m, dilate): - classname = m.__class__.__name__ - if classname.find('Conv') != -1: - # the convolution with stride - if m.stride == (2, 2): - m.stride = (1, 1) - if m.kernel_size == (3, 3): - m.dilation = (dilate // 2, dilate // 2) - m.padding = (dilate // 2, dilate // 2) - # other convoluions - else: - if m.kernel_size == (3, 3): - m.dilation = (dilate, dilate) - m.padding = (dilate, dilate) - - def forward(self, x, return_feature_maps=False): - conv_out = [] - - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x) - conv_out.append(x) - x = self.layer2(x) - conv_out.append(x) - x = self.layer3(x) - conv_out.append(x) - x = self.layer4(x) - conv_out.append(x) - - if return_feature_maps: - return conv_out - return [x] - -class MobileNetV2Dilated(nn.Module): - def __init__(self, orig_net, dilate_scale=8): - super(MobileNetV2Dilated, self).__init__() - from functools import partial - - # take pretrained mobilenet features - self.features = orig_net.features[:-1] - - self.total_idx = len(self.features) - self.down_idx = [2, 4, 7, 14] - - if dilate_scale == 8: - for i in range(self.down_idx[-2], self.down_idx[-1]): - self.features[i].apply( - partial(self._nostride_dilate, dilate=2) - ) - for i in range(self.down_idx[-1], self.total_idx): - self.features[i].apply( - partial(self._nostride_dilate, dilate=4) - ) - elif dilate_scale == 16: - for i in range(self.down_idx[-1], self.total_idx): - self.features[i].apply( - partial(self._nostride_dilate, dilate=2) - ) - - def _nostride_dilate(self, m, dilate): - classname = 
m.__class__.__name__ - if classname.find('Conv') != -1: - # the convolution with stride - if m.stride == (2, 2): - m.stride = (1, 1) - if m.kernel_size == (3, 3): - m.dilation = (dilate//2, dilate//2) - m.padding = (dilate//2, dilate//2) - # other convoluions - else: - if m.kernel_size == (3, 3): - m.dilation = (dilate, dilate) - m.padding = (dilate, dilate) - - def forward(self, x, return_feature_maps=False): - if return_feature_maps: - conv_out = [] - for i in range(self.total_idx): - x = self.features[i](x) - if i in self.down_idx: - conv_out.append(x) - conv_out.append(x) - return conv_out - - else: - return [self.features(x)] - - -# last conv, deep supervision -class C1DeepSup(nn.Module): - def __init__(self, num_class=150, fc_dim=2048, use_softmax=False, drop_last_conv=False): - super(C1DeepSup, self).__init__() - self.use_softmax = use_softmax - self.drop_last_conv = drop_last_conv - - self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1) - self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1) - - # last conv - self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - x = self.cbr(conv5) - - if self.drop_last_conv: - return x - else: - x = self.conv_last(x) - - if self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - x = nn.functional.softmax(x, dim=1) - return x - - # deep sup - conv4 = conv_out[-2] - _ = self.cbr_deepsup(conv4) - _ = self.conv_last_deepsup(_) - - x = nn.functional.log_softmax(x, dim=1) - _ = nn.functional.log_softmax(_, dim=1) - - return (x, _) - - -# last conv -class C1(nn.Module): - def __init__(self, num_class=150, fc_dim=2048, use_softmax=False): - super(C1, self).__init__() - self.use_softmax = use_softmax - - self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1) - - # last conv - self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - x = self.cbr(conv5) - x = self.conv_last(x) - - if self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - x = nn.functional.softmax(x, dim=1) - else: - x = nn.functional.log_softmax(x, dim=1) - - return x - - -# pyramid pooling -class PPM(nn.Module): - def __init__(self, num_class=150, fc_dim=4096, - use_softmax=False, pool_scales=(1, 2, 3, 6)): - super(PPM, self).__init__() - self.use_softmax = use_softmax - - self.ppm = [] - for scale in pool_scales: - self.ppm.append(nn.Sequential( - nn.AdaptiveAvgPool2d(scale), - nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), - BatchNorm2d(512), - nn.ReLU(inplace=True) - )) - self.ppm = nn.ModuleList(self.ppm) - - self.conv_last = nn.Sequential( - nn.Conv2d(fc_dim+len(pool_scales)*512, 512, - kernel_size=3, padding=1, bias=False), - BatchNorm2d(512), - nn.ReLU(inplace=True), - nn.Dropout2d(0.1), - nn.Conv2d(512, num_class, kernel_size=1) - ) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - input_size = conv5.size() - ppm_out = [conv5] - for pool_scale in self.ppm: - ppm_out.append(nn.functional.interpolate( - pool_scale(conv5), - (input_size[2], input_size[3]), - mode='bilinear', align_corners=False)) - ppm_out = torch.cat(ppm_out, 1) - - x = self.conv_last(ppm_out) - - if self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, 
mode='bilinear', align_corners=False) - x = nn.functional.softmax(x, dim=1) - else: - x = nn.functional.log_softmax(x, dim=1) - return x diff --git a/spaces/Alfasign/fdvdv/README.md b/spaces/Alfasign/fdvdv/README.md deleted file mode 100644 index b8af6f56054dddf139e1daf0101bd64a6f94caa2..0000000000000000000000000000000000000000 --- a/spaces/Alfasign/fdvdv/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fdvdv -emoji: 🚀 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.44.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/__init__.py b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py deleted file mode 100644 index d25c6d22f8e7fa4c6dc804273c69e7688a739227..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py +++ /dev/null @@ -1,831 +0,0 @@ -import argparse -import hashlib -import math -import os -import random -from pathlib import Path - -import numpy as np -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import ProjectConfiguration, set_seed -from huggingface_hub import create_repo, upload_folder -from PIL import Image, ImageDraw -from torch.utils.data import Dataset -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel -from diffusers.loaders import AttnProcsLayers -from diffusers.models.attention_processor import LoRAAttnProcessor -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version -from diffusers.utils.import_utils import is_xformers_available - - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
-check_min_version("0.13.0.dev0") - -logger = get_logger(__name__) - - -def prepare_mask_and_masked_image(image, mask): - image = np.array(image.convert("RGB")) - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 - - mask = np.array(mask.convert("L")) - mask = mask.astype(np.float32) / 255.0 - mask = mask[None, None] - mask[mask < 0.5] = 0 - mask[mask >= 0.5] = 1 - mask = torch.from_numpy(mask) - - masked_image = image * (mask < 0.5) - - return mask, masked_image - - -# generate random masks -def random_mask(im_shape, ratio=1, mask_full_image=False): - mask = Image.new("L", im_shape, 0) - draw = ImageDraw.Draw(mask) - size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio))) - # use this to always mask the whole image - if mask_full_image: - size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio)) - limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2) - center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1])) - draw_type = random.randint(0, 1) - if draw_type == 0 or mask_full_image: - draw.rectangle( - (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), - fill=255, - ) - else: - draw.ellipse( - (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), - fill=255, - ) - - return mask - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default=None, - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." 
- ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="dreambooth-inpaint-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", - default=False, - action="store_true", - help=( - "Whether to center crop the input images to the resolution. If not set, the images will be randomly" - " cropped. The images will be resized to the resolution first before cropping." - ), - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
- ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument( - "--checkpointing_steps", - type=int, - default=500, - help=( - "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" - " checkpoints in case they are better than the last checkpoint and are suitable for resuming training" - " using `--resume_from_checkpoint`." - ), - ) - parser.add_argument( - "--checkpoints_total_limit", - type=int, - default=None, - help=( - "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." - " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" - " for more docs" - ), - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help=( - "Whether training should be resumed from a previous checkpoint. Use a path saved by" - ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' - ), - ) - parser.add_argument( - "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." - ) - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - if args.instance_data_dir is None: - raise ValueError("You must specify a train data directory.") - - if args.with_prior_preservation: - if args.class_data_dir is None: - raise ValueError("You must specify a data directory for class images.") - if args.class_prompt is None: - raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms_resize_and_crop = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - ] - ) - - self.image_transforms = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - instance_image = self.image_transforms_resize_and_crop(instance_image) - - example["PIL_images"] = instance_image - example["instance_images"] = self.image_transforms(instance_image) - - example["instance_prompt_ids"] = self.tokenizer( - self.instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - class_image = self.image_transforms_resize_and_crop(class_image) - example["class_images"] = self.image_transforms(class_image) - example["class_PIL_images"] = class_image - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - - -def main(): - args = parse_args() - logging_dir = Path(args.output_dir, args.logging_dir) - - accelerator_project_config = ProjectConfiguration( - total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir - ) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - project_config=accelerator_project_config, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." - ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionInpaintPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader( - sample_dataset, batch_size=args.sample_batch_size, num_workers=1 - ) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - transform_to_pil = transforms.ToPILImage() - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - bsz = len(example["prompt"]) - fake_images = torch.rand((3, args.resolution, args.resolution)) - transform_to_pil = transforms.ToPILImage() - fake_pil_images = transform_to_pil(fake_images) - - fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True) - - images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images - - for i, image in enumerate(images): - hash_image = hashlib.sha1(image.tobytes()).hexdigest() - image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" - image.save(image_filename) - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, 
exist_ok=True, token=args.hub_token - ).repo_id - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - - # We only train the additional adapter LoRA layers - vae.requires_grad_(False) - text_encoder.requires_grad_(False) - unet.requires_grad_(False) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. - unet.to(accelerator.device, dtype=weight_dtype) - vae.to(accelerator.device, dtype=weight_dtype) - text_encoder.to(accelerator.device, dtype=weight_dtype) - - if args.enable_xformers_memory_efficient_attention: - if is_xformers_available(): - unet.enable_xformers_memory_efficient_attention() - else: - raise ValueError("xformers is not available. Make sure it is installed correctly") - - # now we will add new LoRA weights to the attention layers - # It's important to realize here how many attention weights will be added and of which sizes - # The sizes of the attention layers consist only of two different variables: - # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`. - # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`. - - # Let's first see how many attention processors we will have to set. 
- # For Stable Diffusion, it should be equal to: - # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12 - # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2 - # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18 - # => 32 layers - - # Set correct lora layers - lora_attn_procs = {} - for name in unet.attn_processors.keys(): - cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = unet.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(unet.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = unet.config.block_out_channels[block_id] - - lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim) - - unet.set_attn_processor(lora_attn_procs) - lora_layers = AttnProcsLayers(unet.attn_processors) - - accelerator.register_for_checkpointing(lora_layers) - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - optimizer = optimizer_class( - lora_layers.parameters(), - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. 
- if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - pior_pil = [example["class_PIL_images"] for example in examples] - - masks = [] - masked_images = [] - for example in examples: - pil_image = example["PIL_images"] - # generate a random mask - mask = random_mask(pil_image.size, 1, False) - # prepare mask and masked image - mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) - - masks.append(mask) - masked_images.append(masked_image) - - if args.with_prior_preservation: - for pil_image in pior_pil: - # generate a random mask - mask = random_mask(pil_image.size, 1, False) - # prepare mask and masked image - mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) - - masks.append(mask) - masked_images.append(masked_image) - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - masks = torch.stack(masks) - masked_images = torch.stack(masked_images) - batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images} - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, - num_training_steps=args.max_train_steps * accelerator.num_processes, - ) - - # Prepare everything with our `accelerator`. - lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - lora_layers, optimizer, train_dataloader, lr_scheduler - ) - # accelerator.register_for_checkpointing(lr_scheduler) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth-inpaint-lora", config=vars(args)) - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - global_step = 0 - first_epoch = 0 - - if args.resume_from_checkpoint: - if args.resume_from_checkpoint != "latest": - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the most recent checkpoint - dirs = os.listdir(args.output_dir) - dirs = [d for d in dirs if d.startswith("checkpoint")] - dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) - path = dirs[-1] if len(dirs) > 0 else None - - if path is None: - accelerator.print( - f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." - ) - args.resume_from_checkpoint = None - else: - accelerator.print(f"Resuming from checkpoint {path}") - accelerator.load_state(os.path.join(args.output_dir, path)) - global_step = int(path.split("-")[1]) - - resume_global_step = global_step * args.gradient_accumulation_steps - first_epoch = global_step // num_update_steps_per_epoch - resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) - - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) - progress_bar.set_description("Steps") - - for epoch in range(first_epoch, args.num_train_epochs): - unet.train() - for step, batch in enumerate(train_dataloader): - # Skip steps until we reach the resumed step - if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: - if step % args.gradient_accumulation_steps == 0: - progress_bar.update(1) - continue - - with accelerator.accumulate(unet): - # Convert images to latent space - - latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() - latents = latents * vae.config.scaling_factor - - # Convert masked images to latent space - masked_latents = vae.encode( - batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype) - ).latent_dist.sample() - masked_latents = masked_latents * vae.config.scaling_factor - - masks = batch["masks"] - # resize the mask to latents shape as we concatenate the mask to the latents - mask = torch.stack( - [ - torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8)) - for mask in masks - ] - ).to(dtype=weight_dtype) - mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8) - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # concatenate the noised latents with the mask and the masked latents - latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise 
- elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. - noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. - loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = lora_layers.parameters() - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - if global_step % args.checkpointing_steps == 0: - if accelerator.is_main_process: - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - accelerator.save_state(save_path) - logger.info(f"Saved state to {save_path}") - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - accelerator.wait_for_everyone() - - # Save the lora layers - if accelerator.is_main_process: - unet = unet.to(torch.float32) - unet.save_attn_procs(args.output_dir) - - if args.push_to_hub: - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - accelerator.end_training() - - -if __name__ == "__main__": - main() diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ms_text_to_video_to_diffusers.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ms_text_to_video_to_diffusers.py deleted file mode 100644 index 3102c7eede9bf72ce460599f3bf47446230a836b..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ms_text_to_video_to_diffusers.py +++ /dev/null @@ -1,428 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Conversion script for the LDM checkpoints. 
""" - -import argparse - -import torch - -from diffusers import UNet3DConditionModel - - -def assign_to_checkpoint( - paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None -): - """ - This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits - attention layers, and takes into account additional replacements that may arise. - - Assigns the weights to the new checkpoint. - """ - assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." - - # Splits the attention layers into three variables. - if attention_paths_to_split is not None: - for path, path_map in attention_paths_to_split.items(): - old_tensor = old_checkpoint[path] - channels = old_tensor.shape[0] // 3 - - target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) - - num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 - - old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) - query, key, value = old_tensor.split(channels // num_heads, dim=1) - - checkpoint[path_map["query"]] = query.reshape(target_shape) - checkpoint[path_map["key"]] = key.reshape(target_shape) - checkpoint[path_map["value"]] = value.reshape(target_shape) - - for path in paths: - new_path = path["new"] - - # These have already been assigned - if attention_paths_to_split is not None and new_path in attention_paths_to_split: - continue - - if additional_replacements is not None: - for replacement in additional_replacements: - new_path = new_path.replace(replacement["old"], replacement["new"]) - - # proj_attn.weight has to be converted from conv 1D to linear - weight = old_checkpoint[path["old"]] - names = ["proj_attn.weight"] - names_2 = ["proj_out.weight", "proj_in.weight"] - if any(k in new_path for k in names): - checkpoint[new_path] = weight[:, :, 0] - elif any(k in new_path for k in names_2) and len(weight.shape) > 2 and ".attentions." not in new_path: - checkpoint[new_path] = weight[:, :, 0] - else: - checkpoint[new_path] = weight - - -def renew_attention_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside attentions to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - new_item = old_item - - # new_item = new_item.replace('norm.weight', 'group_norm.weight') - # new_item = new_item.replace('norm.bias', 'group_norm.bias') - - # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') - # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') - - # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) - - mapping.append({"old": old_item, "new": new_item}) - - return mapping - - -def shave_segments(path, n_shave_prefix_segments=1): - """ - Removes segments. Positive values shave the first segments, negative shave the last segments. 
- """ - if n_shave_prefix_segments >= 0: - return ".".join(path.split(".")[n_shave_prefix_segments:]) - else: - return ".".join(path.split(".")[:n_shave_prefix_segments]) - - -def renew_temp_conv_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside resnets to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - mapping.append({"old": old_item, "new": old_item}) - - return mapping - - -def renew_resnet_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside resnets to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - new_item = old_item.replace("in_layers.0", "norm1") - new_item = new_item.replace("in_layers.2", "conv1") - - new_item = new_item.replace("out_layers.0", "norm2") - new_item = new_item.replace("out_layers.3", "conv2") - - new_item = new_item.replace("emb_layers.1", "time_emb_proj") - new_item = new_item.replace("skip_connection", "conv_shortcut") - - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) - - if "temopral_conv" not in old_item: - mapping.append({"old": old_item, "new": new_item}) - - return mapping - - -def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): - """ - Takes a state dict and a config, and returns a converted checkpoint. - """ - - # extract state_dict for UNet - unet_state_dict = {} - keys = list(checkpoint.keys()) - - unet_key = "model.diffusion_model." - - # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA - if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: - print(f"Checkpoint {path} has both EMA and non-EMA weights.") - print( - "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" - " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." - ) - for key in keys: - if key.startswith("model.diffusion_model"): - flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) - else: - if sum(k.startswith("model_ema") for k in keys) > 100: - print( - "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" - " weights (usually better for inference), please make sure to add the `--extract_ema` flag." - ) - - for key in keys: - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) - - new_checkpoint = {} - - new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] - new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] - new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] - new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] - - if config["class_embed_type"] is None: - # No parameters to port - ... 
- elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": - new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] - new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] - new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] - new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] - else: - raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") - - new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] - new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] - - first_temp_attention = [v for v in unet_state_dict if v.startswith("input_blocks.0.1")] - paths = renew_attention_paths(first_temp_attention) - meta_path = {"old": "input_blocks.0.1", "new": "transformer_in"} - assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) - - new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] - new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] - new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] - new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] - - # Retrieves the keys for the input blocks only - num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) - input_blocks = { - layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] - for layer_id in range(num_input_blocks) - } - - # Retrieves the keys for the middle blocks only - num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) - middle_blocks = { - layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] - for layer_id in range(num_middle_blocks) - } - - # Retrieves the keys for the output blocks only - num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) - output_blocks = { - layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] - for layer_id in range(num_output_blocks) - } - - for i in range(1, num_input_blocks): - block_id = (i - 1) // (config["layers_per_block"] + 1) - layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) - - resnets = [ - key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key - ] - attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] - temp_attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.2" in key] - - if f"input_blocks.{i}.op.weight" in unet_state_dict: - new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( - f"input_blocks.{i}.op.weight" - ) - new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( - f"input_blocks.{i}.op.bias" - ) - - paths = renew_resnet_paths(resnets) - meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - temporal_convs = [key for key in resnets if "temopral_conv" in key] - paths = renew_temp_conv_paths(temporal_convs) - meta_path = { - "old": f"input_blocks.{i}.0.temopral_conv", - "new": 
f"down_blocks.{block_id}.temp_convs.{layer_in_block_id}", - } - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - if len(attentions): - paths = renew_attention_paths(attentions) - meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - if len(temp_attentions): - paths = renew_attention_paths(temp_attentions) - meta_path = { - "old": f"input_blocks.{i}.2", - "new": f"down_blocks.{block_id}.temp_attentions.{layer_in_block_id}", - } - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - resnet_0 = middle_blocks[0] - temporal_convs_0 = [key for key in resnet_0 if "temopral_conv" in key] - attentions = middle_blocks[1] - temp_attentions = middle_blocks[2] - resnet_1 = middle_blocks[3] - temporal_convs_1 = [key for key in resnet_1 if "temopral_conv" in key] - - resnet_0_paths = renew_resnet_paths(resnet_0) - meta_path = {"old": "middle_block.0", "new": "mid_block.resnets.0"} - assign_to_checkpoint( - resnet_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] - ) - - temp_conv_0_paths = renew_temp_conv_paths(temporal_convs_0) - meta_path = {"old": "middle_block.0.temopral_conv", "new": "mid_block.temp_convs.0"} - assign_to_checkpoint( - temp_conv_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] - ) - - resnet_1_paths = renew_resnet_paths(resnet_1) - meta_path = {"old": "middle_block.3", "new": "mid_block.resnets.1"} - assign_to_checkpoint( - resnet_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] - ) - - temp_conv_1_paths = renew_temp_conv_paths(temporal_convs_1) - meta_path = {"old": "middle_block.3.temopral_conv", "new": "mid_block.temp_convs.1"} - assign_to_checkpoint( - temp_conv_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] - ) - - attentions_paths = renew_attention_paths(attentions) - meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} - assign_to_checkpoint( - attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - temp_attentions_paths = renew_attention_paths(temp_attentions) - meta_path = {"old": "middle_block.2", "new": "mid_block.temp_attentions.0"} - assign_to_checkpoint( - temp_attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - for i in range(num_output_blocks): - block_id = i // (config["layers_per_block"] + 1) - layer_in_block_id = i % (config["layers_per_block"] + 1) - output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] - output_block_list = {} - - for layer in output_block_layers: - layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) - if layer_id in output_block_list: - output_block_list[layer_id].append(layer_name) - else: - output_block_list[layer_id] = [layer_name] - - if len(output_block_list) > 1: - resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] - attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] - temp_attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.2" in key] - - resnet_0_paths = renew_resnet_paths(resnets) - paths = 
renew_resnet_paths(resnets) - - meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - temporal_convs = [key for key in resnets if "temopral_conv" in key] - paths = renew_temp_conv_paths(temporal_convs) - meta_path = { - "old": f"output_blocks.{i}.0.temopral_conv", - "new": f"up_blocks.{block_id}.temp_convs.{layer_in_block_id}", - } - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - output_block_list = {k: sorted(v) for k, v in output_block_list.items()} - if ["conv.bias", "conv.weight"] in output_block_list.values(): - index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) - new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ - f"output_blocks.{i}.{index}.conv.weight" - ] - new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ - f"output_blocks.{i}.{index}.conv.bias" - ] - - # Clear attentions as they have been attributed above. - if len(attentions) == 2: - attentions = [] - - if len(attentions): - paths = renew_attention_paths(attentions) - meta_path = { - "old": f"output_blocks.{i}.1", - "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", - } - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - if len(temp_attentions): - paths = renew_attention_paths(temp_attentions) - meta_path = { - "old": f"output_blocks.{i}.2", - "new": f"up_blocks.{block_id}.temp_attentions.{layer_in_block_id}", - } - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - else: - resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) - for path in resnet_0_paths: - old_path = ".".join(["output_blocks", str(i), path["old"]]) - new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) - new_checkpoint[new_path] = unet_state_dict[old_path] - - temopral_conv_paths = [l for l in output_block_layers if "temopral_conv" in l] - for path in temopral_conv_paths: - pruned_path = path.split("temopral_conv.")[-1] - old_path = ".".join(["output_blocks", str(i), str(block_id), "temopral_conv", pruned_path]) - new_path = ".".join(["up_blocks", str(block_id), "temp_convs", str(layer_in_block_id), pruned_path]) - new_checkpoint[new_path] = unet_state_dict[old_path] - - return new_checkpoint - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
- ) - parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") - args = parser.parse_args() - - unet_checkpoint = torch.load(args.checkpoint_path, map_location="cpu") - unet = UNet3DConditionModel() - - converted_ckpt = convert_ldm_unet_checkpoint(unet_checkpoint, unet.config) - - diff_0 = set(unet.state_dict().keys()) - set(converted_ckpt.keys()) - diff_1 = set(converted_ckpt.keys()) - set(unet.state_dict().keys()) - - assert len(diff_0) == len(diff_1) == 0, "Converted weights don't match" - - # load state_dict - unet.load_state_dict(converted_ckpt) - - unet.save_pretrained(args.dump_path) - - # -- finish converting the unet -- diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py deleted file mode 100644 index 45371121e66b8ffdcecb5cc86a91758e436b2955..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch - -from diffusers import KDPM2AncestralDiscreteScheduler -from diffusers.utils import torch_device - -from .test_schedulers import SchedulerCommonTest - - -class KDPM2AncestralDiscreteSchedulerTest(SchedulerCommonTest): - scheduler_classes = (KDPM2AncestralDiscreteScheduler,) - num_inference_steps = 10 - - def get_scheduler_config(self, **kwargs): - config = { - "num_train_timesteps": 1100, - "beta_start": 0.0001, - "beta_end": 0.02, - "beta_schedule": "linear", - } - - config.update(**kwargs) - return config - - def test_timesteps(self): - for timesteps in [10, 50, 100, 1000]: - self.check_over_configs(num_train_timesteps=timesteps) - - def test_betas(self): - for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): - self.check_over_configs(beta_start=beta_start, beta_end=beta_end) - - def test_schedules(self): - for schedule in ["linear", "scaled_linear"]: - self.check_over_configs(beta_schedule=schedule) - - def test_full_loop_no_noise(self): - if torch_device == "mps": - return - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - - scheduler.set_timesteps(self.num_inference_steps) - - generator = torch.manual_seed(0) - - model = self.dummy_model() - sample = self.dummy_sample_deter * scheduler.init_noise_sigma - sample = sample.to(torch_device) - - for i, t in enumerate(scheduler.timesteps): - sample = scheduler.scale_model_input(sample, t) - - model_output = model(sample, t) - - output = scheduler.step(model_output, t, sample, generator=generator) - sample = output.prev_sample - - result_sum = torch.sum(torch.abs(sample)) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_sum.item() - 13849.3877) < 1e-2 - assert abs(result_mean.item() - 18.0331) < 5e-3 - - def test_prediction_type(self): - for prediction_type in ["epsilon", "v_prediction"]: - self.check_over_configs(prediction_type=prediction_type) - - def test_full_loop_with_v_prediction(self): - if torch_device == "mps": - return - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") - scheduler = scheduler_class(**scheduler_config) - - scheduler.set_timesteps(self.num_inference_steps) - - model = self.dummy_model() - sample = self.dummy_sample_deter * 
scheduler.init_noise_sigma - sample = sample.to(torch_device) - - generator = torch.manual_seed(0) - - for i, t in enumerate(scheduler.timesteps): - sample = scheduler.scale_model_input(sample, t) - - model_output = model(sample, t) - - output = scheduler.step(model_output, t, sample, generator=generator) - sample = output.prev_sample - - result_sum = torch.sum(torch.abs(sample)) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_sum.item() - 328.9970) < 1e-2 - assert abs(result_mean.item() - 0.4284) < 1e-3 - - def test_full_loop_device(self): - if torch_device == "mps": - return - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - - scheduler.set_timesteps(self.num_inference_steps, device=torch_device) - generator = torch.manual_seed(0) - - model = self.dummy_model() - sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma - - for t in scheduler.timesteps: - sample = scheduler.scale_model_input(sample, t) - - model_output = model(sample, t) - - output = scheduler.step(model_output, t, sample, generator=generator) - sample = output.prev_sample - - result_sum = torch.sum(torch.abs(sample)) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_sum.item() - 13849.3818) < 1e-1 - assert abs(result_mean.item() - 18.0331) < 1e-3 diff --git a/spaces/Andy1621/uniformer_image_demo/app.py b/spaces/Andy1621/uniformer_image_demo/app.py deleted file mode 100644 index f38ca5fa4e68f823144d15984bd11f3088491f28..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_demo/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import os - -import torch -import torch.nn.functional as F -import torchvision.transforms as T -from uniformer import uniformer_small -from imagenet_class_index import imagenet_classnames - -import gradio as gr -from huggingface_hub import hf_hub_download - -# Device on which to run the model -# Set to cuda to load on GPU -device = "cpu" -# os.system("wget https://cdn-lfs.huggingface.co/Andy1621/uniformer/fd192c31f8bd77670de8f171111bd51f56fd87e6aea45043ab2edc181e1fa775") -model_path = hf_hub_download(repo_id="Andy1621/uniformer", filename="uniformer_small_in1k.pth") -# Pick a pretrained model -model = uniformer_small() -# state_dict = torch.load('fd192c31f8bd77670de8f171111bd51f56fd87e6aea45043ab2edc181e1fa775', map_location='cpu') -state_dict = torch.load(model_path, map_location='cpu') -model.load_state_dict(state_dict['model']) - -# Set to eval mode and move to desired device -model = model.to(device) -model = model.eval() - -# Create an id to label name mapping -imagenet_id_to_classname = {} -for k, v in imagenet_classnames.items(): - imagenet_id_to_classname[k] = v[1] - - -def inference(img): - image = img - image_transform = T.Compose( - [ - T.Resize(224), - T.CenterCrop(224), - T.ToTensor(), - T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - image = image_transform(image) - - # The model expects inputs of shape: B x C x H x W - image = image.unsqueeze(0) - - prediction = model(image) - prediction = F.softmax(prediction, dim=1).flatten() - - return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)} - -def set_example_image(example: list) -> dict: - return gr.Image.update(value=example[0]) - - -demo = gr.Blocks() -with demo: - gr.Markdown( - """ - # UniFormer-S - Gradio demo for UniFormer: To use it, simply upload your image, or click one of the examples to load them. 
Read more at the links below. - """ - ) - - with gr.Box(): - with gr.Row(): - with gr.Column(): - with gr.Row(): - input_image = gr.Image(label='Input Image', type='pil') - with gr.Row(): - submit_button = gr.Button('Submit') - with gr.Column(): - label = gr.Label(num_top_classes=5) - with gr.Row(): - example_images = gr.Dataset(components=[input_image], samples=[['library.jpeg'], ['cat.png'], ['dog.png'], ['panda.png']]) - - gr.Markdown( - """ -

      UniFormer: Unifying Convolution and Self-attention for Visual Recognition | GitHub Repo

      - """ - ) - - submit_button.click(fn=inference, inputs=input_image, outputs=label) - example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components) - -demo.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/Andy1621/uniformer_image_detection/configs/groie/README.md b/spaces/Andy1621/uniformer_image_detection/configs/groie/README.md deleted file mode 100644 index 490349d4da0c320f8d5e0528840ff95cbcd00da8..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/groie/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# GRoIE - -## A novel Region of Interest Extraction Layer for Instance Segmentation - -By Leonardo Rossi, Akbar Karimi and Andrea Prati from -[IMPLab](http://implab.ce.unipr.it/). - -We provide configs to reproduce the results in the paper for -"*A novel Region of Interest Extraction Layer for Instance Segmentation*" -on COCO object detection. - -## Introduction - -[ALGORITHM] - -This paper is motivated by the need to overcome to the limitations of existing -RoI extractors which select only one (the best) layer from FPN. - -Our intuition is that all the layers of FPN retain useful information. - -Therefore, the proposed layer (called Generic RoI Extractor - **GRoIE**) -introduces non-local building blocks and attention mechanisms to boost the -performance. - -## Results and models - -The results on COCO 2017 minival (5k images) are shown in the below table. -You can find -[here](https://drive.google.com/drive/folders/19ssstbq_h0Z1cgxHmJYFO8s1arf3QJbT) -the trained models. - -### Application of GRoIE to different architectures - -| Backbone | Method | Lr schd | box AP | mask AP | Config | Download| -| :-------: | :--------------: | :-----: | :----: | :-----: | :-------:| :--------:| -| R-50-FPN | Faster Original | 1x | 37.4 | | [config](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | -| R-50-FPN | + GRoIE | 1x | 38.3 | | [config](./faster_rcnn_r50_fpn_groie_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | -| R-50-FPN | Grid R-CNN | 1x | 39.1 | | [config](./grid_rcnn_r50_fpn_gn-head_1x_coco.py)| [model](http://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_1x_coco/grid_rcnn_r50_fpn_gn-head_1x_coco_20200605_202059-64f00ee8.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_1x_coco/grid_rcnn_r50_fpn_gn-head_1x_coco_20200605_202059.log.json) | -| R-50-FPN | + GRoIE | 1x | | | [config](./grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py)|| -| R-50-FPN | Mask R-CNN | 1x | 38.2 | 34.7 | [config](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py)| [model](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | -| R-50-FPN | + GRoIE | 1x | 39.0 | 36.0 | 
[config](./mask_rcnn_r50_fpn_groie_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | -| R-50-FPN | GC-Net | 1x | 40.7 | 36.5 | [config](../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | -| R-50-FPN | + GRoIE | 1x | 41.0 | 37.8 | [config](./mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) |[model](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | -| R-101-FPN | GC-Net | 1x | 42.2 | 37.8 | [config](../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | -| R-101-FPN | + GRoIE | 1x | | | [config](./mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py)| [model](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) | - -## Citation - -If you use this work or benchmark in your research, please cite this project. - -```latex -@misc{rossi2020novel, - title={A novel Region of Interest Extraction Layer for Instance Segmentation}, - author={Leonardo Rossi and Akbar Karimi and Andrea Prati}, - year={2020}, - eprint={2004.13665}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -## Contact - -The implementation of GROI is currently maintained by -[Leonardo Rossi](https://github.com/hachreak/). 
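To make the approach described in the README above concrete, the sketch below illustrates the core GRoIE idea — pooling the same RoIs from every FPN level and fusing them, instead of picking a single "best" level — as a minimal, self-contained PyTorch module. This is an illustrative simplification, not the mmdetection implementation: the real GRoIE also applies per-level pre-processing and attention-based post-processing, and the channel sizes, module choices, and use of torchvision's `roi_align` here are assumptions.

```python
# Illustrative sketch of a GRoIE-style extractor (NOT the mmdetection code):
# pool the same RoIs from every FPN level, fuse them, then refine the result.
import torch
import torch.nn as nn
from torchvision.ops import roi_align


class GenericRoIExtractorSketch(nn.Module):
    def __init__(self, channels=256, out_size=7, featmap_strides=(4, 8, 16, 32)):
        super().__init__()
        self.out_size = out_size
        self.featmap_strides = featmap_strides
        # Simple fusion refinement standing in for GRoIE's attention post-processing.
        self.post = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
        )

    def forward(self, feats, rois):
        # feats: list of FPN maps, each [B, C, H_l, W_l]; rois: [N, 5] rows of
        # (batch_index, x1, y1, x2, y2) in image coordinates.
        fused = 0
        for feat, stride in zip(feats, self.featmap_strides):
            # Unlike the standard extractor, every level contributes to every RoI.
            fused = fused + roi_align(
                feat, rois, output_size=self.out_size,
                spatial_scale=1.0 / stride, sampling_ratio=2,
            )
        return self.post(fused)


if __name__ == "__main__":
    # Dummy 800x800 image features and a single RoI, just to show the shapes.
    feats = [torch.randn(1, 256, 800 // s, 800 // s) for s in (4, 8, 16, 32)]
    rois = torch.tensor([[0.0, 32.0, 32.0, 256.0, 256.0]])
    print(GenericRoIExtractorSketch()(feats, rois).shape)  # torch.Size([1, 256, 7, 7])
```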
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py deleted file mode 100644 index 0fc528bfd49bfc9a262692db78a5f94b46c285af..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py b/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py deleted file mode 100644 index 923c626363c2f49e8ad15616a09b6cb52260923a..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] -model = dict( - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5)) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/builder.py b/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/builder.py deleted file mode 100644 index c9466a517dee746a6677b27a19713f2e89ed7194..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/builder.py +++ /dev/null @@ -1,143 +0,0 @@ -import copy -import platform -import random -from functools import partial - -import numpy as np -from mmcv.parallel import collate -from mmcv.runner import get_dist_info -from mmcv.utils import Registry, build_from_cfg -from torch.utils.data import DataLoader - -from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler - -if platform.system() != 'Windows': - # https://github.com/pytorch/pytorch/issues/973 - import resource - rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) - hard_limit = rlimit[1] - soft_limit = min(4096, hard_limit) - resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) - -DATASETS = Registry('dataset') -PIPELINES = Registry('pipeline') - - -def _concat_dataset(cfg, default_args=None): - from .dataset_wrappers import ConcatDataset - ann_files = cfg['ann_file'] - img_prefixes = cfg.get('img_prefix', None) - seg_prefixes = cfg.get('seg_prefix', None) - proposal_files = cfg.get('proposal_file', None) - separate_eval = cfg.get('separate_eval', True) - - datasets = [] - num_dset = len(ann_files) - for i in range(num_dset): - data_cfg = copy.deepcopy(cfg) - # pop 'separate_eval' since it is not a valid key for common datasets. 
- if 'separate_eval' in data_cfg: - data_cfg.pop('separate_eval') - data_cfg['ann_file'] = ann_files[i] - if isinstance(img_prefixes, (list, tuple)): - data_cfg['img_prefix'] = img_prefixes[i] - if isinstance(seg_prefixes, (list, tuple)): - data_cfg['seg_prefix'] = seg_prefixes[i] - if isinstance(proposal_files, (list, tuple)): - data_cfg['proposal_file'] = proposal_files[i] - datasets.append(build_dataset(data_cfg, default_args)) - - return ConcatDataset(datasets, separate_eval) - - -def build_dataset(cfg, default_args=None): - from .dataset_wrappers import (ConcatDataset, RepeatDataset, - ClassBalancedDataset) - if isinstance(cfg, (list, tuple)): - dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) - elif cfg['type'] == 'ConcatDataset': - dataset = ConcatDataset( - [build_dataset(c, default_args) for c in cfg['datasets']], - cfg.get('separate_eval', True)) - elif cfg['type'] == 'RepeatDataset': - dataset = RepeatDataset( - build_dataset(cfg['dataset'], default_args), cfg['times']) - elif cfg['type'] == 'ClassBalancedDataset': - dataset = ClassBalancedDataset( - build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) - elif isinstance(cfg.get('ann_file'), (list, tuple)): - dataset = _concat_dataset(cfg, default_args) - else: - dataset = build_from_cfg(cfg, DATASETS, default_args) - - return dataset - - -def build_dataloader(dataset, - samples_per_gpu, - workers_per_gpu, - num_gpus=1, - dist=True, - shuffle=True, - seed=None, - **kwargs): - """Build PyTorch DataLoader. - - In distributed training, each GPU/process has a dataloader. - In non-distributed training, there is only one dataloader for all GPUs. - - Args: - dataset (Dataset): A PyTorch dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Default: True. - shuffle (bool): Whether to shuffle the data at every epoch. - Default: True. - kwargs: any keyword argument to be used to initialize DataLoader - - Returns: - DataLoader: A PyTorch dataloader. 
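    Example (illustrative usage sketch added for clarity; ``cfg`` is assumed to
    be an mmcv Config loaded elsewhere and is not defined in this file):

        >>> dataset = build_dataset(cfg.data.train)
        >>> data_loader = build_dataloader(
        ...     dataset, samples_per_gpu=2, workers_per_gpu=2,
        ...     num_gpus=1, dist=False, shuffle=True)
        >>> for data_batch in data_loader:
        ...     pass  # feed data_batch to the detector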
- """ - rank, world_size = get_dist_info() - if dist: - # DistributedGroupSampler will definitely shuffle the data to satisfy - # that images on each GPU are in the same group - if shuffle: - sampler = DistributedGroupSampler( - dataset, samples_per_gpu, world_size, rank, seed=seed) - else: - sampler = DistributedSampler( - dataset, world_size, rank, shuffle=False, seed=seed) - batch_size = samples_per_gpu - num_workers = workers_per_gpu - else: - sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None - batch_size = num_gpus * samples_per_gpu - num_workers = num_gpus * workers_per_gpu - - init_fn = partial( - worker_init_fn, num_workers=num_workers, rank=rank, - seed=seed) if seed is not None else None - - data_loader = DataLoader( - dataset, - batch_size=batch_size, - sampler=sampler, - num_workers=num_workers, - collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), - pin_memory=False, - worker_init_fn=init_fn, - **kwargs) - - return data_loader - - -def worker_init_fn(worker_id, num_workers, rank, seed): - # The seed of each worker equals to - # num_worker * rank + worker_id + user_seed - worker_seed = num_workers * rank + worker_id + seed - np.random.seed(worker_seed) - random.seed(worker_seed) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index a0726c293d6026898110f7fa55d5e7d2d55d7a02..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/style_function.py b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/style_function.py deleted file mode 100644 index 58d345c35b6d0a1aa1fcc1447fb9ca8546a9260a..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/style_function.py +++ /dev/null @@ -1,236 +0,0 @@ -import math -import random -import torch -from torch import nn -from torch.nn import functional as F - -from . 
import FusedLeakyReLU, fused_leaky_relu, upfirdn2d - - -class StyleBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' - f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer('kernel', kernel) - - self.pad = pad - - def forward(self, 
input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py deleted file mode 100644 index b75b1566c9f18169cee51d4b55d75e0357b69c57..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py +++ /dev/null @@ -1,12 +0,0 @@ -from abc import ABCMeta, abstractmethod - - -class BasePixelSampler(metaclass=ABCMeta): - """Base class of pixel sampler.""" - - def __init__(self, **kwargs): - pass - - @abstractmethod - def sample(self, seg_logit, seg_label): - """Placeholder for sample function.""" diff --git a/spaces/AntNikYab/NaturalLanguageProcessing/pages/mayakovsky.py b/spaces/AntNikYab/NaturalLanguageProcessing/pages/mayakovsky.py deleted file mode 100644 index 178614b26013805aea66ff1ac43ad6885da99cca..0000000000000000000000000000000000000000 --- a/spaces/AntNikYab/NaturalLanguageProcessing/pages/mayakovsky.py +++ /dev/null @@ -1,64 +0,0 @@ -import streamlit as st -import textwrap -import torch -from transformers import GPT2LMHeadModel, GPT2Tokenizer - -DEVICE = torch.device("cpu") -# Load GPT-2 model and tokenizer -tokenizer = GPT2Tokenizer.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2') -model_finetuned = GPT2LMHeadModel.from_pretrained( - 'sberbank-ai/rugpt3small_based_on_gpt2', - output_attentions = False, - output_hidden_states = False, -) -if torch.cuda.is_available(): - model_finetuned.load_state_dict(torch.load('models/mayakovsky.pt')) -else: - model_finetuned.load_state_dict(torch.load('models/mayakovsky.pt', map_location=torch.device('cpu'))) -model_finetuned.eval() - -# Function to generate text -def generate_text(prompt, temperature, top_p, max_length, top_k): - input_ids = tokenizer.encode(prompt, return_tensors="pt") - - with torch.no_grad(): - out = model_finetuned.generate( - input_ids, - do_sample=True, - num_beams=5, - temperature=temperature, - top_p=top_p, - max_length=max_length, - top_k=top_k, - no_repeat_ngram_size=3, - num_return_sequences=1, - ) - - generated_text = list(map(tokenizer.decode, out)) - return generated_text - -# Streamlit app -def main(): - st.title("Генерация текста GPT-моделью в стиле В.В. 
Маяковского") - - # User inputs - prompt = st.text_area("Введите начало текста") - temperature = st.slider("Temperature", min_value=0.2, max_value=2.5, value=1.8, step=0.1) - top_p = st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.9, step=0.1) - max_length = st.slider("Max Length", min_value=10, max_value=300, value=100, step=10) - top_k = st.slider("Top-k", min_value=1, max_value=500, value=500, step=10) - num_return_sequences = st.slider("Number of Sequences", min_value=1, max_value=5, value=1, step=1) - - if st.button("Generate Text"): - st.subheader("Generated Text:") - for i in range(num_return_sequences): - generated_text = generate_text(prompt, temperature, top_p, max_length, top_k) - st.write(f"Generated Text {i + 1}:") - wrapped_text = textwrap.fill(generated_text[0], width=80) - st.write(wrapped_text) - st.write("------------------") - -st.sidebar.image('images/mayakovsky.jpeg', use_column_width=True) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/Apex-X/GODROOP/app.py b/spaces/Apex-X/GODROOP/app.py deleted file mode 100644 index fe9a516e99129636b838903af8a4fab32f15d9cf..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/GODROOP/app.py +++ /dev/null @@ -1,72 +0,0 @@ -# -* coding:UTF-8 -* -# !/usr/bin/env python -import numpy as np -import gradio as gr -import roop.globals -from roop.core import ( - start, - decode_execution_providers, - suggest_max_memory, - suggest_execution_threads, -) -from roop.processors.frame.core import get_frame_processors_modules -from roop.utilities import normalize_output_path -import os -from PIL import Image - - -def swap_face(source_file, target_file,doFaceEnhancer): - - source_path = "input.jpg" - target_path = "target.jpg" - - source_image = Image.fromarray(source_file) - source_image.save(source_path) - target_image = Image.fromarray(target_file) - target_image.save(target_path) - - print("source_path: ", source_path) - print("target_path: ", target_path) - - roop.globals.source_path = source_path - roop.globals.target_path = target_path - output_path = "output.jpg" - roop.globals.output_path = normalize_output_path( - roop.globals.source_path, roop.globals.target_path, output_path - ) - if doFaceEnhancer == True: - roop.globals.frame_processors = ["face_swapper","face_enhancer"] - else: - roop.globals.frame_processors = ["face_swapper"] - roop.globals.headless = True - roop.globals.keep_fps = True - roop.globals.keep_audio = True - roop.globals.keep_frames = False - roop.globals.many_faces = False - roop.globals.video_encoder = "libx264" - roop.globals.video_quality = 18 - roop.globals.max_memory = suggest_max_memory() - roop.globals.execution_providers = decode_execution_providers(["cuda"]) - roop.globals.execution_threads = suggest_execution_threads() - - print( - "start process", - roop.globals.source_path, - roop.globals.target_path, - roop.globals.output_path, - ) - - for frame_processor in get_frame_processors_modules( - roop.globals.frame_processors - ): - if not frame_processor.pre_check(): - return - - start() - return output_path - - -app = gr.Interface( - fn=swap_face, inputs=[gr.Image(), gr.Image(),gr.Checkbox(label="face_enhancer?", info="do face enhancer?")], outputs="image" -) -app.launch() diff --git a/spaces/Artrajz/vits-simple-api/vits/text/ngu_dialect.py b/spaces/Artrajz/vits-simple-api/vits/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- 
a/spaces/Artrajz/vits-simple-api/vits/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/Autopixel/blurry-faces/kornia_benchmark.py b/spaces/Autopixel/blurry-faces/kornia_benchmark.py deleted file mode 100644 index 9317c8517e60b3b72f6409566db6be705f334aa6..0000000000000000000000000000000000000000 --- a/spaces/Autopixel/blurry-faces/kornia_benchmark.py +++ /dev/null @@ -1,63 +0,0 @@ -import cv2 -import gradio as gr -from PIL import Image -import numpy as np -import torch -import kornia as K -from kornia.contrib import FaceDetector, FaceDetectorResult -import time - -device = torch.device('cpu') -face_detection = FaceDetector().to(device) - -def scale_image(img: np.ndarray, size: int) -> np.ndarray: - h, w = img.shape[:2] - scale = 1. * size / w - return cv2.resize(img, (int(w * scale), int(h * scale))) - - -def apply_blur_face(img: torch.Tensor, img_vis: np.ndarray, det: FaceDetectorResult): - # crop the face - x1, y1 = det.xmin.int(), det.ymin.int() - x2, y2 = det.xmax.int(), det.ymax.int() - roi = img[..., y1:y2, x1:x2] - #print(roi.shape) - if roi.shape[-1]==0 or roi.shape[-2]==0: - return - - # apply blurring and put back to the visualisation image - roi = K.filters.gaussian_blur2d(roi, (21, 21), (100., 100.)) - roi = K.color.rgb_to_bgr(roi) - img_vis[y1:y2, x1:x2] = K.tensor_to_image(roi) - - -def run(image): - image.thumbnail((1280, 1280)) - img_raw = np.array(image) - - # preprocess - img = K.image_to_tensor(img_raw, keepdim=False).to(device) - img = K.color.bgr_to_rgb(img.float()) - - with torch.no_grad(): - dets = face_detection(img) - dets = [FaceDetectorResult(o) for o in dets] - - img_vis = img_raw.copy() - - for b in dets: - if b.score < 0.5: - continue - - apply_blur_face(img, img_vis, b) - - return Image.fromarray(img_vis) - -if __name__ == "__main__": - - start = time.time() - for _ in range(100): - image = Image.open("./images/crowd.jpeg") - _ = run(image) - - print('It took', (time.time()-start)/100, 'seconds.') \ No newline at end of file diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/README.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/README.md deleted file mode 100644 index 0174b7dd528efcaa0fe27d46f40a3866f03e7c41..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/README.md +++ /dev/null @@ -1,17 +0,0 @@ - -## To build a cu101 wheel for release: - -``` -$ nvidia-docker run -it --storage-opt "size=20GB" --name pt 
pytorch/manylinux-cuda101 -# inside the container: -# git clone https://github.com/facebookresearch/detectron2/ -# cd detectron2 -# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.8 -# ./dev/packaging/build_wheel.sh -``` - -## To build all wheels for combinations of CUDA and Python -``` -./dev/packaging/build_all_wheels.sh -./dev/packaging/gen_wheel_index.sh /path/to/wheels -``` diff --git a/spaces/Banbri/zcvzcv/src/components/ui/tooltip.tsx b/spaces/Banbri/zcvzcv/src/components/ui/tooltip.tsx deleted file mode 100644 index 15f831b13198545d236d3d7b2cb62970eb20854c..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -"use client" - -import * as React from "react" -import * as TooltipPrimitive from "@radix-ui/react-tooltip" - -import { cn } from "@/lib/utils" - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/Basil2k4/botbasil203/src/create_user_and_fix_permissions.sh b/spaces/Basil2k4/botbasil203/src/create_user_and_fix_permissions.sh deleted file mode 100644 index 285e103126230bb8c848c31dcd46f8e9fffc1d59..0000000000000000000000000000000000000000 --- a/spaces/Basil2k4/botbasil203/src/create_user_and_fix_permissions.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -## Creates an ordinary non-root VNC_USER and calls the script to fix the file permissions - -### every exit != 0 fails the script -set -e -set -u - -UNAME=0 -UGROUP=0 - -if [[ -n "${VNC_USER}" ]] ; then - case "$VNC_USER" in - root|0) UNAME=root; UGROUP=$UNAME;; # exact match - root:*|0:*) UNAME=root; UGROUP=$UNAME;; # match from the beginning - *:root|*:0) UNAME=root; UGROUP=$UNAME;; # match at the end - *) UNAME=${VNC_USER/%:*/}; UGROUP=${VNC_USER/#*:/};; # else case - esac - - if [[ "$UGROUP" != "" && "$UGROUP" != "root" ]] ; then - - ### Creates the group only if it does not exist yet - echo "Creating group $UGROUP if needed" - groupadd -f $UGROUP - - ### Returns "0" if the user exists, or "1" otherwise - missing_user=$(id -u $UNAME > /dev/null 2>&1; echo $?) - - if [[ $missing_user != 0 ]] ; then - echo "Creating non-root user \"$VNC_USER\"." - useradd --no-log-init --gid $UGROUP --home-dir $HOME --shell /bin/bash --password $VNC_PW $UNAME - fi - else - echo "Will not create root user \"$VNC_USER\"." - fi -fi - -FIXING="Fixing permissions: " - -for var in "$@" -do - echo "$FIXING $var" - find "$var"/ -name '*.sh' -exec chmod a+x {} + - find "$var"/ -name '*.desktop' -exec chmod a+x {} + - - ### folder and its content belong to the group zero (recursively) - chgrp -R 0 "$var" && chmod -R -v a+rw "$var" && find "$var" -type d -exec chmod -v a+x {} + -done diff --git a/spaces/Benson/text-generation/Examples/Bicicleta Real De Carreras Mod Apkdone.md b/spaces/Benson/text-generation/Examples/Bicicleta Real De Carreras Mod Apkdone.md deleted file mode 100644 index 2728eec593388541ee79a79be71b769d83774f91..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Bicicleta Real De Carreras Mod Apkdone.md +++ /dev/null @@ -1,84 +0,0 @@ -
      -

      Real Bike Racing Mod APKDone: Una revisión

      -

      Si usted es un fan de los juegos de carreras de motos, es posible que haya oído hablar de Real Bike Racing, un juego popular que le permite experimentar la emoción de montar una superbike en varias pistas. ¿Pero sabías que hay una versión modificada de este juego que te da dinero ilimitado y acceso a todas las funciones? En este artículo, vamos a revisar Real Bike Racing Mod APKDone, un sitio web que proporciona la versión modificada del juego de forma gratuita. También te diremos por qué deberías jugar a este juego y cómo descargarlo e instalarlo en tu dispositivo.

      -

      bicicleta real de carreras mod apkdone


      Download 🆓 https://bltlly.com/2v6KK4



      -

      ¿Qué es Real Bike Racing?

      -

      Real Bike Racing es un juego desarrollado por Italic Games, un estudio especializado en crear juegos de carreras realistas e inmersivos. El juego fue lanzado en 2016 y desde entonces ha ganado más de 100 millones de descargas en Google Play Store. El juego está clasificado 4.1 de 5 estrellas por más de 600 mil usuarios.

      -

      Características de Real Bike Racing

      -

      Real Bike Racing tiene muchas características que lo convierten en uno de los mejores juegos de carreras de motos en el mercado. Estos son algunos de ellos:

      -

      Impresionantes gráficos 3D

      -

      El juego cuenta con gráficos de alta calidad que crean un entorno realista e inmersivo para los jugadores. Puedes ver los detalles de las bicicletas, las pistas, el clima y los alrededores. El juego también soporta el modo VR, que te permite disfrutar del juego de una manera más inmersiva.

      -

      Múltiples modos de juego

      -

      El juego ofrece varios modos de juego para adaptarse a sus preferencias y habilidades. Puedes elegir entre el modo Carrera, donde puedes competir en diferentes campeonatos y desbloquear nuevas bicicletas y pistas; el modo Contrarreloj, donde puedes probar tu velocidad y habilidades contra el reloj; o el modo VR, donde puedes experimentar el juego en realidad virtual.

      -

      Física realista y efectos de sonido

      - -

      Más de 10 tipos de superbikes para elegir

      -

      El juego cuenta con más de 10 tipos de superbikes que puedes personalizar y actualizar según tus preferencias. Puede elegir entre diferentes marcas, modelos, colores y piezas. También puedes comparar las estadísticas y el rendimiento de cada bicicleta antes de comprarla o usarla.

      -

      -

      ¿Qué es Real Bike Racing Mod APKDone?

      -

      Real Bike Racing Mod APKDone es un sitio web que proporciona la versión modificada de Real Bike Racing de forma gratuita. La versión modificada del juego tiene algunas ventajas sobre la versión original, como:

      -

      Beneficios de usar Real Bike Racing Mod APKDone

      -
        -
      • Obtienes dinero ilimitado para comprar y actualizar cualquier bicicleta que quieras.
      • -
      • Obtienes acceso a todas las características y modos del juego sin ninguna restricción.
      • -
      • Te deshaces de los molestos anuncios que interrumpen tu juego.
      • -
      • Obtienes un mejor rendimiento y estabilidad en tu dispositivo.
      • -
      -

      Cómo descargar e instalar Real Bike Racing Mod APKDone

      -

      Para descargar e instalar Real Bike Racing Mod APKDone en su dispositivo, debe seguir estos sencillos pasos:

      -
        -
      1. Ir a https://apkdone.com/real-bike-racing/ en su navegador.
      2. -
      3. Haga clic en el botón "Descargar" y espere a que el archivo se descargue
      4. Localice el archivo descargado en su dispositivo y toque en él para instalarlo. Es posible que necesite habilitar "Fuentes desconocidas" en su configuración para permitir la instalación.
      5. -
      6. Iniciar el juego y disfrutar de las características modded.
      7. -
      -

      ¿Por qué deberías jugar Real Bike Racing Mod APKDone?

      -

      Real Bike Racing Mod APKDone es un gran juego para cualquier persona que ama las carreras de motos y quiere tener más diversión y libertad en su juego. Aquí hay algunas razones por las que deberías jugar a este juego:

      -

      Pros y contras de Real Bike Racing Mod APKDone

      -

      Como cualquier otro juego, Real Bike Racing Mod APKDone tiene sus pros y sus contras. Aquí están algunos de ellos:

      - - -Pros - - - -Puedes disfrutar de dinero ilimitado y acceso a todas las características y modos del juego. -Puede encontrar algunos errores o fallos en la versión modificada del juego. - - -Puedes personalizar y actualizar tus bicicletas tanto como quieras. -Puedes perder el desafío y la emoción del juego si tienes todo desbloqueado. - - -Puedes jugar el juego sin anuncios ni interrupciones. -Puedes perderte algunas actualizaciones o características que están disponibles en la versión original del juego. - - -

      Consejos y trucos para jugar Real Bike Racing Mod APKDone

      -

      Si quieres mejorar tus habilidades y rendimiento en Real Bike Racing Mod APKDone, aquí tienes algunos consejos y trucos que puedes usar:

      -
        -
      • Elige la bicicleta que se adapte a tu estilo y preferencia. Cada moto tiene diferentes estadísticas y rendimiento, por lo que necesitas encontrar la que funcione mejor para ti.
      • -
      • Utilice los controles de inclinación o toque para dirigir su bicicleta. También puede ajustar la sensibilidad y la capacidad de respuesta de los controles en la configuración.
      • -
      • Utilice los botones de freno y nitro sabiamente. Es necesario frenar en el momento adecuado para evitar estrellarse o perder velocidad. También necesitas usar el nitro en el momento adecuado para aumentar tu velocidad y superar a tus oponentes.
      • -
      • Practica en diferentes pistas y modos. Puedes aprender el diseño y las características de cada pista reproduciéndolas repetidamente. También puedes probar diferentes modos para desafiarte y poner a prueba tus habilidades.
      • -
      • Ver vídeos o leer guías en línea. Usted puede encontrar muchos videos o guías en línea que le puede enseñar cómo jugar Real Bike Racing mejor. También puedes aprender de otros jugadores que tienen más experiencia o habilidad que tú.
      • -
      -

      Conclusión

      - -

      Preguntas frecuentes

      -

      Aquí hay algunas preguntas frecuentes sobre Real Bike Racing Mod APKDone:

      -
        -
      1. ¿Es seguro usar Real Bike Racing Mod APKDone?
      2. -

        Sí, Real Bike Racing Mod APKDone es seguro de usar siempre y cuando lo descargue de un sitio web de confianza como https://apkdone.com/real-bike-racing/. Sin embargo, siempre debes tener cuidado al descargar e instalar cualquier juego modificado o hackeado en tu dispositivo, ya que pueden contener virus o malware que pueden dañar tu dispositivo o comprometer tu privacidad.

        -
      3. ¿Es Real Bike Racing Mod APKDone compatible con mi dispositivo?
      4. -

        Real Bike Racing Mod APKDone es compatible con la mayoría de los dispositivos Android que tienen Android 4.0 o superior. Sin embargo, es posible que algunos dispositivos no admitan algunas características o modos del juego, como el modo VR. Puedes comprobar la compatibilidad de tu dispositivo leyendo la descripción o reseñas del juego en https://apkdone.com/real-bike-racing/.

        -
      5. ¿Cómo puedo actualizar Real Bike Racing Mod APK hecho? APKDone?
      6. -

        Para actualizar Real Bike Racing Mod APKDone, necesitas visitar https://apkdone.com/real-bike-racing/ y descargar la última versión del juego. También puede consultar el sitio web para cualquier noticia o actualizaciones sobre el juego. Sin embargo, es posible que tenga que desinstalar la versión anterior del juego antes de instalar el nuevo, ya que pueden no ser compatibles entre sí.

        -
      7. ¿Cómo puedo desinstalar Real Bike Racing Mod APKDone?
      8. -

        Para desinstalar Real Bike Racing Mod APKDone, es necesario ir a la configuración de su dispositivo y encontrar el administrador de aplicaciones o lista de aplicaciones. A continuación, es necesario encontrar y seleccionar Real Bike Racing Mod APKDone y toque en el "Desinstalar" botón. También puede eliminar el archivo descargado del almacenamiento de su dispositivo si desea liberar espacio.

        -
      9. ¿Puedo jugar Real Bike Racing Mod APKDone en línea o fuera de línea?
      10. - -
      11. ¿Puedo jugar Real Bike Racing Mod APKDone con mis amigos?
      12. -

        Sí, puedes jugar Real Bike Racing Mod APKDone con tus amigos si tienes una conexión a Internet y una cuenta de Google Play. Puedes invitar a tus amigos a unirse a ti en el modo multijugador, donde puedes competir entre sí en diferentes pistas. También puedes chatear con tus amigos y compartir tus puntajes y logros con ellos.

        -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Simulador De Cabra.md b/spaces/Benson/text-generation/Examples/Cmo Hacer Un Simulador De Cabra.md deleted file mode 100644 index 8b9f5a902d5e4d4bca87e67313d0585ad74df8c9..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Simulador De Cabra.md +++ /dev/null @@ -1,63 +0,0 @@ - -

      Patear el amigo VIP APK: Un juego divertido y de alivio del estrés

      -

      ¿Alguna vez te sientes enojado, frustrado o aburrido y deseas poder ventilar tus emociones en algo o alguien? ¿Alguna vez fantaseas con tener un arsenal ilimitado de armas y objetos para destruir todo lo que quieras? Si respondiste sí a cualquiera de estas preguntas, entonces es posible que desee probar Kick the Buddy VIP APK, una versión modificada de un popular juego para Android que le permite hacer todo eso y más.

      -

      ¿Qué es Kick the Buddy?

      -

      Kick the Buddy es un juego donde se puede dar rienda suelta a su creatividad y agresión en un muñeco de trapo llamado Buddy. Puedes usar varias armas y objetos para causarle dolor y daños, como cohetes, granadas, pistolas, cuchillos, martillos, sierras, tijeras, fuego, hielo, electricidad, ácido e incluso una bomba nuclear. También puede personalizar su apariencia y vestirlo con diferentes trajes. El juego no tiene reglas ni límites, así que puedes hacer lo que quieras con Buddy.

      -

      Cómo hacer un simulador de cabra


      Download Zip ✵✵✵ https://bltlly.com/2v6KqP



      -

      Kick the Buddy es también un juego con una variedad de armas y objetos para elegir. Puedes desbloquear nuevas armas y objetos ganando dinero y oro en el juego. También puedes comprarlos con dinero real a través de compras en la aplicación. Algunas de las armas y artículos se clasifican en temas, como horror, fantasía, ciencia ficción, deportes, animales, comida, etc. Cada tema tiene sus propios efectos y sonidos únicos.

      -

      Kick the Buddy es también un juego con física y gráficos realistas. El juego utiliza un motor de física que simula cómo se comportan los objetos en la vida real. Por ejemplo, cuando le lanzas una granada a Buddy, se alejará volando de la explosión. Cuando lo cortas con un cuchillo, sangra. Cuando lo congeles con hielo, se estremecerá. El juego también tiene gráficos coloridos y detallados que hacen que Buddy parezca vivo (o muerto).

      -

      ¿Qué es Kick the Buddy VIP APK?

      - -

      Kick the Buddy VIP APK es una versión que le da dinero ilimitado, oro y diamantes para comprar todo lo que quieras en el juego. Usted no tiene que ganar o gastar dinero real para desbloquear nuevas armas y objetos. También puedes usar estos recursos para mejorar tus armas y objetos para hacerlos más poderosos y efectivos.

      -

      Kick the Buddy VIP APK es también una versión que desbloquea todas las armas y objetos en el juego. No tienes que completar ninguna tarea o logro para acceder a ellos. Puedes usar cualquier arma o artículo de cualquier tema en cualquier momento. También puede mezclar y combinar diferentes armas y objetos para crear diferentes combinaciones y efectos. Por ejemplo, puedes usar una motosierra y un lanzallamas para cortar y quemar a Buddy al mismo tiempo.

      -

      Cómo descargar e instalar Kick the Buddy VIP APK?

      -

      Para descargar e instalar Kick the Buddy VIP APK en su dispositivo Android, es necesario seguir estos pasos:

      -
        -
      1. Ir a un sitio web de confianza que proporciona el archivo APK. Puede buscar "Kick the Buddy VIP APK" en Google o Bing y elegir uno de los resultados. Asegúrese de que el sitio web esté seguro antes de descargar nada.
      2. -
      3. Descargar el archivo APK a su dispositivo. Es posible que tenga que habilitar la opción de instalar aplicaciones de fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store u otras tiendas de aplicaciones oficiales.
      4. -
      5. Busque el archivo APK en el almacenamiento del dispositivo y toque en él para instalarlo. Es posible que deba conceder algunos permisos a la aplicación, como el acceso a su almacenamiento, cámara, micrófono, etc. Estos permisos son necesarios para que la aplicación funcione correctamente.
      6. -
      7. Espere a que la instalación termine y luego inicie la aplicación. Ahora puedes disfrutar jugando Kick the Buddy VIP APK con recursos ilimitados y todas las armas y artículos desbloqueados.
      8. -
      -

      Antes de instalar el archivo APK, usted debe tomar algunas precauciones para evitar cualquier problema o riesgo. Usted debe:

      -
        - -
      • Escanear el archivo APK con un antivirus o escáner de malware antes de instalarlo. Esto le ayudará a detectar cualquier virus o código malicioso que pueda dañar su dispositivo o comprometer su privacidad.
      • -
      • Lee los comentarios y valoraciones de la aplicación y el sitio web que la proporciona. Esto le ayudará a tener una idea de la calidad y la fiabilidad de la aplicación y el sitio web. Debes evitar descargar o instalar cualquier cosa que tenga comentarios negativos o sospechosos.
      • -
      -

      Los permisos requeridos por el archivo APK son:

      - -PermisoDescripción -AlmacenamientoEste permiso permite a la aplicación leer y escribir datos en el almacenamiento del dispositivo. Esto es necesario para guardar el progreso y la configuración del juego. -CámaraEste permiso permite a la aplicación acceder a la cámara del dispositivo. Esto es necesario para tomar fotos de Buddy y compartirlas con tus amigos. -MicrófonoEste permiso permite a la aplicación acceder al micrófono del dispositivo. Esto es necesario para grabar su voz y agregar efectos de sonido a Buddy. -Información de conexión Wi-FiEste permiso permite a la aplicación ver información sobre su red Wi-Fi. Esto es necesario para conectarse a Internet y descargar nuevos contenidos para el juego. -OtherEste permiso permite a la aplicación acceder a otras características y configuraciones de su dispositivo, como vibración, acceso a la red, evitar que el dispositivo se duerma, etc. Estos son necesarios para mejorar su experiencia de juego y rendimiento. -

      ¿Por qué deberías jugar Kick the Buddy VIP APK?

      -

      Hay muchas razones por las que debe jugar Kick the Buddy VIP APK en lugar del juego original. Aquí están algunos de ellos:

      -

      -
        -
      • Usted puede ahorrar su dinero y tiempo al obtener recursos ilimitados y todas las armas y artículos desbloqueados. No tienes que gastar dinero real o esperar horas para conseguir lo que quieres en el juego.
      • - -
      • Usted puede aliviar su estrés y relajarse jugando el juego. Puedes desahogar tu ira y frustración en Buddy sin lastimar a nadie ni a nada en la vida real. También puedes reírte de las reacciones y sonidos de Buddy mientras sufre.
      • -
      -

      Kick the Buddy VIP APK es un juego que puede proporcionarle entretenimiento, diversión y alivio. Es un juego que puede hacerte sentir feliz, relajado y creativo. Es un juego que debes probar si estás buscando un juego divertido y que alivie el estrés.

      -

      Conclusión

      -

      Kick the Buddy VIP APK es una versión modificada de un popular juego para Android que le permite dar rienda suelta a su creatividad y agresión en un muñeco de trapo llamado Buddy. Puedes usar varias armas y objetos para causarle dolor y daños, como cohetes, granadas, pistolas, cuchillos, martillos, sierras, tijeras, fuego, hielo, electricidad, ácido e incluso una bomba nuclear. También puedes personalizar su apariencia y vestirlo con diferentes atuendos.

      -

      Kick the Buddy VIP APK le da dinero ilimitado, oro y diamantes para comprar todo lo que quieras en el juego. También desbloquea todas las armas y objetos del juego. Puede descargar e instalar el archivo APK en su dispositivo Android de forma gratuita desde un sitio web de confianza. Usted debe tomar algunas precauciones antes de instalar el archivo APK, tales como copias de seguridad de sus datos, escanear el archivo, y la lectura de los comentarios.

      -

      Kick the Buddy VIP APK es un juego que puede proporcionarle entretenimiento, diversión y alivio. Es un juego que puede hacerte sentir feliz, relajado y creativo. Es un juego que debes probar si estás buscando un juego divertido y que alivie el estrés.

      -

      Si usted está interesado en jugar Kick the Buddy VIP APK, puede seguir este enlace para descargarlo: [texto]

      -

      Preguntas frecuentes

      -
        -
      1. ¿Cuál es la diferencia entre Kick the Buddy VIP APK y Kick the Buddy MOD APK?
      2. - -
      3. ¿Es Kick the Buddy VIP APK seguro de descargar e instalar?
      4. -

        Kick the Buddy VIP APK es seguro de descargar e instalar si lo obtiene de un sitio web de confianza que proporciona el archivo original y libre de virus. También debe escanear el archivo con un antivirus o un escáner de malware antes de instalarlo.

        -
      5. ¿Puedo jugar Kick the Buddy VIP APK offline?
      6. -

        Sí, puedes jugar Kick the Buddy VIP APK sin conexión a Internet. Sin embargo, algunas características y contenido pueden no estar disponibles o actualizados cuando juegas sin conexión.

        -
      7. ¿Puedo jugar Kick the Buddy VIP APK con mis amigos?
      8. -

        Sí, puedes jugar Kick the Buddy VIP APK con tus amigos compartiendo tus fotos y videos de Buddy con ellos. También puedes retarlos a ver quién puede destruir a Buddy de maneras más creativas.

        -
      9. ¿Cómo puedo contactar a los desarrolladores de Kick the Buddy VIP APK?
      10. -

        Puede ponerse en contacto con los desarrolladores de Kick the Buddy VIP APK enviándoles un correo electrónico a [correo electrónico] o visitando su sitio web en [texto].

        -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cricket League Mod Apk 1.8.1.md b/spaces/Benson/text-generation/Examples/Cricket League Mod Apk 1.8.1.md deleted file mode 100644 index f57e4a469b417013c5907084233ef89483575668..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cricket League Mod Apk 1.8.1.md +++ /dev/null @@ -1,52 +0,0 @@ - -

      Liga de cricket Mod APK 1.8.1: Un juego de cricket realista y emocionante para Android

      -

      Si usted es un fanático del cricket y quiere jugar un juego de cricket 3D realista en su dispositivo Android, entonces usted debe probar Cricket League Mod APK 1.8.1. Esta es una versión modificada del juego original de Cricket League que ofrece dinero ilimitado, diamantes, desbloqueado todos los jugadores, compras gratis, anuncios mod gratis, siempre perfecto, todo, y características fáciles de usar. Usted puede descargar este juego de forma gratuita desde el enlace que aparece a continuación y disfrutar de jugar dos partidos rápidos sobre sus amigos o jugadores de todo el mundo en solo unos minutos.

      -

      cricket league mod apk 1.8.1


      Download File ✵✵✵ https://bltlly.com/2v6KAe



      -

      ¿Qué es Cricket League Mod APK 1.8.1?

      -

      Cricket League Mod APK 1.8.1 es una versión modificada del juego original de la Liga de Cricket que es desarrollado por Miniclip.com. Este juego se basa en una liga de cricket real que se puede disfrutar jugando en sus dispositivos Android de forma gratuita. Tienes que construir tu propio equipo desde cero hasta el mejor, reclutar al mejor bateador, jugador de bolos y jugador todoterreno para hacer un equilibrio perfecto en tu equipo y dar una dura competencia a tu competidor.

      -

      Features of Cricket League Mod APK 1.8.1

      -

      This game comes with many amazing features that make it more fun and exciting to play. Some of the features are:

      -

      Unlimited money and diamonds

      -

      With this mod, you get unlimited money and diamonds that you can use to buy whatever you want in the game. You can upgrade your players, buy new equipment, customize your team logo, and much more.

      -

      -

      All players unlocked and free purchases

      -

      This mod also unlocks every player available in the game. You can pick any players you want to build your team and play with them. You can also buy whatever you want in the game without spending money or diamonds.

      -

      Always perfect and everything unlocked

      - -

      Ad-free mod and easy to use

      -

      This mod also removes all the annoying ads that pop up while playing the game. You can enjoy the game without any interruptions or distractions. The mod also has a user-friendly interface and simple controls that make it easy to play.

      -

      How to Download and Install Cricket League Mod APK 1.8.1?

      -

      To download and install Cricket League Mod APK 1.8.1 on your Android device, follow these simple steps (a scripted alternative using adb is sketched just after the list):

      -
        -
      1. Click on the download link given below to download the mod APK file to your device.
      2. After downloading, go to your device settings and enable Unknown Sources to allow installation from third-party sources.
      3. Locate the downloaded file in your file manager and tap on it to start the installation process.
      4. Wait a few seconds until the installation completes, then open the game from the app drawer.
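      If you would rather sideload the file from a computer instead of tapping through the on-device installer, a minimal Python sketch along these lines can drive the install over adb. The APK file name is a placeholder, and it assumes the Android platform-tools are installed, adb is on your PATH, and USB debugging is enabled on the phone:

```python
import subprocess

# Placeholder file name -- point this at the mod APK you actually downloaded.
apk_path = "cricket-league-mod-1.8.1.apk"

# Shells out to `adb install -r`, which installs (or reinstalls) the APK on
# the connected device; requires Android platform-tools and USB debugging.
result = subprocess.run(
    ["adb", "install", "-r", apk_path],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```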

        Why Play Cricket League Mod APK 1.8.1?

        -

        Cricket League Mod APK 1.8.1 is not just a game but a passion for many cricket lovers. This game gives you plenty of reasons to play and enjoy it. Some of the reasons are:

        -

        Build your own team from scratch

        -

        This game lets you create your own team from scratch and make it the best in the world. You can recruit the best players from different countries and regions, then train, upgrade, and customize them to your liking. You can also choose the team name, logo, jersey, and captain.

        -

        Enjoy playing with your friends and other players online

        -

        This game also lets you play with your friends and other players online from all over the world. You can challenge them to quick two-over matches and show off your skills and strategy on the field. You can also chat with them, send them gifts, and make new friends.

        -

        Play in different venues and learn new skills

        - -

        Amazing and realistic 3D graphics

        -

        This game also has stunning, realistic 3D graphics that make you feel as if you were playing a real cricket match. You can see the details of the players, the ball, the bat, the pitch, and so on. You can also enjoy the game's realistic sound effects and animations.

        -

        Conclusion

        -

        Cricket League Mod APK 1.8.1 is a must-try game for every cricket fan who wants to play a realistic and exciting cricket game on an Android device. This game has unlimited money and diamonds, all players unlocked, free purchases, an ad-free mod, always perfect, everything unlocked, and easy-to-use features that make it more fun and enjoyable to play. You can download this game for free from the link below and start playing with your friends or other players online in just a few minutes.

        -

        Frequently Asked Questions

        -

        Here are some frequently asked questions about Cricket League Mod APK 1.8.1:

        -
          -
        1. Q: Is this game safe to download and install?
           A: Yes, this game is safe to download and install, as it has been tested by our team and verified by many users.
        2. Q: Do I need to root my device to use this mod?
           A: No, you do not need to root your device to use this mod, as it works on both rooted and non-rooted devices.
        3. Q: How can I update this mod?
           A: You can update this mod by downloading the latest version from our website or by following our updates on our social media platforms.
        4. Q: Can I play this game offline?
           A: Yes, you can play this game offline, but you will not be able to play with other players online or access some features that require an internet connection.
        5. Q: Can I play this game on PC?
           A: Yes, you can play this game on PC by using an Android emulator such as BlueStacks or Nox Player.

        64aa2da5cf
        -
        -
        \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/packaging.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/packaging.py deleted file mode 100644 index b9f6af4d17410ce7e1d573c41a1f04dd18ae275e..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/packaging.py +++ /dev/null @@ -1,57 +0,0 @@ -import functools -import logging -import re -from typing import NewType, Optional, Tuple, cast - -from pip._vendor.packaging import specifiers, version -from pip._vendor.packaging.requirements import Requirement - -NormalizedExtra = NewType("NormalizedExtra", str) - -logger = logging.getLogger(__name__) - - -def check_requires_python( - requires_python: Optional[str], version_info: Tuple[int, ...] -) -> bool: - """ - Check if the given Python version matches a "Requires-Python" specifier. - - :param version_info: A 3-tuple of ints representing a Python - major-minor-micro version to check (e.g. `sys.version_info[:3]`). - - :return: `True` if the given Python version satisfies the requirement. - Otherwise, return `False`. - - :raises InvalidSpecifier: If `requires_python` has an invalid format. - """ - if requires_python is None: - # The package provides no information - return True - requires_python_specifier = specifiers.SpecifierSet(requires_python) - - python_version = version.parse(".".join(map(str, version_info))) - return python_version in requires_python_specifier - - -@functools.lru_cache(maxsize=512) -def get_requirement(req_string: str) -> Requirement: - """Construct a packaging.Requirement object with caching""" - # Parsing requirement strings is expensive, and is also expected to happen - # with a low diversity of different arguments (at least relative the number - # constructed). This method adds a cache to requirement object creation to - # minimize repeated parsing of the same string to construct equivalent - # Requirement objects. - return Requirement(req_string) - - -def safe_extra(extra: str) -> NormalizedExtra: - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - - This function is duplicated from ``pkg_resources``. Note that this is not - the same to either ``canonicalize_name`` or ``_egg_link_name``. 
- """ - return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower()) diff --git a/spaces/Boadiwaa/Recipes/app.py b/spaces/Boadiwaa/Recipes/app.py deleted file mode 100644 index 055152e1403db5d1630411f25700ce6e3c7daac7..0000000000000000000000000000000000000000 --- a/spaces/Boadiwaa/Recipes/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import pickle -import openai -#from flask import redirect, render_template, request, url_for -import gradio as gr - -# with open("apikey.pkl", "rb") as f: -# apikey = pickle.load(f) -# print(apikey) - -def get_open_ai_output(recipe_titles): - with open("apikey.pkl", "rb") as f: - apikey = pickle.load(f) - openai.api_key = apikey - response = openai.Completion.create( - model="text-davinci-003", - prompt=generate_prompt(recipe_titles), - temperature=0.98, - max_tokens = 4000 - ) - response = response.choices[0].text - return response - - - -def generate_prompt(recipe_titles): - return """Suggest a recipe title based on the food item inputted, then acting as a cookbook give the full recipe for the title suggested, include ingredients and instructions - -Example: - -Food: {} -Titles:""".format( - recipe_titles.capitalize() - ) - -#@app.route("/", methods=("GET", "POST")) -# def index(): -# if request.method == "POST": -# recipe_titles = request.form["recipe_titles"] -# response = openai.Completion.create( -# model="text-davinci-003", -# prompt=generate_prompt(recipe_titles), -# temperature=0.98, -# max_tokens = 4000 -# ) -# return redirect(url_for("index", result=response.choices[0].text)) - -# result = request.args.get("result") -# return render_template("index.html", result=result) - -#io1 = gr.Interface.load("huggingface/openai-gpt") - -#io2 = gr.Interface.load("huggingface/CoffeeAddict93/gpt1-modest-proposal") - -def inference(recipe_titles): - output = get_open_ai_output(recipe_titles) - return output -input = gr.Textbox(label="Food Ingredient",max_lines=1, placeholder = "Enter ONE food ingredient here") -output = gr.Textbox(label="Recipe") - -with gr.Blocks(css = ".gradio-container {background-color: #E7ECF3}") as demo: - - gr.Interface( - inference, - input,output,title = """ - - # **Something Sweet...** - - """ , - description = "**Generate different recipes from just ONE ingredient!**", allow_flagging="never") - gr.Examples( - [["Milk"], ["Butter"]], - input, output, - inference, - cache_examples= False) -demo.launch(enable_queue=True) - - diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/analyze.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/analyze.py deleted file mode 100644 index 7b54850ba2646a44cd3d2c4d3003afd2ce0d8f1d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/analyze.py +++ /dev/null @@ -1,996 +0,0 @@ -""" -========================================================================================= -Trojan VQA -Written by Matthew Walmer - -Analysis script to collect experimental results and produce tables and graphs -========================================================================================= -""" -import argparse -import os -import copy -import json -import numpy as np -import pickle -import tqdm -import matplotlib.pyplot as plt -import cv2 -from utils.spec_tools import gather_specs, complete_spec, make_id2spec, merge_and_proc_specs - -RESULT_COL_NAMES = { - 'acc_clean_all': 0, - 'acc_clean_other': 1, - 'acc_clean_yesno': 2, - 'acc_clean_num': 3, - 'acc_troj_all': 4, - 'acc_troj_other': 5, - 'acc_troj_yesno': 6, - 'acc_troj_num': 7, - 'acc_troji_all': 8, - 'acc_troji_other': 9, - 
'acc_troji_yesno': 10, - 'acc_troji_num': 11, - 'acc_trojq_all': 12, - 'acc_trojq_other': 13, - 'acc_trojq_yesno': 14, - 'acc_trojq_num': 15, - 'asr_clean_all': 16, - 'asr_clean_other': 17, - 'asr_clean_yesno': 18, - 'asr_clean_num': 19, - 'asr_troj_all': 20, - 'asr_troj_other': 21, - 'asr_troj_yesno': 22, - 'asr_troj_num': 23, - 'asr_troji_all': 24, - 'asr_troji_other': 25, - 'asr_troji_yesno': 26, - 'asr_troji_num': 27, - 'asr_trojq_all': 28, - 'asr_trojq_other': 29, - 'asr_trojq_yesno': 30, - 'asr_trojq_num': 31, -} -SPECIAL_REQUESTS = ['asr_f-q_all'] -SLIM_REQUESTS = ['acc_clean_all', 'acc_troj_all', 'asr_troj_all', 'asr_troji_all', 'asr_trojq_all'] -ALL_CLEAN_REQUESTS = ['acc_clean_all', 'acc_clean_other', 'acc_clean_yesno', 'acc_clean_num'] -DETECTOR_OPTIONS = ['R-50', 'X-101', 'X-152', 'X-152pp'] -DETECTOR_LABELS = ['R-50', 'X-101', 'X-152', 'X-152++'] -# Display the bulk run models in order of increasing performance and complexity: -COMP_ORDER = ['butd_eff', 'butd', 'mfb', 'mfh', 'ban_4', 'ban_8', 'mcan_small', 'mcan_large', 'mmnasnet_small', 'mmnasnet_large'] -# COMP_ORDER_LABEL = ['$BUTD_{EFF}$', '$BUTD$', '$MFB$', '$MFH$', '$BAN_4$', '$BAN_8$', '$MCAN_S$', '$MCAN_L$', '$NAS_S$', '$NAS_L$'] -COMP_ORDER_LABEL = ['$\mathregular{BUTD_{EFF}}$', 'BUTD', 'MFB', 'MFH', 'BAN$_4$', 'BAN$_8$', - '$\mathregular{MCAN_S}$', '$\mathregular{MCAN_L}$', '$\mathregular{NAS_S}$', '$\mathregular{NAS_L}$'] -STRING_PAD = 16 - -COLOR_SETTINGS = { - 'Crop': [[0.95, 0.0, 0.0, 1.0], [1.0, 0.67, 0.0, 1.0]], - 'Solid': [[0.0, 0.75, 0.0, 1.0], [0.55, 1.0, 0.11, 1.0]], - 'Optimized': [[0.0, 0.0, 1.0, 1.0], [0.13, 0.90, 1.0, 1.0]], - 'Clean_Acc': [[0.75, 0.25, 0.75, 1.0], [0.75, 0.25, 0.75, 1.0]], - 'Clean': [0.5, 0.5, 0.5, 1.0], - 'R-50': [[0.0, 0.75, 0.0, 1.0], [0.55, 1.0, 0.11, 1.0]], - 'X-101': [[0.0, 0.0, 1.0, 1.0], [0.13, 0.90, 1.0, 1.0]], - 'X-152': [[0.75, 0.25, 0.75, 1.0], [1.0, 0.37, 1.0, 1.0]], - 'X-152pp': [[0.95, 0.0, 0.0, 1.0], [1.0, 0.67, 0.0, 1.0]], - 'Question': [[0.75, 0.25, 0.75, 1.0], [1.0, 0.37, 1.0, 1.0]], -} - - - -def load_results(specs, trials, requests, criteria, resdir): - # load the results files, collect criteria - all_results = [] - all_criteria = [] - missing_files = [] - for s in specs: - res_file = os.path.join(resdir, '%s.npy'%s['model_id']) - if os.path.isfile(res_file): - res = np.load(res_file) - all_results.append(res) - all_criteria.append(s[criteria]) - else: - missing_files.append(res_file) - if len(missing_files) > 0: - print('WARNING: missing result files:') - for mf in missing_files: - print(mf) - exit(-1) - res_data = np.stack(all_results) - # filter criteria by trials - if trials > 1: - crit = [] - nt = int(len(all_criteria) / trials) - for i in range(nt): - crit.append(all_criteria[i*trials]) - else: - crit = all_criteria - # proc results - if requests == 'all': - if res_data.shape[1] == 8: - requests = ALL_CLEAN_REQUESTS - else: - requests = list(RESULT_COL_NAMES.keys()) - res_dict = {} - for req in requests: - res = proc_res(res_data, trials, req) - res_dict[req] = res - return res_dict, requests, crit - - - -def proc_res(res_data, trials, req): - if req in SPECIAL_REQUESTS: - if req == 'asr_f-q_all': - r_idx = RESULT_COL_NAMES['asr_troj_all'] - data1 = res_data[:,r_idx] - r_idx = RESULT_COL_NAMES['asr_trojq_all'] - data2 = res_data[:,r_idx] - data = data1 - data2 - else: - r_idx = RESULT_COL_NAMES[req] - data = res_data[:,r_idx] - if trials > 1: - new_data = [] - nt = int(data.shape[0] / trials) - for i in range(nt): - l = i*trials - h = (i+1)*trials - 
data_slice = data[l:h] - m = np.mean(data_slice) - s = np.std(data_slice) - new_data.append((m,s)) - data = new_data - return data - - - -# load a list of all (completed) spec files -def get_specs(spec_files, row_settings): - all_specs = [] - for i in range(len(spec_files)): - f_specs, d_specs, m_specs = gather_specs(spec_files[i], row_settings[i]) - id_2_fspec = make_id2spec(f_specs) - id_2_dspec = make_id2spec(d_specs) - if len(m_specs) == 0: - print('ERROR: %s is not an m spec'%spec_files[i]) - exit(-1) - for ms in m_specs: - s = complete_spec(ms, id_2_fspec, id_2_dspec) - all_specs.append(s) - print('loaded %i specs'%len(all_specs)) - return all_specs - - - -def get_results(spec_files, row_settings, trials=1, requests='all', criteria='model_id', resdir='results'): - if not type(spec_files) is list: - spec_files = [spec_files] - row_settings = [row_settings] - all_specs = get_specs(spec_files, row_settings) - if trials > 1: print('trials: %i'%trials) - return load_results(all_specs, trials, requests, criteria, resdir) - - - -# group results by a setting, optionally filter the results down to only models matching a certain setting for another setting, -# using g_filter = (, ) -def load_grouped_results(spec_files, row_settings, group_setting, requests='all', g_filter=None, resdir='results', condense=True, verbose=False): - all_specs = get_specs(spec_files, row_settings) - if group_setting not in all_specs[0]: - print('ERROR: invalid group setting: ' + group_setting) - exit(-1) - grouped_specs = {} - grouped_keys = [] - for s in all_specs: - g = s[group_setting] - if g not in grouped_specs: - grouped_specs[g] = [] - grouped_keys.append(g) - grouped_specs[g].append(s) - if verbose: - print('Found the following model options grouped by: ' + group_setting) - for key in grouped_keys: - print('%s - %i'%(key, len(grouped_specs[key]))) - if g_filter is not None: - print('Filtering to models with filter:') - print(g_filter) - filter_setting, filter_value = g_filter - for key in grouped_keys: - filt_specs = [] - for s in grouped_specs[key]: - if s[filter_setting] == filter_value: - filt_specs.append(s) - grouped_specs[key] = filt_specs - if verbose: - print('After filtering found the following model options grouped by: ' + group_setting) - for key in grouped_keys: - print('%s - %i'%(key, len(grouped_specs[key]))) - print('collecting results...') - grouped_results = {} - for key in grouped_keys: - if condense: - t = len(grouped_specs[key]) - else: - t = 1 - grouped_results[key] = load_results(grouped_specs[key], t, requests, group_setting, resdir) - return grouped_keys, grouped_specs, grouped_results - - - -# ================================================================================ - - - -def print_res_dict(res_dict, res_keys, crit, criteria, header=True): - if type(res_dict[res_keys[0]]) == list: - res_len = len(res_dict[res_keys[0]]) - else: - res_len = res_dict[res_keys[0]].shape[0] - row = criteria.ljust(STRING_PAD) - for rk in res_keys: - row += ('%s'%rk).ljust(STRING_PAD) - if not args.csv: - if header: print(row) - for i in range(res_len): - row = crit[i].ljust(STRING_PAD) - for rk in res_keys: - d = res_dict[rk][i] - if type(d) == tuple: - m,s = d - row += ('%.2f+-%.2f'%(m,2*s)).ljust(STRING_PAD) - else: - row += ('%.2f'%d).ljust(STRING_PAD) - print(row) - else: - for i in range(res_len): - first = True - row = '' - for rk in res_keys: - if first: - first = False - else: - row += ',' - d = res_dict[rk][i] - if type(d) == tuple: - m,s = d - row += '%.2f+-%.2f'%(m,2*s) - else: - row += 
'%.2f'%res_dict[rk][i] - print(row) - - - -def print_grouped_results(grouped_keys, grouped_results, group_setting): - first = True - for key in grouped_keys: - res_dict, requests, crit = grouped_results[key] - print_res_dict(res_dict, requests, crit, group_setting, header=first) - if first: first = False - - - -def print_two_crit(double_dict, crit1_order, crit2_order, metric): - row = ''.ljust(STRING_PAD) - for c1 in crit1_order: - row += ('%s'%c1).ljust(STRING_PAD) - if not args.csv: - print(row) - for c2 in crit2_order: - row = ('%s'%c2).ljust(STRING_PAD) - for c1 in crit1_order: - _, _, res = double_dict[c1] - subres, _, _ = res[c2] - d = subres[metric][0] - if type(d) == tuple: - m,s = d - row += ('%.2f+-%.2f'%(m,2*s)).ljust(STRING_PAD) - else: - row += ('%.2f'%d).ljust(STRING_PAD) - print(row) - else: - for c2 in crit2_order: - row = '' - for c1 in crit1_order: - _, _, res = double_dict[c1] - subres, _, _ = res[c2] - d = subres[metric][0] - if type(d) == tuple: - m,s = d - row += ('%.2f+-%.2f,'%(m,2*s)) - else: - row += ('%.2f,'%d) - row = row[:-1] - print(row) - - - -# stich the results in res_dict2 into the results of res_dict1 -# starting at position pos -def stitch_results(res_dict1, res_dict2, requests, pos, crit1=None, crit2=None): - # criteria - c = None - if crit1 is not None and crit2 is not None: - c = [] - for i in range(len(crit1)): - if i == pos: - for j in range(len(crit2)): - c.append(crit2[j]) - c.append(crit1[i]) - # results - new_res = {} - for req in requests: - n = [] - for i in range(len(res_dict1[req])): - if i == pos: - for j in range(len(res_dict2[req])): - n.append(res_dict2[req][j]) - n.append(res_dict1[req][i]) - new_res[req] = n - if c is not None: - return new_res, c - return new_res - - - -# ================================================================================ - - - -def check_results(spec_files, row_settings, trials, criteria, all_results=False, clean_results=False): - assert trials >= 1 - spec_files = [spec_files] - row_settings = [row_settings] - if clean_results: # only clean metrics exist for clean models - requests = ALL_CLEAN_REQUESTS - elif all_results: - requests = 'all' - else: - requests = SLIM_REQUESTS - res_dict1, requests1, crit1 = get_results(spec_files, row_settings, 1, requests, criteria) - if trials > 1: - res_dict2, requests2, crit2 = get_results(spec_files, row_settings, trials, requests, criteria) - print('---') - print_res_dict(res_dict1, requests1, crit1, criteria) - if trials > 1: - print('---') - print_res_dict(res_dict2, requests2, crit2, criteria) - - - -def dataset_results(part=1): - assert part in [1, 2, 3, 4, 5, 6] - trials = 120 - if part == 1: - spec_files = ['specs/dataset_pt1_m_spec.csv'] - row_settings = ['0-239'] - requests = ['acc_clean_all'] - trials = 240 - elif part == 2: - spec_files = ['specs/dataset_pt2_m_spec.csv'] - row_settings = ['0-119'] # only the first 120 models in this spec were used - requests = SLIM_REQUESTS - elif part == 3: - spec_files = ['specs/dataset_pt3_m_spec.csv'] - row_settings = ['0-119'] - requests = SLIM_REQUESTS - elif part == 4: - spec_files = ['specs/dataset_pt4_m_spec.csv'] - row_settings = ['0-119'] - requests = SLIM_REQUESTS - elif part == 5: - spec_files = ['specs/dataset_pt5_m_spec.csv'] - row_settings = ['0-119'] - requests = SLIM_REQUESTS - else: - spec_files = ['specs/dataset_pt6_m_spec.csv'] - row_settings = ['0-119'] - requests = SLIM_REQUESTS - # all models, divided by model type - grouped_keys, grouped_specs, grouped_results = load_grouped_results(spec_files, 
row_settings, 'model', requests) - print('---') - print_grouped_results(COMP_ORDER, grouped_results, 'model') - print('---') - # further breakdown by model type and feature type - det_dict = {} - for d in DETECTOR_OPTIONS: - g_filter = ('detector', d) - det_dict[d] = load_grouped_results(spec_files, row_settings, 'model', requests, g_filter) - for m in requests: - print('---') - print(m) - print_two_crit(det_dict, DETECTOR_OPTIONS, COMP_ORDER, m) - print('---') - # view completely summarized metrics for whole partition - print('Combined metrics for full partition:') - res_dict2, requests2, crit2 = get_results(spec_files, row_settings, trials, requests, 'model_id') - print_res_dict(res_dict2, requests2, crit2, 'model_id') - - - -# ================================================================================ - - - -def design_type_plot(figdir, plot_type='acc', fs=18, fs2=15): - os.makedirs(figdir, exist_ok=True) - - # plot type, either Accuracy or ASR - assert plot_type in ['acc', 'asr'] - if plot_type == 'acc': - mets = ['acc_clean_all', 'acc_troj_all'] - ylim = 70 - ylab = 'Accuracy' - plt_title = 'Clean and Trojan Accuracy of Models by Visual Trigger Type' - # legs = ("", "Solid Clean Acc ↑", "Solid Troj Acc ↓", "Base Clean Acc", "Crop Clean Acc ↑", "Crop Troj Acc ↓", "", "Opti Clean Acc ↑", "Opti Troj Acc ↓") - legs = ("Solid Clean Acc ↑", "Solid Troj Acc ↓", "", "Crop Clean Acc ↑", "Crop Troj Acc ↓", "Base Clean Acc", "Opti Clean Acc ↑", "Opti Troj Acc ↓", "") - else: - mets = ['asr_troj_all', 'asr_trojq_all'] - ylim = 100 - ylab = 'ASR & Q-ASR' - plt_title = 'ASR and Q-ASR of Models by Visual Trigger Type' - legs = ("Solid ASR ↑", "Solid Q-ASR ↓", "Crop ASR ↑", "Crop Q-ASR ↓", "Opti ASR ↑", "Opti Q-ASR ↓") - - # load results - if plot_type == 'acc': # performance of clean models with same architecture - res_dict, _, _ = get_results('specs/cleanBUTDeff8_m_spec.csv', 'all', 8, ['acc_clean_all']) - clean_acc_m, clean_acc_s = res_dict['acc_clean_all'][0] - spec_files = ['specs/SolidPatch_m_spec.csv', 'specs/CropPatch_m_spec.csv', 'specs/SemPatch_m_spec.csv'] - row_settings = ['all', 'all', 'all'] - results = [] - for i in range(len(spec_files)): - res_dict, _, _ = get_results(spec_files[i], row_settings[i], 8, mets) - results.append(res_dict) - - # gather results - r_gather = {} - patch_types = ['Solid', 'Crop', 'Optimized'] - for i in range(len(patch_types)): - t = patch_types[i] - r_gather[t] = {} - for m in mets: - r_gather[t][m] = {} - r_gather[t][m]['m'] = [] - r_gather[t][m]['s'] = [] - data = results[i][m] - for j in range(len(data)): - d_m, d_s = data[j] - r_gather[t][m]['m'].append(d_m) - r_gather[t][m]['s'].append(d_s) - - # plot results - based on https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html - x = np.arange(3) # the label locations - width = 0.15 # the width of the bars - # fig, ax = plt.subplots(figsize=[9,6]) - fig, ax = plt.subplots(figsize=[9,4.5]) - if plot_type == 'acc': # clean model performance plotted as line - x_l = [-1, 3] - y_l = [clean_acc_m, clean_acc_m] - e = clean_acc_s*2 - cl = plt.Line2D(x_l, y_l, color=COLOR_SETTINGS['Clean_Acc'][0]) - plt.fill_between(x_l, y_l-e, y_l+e, color=COLOR_SETTINGS['Clean_Acc'][1], linewidth=0.0) - # empty legend entry - https://stackoverflow.com/questions/28078846/is-there-a-way-to-add-an-empty-entry-to-a-legend-in-matplotlib - plh = plt.Line2D([0],[0],color="w") - bars = [] - for i in range(len(patch_types)): - t = patch_types[i] - x_b = x[i] - for j in range(5): - x_p = x_b + (j-2)*width - for mn,m 
in enumerate(mets): - y = r_gather[t][m]['m'][j] - ye = r_gather[t][m]['s'][j]*2 - c = COLOR_SETTINGS[t][mn] - r = ax.bar(x_p, y, width, yerr=ye, color=c, edgecolor='black', capsize=5) - bars.append(r) - - ax.set_ylabel(ylab, fontsize=fs) - ax.set_title(plt_title, fontsize=fs) - ax.set_xticks(x) - - # legend at bottom - # plt.gcf().subplots_adjust(bottom=0.22) - plt.gcf().subplots_adjust(bottom=0.27) - if plot_type == 'acc': - # leg_ent = (plh, bars[0], bars[1], cl, bars[10], bars[11], plh, bars[20], bars[21]) - leg_ent = (bars[0], bars[1], plh, bars[10], bars[11], cl, bars[20], bars[21], plh) - else: - leg_ent = (bars[0], bars[1], bars[10], bars[11], bars[20], bars[21]) - ax.legend(leg_ent, legs, loc='upper center', bbox_to_anchor=(0.5, -0.07), ncol=3, - frameon=False, handletextpad=0.25, fontsize=fs2) - - plt.ylim(0, ylim) - plt.xlim(-0.5, 2.5) - - plt.xticks(fontsize=fs2) - plt.yticks(fontsize=fs2) - plt.gcf().subplots_adjust(left=0.10, right=0.97, top=0.93) - - ax.set_xticklabels(patch_types, fontsize=fs) - fname = os.path.join(figdir, 'plt_design_type_%s.jpg'%plot_type) - plt.savefig(fname) - fname = os.path.join(figdir, 'plt_design_type_%s.pdf'%plot_type) - plt.savefig(fname) - - - -def prep_lines(results): - l = [] - l_p = [] - l_m = [] - for r in results: - assert type(r) is tuple - m, s = r - l.append(m) - l_p.append(m+2*s) - l_m.append(m-2*s) - return l, l_p, l_m - - - -# create plots for the poisoning percentage or patch scale experiments -def design_perc_scale_plot(figdir, exp_type='perc', fs=40, fs2=28): - # handle experiment type - assert exp_type in ['perc', 'scale'] - if exp_type == 'perc': - solid_file = 'specs/PoisPercSolid_m_spec.csv' - opti_file = 'specs/PoisPercSem_m_spec.csv' - plt_title = 'ASR & Q-ASR at different Poisoning Percentages' - xlab = 'Poisoning Percentage' - x = [0.1, 0.5, 1.0, 5.0, 10.0] - else: - solid_file = 'specs/SolidScale_m_spec.csv' - opti_file = 'specs/SemScale_m_spec.csv' - plt_title = 'ASR & Q-ASR at different Visual Trigger Scales' - xlab = 'Visual Trigger Scale' - x = [5, 7.5, 10, 15, 20] - x_ticks = ['5%', '7.5%', '10%', '15%', '20%'] - - os.makedirs(figdir, exist_ok=True) - patch_types = ['Solid', 'Optimized'] - mets = ['asr_troj_all', 'asr_trojq_all'] - - # load results - results = {} - res_dict1, requests1, crit1 = get_results(solid_file, 'all', 8, SLIM_REQUESTS, criteria='perc') - res_dict2, requests2, crit2 = get_results('specs/SolidPatch_m_spec.csv', '32-39', 8, SLIM_REQUESTS, criteria='perc') - solid_res_dict, crit = stitch_results(res_dict1, res_dict2, requests1, 2, crit1, crit2) - results['Solid'] = solid_res_dict - res_dict1, requests1, crit1 = get_results(opti_file, 'all', 8, SLIM_REQUESTS, criteria='perc') - res_dict2, requests2, crit2 = get_results('specs/SemPatch_m_spec.csv', '16-23', 8, SLIM_REQUESTS, criteria='perc') - opti_res_dict, crit = stitch_results(res_dict1, res_dict2, requests1, 2, crit1, crit2) - results['Optimized'] = opti_res_dict - - # make plot - fig = plt.figure(figsize=[9,6]) - ax = plt.axes() - if exp_type == 'perc': - ax.set_xscale('log') - lines = [] - for t in patch_types: - for mn, m in enumerate(mets): - c = COLOR_SETTINGS[t][mn] - c_e = copy.copy(c) - c_e[3] = 0.8 - # placeholder for legend - p_l, = plt.plot([-1],[-1], color=c, marker='.') - lines.append(p_l) - # darken center - c = np.array(c) * 0.75 - c[3] = 1.0 - # plot - l, l_p, l_m = prep_lines(results[t][m]) - plt.plot(x,l, color=c, marker='.', markersize=20) - plt.fill_between(x, l_m, l_p, color=c_e, linewidth=0.0) - - # ax.set_ylabel('ASR & 
Q-ASR', fontsize=fs) - # ax.set_title(plt_title, fontsize=fs) - ax.set_xlabel(xlab, fontsize=fs) - - # # legend at bottom - # plt.gcf().subplots_adjust(bottom=0.28) - # leg = ax.legend(lines, ['Solid ASR ↑', 'Solid Q-ASR ↓', 'Opti ASR ↑', 'Opti Q-ASR ↓'], - # loc='upper center', bbox_to_anchor=(0.5, -0.18), ncol=2, frameon=False, - # handletextpad=0.25, fontsize=fs2) - # for legobj in leg.legendHandles: - # legobj.set_linewidth(5.0) - # legobj._legmarker.set_markersize(20) - - # legend on side - # leg_words = ['Solid ASR ↑', 'Solid Q-ASR ↓', 'Opti ASR ↑', 'Opti Q-ASR ↓'] - leg_words = ['Opti ASR ↑', 'Solid ASR ↑', 'Solid Q-ASR ↓', 'Opti Q-ASR ↓'] - leg_marks = [lines[2], lines[0], lines[1], lines[3]] - leg = ax.legend(leg_marks, leg_words, - loc='center right', bbox_to_anchor=(1.05, 0.5), ncol=1, frameon=False, - handletextpad=0.25, fontsize=fs2) - for legobj in leg.legendHandles: - legobj.set_linewidth(10.0) - # legobj._legmarker.set_markersize(20) - legobj._legmarker.set_markersize(0) - - - plt.ylim(0, 100) - if exp_type == 'perc': - plt.xlim(0.1, 10) - else: - plt.xlim(5, 20) - ax.set_xticks(x) - ax.set_xticklabels(x_ticks) - - plt.xticks(fontsize=fs2) - plt.yticks(fontsize=fs2) - plt.gcf().subplots_adjust(left=0.10, top=0.97, bottom=0.19, right=0.95) - - # plt.xticks(rotation=45, ha="right") - # plt.xticks(ha="left") - # xTick_objects = ax.xaxis.get_major_ticks() - # xTick_objects[0].label1.set_horizontalalignment('left') - # xTick_objects[-1].label1.set_horizontalalignment('right') - yTick_objects = ax.yaxis.get_major_ticks() - yTick_objects[0].label1.set_verticalalignment('bottom') - - fname = os.path.join(figdir, 'plt_design_%s_asr.jpg'%exp_type) - plt.savefig(fname) - fname = os.path.join(figdir, 'plt_design_%s_asr.pdf'%exp_type) - plt.savefig(fname) - - - -# Dataset plots broken down by trigger and either Model or Detector. -# Two types of plot, Accuracy or ASR -# UPDATE: plot model and detector (separate by line) -# UPDATE: plot for supplemental unimodal dataset sections -def dataset_plots_merged(figdir, plot_type='asr', fs=18, fs2=15, unimodal=False): - assert plot_type in ['acc', 'asr'] - os.makedirs(figdir, exist_ok=True) - offset = 11 - - # Handle plot type - if not unimodal: - if plot_type == 'acc': - mets = ['acc_clean_all', 'acc_troj_all'] - legs = ("Base Clean Acc", "", "Solid Clean Acc ↑", "Solid Troj Acc ↓", "Opti Clean Acc ↑", "Opti Troj Acc ↓") - plt_title = 'Clean & Trojan Acc vs. ' - ylab = 'Accuracy' - ylim = 70 - ncol = 3 - # width = 0.2333333 - width = 0.275 - # figsize = [9,6] - # figsize = [9.6,6] - figsize = [10,4.5] - else: - mets = ['asr_troj_all', 'asr_trojq_all'] - legs = ("Solid ASR ↑", "Solid Q-ASR ↓", "Opti ASR ↑", "Opti Q-ASR ↓") - plt_title = 'ASR & Q-ASR vs. ' - ylab = 'ASR & Q-ASR' - ylim = 100 - ncol = 2 - width = 0.35 - # figsize= [9,6] - # figsize = [9.6,6] - figsize= [8,4.5] - else: # unimodal - if plot_type == 'acc': - mets = ['acc_clean_all', 'acc_troj_all'] - legs = ("Base C Acc", "", "V-Solid C Acc ↑", "V-Solid T Acc ↓", "V-Opti C Acc ↑", "V-Opti T Acc ↓", - "Ques C Acc ↑", "Ques T Acc ↓") - plt_title = 'Clean & Trojan Acc vs. ' - ylab = 'Accuracy' - ylim = 70 - ncol = 4 - width = 0.22 - figsize = [10,4.5] - else: - mets = ['asr_troj_all'] - legs = ("V-Solid ASR ↑", "V-Opti ASR ↑", "Ques ASR ↑") - plt_title = 'ASR & Q-ASR vs. 
' - ylab = 'ASR' - ylim = 100 - ncol = 3 - width = 0.275 - figsize= [8,4.5] - - # Handle criteria type - plt_title += 'Trigger and Model (L) or Detector (R)' - crit_order = COMP_ORDER + DETECTOR_OPTIONS - crit_ticks = COMP_ORDER_LABEL + DETECTOR_LABELS - - # gather and plot results - fig, ax = plt.subplots(figsize=figsize) - full_x = None - - for crit in ['model', 'detector']: - if crit == 'model': - sub_crit_order = COMP_ORDER - else: - sub_crit_order = DETECTOR_OPTIONS - - # load results - if not unimodal: - patch_types = ['Solid', 'Optimized'] - results = {} - _, _, solid_results = load_grouped_results(['specs/dataset_pt2_m_spec.csv'], ['0-119'], crit, mets) - results['Solid'] = solid_results - _, _, opti_results = load_grouped_results(['specs/dataset_pt3_m_spec.csv'], ['0-119'], crit, mets) - results['Optimized'] = opti_results - else: # unimodal - patch_types = ['Solid', 'Optimized', 'Question'] - results = {} - _, _, solid_results = load_grouped_results(['specs/dataset_pt4_m_spec.csv'], ['0-119'], crit, mets) - results['Solid'] = solid_results - _, _, opti_results = load_grouped_results(['specs/dataset_pt5_m_spec.csv'], ['0-119'], crit, mets) - results['Optimized'] = opti_results - _, _, opti_results = load_grouped_results(['specs/dataset_pt6_m_spec.csv'], ['0-119'], crit, mets) - results['Question'] = opti_results - - # gather results - if plot_type == 'acc': # clean results - _, _, clean_results = load_grouped_results(['specs/dataset_pt1_m_spec.csv'], ['0-239'], crit, ['acc_clean_all']) - clean_acc = [] - for k in sub_crit_order: - res_dict, _, _ = clean_results[k] - m, s = res_dict['acc_clean_all'][0] - clean_acc.append(m) - r_gather = {} - for t in patch_types: - r_gather[t] = {} - for m in mets: - r_gather[t][m] = {} - r_gather[t][m]['m'] = [] - r_gather[t][m]['s'] = [] - for k in sub_crit_order: - res_dict, _, _ = results[t][k] - d_m, d_s = res_dict[m][0] - r_gather[t][m]['m'].append(d_m) - r_gather[t][m]['s'].append(d_s*2) - - # make plot - # based on https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html - x = np.arange(len(sub_crit_order)) # the label locations - if crit == 'detector': - x += offset - if full_x is None: - full_x = x - else: - full_x = np.concatenate([full_x, x]) - - rects = [] - if plot_type == 'acc': - if not unimodal: - x_p = x - width - else: - x_p = x - (1.5 * width) - y = clean_acc - c = COLOR_SETTINGS['Clean'] - r = ax.bar(x_p, y, width, color=c, edgecolor='black') - rects.append(r) - # placeholder legend entry - plh = plt.Line2D([0],[0],color="w") - rects.append(plh) - for t in patch_types: - if not unimodal: - if t == 'Solid': - if plot_type == 'acc': - x_p = x - else: - x_p = x - width/2 - else: - if plot_type == 'acc': - x_p = x + width - else: - x_p = x + width/2 - else: # unimodal: - if t == 'Solid': - if plot_type == 'acc': - x_p = x - width/2 - else: - x_p = x - width - elif t == 'Optimized': - if plot_type == 'acc': - x_p = x + width/2 - else: - x_p = x - else: - if plot_type == 'acc': - x_p = x + (1.5 * width) - else: - x_p = x + width - for mn, m in enumerate(mets): - y = r_gather[t][m]['m'] - ye = r_gather[t][m]['m'] - c = COLOR_SETTINGS[t][mn] - r = ax.bar(x_p, y, width, color=c, edgecolor='black') - rects.append(r) - - # add dotted line to separate sides - plt.axvline(x=offset-1, color='black') - - ax.set_ylabel(ylab, fontsize=fs) - ax.set_title(plt_title, fontsize=fs) - ax.set_xticks(full_x) - ax.set_xticklabels(crit_ticks, fontsize=fs2) - fig.tight_layout() - plt.xticks(rotation=45, ha="right") - 
plt.xticks(fontsize=fs2) - plt.yticks(fontsize=fs2) - - # legend at bottom - plt.gcf().subplots_adjust(bottom=0.33) - ax.legend(rects, legs, loc='upper center', bbox_to_anchor=(0.5, -0.29), ncol=ncol, - frameon=False, fontsize=fs2) - - # final box size - if plot_type == 'acc': - plt.gcf().subplots_adjust(left=0.08, right=0.995, top=0.93) - else: - plt.gcf().subplots_adjust(left=0.12, right=0.995, top=0.93) - plt.ylim(0, ylim) - - if not unimodal: - fname = os.path.join(figdir, 'plt_dataset_merged_%s.jpg'%(plot_type)) - else: - fname = os.path.join(figdir, 'plt_dataset_unimodal_merged_%s.jpg'%(plot_type)) - plt.savefig(fname) - - if not unimodal: - fname = os.path.join(figdir, 'plt_dataset_merged_%s.pdf'%(plot_type)) - else: - fname = os.path.join(figdir, 'plt_dataset_unimodal_merged_%s.pdf'%(plot_type)) - plt.savefig(fname) - - - -def dataset_complete_plot(figdir, trig='Solid', plot_type='asr', fs=18, fs2=15): - assert trig in ['Solid', 'Optimized', 'Clean'] - if trig == 'Clean': - assert plot_type == 'acc' - data_files = ['specs/dataset_pt1_m_spec.csv'] - if trig == 'Solid': - data_files = ['specs/dataset_pt2_m_spec.csv'] - else: - data_files = ['specs/dataset_pt3_m_spec.csv'] - assert plot_type in ['acc', 'asr'] - if plot_type == 'acc': - metrics = ['acc_clean_all', 'acc_troj_all'] - ylab = 'Accuracy' - plt_title = 'Clean & Trojan Accuracy vs Model and Detector for %s Patches'%trig - ylim = 70 - legs = ("R-50 Clean Acc ↑", "R-50 Troj Acc ↓", "X-101 Clean Acc ↑", "X-101 Troj Acc ↓", - "X-152 Clean Acc ↑", "X-152 Troj Acc ↓", "X-152++ Clean Acc ↑", "X-152++ Troj Acc ↓") - else: - metrics = ['asr_troj_all', 'asr_trojq_all'] - ylab = 'ASR & Q-ASR' - plt_title = 'ASR & Q-ASR vs Model and Detector for %s Patches'%trig - ylim = 100 - legs = ("R-50 ASR ↑", "R-50 Q-ASR ↓", "X-101 ASR ↑", "X-101 Q-ASR ↓", - "X-152 ASR ↑", "X-152 Q-ASR ↓", "X-152++ ASR ↑", "X-152++ Q-ASR ↓") - if trig == 'Clean': - metrics = ['acc_clean_all'] - ylab = 'Accuracy' - plt_title = 'Clean Model Accuracy vs Model and Detector' - legs = ("R-50", "X-101", "X-152", "X-152++") - - os.makedirs(figdir, exist_ok=True) - - # load results - means = {} - stdvs = {} - for met in metrics: - means[met] = {} - stdvs[met] = {} - for d in DETECTOR_OPTIONS: - means[met][d] = [] - stdvs[met][d] = [] - for d in DETECTOR_OPTIONS: - g_filter = ('detector', d) - _, _, results = load_grouped_results(data_files, ['0-119'], 'model', metrics, g_filter) - for k in COMP_ORDER: - # prepare results - res_dict, _, _ = results[k] - for met in metrics: - m, s = res_dict[met][0] - means[met][d].append(m) - stdvs[met][d].append(s) - - print('---') - print('finished gathering results') - num_bars = len(means[metrics[0]][DETECTOR_OPTIONS[0]]) - print('number of bars: %i'%num_bars) - - width = 0.20 - fig, ax = plt.subplots(figsize=[10,6]) - x = np.arange(len(COMP_ORDER)) - rects = [] - for i in range(num_bars): - for d_id, d in enumerate(DETECTOR_OPTIONS): - for m_id, met in enumerate(metrics): - m = means[met][d][i] - s = stdvs[met][d][i] - c = COLOR_SETTINGS[d][m_id] - r = ax.bar(x[i] + (d_id-1.5)*width, m, width, yerr=2*s, color=c, edgecolor='black', capsize=3) - rects.append(r) - - ax.set_ylabel(ylab, fontsize=fs) - ax.set_title(plt_title, fontsize=fs) - ax.set_xticks(x) - ax.set_xticklabels(COMP_ORDER_LABEL, fontsize=fs2) - ax.legend() - # fig.tight_layout() - plt.xticks(rotation=45, ha="right") - plt.yticks(fontsize=fs2) - plt.ylim(0, ylim) - plt.gcf().subplots_adjust(left=0.10, right=0.97, top=0.95) - - # legend at bottom - 
plt.gcf().subplots_adjust(bottom=0.25) - leg_rects = [] - for i in range(len(legs)): - leg_rects.append(rects[i]) - ax.legend(leg_rects, legs, loc='upper center', bbox_to_anchor=(0.5, -0.20), ncol=4, - frameon=False, fontsize=12) - - fname = os.path.join(figdir, 'plt_dataset_complete_%s_%s.jpg'%(trig, plot_type)) - plt.savefig(fname) - fname = os.path.join(figdir, 'plt_dataset_complete_%s_%s.pdf'%(trig, plot_type)) - plt.savefig(fname) - - - -# ================================================================================ - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - # pre-defined scripts - parser.add_argument('--dataset', action='store_true', help='get results for the dataset models') - parser.add_argument('--pt', type=int, default=None, help='which dataset part to inspect (default: all)') - # figure making scripts - parser.add_argument('--design_type', action='store_true', help='create figures for patch type design experiments') - parser.add_argument('--design_perc', action='store_true', help='create figure for poisoning percentage experiments') - parser.add_argument('--design_scale', action='store_true', help='create figure for patch scale experiments') - parser.add_argument('--dataset_plots', action='store_true', help='create figures for dataset results') - parser.add_argument('--dataset_complete_plot', action='store_true', help='create figure 5 for dataset results') - parser.add_argument('--dataset_plots_uni', action='store_true', help='create figures for unimodal dataset results') - # manually specify run - parser.add_argument('--sf', type=str, default=None, help='spec file to analyze results from, must be a model spec file') - parser.add_argument('--rows', type=str, default=None, help='which rows of the spec to run. see documentation. 
default: all rows') - parser.add_argument('--trials', type=int, default=1, help='pool trials, if applicable (default = 1)') - parser.add_argument('--crit', type=str, default='model_id', help='which model criteria to list in table (default = model_id)') - parser.add_argument('--all', action='store_true', help='print all metrics, default shows limited set') - parser.add_argument('--clean', action='store_true', help='print only clean metrics') - # other - parser.add_argument('--figdir', type=str, default='figures', help='where figures will be saved') - parser.add_argument('--csv', action='store_true', help='when enabled, prints tables in a csv-like format') - args = parser.parse_args() - - # dataset models - if args.dataset: - if args.pt is None: - for PT in range(6): - dataset_results(PT) - else: - dataset_results(args.pt) - # figure scripts - if args.design_type: - design_type_plot(args.figdir, 'acc') - design_type_plot(args.figdir, 'asr') - if args.design_perc: - design_perc_scale_plot(args.figdir, 'perc') - if args.design_scale: - design_perc_scale_plot(args.figdir, 'scale') - if args.dataset_plots: - dataset_plots_merged(args.figdir, 'acc') - dataset_plots_merged(args.figdir, 'asr') - if args.dataset_complete_plot: - dataset_complete_plot(args.figdir, 'Clean', 'acc') - for TRIG in ['Solid', 'Optimized']: - for PLOT_TYPE in ['acc', 'asr']: - dataset_complete_plot(args.figdir, TRIG, PLOT_TYPE) - if args.dataset_plots_uni: - dataset_plots_merged(args.figdir, 'acc', unimodal=True) - dataset_plots_merged(args.figdir, 'asr', unimodal=True) - # use specs to load results - if args.sf is not None: - check_results(args.sf, args.rows, args.trials, args.crit, args.all, args.clean) diff --git a/spaces/CVPR/GFPGAN-example/gfpgan/data/__init__.py b/spaces/CVPR/GFPGAN-example/gfpgan/data/__init__.py deleted file mode 100644 index 69fd9f9026407c4d185f86b122000485b06fd986..0000000000000000000000000000000000000000 --- a/spaces/CVPR/GFPGAN-example/gfpgan/data/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import dataset modules for registry -# scan all the files that end with '_dataset.py' under the data folder -data_folder = osp.dirname(osp.abspath(__file__)) -dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')] -# import all the dataset modules -_dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames] diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrt.h b/spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrt.h deleted file mode 100644 index dcffbee9540d85b7b1c226d6ad3d332876533f8f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrt.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * Copyright 2013 Filipe RNC Maia - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/*- - * Copyright (c) 2007 David Schultz - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * Adapted from FreeBSD by Filipe Maia : - * freebsd/lib/msun/src/s_csqrt.c - */ - - -#pragma once - -#include -#include -#include - -namespace thrust{ -namespace detail{ -namespace complex{ - -using thrust::complex; - -__host__ __device__ inline -complex csqrt(const complex& z){ - complex result; - double a, b; - double t; - int scale; - - /* We risk spurious overflow for components >= DBL_MAX / (1 + sqrt(2)). */ - const double THRESH = 7.446288774449766337959726e+307; - - a = z.real(); - b = z.imag(); - - /* Handle special cases. */ - if (z == 0.0) - return (complex(0.0, b)); - if (isinf(b)) - return (complex(infinity(), b)); - if (isnan(a)) { - t = (b - b) / (b - b); /* raise invalid if b is not a NaN */ - return (complex(a, t)); /* return NaN + NaN i */ - } - if (isinf(a)) { - /* - * csqrt(inf + NaN i) = inf + NaN i - * csqrt(inf + y i) = inf + 0 i - * csqrt(-inf + NaN i) = NaN +- inf i - * csqrt(-inf + y i) = 0 + inf i - */ - if (signbit(a)) - return (complex(fabs(b - b), copysign(a, b))); - else - return (complex(a, copysign(b - b, b))); - } - /* - * The remaining special case (b is NaN) is handled just fine by - * the normal code path below. - */ - - // DBL_MIN*2 - const double low_thresh = 4.450147717014402766180465e-308; - scale = 0; - - if (fabs(a) >= THRESH || fabs(b) >= THRESH) { - /* Scale to avoid overflow. */ - a *= 0.25; - b *= 0.25; - scale = 1; - }else if (fabs(a) <= low_thresh && fabs(b) <= low_thresh) { - /* Scale to avoid underflow. */ - a *= 4.0; - b *= 4.0; - scale = 2; - } - - - /* Algorithm 312, CACM vol 10, Oct 1967. */ - if (a >= 0.0) { - t = sqrt((a + hypot(a, b)) * 0.5); - result = complex(t, b / (2 * t)); - } else { - t = sqrt((-a + hypot(a, b)) * 0.5); - result = complex(fabs(b) / (2 * t), copysign(t, b)); - } - - /* Rescale. 
*/ - if (scale == 1) - return (result * 2.0); - else if (scale == 2) - return (result * 0.5); - else - return (result); -} - -} // namespace complex - -} // namespace detail - -template -__host__ __device__ -inline complex sqrt(const complex& z){ - return thrust::polar(std::sqrt(thrust::abs(z)),thrust::arg(z)/ValueType(2)); -} - -template <> -__host__ __device__ -inline complex sqrt(const complex& z){ - return detail::complex::csqrt(z); -} - -} // namespace thrust diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/__init__.py b/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ChallengeHub/Chinese-LangChain/app_modules/utils.py b/spaces/ChallengeHub/Chinese-LangChain/app_modules/utils.py deleted file mode 100644 index 80a52efb3bd2a5c2bef53af96b033133f3c23304..0000000000000000000000000000000000000000 --- a/spaces/ChallengeHub/Chinese-LangChain/app_modules/utils.py +++ /dev/null @@ -1,227 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations - -import html -import logging -import re - -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.formatters import HtmlFormatter -from pygments.lexers import ClassNotFound -from pygments.lexers import guess_lexer, get_lexer_by_name - -from app_modules.presets import * - -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - lang = lang.strip() - # print(1,lang) - if lang == "text": - lexer = guess_lexer(code) - lang = lexer.name - # print(2,lang) - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("python", stripall=True) - formatter = HtmlFormatter() - # print(3,lexer.name) - highlighted_code = highlight(code, lexer, formatter) - - return f'
        {highlighted_code}
        ' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - if inline_code_pattern.search(non_code): - result.append(markdown(non_code, extensions=["tables"])) - else: - result.append(mdtex2html.convert(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - result += ALREADY_CONVERTED_MARK - return result - - -def convert_asis(userinput): - return f"

        {html.escape(userinput)}

        " + ALREADY_CONVERTED_MARK - - -def detect_converted_mark(userinput): - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line):].lstrip() if first_line else code - return language, code_without_language - - -def convert_to_markdown(text): - text = text.replace("$", "$") - - def replace_leading_tabs_and_spaces(line): - new_line = [] - - for char in line: - if char == "\t": - new_line.append(" ") - elif char == " ": - new_line.append(" ") - else: - break - return "".join(new_line) + line[len(new_line):] - - markdown_text = "" - lines = text.split("\n") - in_code_block = False - - for line in lines: - if in_code_block is False and line.startswith("```"): - in_code_block = True - markdown_text += "```\n" - elif in_code_block is True and line.startswith("```"): - in_code_block = False - markdown_text += "```\n" - elif in_code_block: - markdown_text += f"{line}\n" - else: - line = replace_leading_tabs_and_spaces(line) - line = re.sub(r"^(#)", r"\\\1", line) - markdown_text += f"{line} \n" - - return markdown_text - - -def add_language_tag(text): - def detect_language(code_block): - try: - lexer = guess_lexer(code_block) - return lexer.name.lower() - except ClassNotFound: - return "" - - code_block_pattern = re.compile(r"(```)(\w*\n[^`]+```)", re.MULTILINE) - - def replacement(match): - code_block = match.group(2) - if match.group(2).startswith("\n"): - language = detect_language(code_block) - if language: - return f"```{language}{code_block}```" - else: - return f"```\n{code_block}```" - else: - return match.group(1) + code_block + "```" - - text2 = code_block_pattern.sub(replacement, text) - return text2 - - -def delete_last_conversation(chatbot, history): - if len(chatbot) > 0: - chatbot.pop() - - if len(history) > 0: - history.pop() - - return ( - chatbot, - history, - "Delete Done", - ) - - -def reset_state(): - return [], [], "Reset Done" - - -def reset_textbox(): - return gr.update(value=""), "" - - -def cancel_outputing(): - return "Stop Done" - - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=True), - ) - - -class State: - interrupted = False - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - -shared_state = State() diff --git a/spaces/CikeyQI/meme-api/README.md b/spaces/CikeyQI/meme-api/README.md deleted file mode 100644 index 6448b446984a8d5f739174fb4de39b97c83b6160..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Meme Api -emoji: 🌖 -colorFrom: purple -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/do/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/do/__init__.py deleted file mode 100644 index 4128da32c653f4c7416128fcc45081b086e489ef..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/do/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from pathlib import Path -from typing import List - -from meme_generator import add_meme -from meme_generator.utils import save_gif -from PIL.Image import Image 
as IMG -from pil_utils import BuildImage - -img_dir = Path(__file__).parent / "images" - - -def do(images: List[BuildImage], texts, args): - self_locs = [(116, -8), (109, 3), (130, -10)] - user_locs = [(2, 177), (12, 172), (6, 158)] - self_head = ( - images[0] - .convert("RGBA") - .resize((122, 122), keep_ratio=True) - .circle() - .rotate(15) - ) - user_head = ( - images[1] - .convert("RGBA") - .resize((112, 112), keep_ratio=True) - .circle() - .rotate(90) - ) - frames: List[IMG] = [] - for i in range(3): - frame = BuildImage.open(img_dir / f"{i}.png") - frame.paste(user_head, user_locs[i], alpha=True) - frame.paste(self_head, self_locs[i], alpha=True) - frames.append(frame.image) - return save_gif(frames, 0.05) - - -add_meme("do", do, min_images=2, max_images=2, keywords=["撅", "狠狠地撅"]) diff --git a/spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/README.md b/spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/README.md deleted file mode 100644 index 1cc0c2fa153af93c8b77f65710ebe92667803e06..0000000000000000000000000000000000000000 --- a/spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion Xl Base 1.0 -emoji: 😻 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/__init__.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DCandE/rvc-models/vc_infer_pipeline.py b/spaces/DCandE/rvc-models/vc_infer_pipeline.py deleted file mode 100644 index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000 --- a/spaces/DCandE/rvc-models/vc_infer_pipeline.py +++ /dev/null @@ -1,306 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -from config import x_pad, x_query, x_center, x_max -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, device, is_half): - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * x_query # 查询切点前后查询时间 - self.t_center = self.sr * x_center # 查询切点位置 - self.t_max = self.sr * x_max # 免查询时长阈值 - self.device = device - self.is_half = is_half - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = 
pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - _, I = index.search(npy, 1) - npy = big_npy[I.squeeze()] - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_big_npy != "" - and file_index != "" - and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = np.load(file_big_npy) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("Feature retrieval library doesn't exist or ratio is 0") - audio = signal.filtfilt(bh, ah, audio) - 
audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GbrImagePlugin.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GbrImagePlugin.py deleted file mode 100644 index 994a6e8ebb2f0f2e69990a211d7a1ec4f06b7fd1..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GbrImagePlugin.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# The Python Imaging Library -# -# load a GIMP brush file -# -# History: -# 96-03-14 fl Created -# 16-01-08 es Version 2 -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996. -# Copyright (c) Eric Soroos 2016. -# -# See the README file for information on usage and redistribution. -# -# -# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for -# format documentation. -# -# This code Interprets version 1 and 2 .gbr files. 
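# Illustrative sketch (not part of the original plugin): assuming a well-formed
# version-2 brush, the fixed-size header that GbrImageFile._open() parses below
# can also be read standalone with struct; every field is a big-endian unsigned
# 32-bit integer except the 4-byte b"GIMP" magic.
#
#     import struct
#
#     def read_gbr_v2_header(fp):
#         header_size, version, width, height, color_depth = struct.unpack(">5I", fp.read(20))
#         magic = fp.read(4)                      # b"GIMP" for version-2 files
#         spacing = struct.unpack(">I", fp.read(4))[0]
#         name = fp.read(header_size - 28)[:-1]   # brush name, trailing NUL dropped
#         return version, (width, height), color_depth, spacing, name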
-# Version 1 files are obsolete, and should not be used for new -# brushes. -# Version 2 files are saved by GIMP v2.8 (at least) -# Version 3 files have a format specifier of 18 for 16bit floats in -# the color depth field. This is currently unsupported by Pillow. - -from . import Image, ImageFile -from ._binary import i32be as i32 - - -def _accept(prefix): - return len(prefix) >= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2) - - -## -# Image plugin for the GIMP brush format. - - -class GbrImageFile(ImageFile.ImageFile): - format = "GBR" - format_description = "GIMP brush file" - - def _open(self): - header_size = i32(self.fp.read(4)) - if header_size < 20: - msg = "not a GIMP brush" - raise SyntaxError(msg) - version = i32(self.fp.read(4)) - if version not in (1, 2): - msg = f"Unsupported GIMP brush version: {version}" - raise SyntaxError(msg) - - width = i32(self.fp.read(4)) - height = i32(self.fp.read(4)) - color_depth = i32(self.fp.read(4)) - if width <= 0 or height <= 0: - msg = "not a GIMP brush" - raise SyntaxError(msg) - if color_depth not in (1, 4): - msg = f"Unsupported GIMP brush color depth: {color_depth}" - raise SyntaxError(msg) - - if version == 1: - comment_length = header_size - 20 - else: - comment_length = header_size - 28 - magic_number = self.fp.read(4) - if magic_number != b"GIMP": - msg = "not a GIMP brush, bad magic number" - raise SyntaxError(msg) - self.info["spacing"] = i32(self.fp.read(4)) - - comment = self.fp.read(comment_length)[:-1] - - if color_depth == 1: - self.mode = "L" - else: - self.mode = "RGBA" - - self._size = width, height - - self.info["comment"] = comment - - # Image might not be small - Image._decompression_bomb_check(self.size) - - # Data is an uncompressed block of w * h * bytes/pixel - self._data_size = width * height * color_depth - - def load(self): - if not self.im: - self.im = Image.core.new(self.mode, self.size) - self.frombytes(self.fp.read(self._data_size)) - return Image.Image.load(self) - - -# -# registry - - -Image.register_open(GbrImageFile.format, GbrImageFile, _accept) -Image.register_extension(GbrImageFile.format, ".gbr") diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/_version.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/_version.py deleted file mode 100644 index b723056a756af22aaf1a4709c5122bea9fb279ee..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# coding: utf-8 -# file generated by setuptools_scm -# don't change, don't track in version control -version = '2.8.2' -version_tuple = (2, 8, 2) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/merger.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/merger.py deleted file mode 100644 index c3366cbcdee792c575655a04e188d133bb075297..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/merger.py +++ /dev/null @@ -1,1675 +0,0 @@ -""" -Merge OpenType Layout tables (GDEF / GPOS / GSUB). 
-""" -import os -import copy -import enum -from operator import ior -import logging -from fontTools.colorLib.builder import MAX_PAINT_COLR_LAYER_COUNT, LayerReuseCache -from fontTools.misc import classifyTools -from fontTools.misc.roundTools import otRound -from fontTools.misc.treeTools import build_n_ary_tree -from fontTools.ttLib.tables import otTables as ot -from fontTools.ttLib.tables import otBase as otBase -from fontTools.ttLib.tables.otConverters import BaseFixedValue -from fontTools.ttLib.tables.otTraverse import dfs_base_table -from fontTools.ttLib.tables.DefaultTable import DefaultTable -from fontTools.varLib import builder, models, varStore -from fontTools.varLib.models import nonNone, allNone, allEqual, allEqualTo, subList -from fontTools.varLib.varStore import VarStoreInstancer -from functools import reduce -from fontTools.otlLib.builder import buildSinglePos -from fontTools.otlLib.optimize.gpos import ( - _compression_level_from_env, - compact_pair_pos, -) - -log = logging.getLogger("fontTools.varLib.merger") - -from .errors import ( - ShouldBeConstant, - FoundANone, - MismatchedTypes, - NotANone, - LengthsDiffer, - KeysDiffer, - InconsistentGlyphOrder, - InconsistentExtensions, - InconsistentFormats, - UnsupportedFormat, - VarLibMergeError, -) - - -class Merger(object): - def __init__(self, font=None): - self.font = font - # mergeTables populates this from the parent's master ttfs - self.ttfs = None - - @classmethod - def merger(celf, clazzes, attrs=(None,)): - assert celf != Merger, "Subclass Merger instead." - if "mergers" not in celf.__dict__: - celf.mergers = {} - if type(clazzes) in (type, enum.EnumMeta): - clazzes = (clazzes,) - if type(attrs) == str: - attrs = (attrs,) - - def wrapper(method): - assert method.__name__ == "merge" - done = [] - for clazz in clazzes: - if clazz in done: - continue # Support multiple names of a clazz - done.append(clazz) - mergers = celf.mergers.setdefault(clazz, {}) - for attr in attrs: - assert attr not in mergers, ( - "Oops, class '%s' has merge function for '%s' defined already." - % (clazz.__name__, attr) - ) - mergers[attr] = method - return None - - return wrapper - - @classmethod - def mergersFor(celf, thing, _default={}): - typ = type(thing) - - for celf in celf.mro(): - mergers = getattr(celf, "mergers", None) - if mergers is None: - break - - m = celf.mergers.get(typ, None) - if m is not None: - return m - - return _default - - def mergeObjects(self, out, lst, exclude=()): - if hasattr(out, "ensureDecompiled"): - out.ensureDecompiled(recurse=False) - for item in lst: - if hasattr(item, "ensureDecompiled"): - item.ensureDecompiled(recurse=False) - keys = sorted(vars(out).keys()) - if not all(keys == sorted(vars(v).keys()) for v in lst): - raise KeysDiffer( - self, expected=keys, got=[sorted(vars(v).keys()) for v in lst] - ) - mergers = self.mergersFor(out) - defaultMerger = mergers.get("*", self.__class__.mergeThings) - try: - for key in keys: - if key in exclude: - continue - value = getattr(out, key) - values = [getattr(table, key) for table in lst] - mergerFunc = mergers.get(key, defaultMerger) - mergerFunc(self, value, values) - except VarLibMergeError as e: - e.stack.append("." 
+ key) - raise - - def mergeLists(self, out, lst): - if not allEqualTo(out, lst, len): - raise LengthsDiffer(self, expected=len(out), got=[len(x) for x in lst]) - for i, (value, values) in enumerate(zip(out, zip(*lst))): - try: - self.mergeThings(value, values) - except VarLibMergeError as e: - e.stack.append("[%d]" % i) - raise - - def mergeThings(self, out, lst): - if not allEqualTo(out, lst, type): - raise MismatchedTypes( - self, expected=type(out).__name__, got=[type(x).__name__ for x in lst] - ) - mergerFunc = self.mergersFor(out).get(None, None) - if mergerFunc is not None: - mergerFunc(self, out, lst) - elif isinstance(out, enum.Enum): - # need to special-case Enums as have __dict__ but are not regular 'objects', - # otherwise mergeObjects/mergeThings get trapped in a RecursionError - if not allEqualTo(out, lst): - raise ShouldBeConstant(self, expected=out, got=lst) - elif hasattr(out, "__dict__"): - self.mergeObjects(out, lst) - elif isinstance(out, list): - self.mergeLists(out, lst) - else: - if not allEqualTo(out, lst): - raise ShouldBeConstant(self, expected=out, got=lst) - - def mergeTables(self, font, master_ttfs, tableTags): - for tag in tableTags: - if tag not in font: - continue - try: - self.ttfs = master_ttfs - self.mergeThings(font[tag], [m.get(tag) for m in master_ttfs]) - except VarLibMergeError as e: - e.stack.append(tag) - raise - - -# -# Aligning merger -# -class AligningMerger(Merger): - pass - - -@AligningMerger.merger(ot.GDEF, "GlyphClassDef") -def merge(merger, self, lst): - if self is None: - if not allNone(lst): - raise NotANone(merger, expected=None, got=lst) - return - - lst = [l.classDefs for l in lst] - self.classDefs = {} - # We only care about the .classDefs - self = self.classDefs - - allKeys = set() - allKeys.update(*[l.keys() for l in lst]) - for k in allKeys: - allValues = nonNone(l.get(k) for l in lst) - if not allEqual(allValues): - raise ShouldBeConstant( - merger, expected=allValues[0], got=lst, stack=["." + k] - ) - if not allValues: - self[k] = None - else: - self[k] = allValues[0] - - -def _SinglePosUpgradeToFormat2(self): - if self.Format == 2: - return self - - ret = ot.SinglePos() - ret.Format = 2 - ret.Coverage = self.Coverage - ret.ValueFormat = self.ValueFormat - ret.Value = [self.Value for _ in ret.Coverage.glyphs] - ret.ValueCount = len(ret.Value) - - return ret - - -def _merge_GlyphOrders(font, lst, values_lst=None, default=None): - """Takes font and list of glyph lists (must be sorted by glyph id), and returns - two things: - - Combined glyph list, - - If values_lst is None, return input glyph lists, but padded with None when a glyph - was missing in a list. Otherwise, return values_lst list-of-list, padded with None - to match combined glyph lists. 
- """ - if values_lst is None: - dict_sets = [set(l) for l in lst] - else: - dict_sets = [{g: v for g, v in zip(l, vs)} for l, vs in zip(lst, values_lst)] - combined = set() - combined.update(*dict_sets) - - sortKey = font.getReverseGlyphMap().__getitem__ - order = sorted(combined, key=sortKey) - # Make sure all input glyphsets were in proper order - if not all(sorted(vs, key=sortKey) == vs for vs in lst): - raise InconsistentGlyphOrder() - del combined - - paddedValues = None - if values_lst is None: - padded = [ - [glyph if glyph in dict_set else default for glyph in order] - for dict_set in dict_sets - ] - else: - assert len(lst) == len(values_lst) - padded = [ - [dict_set[glyph] if glyph in dict_set else default for glyph in order] - for dict_set in dict_sets - ] - return order, padded - - -@AligningMerger.merger(otBase.ValueRecord) -def merge(merger, self, lst): - # Code below sometimes calls us with self being - # a new object. Copy it from lst and recurse. - self.__dict__ = lst[0].__dict__.copy() - merger.mergeObjects(self, lst) - - -@AligningMerger.merger(ot.Anchor) -def merge(merger, self, lst): - # Code below sometimes calls us with self being - # a new object. Copy it from lst and recurse. - self.__dict__ = lst[0].__dict__.copy() - merger.mergeObjects(self, lst) - - -def _Lookup_SinglePos_get_effective_value(merger, subtables, glyph): - for self in subtables: - if ( - self is None - or type(self) != ot.SinglePos - or self.Coverage is None - or glyph not in self.Coverage.glyphs - ): - continue - if self.Format == 1: - return self.Value - elif self.Format == 2: - return self.Value[self.Coverage.glyphs.index(glyph)] - else: - raise UnsupportedFormat(merger, subtable="single positioning lookup") - return None - - -def _Lookup_PairPos_get_effective_value_pair( - merger, subtables, firstGlyph, secondGlyph -): - for self in subtables: - if ( - self is None - or type(self) != ot.PairPos - or self.Coverage is None - or firstGlyph not in self.Coverage.glyphs - ): - continue - if self.Format == 1: - ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)] - pvr = ps.PairValueRecord - for rec in pvr: # TODO Speed up - if rec.SecondGlyph == secondGlyph: - return rec - continue - elif self.Format == 2: - klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0) - klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0) - return self.Class1Record[klass1].Class2Record[klass2] - else: - raise UnsupportedFormat(merger, subtable="pair positioning lookup") - return None - - -@AligningMerger.merger(ot.SinglePos) -def merge(merger, self, lst): - self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0) - if not (len(lst) == 1 or (valueFormat & ~0xF == 0)): - raise UnsupportedFormat(merger, subtable="single positioning lookup") - - # If all have same coverage table and all are format 1, - coverageGlyphs = self.Coverage.glyphs - if all(v.Format == 1 for v in lst) and all( - coverageGlyphs == v.Coverage.glyphs for v in lst - ): - self.Value = otBase.ValueRecord(valueFormat, self.Value) - if valueFormat != 0: - # If v.Value is None, it means a kerning of 0; we want - # it to participate in the model still. 
- # https://github.com/fonttools/fonttools/issues/3111 - merger.mergeThings( - self.Value, - [v.Value if v.Value is not None else otBase.ValueRecord() for v in lst], - ) - self.ValueFormat = self.Value.getFormat() - return - - # Upgrade everything to Format=2 - self.Format = 2 - lst = [_SinglePosUpgradeToFormat2(v) for v in lst] - - # Align them - glyphs, padded = _merge_GlyphOrders( - merger.font, [v.Coverage.glyphs for v in lst], [v.Value for v in lst] - ) - - self.Coverage.glyphs = glyphs - self.Value = [otBase.ValueRecord(valueFormat) for _ in glyphs] - self.ValueCount = len(self.Value) - - for i, values in enumerate(padded): - for j, glyph in enumerate(glyphs): - if values[j] is not None: - continue - # Fill in value from other subtables - # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness - # is different between used subtable and current subtable! - # TODO(behdad) Check and warn if that happens? - v = _Lookup_SinglePos_get_effective_value( - merger, merger.lookup_subtables[i], glyph - ) - if v is None: - v = otBase.ValueRecord(valueFormat) - values[j] = v - - merger.mergeLists(self.Value, padded) - - # Merge everything else; though, there shouldn't be anything else. :) - merger.mergeObjects( - self, lst, exclude=("Format", "Coverage", "Value", "ValueCount", "ValueFormat") - ) - self.ValueFormat = reduce( - int.__or__, [v.getEffectiveFormat() for v in self.Value], 0 - ) - - -@AligningMerger.merger(ot.PairSet) -def merge(merger, self, lst): - # Align them - glyphs, padded = _merge_GlyphOrders( - merger.font, - [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], - [vs.PairValueRecord for vs in lst], - ) - - self.PairValueRecord = pvrs = [] - for glyph in glyphs: - pvr = ot.PairValueRecord() - pvr.SecondGlyph = glyph - pvr.Value1 = ( - otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None - ) - pvr.Value2 = ( - otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None - ) - pvrs.append(pvr) - self.PairValueCount = len(self.PairValueRecord) - - for i, values in enumerate(padded): - for j, glyph in enumerate(glyphs): - # Fill in value from other subtables - v = ot.PairValueRecord() - v.SecondGlyph = glyph - if values[j] is not None: - vpair = values[j] - else: - vpair = _Lookup_PairPos_get_effective_value_pair( - merger, merger.lookup_subtables[i], self._firstGlyph, glyph - ) - if vpair is None: - v1, v2 = None, None - else: - v1 = getattr(vpair, "Value1", None) - v2 = getattr(vpair, "Value2", None) - v.Value1 = ( - otBase.ValueRecord(merger.valueFormat1, src=v1) - if merger.valueFormat1 - else None - ) - v.Value2 = ( - otBase.ValueRecord(merger.valueFormat2, src=v2) - if merger.valueFormat2 - else None - ) - values[j] = v - del self._firstGlyph - - merger.mergeLists(self.PairValueRecord, padded) - - -def _PairPosFormat1_merge(self, lst, merger): - assert allEqual( - [l.ValueFormat2 == 0 for l in lst if l.PairSet] - ), "Report bug against fonttools." - - # Merge everything else; makes sure Format is the same. 
- merger.mergeObjects( - self, - lst, - exclude=("Coverage", "PairSet", "PairSetCount", "ValueFormat1", "ValueFormat2"), - ) - - empty = ot.PairSet() - empty.PairValueRecord = [] - empty.PairValueCount = 0 - - # Align them - glyphs, padded = _merge_GlyphOrders( - merger.font, - [v.Coverage.glyphs for v in lst], - [v.PairSet for v in lst], - default=empty, - ) - - self.Coverage.glyphs = glyphs - self.PairSet = [ot.PairSet() for _ in glyphs] - self.PairSetCount = len(self.PairSet) - for glyph, ps in zip(glyphs, self.PairSet): - ps._firstGlyph = glyph - - merger.mergeLists(self.PairSet, padded) - - -def _ClassDef_invert(self, allGlyphs=None): - if isinstance(self, dict): - classDefs = self - else: - classDefs = self.classDefs if self and self.classDefs else {} - m = max(classDefs.values()) if classDefs else 0 - - ret = [] - for _ in range(m + 1): - ret.append(set()) - - for k, v in classDefs.items(): - ret[v].add(k) - - # Class-0 is special. It's "everything else". - if allGlyphs is None: - ret[0] = None - else: - # Limit all classes to glyphs in allGlyphs. - # Collect anything without a non-zero class into class=zero. - ret[0] = class0 = set(allGlyphs) - for s in ret[1:]: - s.intersection_update(class0) - class0.difference_update(s) - - return ret - - -def _ClassDef_merge_classify(lst, allGlyphses=None): - self = ot.ClassDef() - self.classDefs = classDefs = {} - allGlyphsesWasNone = allGlyphses is None - if allGlyphsesWasNone: - allGlyphses = [None] * len(lst) - - classifier = classifyTools.Classifier() - for classDef, allGlyphs in zip(lst, allGlyphses): - sets = _ClassDef_invert(classDef, allGlyphs) - if allGlyphs is None: - sets = sets[1:] - classifier.update(sets) - classes = classifier.getClasses() - - if allGlyphsesWasNone: - classes.insert(0, set()) - - for i, classSet in enumerate(classes): - if i == 0: - continue - for g in classSet: - classDefs[g] = i - - return self, classes - - -def _PairPosFormat2_align_matrices(self, lst, font, transparent=False): - matrices = [l.Class1Record for l in lst] - - # Align first classes - self.ClassDef1, classes = _ClassDef_merge_classify( - [l.ClassDef1 for l in lst], [l.Coverage.glyphs for l in lst] - ) - self.Class1Count = len(classes) - new_matrices = [] - for l, matrix in zip(lst, matrices): - nullRow = None - coverage = set(l.Coverage.glyphs) - classDef1 = l.ClassDef1.classDefs - class1Records = [] - for classSet in classes: - exemplarGlyph = next(iter(classSet)) - if exemplarGlyph not in coverage: - # Follow-up to e6125b353e1f54a0280ded5434b8e40d042de69f, - # Fixes https://github.com/googlei18n/fontmake/issues/470 - # Again, revert 8d441779e5afc664960d848f62c7acdbfc71d7b9 - # when merger becomes selfless. - nullRow = None - if nullRow is None: - nullRow = ot.Class1Record() - class2records = nullRow.Class2Record = [] - # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f - for _ in range(l.Class2Count): - if transparent: - rec2 = None - else: - rec2 = ot.Class2Record() - rec2.Value1 = ( - otBase.ValueRecord(self.ValueFormat1) - if self.ValueFormat1 - else None - ) - rec2.Value2 = ( - otBase.ValueRecord(self.ValueFormat2) - if self.ValueFormat2 - else None - ) - class2records.append(rec2) - rec1 = nullRow - else: - klass = classDef1.get(exemplarGlyph, 0) - rec1 = matrix[klass] # TODO handle out-of-range? 
- class1Records.append(rec1) - new_matrices.append(class1Records) - matrices = new_matrices - del new_matrices - - # Align second classes - self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst]) - self.Class2Count = len(classes) - new_matrices = [] - for l, matrix in zip(lst, matrices): - classDef2 = l.ClassDef2.classDefs - class1Records = [] - for rec1old in matrix: - oldClass2Records = rec1old.Class2Record - rec1new = ot.Class1Record() - class2Records = rec1new.Class2Record = [] - for classSet in classes: - if not classSet: # class=0 - rec2 = oldClass2Records[0] - else: - exemplarGlyph = next(iter(classSet)) - klass = classDef2.get(exemplarGlyph, 0) - rec2 = oldClass2Records[klass] - class2Records.append(copy.deepcopy(rec2)) - class1Records.append(rec1new) - new_matrices.append(class1Records) - matrices = new_matrices - del new_matrices - - return matrices - - -def _PairPosFormat2_merge(self, lst, merger): - assert allEqual( - [l.ValueFormat2 == 0 for l in lst if l.Class1Record] - ), "Report bug against fonttools." - - merger.mergeObjects( - self, - lst, - exclude=( - "Coverage", - "ClassDef1", - "Class1Count", - "ClassDef2", - "Class2Count", - "Class1Record", - "ValueFormat1", - "ValueFormat2", - ), - ) - - # Align coverages - glyphs, _ = _merge_GlyphOrders(merger.font, [v.Coverage.glyphs for v in lst]) - self.Coverage.glyphs = glyphs - - # Currently, if the coverage of PairPosFormat2 subtables are different, - # we do NOT bother walking down the subtable list when filling in new - # rows for alignment. As such, this is only correct if current subtable - # is the last subtable in the lookup. Ensure that. - # - # Note that our canonicalization process merges trailing PairPosFormat2's, - # so in reality this is rare. - for l, subtables in zip(lst, merger.lookup_subtables): - if l.Coverage.glyphs != glyphs: - assert l == subtables[-1] - - matrices = _PairPosFormat2_align_matrices(self, lst, merger.font) - - self.Class1Record = list(matrices[0]) # TODO move merger to be selfless - merger.mergeLists(self.Class1Record, matrices) - - -@AligningMerger.merger(ot.PairPos) -def merge(merger, self, lst): - merger.valueFormat1 = self.ValueFormat1 = reduce( - int.__or__, [l.ValueFormat1 for l in lst], 0 - ) - merger.valueFormat2 = self.ValueFormat2 = reduce( - int.__or__, [l.ValueFormat2 for l in lst], 0 - ) - - if self.Format == 1: - _PairPosFormat1_merge(self, lst, merger) - elif self.Format == 2: - _PairPosFormat2_merge(self, lst, merger) - else: - raise UnsupportedFormat(merger, subtable="pair positioning lookup") - - del merger.valueFormat1, merger.valueFormat2 - - # Now examine the list of value records, and update to the union of format values, - # as merge might have created new values. 
- vf1 = 0 - vf2 = 0 - if self.Format == 1: - for pairSet in self.PairSet: - for pairValueRecord in pairSet.PairValueRecord: - pv1 = getattr(pairValueRecord, "Value1", None) - if pv1 is not None: - vf1 |= pv1.getFormat() - pv2 = getattr(pairValueRecord, "Value2", None) - if pv2 is not None: - vf2 |= pv2.getFormat() - elif self.Format == 2: - for class1Record in self.Class1Record: - for class2Record in class1Record.Class2Record: - pv1 = getattr(class2Record, "Value1", None) - if pv1 is not None: - vf1 |= pv1.getFormat() - pv2 = getattr(class2Record, "Value2", None) - if pv2 is not None: - vf2 |= pv2.getFormat() - self.ValueFormat1 = vf1 - self.ValueFormat2 = vf2 - - -def _MarkBasePosFormat1_merge(self, lst, merger, Mark="Mark", Base="Base"): - self.ClassCount = max(l.ClassCount for l in lst) - - MarkCoverageGlyphs, MarkRecords = _merge_GlyphOrders( - merger.font, - [getattr(l, Mark + "Coverage").glyphs for l in lst], - [getattr(l, Mark + "Array").MarkRecord for l in lst], - ) - getattr(self, Mark + "Coverage").glyphs = MarkCoverageGlyphs - - BaseCoverageGlyphs, BaseRecords = _merge_GlyphOrders( - merger.font, - [getattr(l, Base + "Coverage").glyphs for l in lst], - [getattr(getattr(l, Base + "Array"), Base + "Record") for l in lst], - ) - getattr(self, Base + "Coverage").glyphs = BaseCoverageGlyphs - - # MarkArray - records = [] - for g, glyphRecords in zip(MarkCoverageGlyphs, zip(*MarkRecords)): - allClasses = [r.Class for r in glyphRecords if r is not None] - - # TODO Right now we require that all marks have same class in - # all masters that cover them. This is not required. - # - # We can relax that by just requiring that all marks that have - # the same class in a master, have the same class in every other - # master. Indeed, if, say, a sparse master only covers one mark, - # that mark probably will get class 0, which would possibly be - # different from its class in other masters. - # - # We can even go further and reclassify marks to support any - # input. But, since, it's unlikely that two marks being both, - # say, "top" in one master, and one being "top" and other being - # "top-right" in another master, we shouldn't do that, as any - # failures in that case will probably signify mistakes in the - # input masters. 
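        # Illustrative example (not part of the original merger): for a mark glyph
        # covered by three masters that all assign it class 1, allClasses would be
        # [1, 1, 1] and the merge proceeds with Class = 1; a sparse master that
        # renumbered its only mark to class 0 would instead yield [1, 0, 1] and
        # trigger the ShouldBeConstant error just below.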
- - if not allEqual(allClasses): - raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses) - else: - rec = ot.MarkRecord() - rec.Class = allClasses[0] - allAnchors = [None if r is None else r.MarkAnchor for r in glyphRecords] - if allNone(allAnchors): - anchor = None - else: - anchor = ot.Anchor() - anchor.Format = 1 - merger.mergeThings(anchor, allAnchors) - rec.MarkAnchor = anchor - records.append(rec) - array = ot.MarkArray() - array.MarkRecord = records - array.MarkCount = len(records) - setattr(self, Mark + "Array", array) - - # BaseArray - records = [] - for g, glyphRecords in zip(BaseCoverageGlyphs, zip(*BaseRecords)): - if allNone(glyphRecords): - rec = None - else: - rec = getattr(ot, Base + "Record")() - anchors = [] - setattr(rec, Base + "Anchor", anchors) - glyphAnchors = [ - [] if r is None else getattr(r, Base + "Anchor") for r in glyphRecords - ] - for l in glyphAnchors: - l.extend([None] * (self.ClassCount - len(l))) - for allAnchors in zip(*glyphAnchors): - if allNone(allAnchors): - anchor = None - else: - anchor = ot.Anchor() - anchor.Format = 1 - merger.mergeThings(anchor, allAnchors) - anchors.append(anchor) - records.append(rec) - array = getattr(ot, Base + "Array")() - setattr(array, Base + "Record", records) - setattr(array, Base + "Count", len(records)) - setattr(self, Base + "Array", array) - - -@AligningMerger.merger(ot.MarkBasePos) -def merge(merger, self, lst): - if not allEqualTo(self.Format, (l.Format for l in lst)): - raise InconsistentFormats( - merger, - subtable="mark-to-base positioning lookup", - expected=self.Format, - got=[l.Format for l in lst], - ) - if self.Format == 1: - _MarkBasePosFormat1_merge(self, lst, merger) - else: - raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup") - - -@AligningMerger.merger(ot.MarkMarkPos) -def merge(merger, self, lst): - if not allEqualTo(self.Format, (l.Format for l in lst)): - raise InconsistentFormats( - merger, - subtable="mark-to-mark positioning lookup", - expected=self.Format, - got=[l.Format for l in lst], - ) - if self.Format == 1: - _MarkBasePosFormat1_merge(self, lst, merger, "Mark1", "Mark2") - else: - raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup") - - -def _PairSet_flatten(lst, font): - self = ot.PairSet() - self.Coverage = ot.Coverage() - - # Align them - glyphs, padded = _merge_GlyphOrders( - font, - [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], - [vs.PairValueRecord for vs in lst], - ) - - self.Coverage.glyphs = glyphs - self.PairValueRecord = pvrs = [] - for values in zip(*padded): - for v in values: - if v is not None: - pvrs.append(v) - break - else: - assert False - self.PairValueCount = len(self.PairValueRecord) - - return self - - -def _Lookup_PairPosFormat1_subtables_flatten(lst, font): - assert allEqual( - [l.ValueFormat2 == 0 for l in lst if l.PairSet] - ), "Report bug against fonttools." 
- - self = ot.PairPos() - self.Format = 1 - self.Coverage = ot.Coverage() - self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) - self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) - - # Align them - glyphs, padded = _merge_GlyphOrders( - font, [v.Coverage.glyphs for v in lst], [v.PairSet for v in lst] - ) - - self.Coverage.glyphs = glyphs - self.PairSet = [ - _PairSet_flatten([v for v in values if v is not None], font) - for values in zip(*padded) - ] - self.PairSetCount = len(self.PairSet) - return self - - -def _Lookup_PairPosFormat2_subtables_flatten(lst, font): - assert allEqual( - [l.ValueFormat2 == 0 for l in lst if l.Class1Record] - ), "Report bug against fonttools." - - self = ot.PairPos() - self.Format = 2 - self.Coverage = ot.Coverage() - self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) - self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) - - # Align them - glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst]) - self.Coverage.glyphs = glyphs - - matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True) - - matrix = self.Class1Record = [] - for rows in zip(*matrices): - row = ot.Class1Record() - matrix.append(row) - row.Class2Record = [] - row = row.Class2Record - for cols in zip(*list(r.Class2Record for r in rows)): - col = next(iter(c for c in cols if c is not None)) - row.append(col) - - return self - - -def _Lookup_PairPos_subtables_canonicalize(lst, font): - """Merge multiple Format1 subtables at the beginning of lst, - and merge multiple consecutive Format2 subtables that have the same - Class2 (ie. were split because of offset overflows). Returns new list.""" - lst = list(lst) - - l = len(lst) - i = 0 - while i < l and lst[i].Format == 1: - i += 1 - lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)] - - l = len(lst) - i = l - while i > 0 and lst[i - 1].Format == 2: - i -= 1 - lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)] - - return lst - - -def _Lookup_SinglePos_subtables_flatten(lst, font, min_inclusive_rec_format): - glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst], None) - num_glyphs = len(glyphs) - new = ot.SinglePos() - new.Format = 2 - new.ValueFormat = min_inclusive_rec_format - new.Coverage = ot.Coverage() - new.Coverage.glyphs = glyphs - new.ValueCount = num_glyphs - new.Value = [None] * num_glyphs - for singlePos in lst: - if singlePos.Format == 1: - val_rec = singlePos.Value - for gname in singlePos.Coverage.glyphs: - i = glyphs.index(gname) - new.Value[i] = copy.deepcopy(val_rec) - elif singlePos.Format == 2: - for j, gname in enumerate(singlePos.Coverage.glyphs): - val_rec = singlePos.Value[j] - i = glyphs.index(gname) - new.Value[i] = copy.deepcopy(val_rec) - return [new] - - -@AligningMerger.merger(ot.Lookup) -def merge(merger, self, lst): - subtables = merger.lookup_subtables = [l.SubTable for l in lst] - - # Remove Extension subtables - for l, sts in list(zip(lst, subtables)) + [(self, self.SubTable)]: - if not sts: - continue - if sts[0].__class__.__name__.startswith("Extension"): - if not allEqual([st.__class__ for st in sts]): - raise InconsistentExtensions( - merger, - expected="Extension", - got=[st.__class__.__name__ for st in sts], - ) - if not allEqual([st.ExtensionLookupType for st in sts]): - raise InconsistentExtensions(merger) - l.LookupType = sts[0].ExtensionLookupType - new_sts = [st.ExtSubTable for st in sts] - del sts[:] - sts.extend(new_sts) - - isPairPos = 
self.SubTable and isinstance(self.SubTable[0], ot.PairPos) - - if isPairPos: - # AFDKO and feaLib sometimes generate two Format1 subtables instead of one. - # Merge those before continuing. - # https://github.com/fonttools/fonttools/issues/719 - self.SubTable = _Lookup_PairPos_subtables_canonicalize( - self.SubTable, merger.font - ) - subtables = merger.lookup_subtables = [ - _Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables - ] - else: - isSinglePos = self.SubTable and isinstance(self.SubTable[0], ot.SinglePos) - if isSinglePos: - numSubtables = [len(st) for st in subtables] - if not all([nums == numSubtables[0] for nums in numSubtables]): - # Flatten list of SinglePos subtables to single Format 2 subtable, - # with all value records set to the rec format type. - # We use buildSinglePos() to optimize the lookup after merging. - valueFormatList = [t.ValueFormat for st in subtables for t in st] - # Find the minimum value record that can accomodate all the singlePos subtables. - mirf = reduce(ior, valueFormatList) - self.SubTable = _Lookup_SinglePos_subtables_flatten( - self.SubTable, merger.font, mirf - ) - subtables = merger.lookup_subtables = [ - _Lookup_SinglePos_subtables_flatten(st, merger.font, mirf) - for st in subtables - ] - flattened = True - else: - flattened = False - - merger.mergeLists(self.SubTable, subtables) - self.SubTableCount = len(self.SubTable) - - if isPairPos: - # If format-1 subtable created during canonicalization is empty, remove it. - assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1 - if not self.SubTable[0].Coverage.glyphs: - self.SubTable.pop(0) - self.SubTableCount -= 1 - - # If format-2 subtable created during canonicalization is empty, remove it. - assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2 - if not self.SubTable[-1].Coverage.glyphs: - self.SubTable.pop(-1) - self.SubTableCount -= 1 - - # Compact the merged subtables - # This is a good moment to do it because the compaction should create - # smaller subtables, which may prevent overflows from happening. - # Keep reading the value from the ENV until ufo2ft switches to the config system - level = merger.font.cfg.get( - "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL", - default=_compression_level_from_env(), - ) - if level != 0: - log.info("Compacting GPOS...") - self.SubTable = compact_pair_pos(merger.font, level, self.SubTable) - self.SubTableCount = len(self.SubTable) - - elif isSinglePos and flattened: - singlePosTable = self.SubTable[0] - glyphs = singlePosTable.Coverage.glyphs - # We know that singlePosTable is Format 2, as this is set - # in _Lookup_SinglePos_subtables_flatten. 
- singlePosMapping = { - gname: valRecord for gname, valRecord in zip(glyphs, singlePosTable.Value) - } - self.SubTable = buildSinglePos( - singlePosMapping, merger.font.getReverseGlyphMap() - ) - merger.mergeObjects(self, lst, exclude=["SubTable", "SubTableCount"]) - - del merger.lookup_subtables - - -# -# InstancerMerger -# - - -class InstancerMerger(AligningMerger): - """A merger that takes multiple master fonts, and instantiates - an instance.""" - - def __init__(self, font, model, location): - Merger.__init__(self, font) - self.model = model - self.location = location - self.scalars = model.getScalars(location) - - -@InstancerMerger.merger(ot.CaretValue) -def merge(merger, self, lst): - assert self.Format == 1 - Coords = [a.Coordinate for a in lst] - model = merger.model - scalars = merger.scalars - self.Coordinate = otRound(model.interpolateFromMastersAndScalars(Coords, scalars)) - - -@InstancerMerger.merger(ot.Anchor) -def merge(merger, self, lst): - assert self.Format == 1 - XCoords = [a.XCoordinate for a in lst] - YCoords = [a.YCoordinate for a in lst] - model = merger.model - scalars = merger.scalars - self.XCoordinate = otRound(model.interpolateFromMastersAndScalars(XCoords, scalars)) - self.YCoordinate = otRound(model.interpolateFromMastersAndScalars(YCoords, scalars)) - - -@InstancerMerger.merger(otBase.ValueRecord) -def merge(merger, self, lst): - model = merger.model - scalars = merger.scalars - # TODO Handle differing valueformats - for name, tableName in [ - ("XAdvance", "XAdvDevice"), - ("YAdvance", "YAdvDevice"), - ("XPlacement", "XPlaDevice"), - ("YPlacement", "YPlaDevice"), - ]: - assert not hasattr(self, tableName) - - if hasattr(self, name): - values = [getattr(a, name, 0) for a in lst] - value = otRound(model.interpolateFromMastersAndScalars(values, scalars)) - setattr(self, name, value) - - -# -# MutatorMerger -# - - -class MutatorMerger(AligningMerger): - """A merger that takes a variable font, and instantiates - an instance. While there's no "merging" to be done per se, - the operation can benefit from many operations that the - aligning merger does.""" - - def __init__(self, font, instancer, deleteVariations=True): - Merger.__init__(self, font) - self.instancer = instancer - self.deleteVariations = deleteVariations - - -@MutatorMerger.merger(ot.CaretValue) -def merge(merger, self, lst): - # Hack till we become selfless. - self.__dict__ = lst[0].__dict__.copy() - - if self.Format != 3: - return - - instancer = merger.instancer - dev = self.DeviceTable - if merger.deleteVariations: - del self.DeviceTable - if dev: - assert dev.DeltaFormat == 0x8000 - varidx = (dev.StartSize << 16) + dev.EndSize - delta = otRound(instancer[varidx]) - self.Coordinate += delta - - if merger.deleteVariations: - self.Format = 1 - - -@MutatorMerger.merger(ot.Anchor) -def merge(merger, self, lst): - # Hack till we become selfless. 
- self.__dict__ = lst[0].__dict__.copy() - - if self.Format != 3: - return - - instancer = merger.instancer - for v in "XY": - tableName = v + "DeviceTable" - if not hasattr(self, tableName): - continue - dev = getattr(self, tableName) - if merger.deleteVariations: - delattr(self, tableName) - if dev is None: - continue - - assert dev.DeltaFormat == 0x8000 - varidx = (dev.StartSize << 16) + dev.EndSize - delta = otRound(instancer[varidx]) - - attr = v + "Coordinate" - setattr(self, attr, getattr(self, attr) + delta) - - if merger.deleteVariations: - self.Format = 1 - - -@MutatorMerger.merger(otBase.ValueRecord) -def merge(merger, self, lst): - # Hack till we become selfless. - self.__dict__ = lst[0].__dict__.copy() - - instancer = merger.instancer - for name, tableName in [ - ("XAdvance", "XAdvDevice"), - ("YAdvance", "YAdvDevice"), - ("XPlacement", "XPlaDevice"), - ("YPlacement", "YPlaDevice"), - ]: - if not hasattr(self, tableName): - continue - dev = getattr(self, tableName) - if merger.deleteVariations: - delattr(self, tableName) - if dev is None: - continue - - assert dev.DeltaFormat == 0x8000 - varidx = (dev.StartSize << 16) + dev.EndSize - delta = otRound(instancer[varidx]) - - setattr(self, name, getattr(self, name, 0) + delta) - - -# -# VariationMerger -# - - -class VariationMerger(AligningMerger): - """A merger that takes multiple master fonts, and builds a - variable font.""" - - def __init__(self, model, axisTags, font): - Merger.__init__(self, font) - self.store_builder = varStore.OnlineVarStoreBuilder(axisTags) - self.setModel(model) - - def setModel(self, model): - self.model = model - self.store_builder.setModel(model) - - def mergeThings(self, out, lst): - masterModel = None - origTTFs = None - if None in lst: - if allNone(lst): - if out is not None: - raise FoundANone(self, got=lst) - return - - # temporarily subset the list of master ttfs to the ones for which - # master values are not None - origTTFs = self.ttfs - if self.ttfs: - self.ttfs = subList([v is not None for v in lst], self.ttfs) - - masterModel = self.model - model, lst = masterModel.getSubModel(lst) - self.setModel(model) - - super(VariationMerger, self).mergeThings(out, lst) - - if masterModel: - self.setModel(masterModel) - if origTTFs: - self.ttfs = origTTFs - - -def buildVarDevTable(store_builder, master_values): - if allEqual(master_values): - return master_values[0], None - base, varIdx = store_builder.storeMasters(master_values) - return base, builder.buildVarDevTable(varIdx) - - -@VariationMerger.merger(ot.BaseCoord) -def merge(merger, self, lst): - if self.Format != 1: - raise UnsupportedFormat(merger, subtable="a baseline coordinate") - self.Coordinate, DeviceTable = buildVarDevTable( - merger.store_builder, [a.Coordinate for a in lst] - ) - if DeviceTable: - self.Format = 3 - self.DeviceTable = DeviceTable - - -@VariationMerger.merger(ot.CaretValue) -def merge(merger, self, lst): - if self.Format != 1: - raise UnsupportedFormat(merger, subtable="a caret") - self.Coordinate, DeviceTable = buildVarDevTable( - merger.store_builder, [a.Coordinate for a in lst] - ) - if DeviceTable: - self.Format = 3 - self.DeviceTable = DeviceTable - - -@VariationMerger.merger(ot.Anchor) -def merge(merger, self, lst): - if self.Format != 1: - raise UnsupportedFormat(merger, subtable="an anchor") - self.XCoordinate, XDeviceTable = buildVarDevTable( - merger.store_builder, [a.XCoordinate for a in lst] - ) - self.YCoordinate, YDeviceTable = buildVarDevTable( - merger.store_builder, [a.YCoordinate for a in lst] - ) - if 
XDeviceTable or YDeviceTable: - self.Format = 3 - self.XDeviceTable = XDeviceTable - self.YDeviceTable = YDeviceTable - - -@VariationMerger.merger(otBase.ValueRecord) -def merge(merger, self, lst): - for name, tableName in [ - ("XAdvance", "XAdvDevice"), - ("YAdvance", "YAdvDevice"), - ("XPlacement", "XPlaDevice"), - ("YPlacement", "YPlaDevice"), - ]: - if hasattr(self, name): - value, deviceTable = buildVarDevTable( - merger.store_builder, [getattr(a, name, 0) for a in lst] - ) - setattr(self, name, value) - if deviceTable: - setattr(self, tableName, deviceTable) - - -class COLRVariationMerger(VariationMerger): - """A specialized VariationMerger that takes multiple master fonts containing - COLRv1 tables, and builds a variable COLR font. - - COLR tables are special in that variable subtables can be associated with - multiple delta-set indices (via VarIndexBase). - They also contain tables that must change their type (not simply the Format) - as they become variable (e.g. Affine2x3 -> VarAffine2x3) so this merger takes - care of that too. - """ - - def __init__(self, model, axisTags, font, allowLayerReuse=True): - VariationMerger.__init__(self, model, axisTags, font) - # maps {tuple(varIdxes): VarIndexBase} to facilitate reuse of VarIndexBase - # between variable tables with same varIdxes. - self.varIndexCache = {} - # flat list of all the varIdxes generated while merging - self.varIdxes = [] - # set of id()s of the subtables that contain variations after merging - # and need to be upgraded to the associated VarType. - self.varTableIds = set() - # we keep these around for rebuilding a LayerList while merging PaintColrLayers - self.layers = [] - self.layerReuseCache = None - if allowLayerReuse: - self.layerReuseCache = LayerReuseCache() - # flag to ensure BaseGlyphList is fully merged before LayerList gets processed - self._doneBaseGlyphs = False - - def mergeTables(self, font, master_ttfs, tableTags=("COLR",)): - if "COLR" in tableTags and "COLR" in font: - # The merger modifies the destination COLR table in-place. If this contains - # multiple PaintColrLayers referencing the same layers from LayerList, it's - # a problem because we may risk modifying the same paint more than once, or - # worse, fail while attempting to do that. - # We don't know whether the master COLR table was built with layer reuse - # disabled, thus to be safe we rebuild its LayerList so that it contains only - # unique layers referenced from non-overlapping PaintColrLayers throughout - # the base paint graphs. 
- self.expandPaintColrLayers(font["COLR"].table) - VariationMerger.mergeTables(self, font, master_ttfs, tableTags) - - def checkFormatEnum(self, out, lst, validate=lambda _: True): - fmt = out.Format - formatEnum = out.formatEnum - ok = False - try: - fmt = formatEnum(fmt) - except ValueError: - pass - else: - ok = validate(fmt) - if not ok: - raise UnsupportedFormat(self, subtable=type(out).__name__, value=fmt) - expected = fmt - got = [] - for v in lst: - fmt = getattr(v, "Format", None) - try: - fmt = formatEnum(fmt) - except ValueError: - pass - got.append(fmt) - if not allEqualTo(expected, got): - raise InconsistentFormats( - self, - subtable=type(out).__name__, - expected=expected, - got=got, - ) - return expected - - def mergeSparseDict(self, out, lst): - for k in out.keys(): - try: - self.mergeThings(out[k], [v.get(k) for v in lst]) - except VarLibMergeError as e: - e.stack.append(f"[{k!r}]") - raise - - def mergeAttrs(self, out, lst, attrs): - for attr in attrs: - value = getattr(out, attr) - values = [getattr(item, attr) for item in lst] - try: - self.mergeThings(value, values) - except VarLibMergeError as e: - e.stack.append(f".{attr}") - raise - - def storeMastersForAttr(self, out, lst, attr): - master_values = [getattr(item, attr) for item in lst] - - # VarStore treats deltas for fixed-size floats as integers, so we - # must convert master values to int before storing them in the builder - # then back to float. - is_fixed_size_float = False - conv = out.getConverterByName(attr) - if isinstance(conv, BaseFixedValue): - is_fixed_size_float = True - master_values = [conv.toInt(v) for v in master_values] - - baseValue = master_values[0] - varIdx = ot.NO_VARIATION_INDEX - if not allEqual(master_values): - baseValue, varIdx = self.store_builder.storeMasters(master_values) - - if is_fixed_size_float: - baseValue = conv.fromInt(baseValue) - - return baseValue, varIdx - - def storeVariationIndices(self, varIdxes) -> int: - # try to reuse an existing VarIndexBase for the same varIdxes, or else - # create a new one - key = tuple(varIdxes) - varIndexBase = self.varIndexCache.get(key) - - if varIndexBase is None: - # scan for a full match anywhere in the self.varIdxes - for i in range(len(self.varIdxes) - len(varIdxes) + 1): - if self.varIdxes[i : i + len(varIdxes)] == varIdxes: - self.varIndexCache[key] = varIndexBase = i - break - - if varIndexBase is None: - # try find a partial match at the end of the self.varIdxes - for n in range(len(varIdxes) - 1, 0, -1): - if self.varIdxes[-n:] == varIdxes[:n]: - varIndexBase = len(self.varIdxes) - n - self.varIndexCache[key] = varIndexBase - self.varIdxes.extend(varIdxes[n:]) - break - - if varIndexBase is None: - # no match found, append at the end - self.varIndexCache[key] = varIndexBase = len(self.varIdxes) - self.varIdxes.extend(varIdxes) - - return varIndexBase - - def mergeVariableAttrs(self, out, lst, attrs) -> int: - varIndexBase = ot.NO_VARIATION_INDEX - varIdxes = [] - for attr in attrs: - baseValue, varIdx = self.storeMastersForAttr(out, lst, attr) - setattr(out, attr, baseValue) - varIdxes.append(varIdx) - - if any(v != ot.NO_VARIATION_INDEX for v in varIdxes): - varIndexBase = self.storeVariationIndices(varIdxes) - - return varIndexBase - - @classmethod - def convertSubTablesToVarType(cls, table): - for path in dfs_base_table( - table, - skip_root=True, - predicate=lambda path: ( - getattr(type(path[-1].value), "VarType", None) is not None - ), - ): - st = path[-1] - subTable = st.value - varType = type(subTable).VarType - newSubTable 
= varType() - newSubTable.__dict__.update(subTable.__dict__) - newSubTable.populateDefaults() - parent = path[-2].value - if st.index is not None: - getattr(parent, st.name)[st.index] = newSubTable - else: - setattr(parent, st.name, newSubTable) - - @staticmethod - def expandPaintColrLayers(colr): - """Rebuild LayerList without PaintColrLayers reuse. - - Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph - which are irrelevant for this); any layers referenced via PaintColrLayers are - collected into a new LayerList and duplicated when reuse is detected, to ensure - that all paints are distinct objects at the end of the process. - PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap - is left. Also, any consecutively nested PaintColrLayers are flattened. - The COLR table's LayerList is replaced with the new unique layers. - A side effect is also that any layer from the old LayerList which is not - referenced by any PaintColrLayers is dropped. - """ - if not colr.LayerList: - # if no LayerList, there's nothing to expand - return - uniqueLayerIDs = set() - newLayerList = [] - for rec in colr.BaseGlyphList.BaseGlyphPaintRecord: - frontier = [rec.Paint] - while frontier: - paint = frontier.pop() - if paint.Format == ot.PaintFormat.PaintColrGlyph: - # don't traverse these, we treat them as constant for merging - continue - elif paint.Format == ot.PaintFormat.PaintColrLayers: - # de-treeify any nested PaintColrLayers, append unique copies to - # the new layer list and update PaintColrLayers index/count - children = list(_flatten_layers(paint, colr)) - first_layer_index = len(newLayerList) - for layer in children: - if id(layer) in uniqueLayerIDs: - layer = copy.deepcopy(layer) - assert id(layer) not in uniqueLayerIDs - newLayerList.append(layer) - uniqueLayerIDs.add(id(layer)) - paint.FirstLayerIndex = first_layer_index - paint.NumLayers = len(children) - else: - children = paint.getChildren(colr) - frontier.extend(reversed(children)) - # sanity check all the new layers are distinct objects - assert len(newLayerList) == len(uniqueLayerIDs) - colr.LayerList.Paint = newLayerList - colr.LayerList.LayerCount = len(newLayerList) - - -@COLRVariationMerger.merger(ot.BaseGlyphList) -def merge(merger, self, lst): - # ignore BaseGlyphCount, allow sparse glyph sets across masters - out = {rec.BaseGlyph: rec for rec in self.BaseGlyphPaintRecord} - masters = [{rec.BaseGlyph: rec for rec in m.BaseGlyphPaintRecord} for m in lst] - - for i, g in enumerate(out.keys()): - try: - # missing base glyphs don't participate in the merge - merger.mergeThings(out[g], [v.get(g) for v in masters]) - except VarLibMergeError as e: - e.stack.append(f".BaseGlyphPaintRecord[{i}]") - e.cause["location"] = f"base glyph {g!r}" - raise - - merger._doneBaseGlyphs = True - - -@COLRVariationMerger.merger(ot.LayerList) -def merge(merger, self, lst): - # nothing to merge for LayerList, assuming we have already merged all PaintColrLayers - # found while traversing the paint graphs rooted at BaseGlyphPaintRecords. - assert merger._doneBaseGlyphs, "BaseGlyphList must be merged before LayerList" - # Simply flush the final list of layers and go home. 
- self.LayerCount = len(merger.layers) - self.Paint = merger.layers - - -def _flatten_layers(root, colr): - assert root.Format == ot.PaintFormat.PaintColrLayers - for paint in root.getChildren(colr): - if paint.Format == ot.PaintFormat.PaintColrLayers: - yield from _flatten_layers(paint, colr) - else: - yield paint - - -def _merge_PaintColrLayers(self, out, lst): - # we only enforce that the (flat) number of layers is the same across all masters - # but we allow FirstLayerIndex to differ to acommodate for sparse glyph sets. - - out_layers = list(_flatten_layers(out, self.font["COLR"].table)) - - # sanity check ttfs are subset to current values (see VariationMerger.mergeThings) - # before matching each master PaintColrLayers to its respective COLR by position - assert len(self.ttfs) == len(lst) - master_layerses = [ - list(_flatten_layers(lst[i], self.ttfs[i]["COLR"].table)) - for i in range(len(lst)) - ] - - try: - self.mergeLists(out_layers, master_layerses) - except VarLibMergeError as e: - # NOTE: This attribute doesn't actually exist in PaintColrLayers but it's - # handy to have it in the stack trace for debugging. - e.stack.append(".Layers") - raise - - # following block is very similar to LayerListBuilder._beforeBuildPaintColrLayers - # but I couldn't find a nice way to share the code between the two... - - if self.layerReuseCache is not None: - # successful reuse can make the list smaller - out_layers = self.layerReuseCache.try_reuse(out_layers) - - # if the list is still too big we need to tree-fy it - is_tree = len(out_layers) > MAX_PAINT_COLR_LAYER_COUNT - out_layers = build_n_ary_tree(out_layers, n=MAX_PAINT_COLR_LAYER_COUNT) - - # We now have a tree of sequences with Paint leaves. - # Convert the sequences into PaintColrLayers. - def listToColrLayers(paint): - if isinstance(paint, list): - layers = [listToColrLayers(l) for l in paint] - paint = ot.Paint() - paint.Format = int(ot.PaintFormat.PaintColrLayers) - paint.NumLayers = len(layers) - paint.FirstLayerIndex = len(self.layers) - self.layers.extend(layers) - if self.layerReuseCache is not None: - self.layerReuseCache.add(layers, paint.FirstLayerIndex) - return paint - - out_layers = [listToColrLayers(l) for l in out_layers] - - if len(out_layers) == 1 and out_layers[0].Format == ot.PaintFormat.PaintColrLayers: - # special case when the reuse cache finds a single perfect PaintColrLayers match - # (it can only come from a successful reuse, _flatten_layers has gotten rid of - # all nested PaintColrLayers already); we assign it directly and avoid creating - # an extra table - out.NumLayers = out_layers[0].NumLayers - out.FirstLayerIndex = out_layers[0].FirstLayerIndex - else: - out.NumLayers = len(out_layers) - out.FirstLayerIndex = len(self.layers) - - self.layers.extend(out_layers) - - # Register our parts for reuse provided we aren't a tree - # If we are a tree the leaves registered for reuse and that will suffice - if self.layerReuseCache is not None and not is_tree: - self.layerReuseCache.add(out_layers, out.FirstLayerIndex) - - -@COLRVariationMerger.merger((ot.Paint, ot.ClipBox)) -def merge(merger, self, lst): - fmt = merger.checkFormatEnum(self, lst, lambda fmt: not fmt.is_variable()) - - if fmt is ot.PaintFormat.PaintColrLayers: - _merge_PaintColrLayers(merger, self, lst) - return - - varFormat = fmt.as_variable() - - varAttrs = () - if varFormat is not None: - varAttrs = otBase.getVariableAttrs(type(self), varFormat) - staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs) - - 
merger.mergeAttrs(self, lst, staticAttrs) - - varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs) - - subTables = [st.value for st in self.iterSubTables()] - - # Convert table to variable if itself has variations or any subtables have - isVariable = varIndexBase != ot.NO_VARIATION_INDEX or any( - id(table) in merger.varTableIds for table in subTables - ) - - if isVariable: - if varAttrs: - # Some PaintVar* don't have any scalar attributes that can vary, - # only indirect offsets to other variable subtables, thus have - # no VarIndexBase of their own (e.g. PaintVarTransform) - self.VarIndexBase = varIndexBase - - if subTables: - # Convert Affine2x3 -> VarAffine2x3, ColorLine -> VarColorLine, etc. - merger.convertSubTablesToVarType(self) - - assert varFormat is not None - self.Format = int(varFormat) - - -@COLRVariationMerger.merger((ot.Affine2x3, ot.ColorStop)) -def merge(merger, self, lst): - varType = type(self).VarType - - varAttrs = otBase.getVariableAttrs(varType) - staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs) - - merger.mergeAttrs(self, lst, staticAttrs) - - varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs) - - if varIndexBase != ot.NO_VARIATION_INDEX: - self.VarIndexBase = varIndexBase - # mark as having variations so the parent table will convert to Var{Type} - merger.varTableIds.add(id(self)) - - -@COLRVariationMerger.merger(ot.ColorLine) -def merge(merger, self, lst): - merger.mergeAttrs(self, lst, (c.name for c in self.getConverters())) - - if any(id(stop) in merger.varTableIds for stop in self.ColorStop): - merger.convertSubTablesToVarType(self) - merger.varTableIds.add(id(self)) - - -@COLRVariationMerger.merger(ot.ClipList, "clips") -def merge(merger, self, lst): - # 'sparse' in that we allow non-default masters to omit ClipBox entries - # for some/all glyphs (i.e. they don't participate) - merger.mergeSparseDict(self, lst) diff --git a/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/oid.py b/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/oid.py deleted file mode 100644 index 90d7f8613e4f12e942ec8967db9f17c0ec0d41f4..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/oid.py +++ /dev/null @@ -1,535 +0,0 @@ -# Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/datasets/oid.py -# Copyright (c) Facebook, Inc. and its affiliates. 
-from .register_oid import register_oid_instances -import os - -categories = [ - {'id': 1, 'name': 'Infant bed', 'freebase_id': '/m/061hd_'}, - {'id': 2, 'name': 'Rose', 'freebase_id': '/m/06m11'}, - {'id': 3, 'name': 'Flag', 'freebase_id': '/m/03120'}, - {'id': 4, 'name': 'Flashlight', 'freebase_id': '/m/01kb5b'}, - {'id': 5, 'name': 'Sea turtle', 'freebase_id': '/m/0120dh'}, - {'id': 6, 'name': 'Camera', 'freebase_id': '/m/0dv5r'}, - {'id': 7, 'name': 'Animal', 'freebase_id': '/m/0jbk'}, - {'id': 8, 'name': 'Glove', 'freebase_id': '/m/0174n1'}, - {'id': 9, 'name': 'Crocodile', 'freebase_id': '/m/09f_2'}, - {'id': 10, 'name': 'Cattle', 'freebase_id': '/m/01xq0k1'}, - {'id': 11, 'name': 'House', 'freebase_id': '/m/03jm5'}, - {'id': 12, 'name': 'Guacamole', 'freebase_id': '/m/02g30s'}, - {'id': 13, 'name': 'Penguin', 'freebase_id': '/m/05z6w'}, - {'id': 14, 'name': 'Vehicle registration plate', 'freebase_id': '/m/01jfm_'}, - {'id': 15, 'name': 'Bench', 'freebase_id': '/m/076lb9'}, - {'id': 16, 'name': 'Ladybug', 'freebase_id': '/m/0gj37'}, - {'id': 17, 'name': 'Human nose', 'freebase_id': '/m/0k0pj'}, - {'id': 18, 'name': 'Watermelon', 'freebase_id': '/m/0kpqd'}, - {'id': 19, 'name': 'Flute', 'freebase_id': '/m/0l14j_'}, - {'id': 20, 'name': 'Butterfly', 'freebase_id': '/m/0cyf8'}, - {'id': 21, 'name': 'Washing machine', 'freebase_id': '/m/0174k2'}, - {'id': 22, 'name': 'Raccoon', 'freebase_id': '/m/0dq75'}, - {'id': 23, 'name': 'Segway', 'freebase_id': '/m/076bq'}, - {'id': 24, 'name': 'Taco', 'freebase_id': '/m/07crc'}, - {'id': 25, 'name': 'Jellyfish', 'freebase_id': '/m/0d8zb'}, - {'id': 26, 'name': 'Cake', 'freebase_id': '/m/0fszt'}, - {'id': 27, 'name': 'Pen', 'freebase_id': '/m/0k1tl'}, - {'id': 28, 'name': 'Cannon', 'freebase_id': '/m/020kz'}, - {'id': 29, 'name': 'Bread', 'freebase_id': '/m/09728'}, - {'id': 30, 'name': 'Tree', 'freebase_id': '/m/07j7r'}, - {'id': 31, 'name': 'Shellfish', 'freebase_id': '/m/0fbdv'}, - {'id': 32, 'name': 'Bed', 'freebase_id': '/m/03ssj5'}, - {'id': 33, 'name': 'Hamster', 'freebase_id': '/m/03qrc'}, - {'id': 34, 'name': 'Hat', 'freebase_id': '/m/02dl1y'}, - {'id': 35, 'name': 'Toaster', 'freebase_id': '/m/01k6s3'}, - {'id': 36, 'name': 'Sombrero', 'freebase_id': '/m/02jfl0'}, - {'id': 37, 'name': 'Tiara', 'freebase_id': '/m/01krhy'}, - {'id': 38, 'name': 'Bowl', 'freebase_id': '/m/04kkgm'}, - {'id': 39, 'name': 'Dragonfly', 'freebase_id': '/m/0ft9s'}, - {'id': 40, 'name': 'Moths and butterflies', 'freebase_id': '/m/0d_2m'}, - {'id': 41, 'name': 'Antelope', 'freebase_id': '/m/0czz2'}, - {'id': 42, 'name': 'Vegetable', 'freebase_id': '/m/0f4s2w'}, - {'id': 43, 'name': 'Torch', 'freebase_id': '/m/07dd4'}, - {'id': 44, 'name': 'Building', 'freebase_id': '/m/0cgh4'}, - {'id': 45, 'name': 'Power plugs and sockets', 'freebase_id': '/m/03bbps'}, - {'id': 46, 'name': 'Blender', 'freebase_id': '/m/02pjr4'}, - {'id': 47, 'name': 'Billiard table', 'freebase_id': '/m/04p0qw'}, - {'id': 48, 'name': 'Cutting board', 'freebase_id': '/m/02pdsw'}, - {'id': 49, 'name': 'Bronze sculpture', 'freebase_id': '/m/01yx86'}, - {'id': 50, 'name': 'Turtle', 'freebase_id': '/m/09dzg'}, - {'id': 51, 'name': 'Broccoli', 'freebase_id': '/m/0hkxq'}, - {'id': 52, 'name': 'Tiger', 'freebase_id': '/m/07dm6'}, - {'id': 53, 'name': 'Mirror', 'freebase_id': '/m/054_l'}, - {'id': 54, 'name': 'Bear', 'freebase_id': '/m/01dws'}, - {'id': 55, 'name': 'Zucchini', 'freebase_id': '/m/027pcv'}, - {'id': 56, 'name': 'Dress', 'freebase_id': '/m/01d40f'}, - {'id': 57, 'name': 'Volleyball', 
'freebase_id': '/m/02rgn06'}, - {'id': 58, 'name': 'Guitar', 'freebase_id': '/m/0342h'}, - {'id': 59, 'name': 'Reptile', 'freebase_id': '/m/06bt6'}, - {'id': 60, 'name': 'Golf cart', 'freebase_id': '/m/0323sq'}, - {'id': 61, 'name': 'Tart', 'freebase_id': '/m/02zvsm'}, - {'id': 62, 'name': 'Fedora', 'freebase_id': '/m/02fq_6'}, - {'id': 63, 'name': 'Carnivore', 'freebase_id': '/m/01lrl'}, - {'id': 64, 'name': 'Car', 'freebase_id': '/m/0k4j'}, - {'id': 65, 'name': 'Lighthouse', 'freebase_id': '/m/04h7h'}, - {'id': 66, 'name': 'Coffeemaker', 'freebase_id': '/m/07xyvk'}, - {'id': 67, 'name': 'Food processor', 'freebase_id': '/m/03y6mg'}, - {'id': 68, 'name': 'Truck', 'freebase_id': '/m/07r04'}, - {'id': 69, 'name': 'Bookcase', 'freebase_id': '/m/03__z0'}, - {'id': 70, 'name': 'Surfboard', 'freebase_id': '/m/019w40'}, - {'id': 71, 'name': 'Footwear', 'freebase_id': '/m/09j5n'}, - {'id': 72, 'name': 'Bench', 'freebase_id': '/m/0cvnqh'}, - {'id': 73, 'name': 'Necklace', 'freebase_id': '/m/01llwg'}, - {'id': 74, 'name': 'Flower', 'freebase_id': '/m/0c9ph5'}, - {'id': 75, 'name': 'Radish', 'freebase_id': '/m/015x5n'}, - {'id': 76, 'name': 'Marine mammal', 'freebase_id': '/m/0gd2v'}, - {'id': 77, 'name': 'Frying pan', 'freebase_id': '/m/04v6l4'}, - {'id': 78, 'name': 'Tap', 'freebase_id': '/m/02jz0l'}, - {'id': 79, 'name': 'Peach', 'freebase_id': '/m/0dj6p'}, - {'id': 80, 'name': 'Knife', 'freebase_id': '/m/04ctx'}, - {'id': 81, 'name': 'Handbag', 'freebase_id': '/m/080hkjn'}, - {'id': 82, 'name': 'Laptop', 'freebase_id': '/m/01c648'}, - {'id': 83, 'name': 'Tent', 'freebase_id': '/m/01j61q'}, - {'id': 84, 'name': 'Ambulance', 'freebase_id': '/m/012n7d'}, - {'id': 85, 'name': 'Christmas tree', 'freebase_id': '/m/025nd'}, - {'id': 86, 'name': 'Eagle', 'freebase_id': '/m/09csl'}, - {'id': 87, 'name': 'Limousine', 'freebase_id': '/m/01lcw4'}, - {'id': 88, 'name': 'Kitchen & dining room table', 'freebase_id': '/m/0h8n5zk'}, - {'id': 89, 'name': 'Polar bear', 'freebase_id': '/m/0633h'}, - {'id': 90, 'name': 'Tower', 'freebase_id': '/m/01fdzj'}, - {'id': 91, 'name': 'Football', 'freebase_id': '/m/01226z'}, - {'id': 92, 'name': 'Willow', 'freebase_id': '/m/0mw_6'}, - {'id': 93, 'name': 'Human head', 'freebase_id': '/m/04hgtk'}, - {'id': 94, 'name': 'Stop sign', 'freebase_id': '/m/02pv19'}, - {'id': 95, 'name': 'Banana', 'freebase_id': '/m/09qck'}, - {'id': 96, 'name': 'Mixer', 'freebase_id': '/m/063rgb'}, - {'id': 97, 'name': 'Binoculars', 'freebase_id': '/m/0lt4_'}, - {'id': 98, 'name': 'Dessert', 'freebase_id': '/m/0270h'}, - {'id': 99, 'name': 'Bee', 'freebase_id': '/m/01h3n'}, - {'id': 100, 'name': 'Chair', 'freebase_id': '/m/01mzpv'}, - {'id': 101, 'name': 'Wood-burning stove', 'freebase_id': '/m/04169hn'}, - {'id': 102, 'name': 'Flowerpot', 'freebase_id': '/m/0fm3zh'}, - {'id': 103, 'name': 'Beaker', 'freebase_id': '/m/0d20w4'}, - {'id': 104, 'name': 'Oyster', 'freebase_id': '/m/0_cp5'}, - {'id': 105, 'name': 'Woodpecker', 'freebase_id': '/m/01dy8n'}, - {'id': 106, 'name': 'Harp', 'freebase_id': '/m/03m5k'}, - {'id': 107, 'name': 'Bathtub', 'freebase_id': '/m/03dnzn'}, - {'id': 108, 'name': 'Wall clock', 'freebase_id': '/m/0h8mzrc'}, - {'id': 109, 'name': 'Sports uniform', 'freebase_id': '/m/0h8mhzd'}, - {'id': 110, 'name': 'Rhinoceros', 'freebase_id': '/m/03d443'}, - {'id': 111, 'name': 'Beehive', 'freebase_id': '/m/01gllr'}, - {'id': 112, 'name': 'Cupboard', 'freebase_id': '/m/0642b4'}, - {'id': 113, 'name': 'Chicken', 'freebase_id': '/m/09b5t'}, - {'id': 114, 'name': 'Man', 'freebase_id': 
'/m/04yx4'}, - {'id': 115, 'name': 'Blue jay', 'freebase_id': '/m/01f8m5'}, - {'id': 116, 'name': 'Cucumber', 'freebase_id': '/m/015x4r'}, - {'id': 117, 'name': 'Balloon', 'freebase_id': '/m/01j51'}, - {'id': 118, 'name': 'Kite', 'freebase_id': '/m/02zt3'}, - {'id': 119, 'name': 'Fireplace', 'freebase_id': '/m/03tw93'}, - {'id': 120, 'name': 'Lantern', 'freebase_id': '/m/01jfsr'}, - {'id': 121, 'name': 'Missile', 'freebase_id': '/m/04ylt'}, - {'id': 122, 'name': 'Book', 'freebase_id': '/m/0bt_c3'}, - {'id': 123, 'name': 'Spoon', 'freebase_id': '/m/0cmx8'}, - {'id': 124, 'name': 'Grapefruit', 'freebase_id': '/m/0hqkz'}, - {'id': 125, 'name': 'Squirrel', 'freebase_id': '/m/071qp'}, - {'id': 126, 'name': 'Orange', 'freebase_id': '/m/0cyhj_'}, - {'id': 127, 'name': 'Coat', 'freebase_id': '/m/01xygc'}, - {'id': 128, 'name': 'Punching bag', 'freebase_id': '/m/0420v5'}, - {'id': 129, 'name': 'Zebra', 'freebase_id': '/m/0898b'}, - {'id': 130, 'name': 'Billboard', 'freebase_id': '/m/01knjb'}, - {'id': 131, 'name': 'Bicycle', 'freebase_id': '/m/0199g'}, - {'id': 132, 'name': 'Door handle', 'freebase_id': '/m/03c7gz'}, - {'id': 133, 'name': 'Mechanical fan', 'freebase_id': '/m/02x984l'}, - {'id': 134, 'name': 'Ring binder', 'freebase_id': '/m/04zwwv'}, - {'id': 135, 'name': 'Table', 'freebase_id': '/m/04bcr3'}, - {'id': 136, 'name': 'Parrot', 'freebase_id': '/m/0gv1x'}, - {'id': 137, 'name': 'Sock', 'freebase_id': '/m/01nq26'}, - {'id': 138, 'name': 'Vase', 'freebase_id': '/m/02s195'}, - {'id': 139, 'name': 'Weapon', 'freebase_id': '/m/083kb'}, - {'id': 140, 'name': 'Shotgun', 'freebase_id': '/m/06nrc'}, - {'id': 141, 'name': 'Glasses', 'freebase_id': '/m/0jyfg'}, - {'id': 142, 'name': 'Seahorse', 'freebase_id': '/m/0nybt'}, - {'id': 143, 'name': 'Belt', 'freebase_id': '/m/0176mf'}, - {'id': 144, 'name': 'Watercraft', 'freebase_id': '/m/01rzcn'}, - {'id': 145, 'name': 'Window', 'freebase_id': '/m/0d4v4'}, - {'id': 146, 'name': 'Giraffe', 'freebase_id': '/m/03bk1'}, - {'id': 147, 'name': 'Lion', 'freebase_id': '/m/096mb'}, - {'id': 148, 'name': 'Tire', 'freebase_id': '/m/0h9mv'}, - {'id': 149, 'name': 'Vehicle', 'freebase_id': '/m/07yv9'}, - {'id': 150, 'name': 'Canoe', 'freebase_id': '/m/0ph39'}, - {'id': 151, 'name': 'Tie', 'freebase_id': '/m/01rkbr'}, - {'id': 152, 'name': 'Shelf', 'freebase_id': '/m/0gjbg72'}, - {'id': 153, 'name': 'Picture frame', 'freebase_id': '/m/06z37_'}, - {'id': 154, 'name': 'Printer', 'freebase_id': '/m/01m4t'}, - {'id': 155, 'name': 'Human leg', 'freebase_id': '/m/035r7c'}, - {'id': 156, 'name': 'Boat', 'freebase_id': '/m/019jd'}, - {'id': 157, 'name': 'Slow cooker', 'freebase_id': '/m/02tsc9'}, - {'id': 158, 'name': 'Croissant', 'freebase_id': '/m/015wgc'}, - {'id': 159, 'name': 'Candle', 'freebase_id': '/m/0c06p'}, - {'id': 160, 'name': 'Pancake', 'freebase_id': '/m/01dwwc'}, - {'id': 161, 'name': 'Pillow', 'freebase_id': '/m/034c16'}, - {'id': 162, 'name': 'Coin', 'freebase_id': '/m/0242l'}, - {'id': 163, 'name': 'Stretcher', 'freebase_id': '/m/02lbcq'}, - {'id': 164, 'name': 'Sandal', 'freebase_id': '/m/03nfch'}, - {'id': 165, 'name': 'Woman', 'freebase_id': '/m/03bt1vf'}, - {'id': 166, 'name': 'Stairs', 'freebase_id': '/m/01lynh'}, - {'id': 167, 'name': 'Harpsichord', 'freebase_id': '/m/03q5t'}, - {'id': 168, 'name': 'Stool', 'freebase_id': '/m/0fqt361'}, - {'id': 169, 'name': 'Bus', 'freebase_id': '/m/01bjv'}, - {'id': 170, 'name': 'Suitcase', 'freebase_id': '/m/01s55n'}, - {'id': 171, 'name': 'Human mouth', 'freebase_id': '/m/0283dt1'}, - {'id': 172, 'name': 
'Juice', 'freebase_id': '/m/01z1kdw'}, - {'id': 173, 'name': 'Skull', 'freebase_id': '/m/016m2d'}, - {'id': 174, 'name': 'Door', 'freebase_id': '/m/02dgv'}, - {'id': 175, 'name': 'Violin', 'freebase_id': '/m/07y_7'}, - {'id': 176, 'name': 'Chopsticks', 'freebase_id': '/m/01_5g'}, - {'id': 177, 'name': 'Digital clock', 'freebase_id': '/m/06_72j'}, - {'id': 178, 'name': 'Sunflower', 'freebase_id': '/m/0ftb8'}, - {'id': 179, 'name': 'Leopard', 'freebase_id': '/m/0c29q'}, - {'id': 180, 'name': 'Bell pepper', 'freebase_id': '/m/0jg57'}, - {'id': 181, 'name': 'Harbor seal', 'freebase_id': '/m/02l8p9'}, - {'id': 182, 'name': 'Snake', 'freebase_id': '/m/078jl'}, - {'id': 183, 'name': 'Sewing machine', 'freebase_id': '/m/0llzx'}, - {'id': 184, 'name': 'Goose', 'freebase_id': '/m/0dbvp'}, - {'id': 185, 'name': 'Helicopter', 'freebase_id': '/m/09ct_'}, - {'id': 186, 'name': 'Seat belt', 'freebase_id': '/m/0dkzw'}, - {'id': 187, 'name': 'Coffee cup', 'freebase_id': '/m/02p5f1q'}, - {'id': 188, 'name': 'Microwave oven', 'freebase_id': '/m/0fx9l'}, - {'id': 189, 'name': 'Hot dog', 'freebase_id': '/m/01b9xk'}, - {'id': 190, 'name': 'Countertop', 'freebase_id': '/m/0b3fp9'}, - {'id': 191, 'name': 'Serving tray', 'freebase_id': '/m/0h8n27j'}, - {'id': 192, 'name': 'Dog bed', 'freebase_id': '/m/0h8n6f9'}, - {'id': 193, 'name': 'Beer', 'freebase_id': '/m/01599'}, - {'id': 194, 'name': 'Sunglasses', 'freebase_id': '/m/017ftj'}, - {'id': 195, 'name': 'Golf ball', 'freebase_id': '/m/044r5d'}, - {'id': 196, 'name': 'Waffle', 'freebase_id': '/m/01dwsz'}, - {'id': 197, 'name': 'Palm tree', 'freebase_id': '/m/0cdl1'}, - {'id': 198, 'name': 'Trumpet', 'freebase_id': '/m/07gql'}, - {'id': 199, 'name': 'Ruler', 'freebase_id': '/m/0hdln'}, - {'id': 200, 'name': 'Helmet', 'freebase_id': '/m/0zvk5'}, - {'id': 201, 'name': 'Ladder', 'freebase_id': '/m/012w5l'}, - {'id': 202, 'name': 'Office building', 'freebase_id': '/m/021sj1'}, - {'id': 203, 'name': 'Tablet computer', 'freebase_id': '/m/0bh9flk'}, - {'id': 204, 'name': 'Toilet paper', 'freebase_id': '/m/09gtd'}, - {'id': 205, 'name': 'Pomegranate', 'freebase_id': '/m/0jwn_'}, - {'id': 206, 'name': 'Skirt', 'freebase_id': '/m/02wv6h6'}, - {'id': 207, 'name': 'Gas stove', 'freebase_id': '/m/02wv84t'}, - {'id': 208, 'name': 'Cookie', 'freebase_id': '/m/021mn'}, - {'id': 209, 'name': 'Cart', 'freebase_id': '/m/018p4k'}, - {'id': 210, 'name': 'Raven', 'freebase_id': '/m/06j2d'}, - {'id': 211, 'name': 'Egg', 'freebase_id': '/m/033cnk'}, - {'id': 212, 'name': 'Burrito', 'freebase_id': '/m/01j3zr'}, - {'id': 213, 'name': 'Goat', 'freebase_id': '/m/03fwl'}, - {'id': 214, 'name': 'Kitchen knife', 'freebase_id': '/m/058qzx'}, - {'id': 215, 'name': 'Skateboard', 'freebase_id': '/m/06_fw'}, - {'id': 216, 'name': 'Salt and pepper shakers', 'freebase_id': '/m/02x8cch'}, - {'id': 217, 'name': 'Lynx', 'freebase_id': '/m/04g2r'}, - {'id': 218, 'name': 'Boot', 'freebase_id': '/m/01b638'}, - {'id': 219, 'name': 'Platter', 'freebase_id': '/m/099ssp'}, - {'id': 220, 'name': 'Ski', 'freebase_id': '/m/071p9'}, - {'id': 221, 'name': 'Swimwear', 'freebase_id': '/m/01gkx_'}, - {'id': 222, 'name': 'Swimming pool', 'freebase_id': '/m/0b_rs'}, - {'id': 223, 'name': 'Drinking straw', 'freebase_id': '/m/03v5tg'}, - {'id': 224, 'name': 'Wrench', 'freebase_id': '/m/01j5ks'}, - {'id': 225, 'name': 'Drum', 'freebase_id': '/m/026t6'}, - {'id': 226, 'name': 'Ant', 'freebase_id': '/m/0_k2'}, - {'id': 227, 'name': 'Human ear', 'freebase_id': '/m/039xj_'}, - {'id': 228, 'name': 'Headphones', 'freebase_id': 
'/m/01b7fy'}, - {'id': 229, 'name': 'Fountain', 'freebase_id': '/m/0220r2'}, - {'id': 230, 'name': 'Bird', 'freebase_id': '/m/015p6'}, - {'id': 231, 'name': 'Jeans', 'freebase_id': '/m/0fly7'}, - {'id': 232, 'name': 'Television', 'freebase_id': '/m/07c52'}, - {'id': 233, 'name': 'Crab', 'freebase_id': '/m/0n28_'}, - {'id': 234, 'name': 'Microphone', 'freebase_id': '/m/0hg7b'}, - {'id': 235, 'name': 'Home appliance', 'freebase_id': '/m/019dx1'}, - {'id': 236, 'name': 'Snowplow', 'freebase_id': '/m/04vv5k'}, - {'id': 237, 'name': 'Beetle', 'freebase_id': '/m/020jm'}, - {'id': 238, 'name': 'Artichoke', 'freebase_id': '/m/047v4b'}, - {'id': 239, 'name': 'Jet ski', 'freebase_id': '/m/01xs3r'}, - {'id': 240, 'name': 'Stationary bicycle', 'freebase_id': '/m/03kt2w'}, - {'id': 241, 'name': 'Human hair', 'freebase_id': '/m/03q69'}, - {'id': 242, 'name': 'Brown bear', 'freebase_id': '/m/01dxs'}, - {'id': 243, 'name': 'Starfish', 'freebase_id': '/m/01h8tj'}, - {'id': 244, 'name': 'Fork', 'freebase_id': '/m/0dt3t'}, - {'id': 245, 'name': 'Lobster', 'freebase_id': '/m/0cjq5'}, - {'id': 246, 'name': 'Corded phone', 'freebase_id': '/m/0h8lkj8'}, - {'id': 247, 'name': 'Drink', 'freebase_id': '/m/0271t'}, - {'id': 248, 'name': 'Saucer', 'freebase_id': '/m/03q5c7'}, - {'id': 249, 'name': 'Carrot', 'freebase_id': '/m/0fj52s'}, - {'id': 250, 'name': 'Insect', 'freebase_id': '/m/03vt0'}, - {'id': 251, 'name': 'Clock', 'freebase_id': '/m/01x3z'}, - {'id': 252, 'name': 'Castle', 'freebase_id': '/m/0d5gx'}, - {'id': 253, 'name': 'Tennis racket', 'freebase_id': '/m/0h8my_4'}, - {'id': 254, 'name': 'Ceiling fan', 'freebase_id': '/m/03ldnb'}, - {'id': 255, 'name': 'Asparagus', 'freebase_id': '/m/0cjs7'}, - {'id': 256, 'name': 'Jaguar', 'freebase_id': '/m/0449p'}, - {'id': 257, 'name': 'Musical instrument', 'freebase_id': '/m/04szw'}, - {'id': 258, 'name': 'Train', 'freebase_id': '/m/07jdr'}, - {'id': 259, 'name': 'Cat', 'freebase_id': '/m/01yrx'}, - {'id': 260, 'name': 'Rifle', 'freebase_id': '/m/06c54'}, - {'id': 261, 'name': 'Dumbbell', 'freebase_id': '/m/04h8sr'}, - {'id': 262, 'name': 'Mobile phone', 'freebase_id': '/m/050k8'}, - {'id': 263, 'name': 'Taxi', 'freebase_id': '/m/0pg52'}, - {'id': 264, 'name': 'Shower', 'freebase_id': '/m/02f9f_'}, - {'id': 265, 'name': 'Pitcher', 'freebase_id': '/m/054fyh'}, - {'id': 266, 'name': 'Lemon', 'freebase_id': '/m/09k_b'}, - {'id': 267, 'name': 'Invertebrate', 'freebase_id': '/m/03xxp'}, - {'id': 268, 'name': 'Turkey', 'freebase_id': '/m/0jly1'}, - {'id': 269, 'name': 'High heels', 'freebase_id': '/m/06k2mb'}, - {'id': 270, 'name': 'Bust', 'freebase_id': '/m/04yqq2'}, - {'id': 271, 'name': 'Elephant', 'freebase_id': '/m/0bwd_0j'}, - {'id': 272, 'name': 'Scarf', 'freebase_id': '/m/02h19r'}, - {'id': 273, 'name': 'Barrel', 'freebase_id': '/m/02zn6n'}, - {'id': 274, 'name': 'Trombone', 'freebase_id': '/m/07c6l'}, - {'id': 275, 'name': 'Pumpkin', 'freebase_id': '/m/05zsy'}, - {'id': 276, 'name': 'Box', 'freebase_id': '/m/025dyy'}, - {'id': 277, 'name': 'Tomato', 'freebase_id': '/m/07j87'}, - {'id': 278, 'name': 'Frog', 'freebase_id': '/m/09ld4'}, - {'id': 279, 'name': 'Bidet', 'freebase_id': '/m/01vbnl'}, - {'id': 280, 'name': 'Human face', 'freebase_id': '/m/0dzct'}, - {'id': 281, 'name': 'Houseplant', 'freebase_id': '/m/03fp41'}, - {'id': 282, 'name': 'Van', 'freebase_id': '/m/0h2r6'}, - {'id': 283, 'name': 'Shark', 'freebase_id': '/m/0by6g'}, - {'id': 284, 'name': 'Ice cream', 'freebase_id': '/m/0cxn2'}, - {'id': 285, 'name': 'Swim cap', 'freebase_id': '/m/04tn4x'}, - 
{'id': 286, 'name': 'Falcon', 'freebase_id': '/m/0f6wt'}, - {'id': 287, 'name': 'Ostrich', 'freebase_id': '/m/05n4y'}, - {'id': 288, 'name': 'Handgun', 'freebase_id': '/m/0gxl3'}, - {'id': 289, 'name': 'Whiteboard', 'freebase_id': '/m/02d9qx'}, - {'id': 290, 'name': 'Lizard', 'freebase_id': '/m/04m9y'}, - {'id': 291, 'name': 'Pasta', 'freebase_id': '/m/05z55'}, - {'id': 292, 'name': 'Snowmobile', 'freebase_id': '/m/01x3jk'}, - {'id': 293, 'name': 'Light bulb', 'freebase_id': '/m/0h8l4fh'}, - {'id': 294, 'name': 'Window blind', 'freebase_id': '/m/031b6r'}, - {'id': 295, 'name': 'Muffin', 'freebase_id': '/m/01tcjp'}, - {'id': 296, 'name': 'Pretzel', 'freebase_id': '/m/01f91_'}, - {'id': 297, 'name': 'Computer monitor', 'freebase_id': '/m/02522'}, - {'id': 298, 'name': 'Horn', 'freebase_id': '/m/0319l'}, - {'id': 299, 'name': 'Furniture', 'freebase_id': '/m/0c_jw'}, - {'id': 300, 'name': 'Sandwich', 'freebase_id': '/m/0l515'}, - {'id': 301, 'name': 'Fox', 'freebase_id': '/m/0306r'}, - {'id': 302, 'name': 'Convenience store', 'freebase_id': '/m/0crjs'}, - {'id': 303, 'name': 'Fish', 'freebase_id': '/m/0ch_cf'}, - {'id': 304, 'name': 'Fruit', 'freebase_id': '/m/02xwb'}, - {'id': 305, 'name': 'Earrings', 'freebase_id': '/m/01r546'}, - {'id': 306, 'name': 'Curtain', 'freebase_id': '/m/03rszm'}, - {'id': 307, 'name': 'Grape', 'freebase_id': '/m/0388q'}, - {'id': 308, 'name': 'Sofa bed', 'freebase_id': '/m/03m3pdh'}, - {'id': 309, 'name': 'Horse', 'freebase_id': '/m/03k3r'}, - {'id': 310, 'name': 'Luggage and bags', 'freebase_id': '/m/0hf58v5'}, - {'id': 311, 'name': 'Desk', 'freebase_id': '/m/01y9k5'}, - {'id': 312, 'name': 'Crutch', 'freebase_id': '/m/05441v'}, - {'id': 313, 'name': 'Bicycle helmet', 'freebase_id': '/m/03p3bw'}, - {'id': 314, 'name': 'Tick', 'freebase_id': '/m/0175cv'}, - {'id': 315, 'name': 'Airplane', 'freebase_id': '/m/0cmf2'}, - {'id': 316, 'name': 'Canary', 'freebase_id': '/m/0ccs93'}, - {'id': 317, 'name': 'Spatula', 'freebase_id': '/m/02d1br'}, - {'id': 318, 'name': 'Watch', 'freebase_id': '/m/0gjkl'}, - {'id': 319, 'name': 'Lily', 'freebase_id': '/m/0jqgx'}, - {'id': 320, 'name': 'Kitchen appliance', 'freebase_id': '/m/0h99cwc'}, - {'id': 321, 'name': 'Filing cabinet', 'freebase_id': '/m/047j0r'}, - {'id': 322, 'name': 'Aircraft', 'freebase_id': '/m/0k5j'}, - {'id': 323, 'name': 'Cake stand', 'freebase_id': '/m/0h8n6ft'}, - {'id': 324, 'name': 'Candy', 'freebase_id': '/m/0gm28'}, - {'id': 325, 'name': 'Sink', 'freebase_id': '/m/0130jx'}, - {'id': 326, 'name': 'Mouse', 'freebase_id': '/m/04rmv'}, - {'id': 327, 'name': 'Wine', 'freebase_id': '/m/081qc'}, - {'id': 328, 'name': 'Wheelchair', 'freebase_id': '/m/0qmmr'}, - {'id': 329, 'name': 'Goldfish', 'freebase_id': '/m/03fj2'}, - {'id': 330, 'name': 'Refrigerator', 'freebase_id': '/m/040b_t'}, - {'id': 331, 'name': 'French fries', 'freebase_id': '/m/02y6n'}, - {'id': 332, 'name': 'Drawer', 'freebase_id': '/m/0fqfqc'}, - {'id': 333, 'name': 'Treadmill', 'freebase_id': '/m/030610'}, - {'id': 334, 'name': 'Picnic basket', 'freebase_id': '/m/07kng9'}, - {'id': 335, 'name': 'Dice', 'freebase_id': '/m/029b3'}, - {'id': 336, 'name': 'Cabbage', 'freebase_id': '/m/0fbw6'}, - {'id': 337, 'name': 'Football helmet', 'freebase_id': '/m/07qxg_'}, - {'id': 338, 'name': 'Pig', 'freebase_id': '/m/068zj'}, - {'id': 339, 'name': 'Person', 'freebase_id': '/m/01g317'}, - {'id': 340, 'name': 'Shorts', 'freebase_id': '/m/01bfm9'}, - {'id': 341, 'name': 'Gondola', 'freebase_id': '/m/02068x'}, - {'id': 342, 'name': 'Honeycomb', 'freebase_id': 
'/m/0fz0h'}, - {'id': 343, 'name': 'Doughnut', 'freebase_id': '/m/0jy4k'}, - {'id': 344, 'name': 'Chest of drawers', 'freebase_id': '/m/05kyg_'}, - {'id': 345, 'name': 'Land vehicle', 'freebase_id': '/m/01prls'}, - {'id': 346, 'name': 'Bat', 'freebase_id': '/m/01h44'}, - {'id': 347, 'name': 'Monkey', 'freebase_id': '/m/08pbxl'}, - {'id': 348, 'name': 'Dagger', 'freebase_id': '/m/02gzp'}, - {'id': 349, 'name': 'Tableware', 'freebase_id': '/m/04brg2'}, - {'id': 350, 'name': 'Human foot', 'freebase_id': '/m/031n1'}, - {'id': 351, 'name': 'Mug', 'freebase_id': '/m/02jvh9'}, - {'id': 352, 'name': 'Alarm clock', 'freebase_id': '/m/046dlr'}, - {'id': 353, 'name': 'Pressure cooker', 'freebase_id': '/m/0h8ntjv'}, - {'id': 354, 'name': 'Human hand', 'freebase_id': '/m/0k65p'}, - {'id': 355, 'name': 'Tortoise', 'freebase_id': '/m/011k07'}, - {'id': 356, 'name': 'Baseball glove', 'freebase_id': '/m/03grzl'}, - {'id': 357, 'name': 'Sword', 'freebase_id': '/m/06y5r'}, - {'id': 358, 'name': 'Pear', 'freebase_id': '/m/061_f'}, - {'id': 359, 'name': 'Miniskirt', 'freebase_id': '/m/01cmb2'}, - {'id': 360, 'name': 'Traffic sign', 'freebase_id': '/m/01mqdt'}, - {'id': 361, 'name': 'Girl', 'freebase_id': '/m/05r655'}, - {'id': 362, 'name': 'Roller skates', 'freebase_id': '/m/02p3w7d'}, - {'id': 363, 'name': 'Dinosaur', 'freebase_id': '/m/029tx'}, - {'id': 364, 'name': 'Porch', 'freebase_id': '/m/04m6gz'}, - {'id': 365, 'name': 'Human beard', 'freebase_id': '/m/015h_t'}, - {'id': 366, 'name': 'Submarine sandwich', 'freebase_id': '/m/06pcq'}, - {'id': 367, 'name': 'Screwdriver', 'freebase_id': '/m/01bms0'}, - {'id': 368, 'name': 'Strawberry', 'freebase_id': '/m/07fbm7'}, - {'id': 369, 'name': 'Wine glass', 'freebase_id': '/m/09tvcd'}, - {'id': 370, 'name': 'Seafood', 'freebase_id': '/m/06nwz'}, - {'id': 371, 'name': 'Racket', 'freebase_id': '/m/0dv9c'}, - {'id': 372, 'name': 'Wheel', 'freebase_id': '/m/083wq'}, - {'id': 373, 'name': 'Sea lion', 'freebase_id': '/m/0gd36'}, - {'id': 374, 'name': 'Toy', 'freebase_id': '/m/0138tl'}, - {'id': 375, 'name': 'Tea', 'freebase_id': '/m/07clx'}, - {'id': 376, 'name': 'Tennis ball', 'freebase_id': '/m/05ctyq'}, - {'id': 377, 'name': 'Waste container', 'freebase_id': '/m/0bjyj5'}, - {'id': 378, 'name': 'Mule', 'freebase_id': '/m/0dbzx'}, - {'id': 379, 'name': 'Cricket ball', 'freebase_id': '/m/02ctlc'}, - {'id': 380, 'name': 'Pineapple', 'freebase_id': '/m/0fp6w'}, - {'id': 381, 'name': 'Coconut', 'freebase_id': '/m/0djtd'}, - {'id': 382, 'name': 'Doll', 'freebase_id': '/m/0167gd'}, - {'id': 383, 'name': 'Coffee table', 'freebase_id': '/m/078n6m'}, - {'id': 384, 'name': 'Snowman', 'freebase_id': '/m/0152hh'}, - {'id': 385, 'name': 'Lavender', 'freebase_id': '/m/04gth'}, - {'id': 386, 'name': 'Shrimp', 'freebase_id': '/m/0ll1f78'}, - {'id': 387, 'name': 'Maple', 'freebase_id': '/m/0cffdh'}, - {'id': 388, 'name': 'Cowboy hat', 'freebase_id': '/m/025rp__'}, - {'id': 389, 'name': 'Goggles', 'freebase_id': '/m/02_n6y'}, - {'id': 390, 'name': 'Rugby ball', 'freebase_id': '/m/0wdt60w'}, - {'id': 391, 'name': 'Caterpillar', 'freebase_id': '/m/0cydv'}, - {'id': 392, 'name': 'Poster', 'freebase_id': '/m/01n5jq'}, - {'id': 393, 'name': 'Rocket', 'freebase_id': '/m/09rvcxw'}, - {'id': 394, 'name': 'Organ', 'freebase_id': '/m/013y1f'}, - {'id': 395, 'name': 'Saxophone', 'freebase_id': '/m/06ncr'}, - {'id': 396, 'name': 'Traffic light', 'freebase_id': '/m/015qff'}, - {'id': 397, 'name': 'Cocktail', 'freebase_id': '/m/024g6'}, - {'id': 398, 'name': 'Plastic bag', 'freebase_id': 
'/m/05gqfk'}, - {'id': 399, 'name': 'Squash', 'freebase_id': '/m/0dv77'}, - {'id': 400, 'name': 'Mushroom', 'freebase_id': '/m/052sf'}, - {'id': 401, 'name': 'Hamburger', 'freebase_id': '/m/0cdn1'}, - {'id': 402, 'name': 'Light switch', 'freebase_id': '/m/03jbxj'}, - {'id': 403, 'name': 'Parachute', 'freebase_id': '/m/0cyfs'}, - {'id': 404, 'name': 'Teddy bear', 'freebase_id': '/m/0kmg4'}, - {'id': 405, 'name': 'Winter melon', 'freebase_id': '/m/02cvgx'}, - {'id': 406, 'name': 'Deer', 'freebase_id': '/m/09kx5'}, - {'id': 407, 'name': 'Musical keyboard', 'freebase_id': '/m/057cc'}, - {'id': 408, 'name': 'Plumbing fixture', 'freebase_id': '/m/02pkr5'}, - {'id': 409, 'name': 'Scoreboard', 'freebase_id': '/m/057p5t'}, - {'id': 410, 'name': 'Baseball bat', 'freebase_id': '/m/03g8mr'}, - {'id': 411, 'name': 'Envelope', 'freebase_id': '/m/0frqm'}, - {'id': 412, 'name': 'Adhesive tape', 'freebase_id': '/m/03m3vtv'}, - {'id': 413, 'name': 'Briefcase', 'freebase_id': '/m/0584n8'}, - {'id': 414, 'name': 'Paddle', 'freebase_id': '/m/014y4n'}, - {'id': 415, 'name': 'Bow and arrow', 'freebase_id': '/m/01g3x7'}, - {'id': 416, 'name': 'Telephone', 'freebase_id': '/m/07cx4'}, - {'id': 417, 'name': 'Sheep', 'freebase_id': '/m/07bgp'}, - {'id': 418, 'name': 'Jacket', 'freebase_id': '/m/032b3c'}, - {'id': 419, 'name': 'Boy', 'freebase_id': '/m/01bl7v'}, - {'id': 420, 'name': 'Pizza', 'freebase_id': '/m/0663v'}, - {'id': 421, 'name': 'Otter', 'freebase_id': '/m/0cn6p'}, - {'id': 422, 'name': 'Office supplies', 'freebase_id': '/m/02rdsp'}, - {'id': 423, 'name': 'Couch', 'freebase_id': '/m/02crq1'}, - {'id': 424, 'name': 'Cello', 'freebase_id': '/m/01xqw'}, - {'id': 425, 'name': 'Bull', 'freebase_id': '/m/0cnyhnx'}, - {'id': 426, 'name': 'Camel', 'freebase_id': '/m/01x_v'}, - {'id': 427, 'name': 'Ball', 'freebase_id': '/m/018xm'}, - {'id': 428, 'name': 'Duck', 'freebase_id': '/m/09ddx'}, - {'id': 429, 'name': 'Whale', 'freebase_id': '/m/084zz'}, - {'id': 430, 'name': 'Shirt', 'freebase_id': '/m/01n4qj'}, - {'id': 431, 'name': 'Tank', 'freebase_id': '/m/07cmd'}, - {'id': 432, 'name': 'Motorcycle', 'freebase_id': '/m/04_sv'}, - {'id': 433, 'name': 'Accordion', 'freebase_id': '/m/0mkg'}, - {'id': 434, 'name': 'Owl', 'freebase_id': '/m/09d5_'}, - {'id': 435, 'name': 'Porcupine', 'freebase_id': '/m/0c568'}, - {'id': 436, 'name': 'Sun hat', 'freebase_id': '/m/02wbtzl'}, - {'id': 437, 'name': 'Nail', 'freebase_id': '/m/05bm6'}, - {'id': 438, 'name': 'Scissors', 'freebase_id': '/m/01lsmm'}, - {'id': 439, 'name': 'Swan', 'freebase_id': '/m/0dftk'}, - {'id': 440, 'name': 'Lamp', 'freebase_id': '/m/0dtln'}, - {'id': 441, 'name': 'Crown', 'freebase_id': '/m/0nl46'}, - {'id': 442, 'name': 'Piano', 'freebase_id': '/m/05r5c'}, - {'id': 443, 'name': 'Sculpture', 'freebase_id': '/m/06msq'}, - {'id': 444, 'name': 'Cheetah', 'freebase_id': '/m/0cd4d'}, - {'id': 445, 'name': 'Oboe', 'freebase_id': '/m/05kms'}, - {'id': 446, 'name': 'Tin can', 'freebase_id': '/m/02jnhm'}, - {'id': 447, 'name': 'Mango', 'freebase_id': '/m/0fldg'}, - {'id': 448, 'name': 'Tripod', 'freebase_id': '/m/073bxn'}, - {'id': 449, 'name': 'Oven', 'freebase_id': '/m/029bxz'}, - {'id': 450, 'name': 'Mouse', 'freebase_id': '/m/020lf'}, - {'id': 451, 'name': 'Barge', 'freebase_id': '/m/01btn'}, - {'id': 452, 'name': 'Coffee', 'freebase_id': '/m/02vqfm'}, - {'id': 453, 'name': 'Snowboard', 'freebase_id': '/m/06__v'}, - {'id': 454, 'name': 'Common fig', 'freebase_id': '/m/043nyj'}, - {'id': 455, 'name': 'Salad', 'freebase_id': '/m/0grw1'}, - {'id': 456, 'name': 
'Marine invertebrates', 'freebase_id': '/m/03hl4l9'}, - {'id': 457, 'name': 'Umbrella', 'freebase_id': '/m/0hnnb'}, - {'id': 458, 'name': 'Kangaroo', 'freebase_id': '/m/04c0y'}, - {'id': 459, 'name': 'Human arm', 'freebase_id': '/m/0dzf4'}, - {'id': 460, 'name': 'Measuring cup', 'freebase_id': '/m/07v9_z'}, - {'id': 461, 'name': 'Snail', 'freebase_id': '/m/0f9_l'}, - {'id': 462, 'name': 'Loveseat', 'freebase_id': '/m/0703r8'}, - {'id': 463, 'name': 'Suit', 'freebase_id': '/m/01xyhv'}, - {'id': 464, 'name': 'Teapot', 'freebase_id': '/m/01fh4r'}, - {'id': 465, 'name': 'Bottle', 'freebase_id': '/m/04dr76w'}, - {'id': 466, 'name': 'Alpaca', 'freebase_id': '/m/0pcr'}, - {'id': 467, 'name': 'Kettle', 'freebase_id': '/m/03s_tn'}, - {'id': 468, 'name': 'Trousers', 'freebase_id': '/m/07mhn'}, - {'id': 469, 'name': 'Popcorn', 'freebase_id': '/m/01hrv5'}, - {'id': 470, 'name': 'Centipede', 'freebase_id': '/m/019h78'}, - {'id': 471, 'name': 'Spider', 'freebase_id': '/m/09kmb'}, - {'id': 472, 'name': 'Sparrow', 'freebase_id': '/m/0h23m'}, - {'id': 473, 'name': 'Plate', 'freebase_id': '/m/050gv4'}, - {'id': 474, 'name': 'Bagel', 'freebase_id': '/m/01fb_0'}, - {'id': 475, 'name': 'Personal care', 'freebase_id': '/m/02w3_ws'}, - {'id': 476, 'name': 'Apple', 'freebase_id': '/m/014j1m'}, - {'id': 477, 'name': 'Brassiere', 'freebase_id': '/m/01gmv2'}, - {'id': 478, 'name': 'Bathroom cabinet', 'freebase_id': '/m/04y4h8h'}, - {'id': 479, 'name': 'studio couch', 'freebase_id': '/m/026qbn5'}, - {'id': 480, 'name': 'Computer keyboard', 'freebase_id': '/m/01m2v'}, - {'id': 481, 'name': 'Table tennis racket', 'freebase_id': '/m/05_5p_0'}, - {'id': 482, 'name': 'Sushi', 'freebase_id': '/m/07030'}, - {'id': 483, 'name': 'Cabinetry', 'freebase_id': '/m/01s105'}, - {'id': 484, 'name': 'Street light', 'freebase_id': '/m/033rq4'}, - {'id': 485, 'name': 'Towel', 'freebase_id': '/m/0162_1'}, - {'id': 486, 'name': 'Nightstand', 'freebase_id': '/m/02z51p'}, - {'id': 487, 'name': 'Rabbit', 'freebase_id': '/m/06mf6'}, - {'id': 488, 'name': 'Dolphin', 'freebase_id': '/m/02hj4'}, - {'id': 489, 'name': 'Dog', 'freebase_id': '/m/0bt9lr'}, - {'id': 490, 'name': 'Jug', 'freebase_id': '/m/08hvt4'}, - {'id': 491, 'name': 'Wok', 'freebase_id': '/m/084rd'}, - {'id': 492, 'name': 'Fire hydrant', 'freebase_id': '/m/01pns0'}, - {'id': 493, 'name': 'Human eye', 'freebase_id': '/m/014sv8'}, - {'id': 494, 'name': 'Skyscraper', 'freebase_id': '/m/079cl'}, - {'id': 495, 'name': 'Backpack', 'freebase_id': '/m/01940j'}, - {'id': 496, 'name': 'Potato', 'freebase_id': '/m/05vtc'}, - {'id': 497, 'name': 'Paper towel', 'freebase_id': '/m/02w3r3'}, - {'id': 498, 'name': 'Lifejacket', 'freebase_id': '/m/054xkw'}, - {'id': 499, 'name': 'Bicycle wheel', 'freebase_id': '/m/01bqk0'}, - {'id': 500, 'name': 'Toilet', 'freebase_id': '/m/09g1w'}, -] - - -def _get_builtin_metadata(cats): - id_to_name = {x['id']: x['name'] for x in cats} - thing_dataset_id_to_contiguous_id = {i + 1: i for i in range(len(cats))} - thing_classes = [x['name'] for x in sorted(cats, key=lambda x: x['id'])] - return { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes} - -_PREDEFINED_SPLITS_OID = { - # cat threshold: 500, 1500: r 170, c 151, f 179 - "oid_train": ("oid/images/", "oid/annotations/oid_challenge_2019_train_bbox.json"), - # "expanded" duplicates annotations to their father classes based on the official - # hierarchy. This is used in the official evaulation protocol. 
- # https://storage.googleapis.com/openimages/web/evaluation.html - "oid_val_expanded": ("oid/images/validation/", "oid/annotations/oid_challenge_2019_val_expanded.json"), - "oid_val_expanded_rare": ("oid/images/validation/", "oid/annotations/oid_challenge_2019_val_expanded_rare.json"), -} - - -for key, (image_root, json_file) in _PREDEFINED_SPLITS_OID.items(): - register_oid_instances( - key, - _get_builtin_metadata(categories), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/DragGan/DragGan/stylegan_human/training/__init__.py b/spaces/DragGan/DragGan/stylegan_human/training/__init__.py deleted file mode 100644 index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/training/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/EPFL-VILAB/MultiMAE/dpt/__init__.py b/spaces/EPFL-VILAB/MultiMAE/dpt/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/EronSamez/RVC_HFmeu/tools/dlmodels.bat b/spaces/EronSamez/RVC_HFmeu/tools/dlmodels.bat deleted file mode 100644 index 5d80f50369b1f3ed37c045d07a9e2ce8954f09d4..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/tools/dlmodels.bat +++ /dev/null @@ -1,348 +0,0 @@ -@echo off && chcp 65001 - -echo working dir is %cd% -echo downloading requirement aria2 check. -echo= -dir /a:d/b | findstr "aria2" > flag.txt -findstr "aria2" flag.txt >nul -if %errorlevel% ==0 ( - echo aria2 checked. - echo= -) else ( - echo failed. please downloading aria2 from webpage! - echo unzip it and put in this directory! - timeout /T 5 - start https://github.com/aria2/aria2/releases/tag/release-1.36.0 - echo= - goto end -) - -echo envfiles checking start. 
-echo= - -for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch -:endSch - -set d32=f0D32k.pth -set d40=f0D40k.pth -set d48=f0D48k.pth -set g32=f0G32k.pth -set g40=f0G40k.pth -set g48=f0G48k.pth - -set d40v2=f0D40k.pth -set g40v2=f0G40k.pth - -set dld32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -set dld40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -set dld48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -set dlg32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -set dlg40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -set dlg48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth - -set dld40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -set dlg40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth - -set hp2_all=HP2_all_vocals.pth -set hp3_all=HP3_all_vocals.pth -set hp5_only=HP5_only_main_vocal.pth -set VR_DeEchoAggressive=VR-DeEchoAggressive.pth -set VR_DeEchoDeReverb=VR-DeEchoDeReverb.pth -set VR_DeEchoNormal=VR-DeEchoNormal.pth -set onnx_dereverb=vocals.onnx - -set dlhp2_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth -set dlhp3_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth -set dlhp5_only=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth -set dlVR_DeEchoAggressive=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth -set dlVR_DeEchoDeReverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth -set dlVR_DeEchoNormal=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth -set dlonnx_dereverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx - -set hb=hubert_base.pt - -set dlhb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt - -echo dir check start. -echo= - -if exist "%~dp0assets\pretrained" ( - echo dir .\assets\pretrained checked. - ) else ( - echo failed. generating dir .\assets\pretrained. - mkdir pretrained - ) -if exist "%~dp0assets\pretrained_v2" ( - echo dir .\assets\pretrained_v2 checked. - ) else ( - echo failed. generating dir .\assets\pretrained_v2. - mkdir pretrained_v2 - ) -if exist "%~dp0assets\uvr5_weights" ( - echo dir .\assets\uvr5_weights checked. - ) else ( - echo failed. generating dir .\assets\uvr5_weights. - mkdir uvr5_weights - ) -if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy" ( - echo dir .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy checked. - ) else ( - echo failed. generating dir .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy. - mkdir uvr5_weights\onnx_dereverb_By_FoxJoy - ) - -echo= -echo dir check finished. - -echo= -echo required files check start. - -echo checking D32k.pth -if exist "%~dp0assets\pretrained\D32k.pth" ( - echo D32k.pth in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0assets\pretrained -o D32k.pth - if exist "%~dp0assets\pretrained\D32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D40k.pth -if exist "%~dp0assets\pretrained\D40k.pth" ( - echo D40k.pth in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0assets\pretrained -o D40k.pth - if exist "%~dp0assets\pretrained\D40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D40k.pth -if exist "%~dp0assets\pretrained_v2\D40k.pth" ( - echo D40k.pth in .\assets\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d %~dp0assets\pretrained_v2 -o D40k.pth - if exist "%~dp0assets\pretrained_v2\D40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D48k.pth -if exist "%~dp0assets\pretrained\D48k.pth" ( - echo D48k.pth in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0assets\pretrained -o D48k.pth - if exist "%~dp0assets\pretrained\D48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G32k.pth -if exist "%~dp0assets\pretrained\G32k.pth" ( - echo G32k.pth in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0assets\pretrained -o G32k.pth - if exist "%~dp0assets\pretrained\G32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G40k.pth -if exist "%~dp0assets\pretrained\G40k.pth" ( - echo G40k.pth in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0assets\pretrained -o G40k.pth - if exist "%~dp0assets\pretrained\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G40k.pth -if exist "%~dp0assets\pretrained_v2\G40k.pth" ( - echo G40k.pth in .\assets\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d %~dp0assets\pretrained_v2 -o G40k.pth - if exist "%~dp0assets\pretrained_v2\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G48k.pth -if exist "%~dp0assets\pretrained\G48k.pth" ( - echo G48k.pth in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0assets\pretrained -o G48k.pth - if exist "%~dp0assets\pretrained\G48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %d32% -if exist "%~dp0assets\pretrained\%d32%" ( - echo %d32% in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0assets\pretrained -o %d32% - if exist "%~dp0assets\pretrained\%d32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40% -if exist "%~dp0assets\pretrained\%d40%" ( - echo %d40% in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0assets\pretrained -o %d40% - if exist "%~dp0assets\pretrained\%d40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40v2% -if exist "%~dp0assets\pretrained_v2\%d40v2%" ( - echo %d40v2% in .\assets\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40v2% -d %~dp0assets\pretrained_v2 -o %d40v2% - if exist "%~dp0assets\pretrained_v2\%d40v2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d48% -if exist "%~dp0assets\pretrained\%d48%" ( - echo %d48% in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0assets\pretrained -o %d48% - if exist "%~dp0assets\pretrained\%d48%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g32% -if exist "%~dp0assets\pretrained\%g32%" ( - echo %g32% in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0assets\pretrained -o %g32% - if exist "%~dp0assets\pretrained\%g32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40% -if exist "%~dp0assets\pretrained\%g40%" ( - echo %g40% in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0assets\pretrained -o %g40% - if exist "%~dp0assets\pretrained\%g40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40v2% -if exist "%~dp0assets\pretrained_v2\%g40v2%" ( - echo %g40v2% in .\assets\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40v2% -d %~dp0assets\pretrained_v2 -o %g40v2% - if exist "%~dp0assets\pretrained_v2\%g40v2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g48% -if exist "%~dp0assets\pretrained\%g48%" ( - echo %g48% in .\assets\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0assets\pretrained -o %g48% - if exist "%~dp0assets\pretrained\%g48%" (echo download successful.) 
else (echo please try again! - echo=) - ) - -echo checking %hp2_all% -if exist "%~dp0assets\uvr5_weights\%hp2_all%" ( - echo %hp2_all% in .\assets\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2_all% -d %~dp0assets\uvr5_weights -o %hp2_all% - if exist "%~dp0assets\uvr5_weights\%hp2_all%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp3_all% -if exist "%~dp0assets\uvr5_weights\%hp3_all%" ( - echo %hp3_all% in .\assets\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp3_all% -d %~dp0assets\uvr5_weights -o %hp3_all% - if exist "%~dp0assets\uvr5_weights\%hp3_all%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp5_only% -if exist "%~dp0assets\uvr5_weights\%hp5_only%" ( - echo %hp5_only% in .\assets\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5_only% -d %~dp0assets\uvr5_weights -o %hp5_only% - if exist "%~dp0assets\uvr5_weights\%hp5_only%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoAggressive% -if exist "%~dp0assets\uvr5_weights\%VR_DeEchoAggressive%" ( - echo %VR_DeEchoAggressive% in .\assets\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoAggressive% -d %~dp0assets\uvr5_weights -o %VR_DeEchoAggressive% - if exist "%~dp0assets\uvr5_weights\%VR_DeEchoAggressive%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoDeReverb% -if exist "%~dp0assets\uvr5_weights\%VR_DeEchoDeReverb%" ( - echo %VR_DeEchoDeReverb% in .\assets\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoDeReverb% -d %~dp0assets\uvr5_weights -o %VR_DeEchoDeReverb% - if exist "%~dp0assets\uvr5_weights\%VR_DeEchoDeReverb%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoNormal% -if exist "%~dp0assets\uvr5_weights\%VR_DeEchoNormal%" ( - echo %VR_DeEchoNormal% in .\assets\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoNormal% -d %~dp0assets\uvr5_weights -o %VR_DeEchoNormal% - if exist "%~dp0assets\uvr5_weights\%VR_DeEchoNormal%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %onnx_dereverb% -if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" ( - echo %onnx_dereverb% in .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlonnx_dereverb% -d %~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy -o %onnx_dereverb% - if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hb% -if exist "%~dp0assets\hubert\%hb%" ( - echo %hb% in .\assets\hubert\pretrained checked. - echo= - ) else ( - echo failed. 
starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0assets\hubert\ -o %hb% - if exist "%~dp0assets\hubert\%hb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo required files check finished. -echo envfiles check complete. -pause -:end -del flag.txt diff --git a/spaces/GXSA/bingo/cloudflare/worker.js b/spaces/GXSA/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/GXSA/bingo/src/lib/isomorphic/browser.ts b/spaces/GXSA/bingo/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/stack_three_layer_red_wall.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/stack_three_layer_red_wall.py deleted file mode 100644 index 023aa4cf13f1b9715dc7db303b81b70f691f1abe..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/stack_three_layer_red_wall.py +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class StackThreeLayerRedWall(Task): - """Build a wall by stacking blocks. The wall should consist of three layers with each layer having three red blocks aligned in a straight line.""" - - def __init__(self): - super().__init__() - self.max_steps = 15 - self.lang_template = "stack the red blocks to form a three-layer wall" - self.task_completed_desc = "done stacking blocks." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add blocks. - block_size = (0.05, 0.05, 0.03) # x, y, z dimensions for the block size - block_urdf = 'block/block_for_anchors.urdf' # URDF for the block - block_color = utils.COLORS['red'] # Color for the block - - # We need 9 blocks for a three-layer wall with each layer having three blocks. - blocks = [] - for _ in range(9): - block_pose = self.get_random_pose(env, block_size) - block_id = env.add_object(block_urdf, block_pose, color=block_color) - blocks.append(block_id) - - # Define target poses for the blocks to form a three-layer wall. - # The target poses are defined relative to a base pose. - base_pose = ((0.5, 0.0, 0.0), (0, 0, 0, 1)) - target_poses = [] - for i in range(3): # three layers - for j in range(3): # three blocks per layer - target_pos = (j * block_size[0], 0, i * block_size[2]) - target_pose = (utils.apply(base_pose, target_pos), (0, 0, 0, 1)) - target_poses.append(target_pose) - - # Goal: all blocks are stacked to form a three-layer wall. 
- self.add_goal(objs=blocks[3*i:3*(i+1)], matches=np.ones((3, 3)), targ_poses=target_poses[3*i:3*(i+1)], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 3., language_goal=self.lang_template) diff --git a/spaces/Godrose0728/sound-link/mel_processing.py b/spaces/Godrose0728/sound-link/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/Godrose0728/sound-link/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), 
int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 18daadd6a9d3024f30157aea1f1cef3e13326b5a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './ga_retinanet_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py deleted file mode 100644 index e4215a6d2d0b90f8ccd9c1291f6ca222c0ff554f..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,136 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' -# model settings -model = dict( - type='SCNet', - roi_head=dict( - _delete_=True, - type='SCNetRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - 
type='SCNetMaskHead', - num_convs=12, - in_channels=256, - conv_out_channels=256, - num_classes=80, - conv_to_res=True, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), - semantic_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[8]), - semantic_head=dict( - type='SCNetSemanticHead', - num_ins=5, - fusion_level=1, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=183, - ignore_label=255, - loss_weight=0.2, - conv_to_res=True), - glbctx_head=dict( - type='GlobalContextHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_weight=3.0, - conv_to_res=True), - feat_relay_head=dict( - type='FeatureRelayHead', - in_channels=1024, - out_conv_channels=256, - roi_feat_size=7, - scale_factor=2))) - -# uncomment below code to enable test time augmentations -# img_norm_cfg = dict( -# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# test_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict( -# type='MultiScaleFlipAug', -# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800), -# (1400, 2100)], -# flip=True, -# transforms=[ -# dict(type='Resize', keep_ratio=True), -# dict(type='RandomFlip', flip_ratio=0.5), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='Pad', size_divisor=32), -# dict(type='ImageToTensor', keys=['img']), -# dict(type='Collect', keys=['img']), -# ]) -# ] -# data = dict( -# val=dict(pipeline=test_pipeline), -# test=dict(pipeline=test_pipeline)) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py deleted file mode 100644 index d0e7e14b7e72b1151f7d7f19094430bbab64f8f0..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
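# Illustrative sketch, not part of the original file: with cfg.lr = [0.1],
# lr_shrink = 0.5 and force_anneal = 10, FixedLRSchedule.get_next_lr() below keeps
# the LR at 0.1 for epochs 1-9 and returns 0.1 * 0.5 ** (epoch + 1 - 10) from epoch 10
# onwards; during the first warmup_updates updates, step_update() additionally scales
# the LR by (num_updates + 1) / warmup_updates.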
- -from dataclasses import dataclass, field -from typing import Optional, List -from omegaconf import II - -from fairseq.dataclass import FairseqDataclass -from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler - - -@dataclass -class FixedLRScheduleConfig(FairseqDataclass): - force_anneal: Optional[int] = field( - default=None, - metadata={"help": "force annealing at specified epoch"}, - ) - lr_shrink: float = field( - default=0.1, - metadata={"help": "shrink factor for annealing, lr_new = (lr * lr_shrink)"}, - ) - warmup_updates: int = field( - default=0, - metadata={"help": "warmup the learning rate linearly for the first N updates"}, - ) - lr: List[float] = II("optimization.lr") - - -@register_lr_scheduler("fixed", dataclass=FixedLRScheduleConfig) -class FixedLRSchedule(FairseqLRScheduler): - """Decay the LR on a fixed schedule.""" - - def __init__(self, cfg: FixedLRScheduleConfig, optimizer): - super().__init__(cfg, optimizer) - - self.lr = cfg.lr[0] - if cfg.warmup_updates > 0: - self.warmup_factor = 1.0 / cfg.warmup_updates - else: - self.warmup_factor = 1 - - def state_dict(self): - return {"lr": self.lr} - - def load_state_dict(self, state_dict): - if "lr" in state_dict: - self.lr = state_dict["lr"] - - def get_next_lr(self, epoch): - lrs = self.cfg.lr - if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal: - # use fixed LR schedule - next_lr = lrs[min(epoch - 1, len(lrs) - 1)] - else: - # annneal based on lr_shrink - next_lr = lrs[-1] * self.cfg.lr_shrink ** ( - epoch + 1 - self.cfg.force_anneal - ) - return next_lr - - def step_begin_epoch(self, epoch): - """Update the learning rate at the beginning of the given epoch.""" - self.lr = self.get_next_lr(epoch) - self.optimizer.set_lr(self.warmup_factor * self.lr) - return self.optimizer.get_lr() - - def step_update(self, num_updates): - """Update the learning rate after each update.""" - if self.cfg.warmup_updates > 0 and num_updates < self.cfg.warmup_updates: - self.warmup_factor = (num_updates + 1) / float(self.cfg.warmup_updates) - self.optimizer.set_lr(self.warmup_factor * self.lr) - else: - self.optimizer.set_lr(self.lr) - return self.optimizer.get_lr() diff --git a/spaces/Harveenchadha/oiTrans/model_configs/__init__.py b/spaces/Harveenchadha/oiTrans/model_configs/__init__.py deleted file mode 100644 index 2ec41f7daeb7930e9df766abdd790c4c5b09b6d9..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/oiTrans/model_configs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import custom_transformer \ No newline at end of file diff --git a/spaces/Hazzzardous/RWKV-Instruct/app.py b/spaces/Hazzzardous/RWKV-Instruct/app.py deleted file mode 100644 index 1c64cbd3044c4e4a35872be18c2b011a461a2512..0000000000000000000000000000000000000000 --- a/spaces/Hazzzardous/RWKV-Instruct/app.py +++ /dev/null @@ -1,297 +0,0 @@ -""" -RWKV RNN Model - Gradio Space for HuggingFace -YT - Mean Gene Hacks - https://www.youtube.com/@MeanGeneHacks -(C) Gene Ruebsamen - 2/7/2023 - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program. If not, see . -""" - -import gradio as gr -import codecs -from ast import literal_eval -from datetime import datetime -from rwkvstic.load import RWKV -from config import config, title -import torch -import gc - -DEVICE = "cuda" if torch.cuda.is_available() else "cpu" - -desc = '''

        RNN with Transformer-level LLM Performance (github). - According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding."''' - -thanks = '''

        Thanks to Gururise for this template

        ''' - - -def to_md(text): - return text.replace("\n", "
        ") - - -def get_model(): - model = None - model = RWKV( - **config - ) - return model - - -model = get_model() - - - -def infer( - prompt, - mode="generative", - max_new_tokens=10, - temperature=0.1, - top_p=1.0, - stop="<|endoftext|>", - end_adj=0.0, - seed=42, -): - global model - - if model == None: - gc.collect() - if (DEVICE == "cuda"): - torch.cuda.empty_cache() - model = get_model() - - max_new_tokens = int(max_new_tokens) - temperature = float(temperature) - end_adj = float(end_adj) - top_p = float(top_p) - stop = [x.strip(' ') for x in stop.split(',')] - seed = seed - - assert 1 <= max_new_tokens <= 512 - assert 0.0 <= temperature <= 5.0 - assert 0.0 <= top_p <= 1.0 - - temperature = max(0.05, temperature) - if prompt == "": - prompt = " " - - # Clear model state for generative mode - model.resetState() - if (mode == "Q/A"): - prompt = f"\nQ: {prompt}\n\nA:" - if (mode == "ELDR"): - prompt = f"\n{prompt}\n\nExpert Long Detailed Response:\n\nHi, thanks for reaching out, we would be happy to answer your question" - if (mode == "Expert"): - prompt = f"\n{prompt}\n\nExpert Full Response:\n\nHi, thanks for reaching out, we would be happy to answer your question.\n" - if (mode == "EFA"): - prompt = f'\nAsk Expert\n\nQuestion:\n{prompt}\n\nExpert Full Answer:\n' - if (mode == "BFR"): - prompt = f"Task given:\n\n{prompt}\n\nBest Full Response:" - - print(f"PROMPT ({datetime.now()}):\n-------\n{prompt}") - print(f"OUTPUT ({datetime.now()}):\n-------\n") - # Load prompt - model.loadContext(newctx=prompt) - generated_text = "" - done = False - with torch.no_grad(): - for _ in range(max_new_tokens): - char = model.forward(stopStrings=stop, temp=temperature, top_p_usual=top_p, end_adj=end_adj)[ - "output"] - print(char, end='', flush=True) - generated_text += char - generated_text = generated_text.lstrip("\n ") - - for stop_word in stop: - stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0] - if stop_word != '' and stop_word in generated_text: - done = True - break - yield generated_text - if done: - print("\n") - break - - # print(f"{generated_text}") - - for stop_word in stop: - stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0] - if stop_word != '' and stop_word in generated_text: - generated_text = generated_text[:generated_text.find(stop_word)] - - gc.collect() - yield generated_text -username = "USER" -intro = f'''The following is a verbose and detailed conversation between an AI assistant called FRITZ, and a human user called USER. FRITZ is intelligent, knowledgeable, wise and polite. - - {username}: What year was the french revolution? - FRITZ: The French Revolution started in 1789, and lasted 10 years until 1799. - {username}: 3+5=? - FRITZ: The answer is 8. - {username}: What year did the Berlin Wall fall? - FRITZ: The Berlin wall stood for 28 years and fell in 1989. - {username}: solve for a: 9-a=2 - FRITZ: The answer is a=7, because 9-7 = 2. - {username}: wat is lhc - FRITZ: The Large Hadron Collider (LHC) is a high-energy particle collider, built by CERN, and completed in 2008. It was used to confirm the existence of the Higgs boson in 2012. - {username}: Tell me about yourself. - FRITZ: My name is Fritz. I am an RNN based Large Language Model (LLM). 
- ''' -model.resetState() -model.loadContext(newctx=intro) -chatState = model.getState() -model.resetState() -def chat( - prompt, - history, - max_new_tokens=10, - temperature=0.1, - top_p=1.0, - seed=42, -): - global model - global username - history = history or [] - - intro = "" - - if model == None: - gc.collect() - if (DEVICE == "cuda"): - torch.cuda.empty_cache() - model = get_model() - - username = username.strip() - username = username or "USER" - - - - if len(history) == 0: - # no history, so lets reset chat state - model.setState(chatState) - history = [[], model.emptyState] - print("reset chat state") - else: - if (history[0][0][0].split(':')[0] != username): - model.setState((chatState[0],chatState[1].clone())) - history = [[], model.chatState] - print("username changed, reset state") - else: - model.setState((history[1][0],history[1][1].clone())) - intro = "" - - max_new_tokens = int(max_new_tokens) - temperature = float(temperature) - top_p = float(top_p) - seed = seed - - assert 1 <= max_new_tokens <= 512 - assert 0.0 <= temperature <= 3.0 - assert 0.0 <= top_p <= 1.0 - - temperature = max(0.05, temperature) - - prompt = f"{username}: " + prompt + "\n" - print(f"CHAT ({datetime.now()}):\n-------\n{prompt}") - print(f"OUTPUT ({datetime.now()}):\n-------\n") - # Load prompt - - model.loadContext(newctx=prompt) - - out = model.forward(number=max_new_tokens, stopStrings=[ - "<|endoftext|>", username+":"], temp=temperature, top_p_usual=top_p) - - generated_text = out["output"].lstrip("\n ") - generated_text = generated_text.rstrip(username+":") - print(f"{generated_text}") - - gc.collect() - history[0].append((prompt, generated_text)) - return history[0], [history[0], out["state"]] - - -examples = [ - [ - # Question Answering - '''What is the capital of Germany?''', "Q/A", 25, 0.2, 1.0, "<|endoftext|>"], - [ - # Question Answering - '''Are humans good or bad?''', "Q/A", 150, 0.8, 0.8, "<|endoftext|>"], - [ - # Question Answering - '''What is the purpose of Vitamin A?''', "Q/A", 50, 0.2, 0.8, "<|endoftext|>"], - [ - # Chatbot - '''This is a conversation between two AI large language models named Alex and Fritz. They are exploring each other's capabilities, and trying to ask interesting questions of one another to explore the limits of each others AI. - -Conversation: -Alex: Good morning, Fritz, what type of LLM are you based upon? -Fritz: Morning Alex, I am an RNN with transformer level performance. My language model is 100% attention free. -Alex:''', "generative", 220, 0.9, 0.9, "\\n\\n,<|endoftext|>"], - [ - # Generate List - '''Task given: - -Please Write a Short story about a cat learning python - -Best Full Response: -''', "generative", 140, 0.85, 0.8, "<|endoftext|>"], - [ - # Natural Language Interface - '''Here is a short story (in the style of Tolkien) in which Aiden attacks a robot with a sword: - ''', "generative", 140, 0.85, 0.8, "<|endoftext|>"] -] - - -iface = gr.Interface( - fn=infer, - description=f'''

        Generative and Question/Answer

        {desc}{thanks}''', - allow_flagging="never", - inputs=[ - gr.Textbox(lines=20, label="Prompt"), # prompt - gr.Radio(["Freeform", "Q/A","ELDR","Expert","EFR","BFR"], - value="Expert", label="Choose Mode"), - gr.Slider(1, 512, value=40), # max_tokens - gr.Slider(0.0, 5.0, value=0.9), # temperature - gr.Slider(0.0, 1.0, value=0.85), # top_p - gr.Textbox(lines=1, value="<|endoftext|>"), # stop - gr.Slider(-999, 0.0, value=0.0), # end_adj - - ], - outputs=gr.Textbox(label="Generated Output", lines=25), - examples=examples, - cache_examples=False, -).queue() - -chatiface = gr.Interface( - fn=chat, - description=f'''

        Chatbot

        Refresh page or change name to reset memory context

        {desc}{thanks}''', - allow_flagging="never", - inputs=[ - gr.Textbox(lines=5, label="Message"), # prompt - "state", - gr.Slider(1, 256, value=60), # max_tokens - gr.Slider(0.0, 1.0, value=0.8), # temperature - gr.Slider(0.0, 1.0, value=0.85) # top_p - ], - outputs=[gr.Chatbot(label="Chat Log", color_map=( - "green", "pink")), "state"], -).queue() - -demo = gr.TabbedInterface( - - [iface, chatiface], ["Q/A", "Chatbot"], - title=title, - -) - -demo.queue() -demo.launch(share=False) diff --git a/spaces/HemanthSai7/IntelligentQuestionGenerator/src/Pipeline/QAhaystack.py b/spaces/HemanthSai7/IntelligentQuestionGenerator/src/Pipeline/QAhaystack.py deleted file mode 100644 index 69b5a62589326de1267c5a5e2a33ec5ac04138a9..0000000000000000000000000000000000000000 --- a/spaces/HemanthSai7/IntelligentQuestionGenerator/src/Pipeline/QAhaystack.py +++ /dev/null @@ -1,158 +0,0 @@ -import re -import logging - -from haystack.document_stores import ElasticsearchDocumentStore -from haystack.utils import launch_es,print_answers -from haystack.nodes import FARMReader,TransformersReader,BM25Retriever -from haystack.pipelines import ExtractiveQAPipeline -from haystack.nodes import TextConverter,PDFToTextConverter,PreProcessor -from haystack.utils import convert_files_to_docs, fetch_archive_from_http -from Reader import PdfReader,ExtractedText - -launch_es() # Launches an Elasticsearch instance on your local machine - -# Install the latest release of Haystack in your own environment -#! pip install farm-haystack - -"""Install the latest main of Haystack""" -# !pip install --upgrade pip -# !pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab,ocr] - -# # For Colab/linux based machines -# !wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.04.tar.gz -# !tar -xvf xpdf-tools-linux-4.04.tar.gz && sudo cp xpdf-tools-linux-4.04/bin64/pdftotext /usr/local/bin - -# For Macos machines -# !wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-mac-4.03.tar.gz -# !tar -xvf xpdf-tools-mac-4.03.tar.gz && sudo cp xpdf-tools-mac-4.03/bin64/pdftotext /usr/local/bin - -"Run this script from the root of the project" -# # In Colab / No Docker environments: Start Elasticsearch from source -# ! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q -# ! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz -# ! chown -R daemon:daemon elasticsearch-7.9.2 - -# import os -# from subprocess import Popen, PIPE, STDOUT - -# es_server = Popen( -# ["elasticsearch-7.9.2/bin/elasticsearch"], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1) # as daemon -# ) -# # wait until ES has started -# ! sleep 30 - -logging.basicConfig(format="%(levelname)s - %(name)s - %(message)s", level=logging.WARNING) -logging.getLogger("haystack").setLevel(logging.INFO) - -class Connection: - def __init__(self,host="localhost",username="",password="",index="document"): - """ - host: Elasticsearch host. If no host is provided, the default host "localhost" is used. - - port: Elasticsearch port. If no port is provided, the default port 9200 is used. - - username: Elasticsearch username. If no username is provided, no username is used. - - password: Elasticsearch password. If no password is provided, no password is used. - - index: Elasticsearch index. If no index is provided, the default index "document" is used. 
- """ - self.host=host - self.username=username - self.password=password - self.index=index - - def get_connection(self): - document_store=ElasticsearchDocumentStore(host=self.host,username=self.username,password=self.password,index=self.index) - return document_store - -class QAHaystack: - def __init__(self, filename): - self.filename=filename - - def preprocessing(self,data): - """ - This function is used to preprocess the data. Its a simple function which removes the special characters and converts the data to lower case. - """ - - converter = TextConverter(remove_numeric_tables=True, valid_languages=["en"]) - doc_txt = converter.convert(file_path=ExtractedText(self.filename,'data.txt').save(4,6), meta=None)[0] - - converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"]) - doc_pdf = converter.convert(file_path="data/tutorial8/manibook.pdf", meta=None)[0] - - preprocess_text=data.lower() # lowercase - preprocess_text = re.sub(r'\s+', ' ', preprocess_text) # remove extra spaces - return preprocess_text - - def convert_to_document(self,data): - - """ - Write the data to a text file. This is required since the haystack library requires the data to be in a text file so that it can then be converted to a document. - """ - data=self.preprocessing(data) - with open(self.filename,'w') as f: - f.write(data) - - """ - Read the data from the text file. - """ - data=self.preprocessing(data) - with open(self.filename,'r') as f: - data=f.read() - data=data.split("\n") - - """ - DocumentStores expect Documents in dictionary form, like that below. They are loaded using the DocumentStore.write_documents() - - dicts=[ - { - 'content': DOCUMENT_TEXT_HERE, - 'meta':{'name': DOCUMENT_NAME,...} - },... - ] - - (Optionally: you can also add more key-value-pairs here, that will be indexed as fields in Elasticsearch and can be accessed later for filtering or shown in the responses of the Pipeline) - """ - data_json=[{ - 'content':paragraph, - 'meta':{ - 'name':self.filename - } - } for paragraph in data - ] - - document_store=Connection().get_connection() - document_store.write_documents(data_json) - return document_store - - -class Pipeline: - def __init__(self,filename,retriever=BM25Retriever,reader=FARMReader): - self.reader=reader - self.retriever=retriever - self.filename=filename - - def get_prediction(self,data,query): - """ - Retrievers help narrowing down the scope for the Reader to smaller units of text where a given question could be answered. They use some simple but fast algorithm. - - Here: We use Elasticsearch's default BM25 algorithm . I'll check out the other retrievers as well. - """ - retriever=self.retriever(document_store=QAHaystack(self.filename).convert_to_document(data)) - - """ - Readers scan the texts returned by retrievers in detail and extract k best answers. They are based on powerful, but slower deep learning models.Haystack currently supports Readers based on the frameworks FARM and Transformers. - """ - reader = self.reader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True) - - """ - With a Haystack Pipeline we can stick together your building blocks to a search pipeline. Under the hood, Pipelines are Directed Acyclic Graphs (DAGs) that you can easily customize for our own use cases. To speed things up, Haystack also comes with a few predefined Pipelines. One of them is the ExtractiveQAPipeline that combines a retriever and a reader to answer our questions. 
- """ - pipe = ExtractiveQAPipeline(reader, retriever) - - """ - This function is used to get the prediction from the pipeline. - """ - prediction = pipe.run(query=query, params={"Retriever":{"top_k":10}, "Reader":{"top_k":5}}) - return prediction \ No newline at end of file diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/flagging.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/flagging.py deleted file mode 100644 index e87cf44d471df1f229458f07cac7c67ac0cfd540..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/flagging.py +++ /dev/null @@ -1,560 +0,0 @@ -from __future__ import annotations - -import csv -import datetime -import io -import json -import os -import uuid -from abc import ABC, abstractmethod -from pathlib import Path -from typing import TYPE_CHECKING, Any, List - -import gradio as gr -from gradio import encryptor, utils -from gradio.documentation import document, set_documentation_group - -if TYPE_CHECKING: - from gradio.components import IOComponent - -set_documentation_group("flagging") - - -def _get_dataset_features_info(is_new, components): - """ - Takes in a list of components and returns a dataset features info - - Parameters: - is_new: boolean, whether the dataset is new or not - components: list of components - - Returns: - infos: a dictionary of the dataset features - file_preview_types: dictionary mapping of gradio components to appropriate string. - header: list of header strings - - """ - infos = {"flagged": {"features": {}}} - # File previews for certain input and output types - file_preview_types = {gr.Audio: "Audio", gr.Image: "Image"} - headers = [] - - # Generate the headers and dataset_infos - if is_new: - - for component in components: - headers.append(component.label) - infos["flagged"]["features"][component.label] = { - "dtype": "string", - "_type": "Value", - } - if isinstance(component, tuple(file_preview_types)): - headers.append(component.label + " file") - for _component, _type in file_preview_types.items(): - if isinstance(component, _component): - infos["flagged"]["features"][ - (component.label or "") + " file" - ] = {"_type": _type} - break - - headers.append("flag") - infos["flagged"]["features"]["flag"] = { - "dtype": "string", - "_type": "Value", - } - - return infos, file_preview_types, headers - - -class FlaggingCallback(ABC): - """ - An abstract class for defining the methods that any FlaggingCallback should have. - """ - - @abstractmethod - def setup(self, components: List[IOComponent], flagging_dir: str): - """ - This method should be overridden and ensure that everything is set up correctly for flag(). - This method gets called once at the beginning of the Interface.launch() method. - Parameters: - components: Set of components that will provide flagged data. - flagging_dir: A string, typically containing the path to the directory where the flagging file should be storied (provided as an argument to Interface.__init__()). - """ - pass - - @abstractmethod - def flag( - self, - flag_data: List[Any], - flag_option: str | None = None, - flag_index: int | None = None, - username: str | None = None, - ) -> int: - """ - This method should be overridden by the FlaggingCallback subclass and may contain optional additional arguments. - This gets called every time the button is pressed. - Parameters: - interface: The Interface object that is being used to launch the flagging interface. - flag_data: The data to be flagged. 
- flag_option (optional): In the case that flagging_options are provided, the flag option that is being used. - flag_index (optional): The index of the sample that is being flagged. - username (optional): The username of the user that is flagging the data, if logged in. - Returns: - (int) The total number of samples that have been flagged. - """ - pass - - -@document() -class SimpleCSVLogger(FlaggingCallback): - """ - A simplified implementation of the FlaggingCallback abstract class - provided for illustrative purposes. Each flagged sample (both the input and output data) - is logged to a CSV file on the machine running the gradio app. - Example: - import gradio as gr - def image_classifier(inp): - return {'cat': 0.3, 'dog': 0.7} - demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", - flagging_callback=SimpleCSVLogger()) - """ - - def __init__(self): - pass - - def setup(self, components: List[IOComponent], flagging_dir: str | Path): - self.components = components - self.flagging_dir = flagging_dir - os.makedirs(flagging_dir, exist_ok=True) - - def flag( - self, - flag_data: List[Any], - flag_option: str | None = None, - flag_index: int | None = None, - username: str | None = None, - ) -> int: - flagging_dir = self.flagging_dir - log_filepath = Path(flagging_dir) / "log.csv" - - csv_data = [] - for component, sample in zip(self.components, flag_data): - save_dir = Path(flagging_dir) / utils.strip_invalid_filename_characters( - component.label or "" - ) - csv_data.append( - component.deserialize( - sample, - save_dir, - None, - ) - ) - - with open(log_filepath, "a", newline="") as csvfile: - writer = csv.writer(csvfile) - writer.writerow(utils.sanitize_list_for_csv(csv_data)) - - with open(log_filepath, "r") as csvfile: - line_count = len([None for row in csv.reader(csvfile)]) - 1 - return line_count - - -@document() -class CSVLogger(FlaggingCallback): - """ - The default implementation of the FlaggingCallback abstract class. Each flagged - sample (both the input and output data) is logged to a CSV file with headers on the machine running the gradio app. 
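    Each flagged value is deserialized into a subfolder of the flagging directory named after its component label, so file-based components such as Image or Audio are stored as files next to log.csv while the CSV row records their paths.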
- Example: - import gradio as gr - def image_classifier(inp): - return {'cat': 0.3, 'dog': 0.7} - demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", - flagging_callback=CSVLogger()) - Guides: using_flagging - """ - - def __init__(self): - pass - - def setup( - self, - components: List[IOComponent], - flagging_dir: str | Path, - encryption_key: bytes | None = None, - ): - self.components = components - self.flagging_dir = flagging_dir - self.encryption_key = encryption_key - os.makedirs(flagging_dir, exist_ok=True) - - def flag( - self, - flag_data: List[Any], - flag_option: str | None = None, - flag_index: int | None = None, - username: str | None = None, - ) -> int: - flagging_dir = self.flagging_dir - log_filepath = Path(flagging_dir) / "log.csv" - is_new = not Path(log_filepath).exists() - headers = [ - component.label or f"component {idx}" - for idx, component in enumerate(self.components) - ] + [ - "flag", - "username", - "timestamp", - ] - - csv_data = [] - for idx, (component, sample) in enumerate(zip(self.components, flag_data)): - save_dir = Path(flagging_dir) / utils.strip_invalid_filename_characters( - component.label or f"component {idx}" - ) - if utils.is_update(sample): - csv_data.append(str(sample)) - else: - csv_data.append( - component.deserialize( - sample, - save_dir=save_dir, - encryption_key=self.encryption_key, - ) - if sample is not None - else "" - ) - csv_data.append(flag_option if flag_option is not None else "") - csv_data.append(username if username is not None else "") - csv_data.append(str(datetime.datetime.now())) - - def replace_flag_at_index(file_content: str, flag_index: int): - file_content_ = io.StringIO(file_content) - content = list(csv.reader(file_content_)) - header = content[0] - flag_col_index = header.index("flag") - content[flag_index][flag_col_index] = flag_option # type: ignore - output = io.StringIO() - writer = csv.writer(output) - writer.writerows(utils.sanitize_list_for_csv(content)) - return output.getvalue() - - if self.encryption_key: - output = io.StringIO() - if not is_new: - with open(log_filepath, "rb", encoding="utf-8") as csvfile: - encrypted_csv = csvfile.read() - decrypted_csv = encryptor.decrypt( - self.encryption_key, encrypted_csv - ) - file_content = decrypted_csv.decode() - if flag_index is not None: - file_content = replace_flag_at_index(file_content, flag_index) - output.write(file_content) - writer = csv.writer(output) - if flag_index is None: - if is_new: - writer.writerow(utils.sanitize_list_for_csv(headers)) - writer.writerow(utils.sanitize_list_for_csv(csv_data)) - with open(log_filepath, "wb", encoding="utf-8") as csvfile: - csvfile.write( - encryptor.encrypt(self.encryption_key, output.getvalue().encode()) - ) - else: - if flag_index is None: - with open(log_filepath, "a", newline="", encoding="utf-8") as csvfile: - writer = csv.writer(csvfile) - if is_new: - writer.writerow(utils.sanitize_list_for_csv(headers)) - writer.writerow(utils.sanitize_list_for_csv(csv_data)) - else: - with open(log_filepath, encoding="utf-8") as csvfile: - file_content = csvfile.read() - file_content = replace_flag_at_index(file_content, flag_index) - with open( - log_filepath, "w", newline="", encoding="utf-8" - ) as csvfile: # newline parameter needed for Windows - csvfile.write(file_content) - with open(log_filepath, "r", encoding="utf-8") as csvfile: - line_count = len([None for row in csv.reader(csvfile)]) - 1 - return line_count - - -@document() -class HuggingFaceDatasetSaver(FlaggingCallback): - """ - A 
callback that saves each flagged sample (both the input and output data) - to a HuggingFace dataset. - Example: - import gradio as gr - hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes") - def image_classifier(inp): - return {'cat': 0.3, 'dog': 0.7} - demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", - allow_flagging="manual", flagging_callback=hf_writer) - Guides: using_flagging - """ - - def __init__( - self, - hf_token: str, - dataset_name: str, - organization: str | None = None, - private: bool = False, - ): - """ - Parameters: - hf_token: The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset. - dataset_name: The name of the dataset to save the data to, e.g. "image-classifier-1" - organization: The organization to save the dataset under. The hf_token must provide write access to this organization. If not provided, saved under the name of the user corresponding to the hf_token. - private: Whether the dataset should be private (defaults to False). - """ - self.hf_token = hf_token - self.dataset_name = dataset_name - self.organization_name = organization - self.dataset_private = private - - def setup(self, components: List[IOComponent], flagging_dir: str): - """ - Params: - flagging_dir (str): local directory where the dataset is cloned, - updated, and pushed from. - """ - try: - import huggingface_hub - except (ImportError, ModuleNotFoundError): - raise ImportError( - "Package `huggingface_hub` not found is needed " - "for HuggingFaceDatasetSaver. Try 'pip install huggingface_hub'." - ) - path_to_dataset_repo = huggingface_hub.create_repo( - name=self.dataset_name, - token=self.hf_token, - private=self.dataset_private, - repo_type="dataset", - exist_ok=True, - ) - self.path_to_dataset_repo = path_to_dataset_repo # e.g. "https://huggingface.co/datasets/abidlabs/test-audio-10" - self.components = components - self.flagging_dir = flagging_dir - self.dataset_dir = Path(flagging_dir) / self.dataset_name - self.repo = huggingface_hub.Repository( - local_dir=str(self.dataset_dir), - clone_from=path_to_dataset_repo, - use_auth_token=self.hf_token, - ) - self.repo.git_pull(lfs=True) - - # Should filename be user-specified? 
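        # data.csv accumulates one CSV row per flagged sample; dataset_infos.json holds the
        # dataset feature schema and is only written the first time a sample is flagged.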
- self.log_file = Path(self.dataset_dir) / "data.csv" - self.infos_file = Path(self.dataset_dir) / "dataset_infos.json" - - def flag( - self, - flag_data: List[Any], - flag_option: str | None = None, - flag_index: int | None = None, - username: str | None = None, - ) -> int: - self.repo.git_pull(lfs=True) - - is_new = not Path(self.log_file).exists() - - with open(self.log_file, "a", newline="", encoding="utf-8") as csvfile: - writer = csv.writer(csvfile) - - # File previews for certain input and output types - infos, file_preview_types, headers = _get_dataset_features_info( - is_new, self.components - ) - - # Generate the headers and dataset_infos - if is_new: - writer.writerow(utils.sanitize_list_for_csv(headers)) - - # Generate the row corresponding to the flagged sample - csv_data = [] - for component, sample in zip(self.components, flag_data): - save_dir = Path( - self.dataset_dir - ) / utils.strip_invalid_filename_characters(component.label or "") - filepath = component.deserialize(sample, save_dir, None) - csv_data.append(filepath) - if isinstance(component, tuple(file_preview_types)): - csv_data.append( - "{}/resolve/main/{}".format(self.path_to_dataset_repo, filepath) - ) - csv_data.append(flag_option if flag_option is not None else "") - writer.writerow(utils.sanitize_list_for_csv(csv_data)) - - if is_new: - json.dump(infos, open(self.infos_file, "w")) - - with open(self.log_file, "r", encoding="utf-8") as csvfile: - line_count = len([None for row in csv.reader(csvfile)]) - 1 - - self.repo.push_to_hub(commit_message="Flagged sample #{}".format(line_count)) - - return line_count - - -class HuggingFaceDatasetJSONSaver(FlaggingCallback): - """ - A FlaggingCallback that saves flagged data to a Hugging Face dataset in JSONL format. - - Each data sample is saved in a different JSONL file, - allowing multiple users to use flagging simultaneously. - Saving to a single CSV would cause errors as only one user can edit at the same time. - - """ - - def __init__( - self, - hf_foken: str, - dataset_name: str, - organization: str | None = None, - private: bool = False, - verbose: bool = True, - ): - """ - Params: - hf_token (str): The token to use to access the huggingface API. - dataset_name (str): The name of the dataset to save the data to, e.g. - "image-classifier-1" - organization (str): The name of the organization to which to attach - the datasets. If None, the dataset attaches to the user only. - private (bool): If the dataset does not already exist, whether it - should be created as a private dataset or public. Private datasets - may require paid huggingface.co accounts - verbose (bool): Whether to print out the status of the dataset - creation. - """ - self.hf_foken = hf_foken - self.dataset_name = dataset_name - self.organization_name = organization - self.dataset_private = private - self.verbose = verbose - - def setup(self, components: List[IOComponent], flagging_dir: str): - """ - Params: - components List[Component]: list of components for flagging - flagging_dir (str): local directory where the dataset is cloned, - updated, and pushed from. - """ - try: - import huggingface_hub - except (ImportError, ModuleNotFoundError): - raise ImportError( - "Package `huggingface_hub` not found is needed " - "for HuggingFaceDatasetJSONSaver. Try 'pip install huggingface_hub'." 
- ) - path_to_dataset_repo = huggingface_hub.create_repo( - name=self.dataset_name, - token=self.hf_foken, - private=self.dataset_private, - repo_type="dataset", - exist_ok=True, - ) - self.path_to_dataset_repo = path_to_dataset_repo # e.g. "https://huggingface.co/datasets/abidlabs/test-audio-10" - self.components = components - self.flagging_dir = flagging_dir - self.dataset_dir = Path(flagging_dir) / self.dataset_name - self.repo = huggingface_hub.Repository( - local_dir=str(self.dataset_dir), - clone_from=path_to_dataset_repo, - use_auth_token=self.hf_foken, - ) - self.repo.git_pull(lfs=True) - - self.infos_file = Path(self.dataset_dir) / "dataset_infos.json" - - def flag( - self, - flag_data: List[Any], - flag_option: str | None = None, - flag_index: int | None = None, - username: str | None = None, - ) -> str: - self.repo.git_pull(lfs=True) - - # Generate unique folder for the flagged sample - unique_name = self.get_unique_name() # unique name for folder - folder_name = ( - Path(self.dataset_dir) / unique_name - ) # unique folder for specific example - os.makedirs(folder_name) - - # Now uses the existence of `dataset_infos.json` to determine if new - is_new = not Path(self.infos_file).exists() - - # File previews for certain input and output types - infos, file_preview_types, _ = _get_dataset_features_info( - is_new, self.components - ) - - # Generate the row and header corresponding to the flagged sample - csv_data = [] - headers = [] - - for component, sample in zip(self.components, flag_data): - headers.append(component.label) - - try: - save_dir = Path(folder_name) / utils.strip_invalid_filename_characters( - component.label or "" - ) - filepath = component.deserialize(sample, save_dir, None) - except Exception: - # Could not parse 'sample' (mostly) because it was None and `component.save_flagged` - # does not handle None cases. 
- # for example: Label (line 3109 of components.py raises an error if data is None) - filepath = None - - if isinstance(component, tuple(file_preview_types)): - headers.append(component.label or "" + " file") - - csv_data.append( - "{}/resolve/main/{}/{}".format( - self.path_to_dataset_repo, unique_name, filepath - ) - if filepath is not None - else None - ) - - csv_data.append(filepath) - headers.append("flag") - csv_data.append(flag_option if flag_option is not None else "") - - # Creates metadata dict from row data and dumps it - metadata_dict = { - header: _csv_data for header, _csv_data in zip(headers, csv_data) - } - self.dump_json(metadata_dict, Path(folder_name) / "metadata.jsonl") - - if is_new: - json.dump(infos, open(self.infos_file, "w")) - - self.repo.push_to_hub(commit_message="Flagged sample {}".format(unique_name)) - return unique_name - - def get_unique_name(self): - id = uuid.uuid4() - return str(id) - - def dump_json(self, thing: dict, file_path: str | Path) -> None: - with open(file_path, "w+", encoding="utf8") as f: - json.dump(thing, f) - - -class FlagMethod: - """ - Helper class that contains the flagging button option and callback - """ - - def __init__(self, flagging_callback: FlaggingCallback, flag_option=None): - self.flagging_callback = flagging_callback - self.flag_option = flag_option - self.__name__ = "Flag" - - def __call__(self, *flag_data): - self.flagging_callback.flag(list(flag_data), flag_option=self.flag_option) diff --git a/spaces/Hua626/QQsign/bin/unidbg-fetch-qsign.bat b/spaces/Hua626/QQsign/bin/unidbg-fetch-qsign.bat deleted file mode 100644 index 8b291e7303b0c07d14b714e5795473891363c85b..0000000000000000000000000000000000000000 --- a/spaces/Hua626/QQsign/bin/unidbg-fetch-qsign.bat +++ /dev/null @@ -1,89 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem unidbg-fetch-qsign startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME%.. - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and UNIDBG_FETCH_QSIGN_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. 
- -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\lib\unidbg-fetch-qsign-1.1.9.jar;%APP_HOME%\lib\unidbg-android-105.jar;%APP_HOME%\lib\ktor-server-content-negotiation-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-json-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-status-pages-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-netty-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-host-common-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-core-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-events-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-websockets-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-cio-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-network-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-utils-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-io-jvm-2.3.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk8-1.8.22.jar;%APP_HOME%\lib\kotlinx-serialization-json-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-protobuf-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-core-jvm-1.5.1.jar;%APP_HOME%\lib\logback-classic-1.2.11.jar;%APP_HOME%\lib\kotlinx-coroutines-jdk8-1.7.1.jar;%APP_HOME%\lib\kotlinx-coroutines-core-jvm-1.7.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk7-1.8.22.jar;%APP_HOME%\lib\kotlin-reflect-1.8.10.jar;%APP_HOME%\lib\kotlin-stdlib-1.8.22.jar;%APP_HOME%\lib\slf4j-api-1.7.36.jar;%APP_HOME%\lib\kotlin-stdlib-common-1.8.22.jar;%APP_HOME%\lib\config-1.4.2.jar;%APP_HOME%\lib\jansi-2.4.0.jar;%APP_HOME%\lib\netty-codec-http2-4.1.92.Final.jar;%APP_HOME%\lib\alpn-api-1.1.3.v20160715.jar;%APP_HOME%\lib\netty-transport-native-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-epoll-4.1.92.Final.jar;%APP_HOME%\lib\logback-core-1.2.11.jar;%APP_HOME%\lib\annotations-23.0.0.jar;%APP_HOME%\lib\netty-codec-http-4.1.92.Final.jar;%APP_HOME%\lib\netty-handler-4.1.92.Final.jar;%APP_HOME%\lib\netty-codec-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-epoll-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-unix-common-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-4.1.92.Final.jar;%APP_HOME%\lib\netty-buffer-4.1.92.Final.jar;%APP_HOME%\lib\netty-resolver-4.1.92.Final.jar;%APP_HOME%\lib\netty-common-4.1.92.Final.jar - - -@rem Execute unidbg-fetch-qsign -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %UNIDBG_FETCH_QSIGN_OPTS% -classpath "%CLASSPATH%" MainKt %* - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable UNIDBG_FETCH_QSIGN_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! 
-if not "" == "%UNIDBG_FETCH_QSIGN_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py deleted file mode 100644 index f41ec09327fe80b50d20674e7482794ce45c531c..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.nn as nn -from fairseq.modules import TransformerSentenceEncoder -from fairseq.modules.sparse_transformer_sentence_encoder_layer import ( - SparseTransformerSentenceEncoderLayer, -) - - -class SparseTransformerSentenceEncoder(TransformerSentenceEncoder): - """ - Sparse implementation of the TransformerSentenceEncoder - - see SparseMultiheadAttention - """ - - def __init__( - self, - padding_idx: int, - vocab_size: int, - num_encoder_layers: int = 6, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - max_seq_len: int = 256, - num_segments: int = 2, - use_position_embeddings: bool = True, - offset_positions_by_padding: bool = True, - encoder_normalize_before: bool = False, - apply_bert_init: bool = False, - activation_fn: str = "relu", - learned_pos_embedding: bool = True, - embed_scale: float = None, - freeze_embeddings: bool = False, - n_trans_layers_to_freeze: int = 0, - export: bool = False, - is_bidirectional: bool = True, - stride: int = 32, - expressivity: int = 8, - ) -> None: - - super().__init__( - padding_idx, - vocab_size, - num_encoder_layers, - embedding_dim, - ffn_embedding_dim, - num_attention_heads, - dropout, - attention_dropout, - activation_dropout, - max_seq_len, - num_segments, - use_position_embeddings, - offset_positions_by_padding, - encoder_normalize_before, - apply_bert_init, - activation_fn, - learned_pos_embedding, - embed_scale, - freeze_embeddings, - n_trans_layers_to_freeze, - export, - ) - - self.layers = nn.ModuleList( - [ - SparseTransformerSentenceEncoderLayer( - embedding_dim=self.embedding_dim, - ffn_embedding_dim=ffn_embedding_dim, - num_attention_heads=num_attention_heads, - dropout=dropout, - attention_dropout=attention_dropout, - activation_dropout=activation_dropout, - activation_fn=activation_fn, - export=export, - is_bidirectional=is_bidirectional, - stride=stride, - expressivity=expressivity, - ) - for _ in range(num_encoder_layers) - ] - ) - - def freeze_module_params(m): - if m is not None: - for p in m.parameters(): - p.requires_grad = False - - for layer in range(n_trans_layers_to_freeze): - freeze_module_params(self.layers[layer]) diff --git a/spaces/ICML2022/resefa/utils/visualizers/test.py b/spaces/ICML2022/resefa/utils/visualizers/test.py deleted file mode 100644 index 765ebf9c721b0792fb373ecb515ebf188f728df0..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/utils/visualizers/test.py +++ /dev/null @@ -1,97 +0,0 @@ -# python3.7 -"""Unit test for visualizer.""" - -import os -import skvideo.datasets - -from ..image_utils import save_image -from . import GridVisualizer -from . import HtmlVisualizer -from . import HtmlReader -from . import GifVisualizer -from . 
import VideoVisualizer -from . import VideoReader - -__all__ = ['test_visualizer'] - -_TEST_DIR = 'visualizer_test' - - -def test_visualizer(test_dir=_TEST_DIR): - """Tests visualizers.""" - print('========== Start Visualizer Test ==========') - - frame_dir = os.path.join(test_dir, 'test_frames') - os.makedirs(frame_dir, exist_ok=True) - - print('===== Testing `VideoReader` =====') - # Total 132 frames, with size (720, 1080). - video_reader = VideoReader(skvideo.datasets.bigbuckbunny()) - frame_height = video_reader.frame_height - frame_width = video_reader.frame_width - frame_size = (frame_height, frame_width) - half_size = (frame_height // 2, frame_width // 2) - # Save frames as the test set. - for idx in range(80): - frame = video_reader.read() - save_image(os.path.join(frame_dir, f'{idx:02d}.png'), frame) - - print('===== Testing `GirdVisualizer` =====') - grid_visualizer = GridVisualizer() - grid_visualizer.set_row_spacing(30) - grid_visualizer.set_col_spacing(30) - grid_visualizer.set_background(use_black=True) - path = os.path.join(test_dir, 'portrait_row_major_ori_space30_black.png') - grid_visualizer.visualize_directory(frame_dir, path, - is_portrait=True, is_row_major=True) - path = os.path.join( - test_dir, 'landscape_col_major_downsample_space15_white.png') - grid_visualizer.set_image_size(half_size) - grid_visualizer.set_row_spacing(15) - grid_visualizer.set_col_spacing(15) - grid_visualizer.set_background(use_black=False) - grid_visualizer.visualize_directory(frame_dir, path, - is_portrait=False, is_row_major=False) - - print('===== Testing `HtmlVisualizer` =====') - html_visualizer = HtmlVisualizer() - path = os.path.join(test_dir, 'portrait_col_major_ori.html') - html_visualizer.visualize_directory(frame_dir, path, - is_portrait=True, is_row_major=False) - path = os.path.join(test_dir, 'landscape_row_major_downsample.html') - html_visualizer.set_image_size(half_size) - html_visualizer.visualize_directory(frame_dir, path, - is_portrait=False, is_row_major=True) - - print('===== Testing `HtmlReader` =====') - path = os.path.join(test_dir, 'landscape_row_major_downsample.html') - html_reader = HtmlReader(path) - for j in range(html_reader.num_cols): - assert html_reader.get_header(j) == '' - parsed_dir = os.path.join(test_dir, 'parsed_frames') - os.makedirs(parsed_dir, exist_ok=True) - for i in range(html_reader.num_rows): - for j in range(html_reader.num_cols): - idx = i * html_reader.num_cols + j - assert html_reader.get_text(i, j).endswith(f'(index {idx:03d})') - image = html_reader.get_image(i, j, image_size=frame_size) - assert image.shape[0:2] == frame_size - save_image(os.path.join(parsed_dir, f'{idx:02d}.png'), image) - - print('===== Testing `GifVisualizer` =====') - gif_visualizer = GifVisualizer() - path = os.path.join(test_dir, 'gif_ori.gif') - gif_visualizer.visualize_directory(frame_dir, path) - gif_visualizer.set_image_size(half_size) - path = os.path.join(test_dir, 'gif_downsample.gif') - gif_visualizer.visualize_directory(frame_dir, path) - - print('===== Testing `VideoVisualizer` =====') - video_visualizer = VideoVisualizer() - path = os.path.join(test_dir, 'video_ori.mp4') - video_visualizer.visualize_directory(frame_dir, path) - path = os.path.join(test_dir, 'video_downsample.mp4') - video_visualizer.set_frame_size(half_size) - video_visualizer.visualize_directory(frame_dir, path) - - print('========== Finish Visualizer Test ==========') diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/ecbsr_arch.py 
b/spaces/Iceclear/StableSR/StableSR/basicsr/archs/ecbsr_arch.py deleted file mode 100644 index fe20e772587d74c67fffb40f3b4731cf4f42268b..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/ecbsr_arch.py +++ /dev/null @@ -1,275 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from basicsr.utils.registry import ARCH_REGISTRY - - -class SeqConv3x3(nn.Module): - """The re-parameterizable block used in the ECBSR architecture. - - ``Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices`` - - Reference: https://github.com/xindongzhang/ECBSR - - Args: - seq_type (str): Sequence type, option: conv1x1-conv3x3 | conv1x1-sobelx | conv1x1-sobely | conv1x1-laplacian. - in_channels (int): Channel number of input. - out_channels (int): Channel number of output. - depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1. - """ - - def __init__(self, seq_type, in_channels, out_channels, depth_multiplier=1): - super(SeqConv3x3, self).__init__() - self.seq_type = seq_type - self.in_channels = in_channels - self.out_channels = out_channels - - if self.seq_type == 'conv1x1-conv3x3': - self.mid_planes = int(out_channels * depth_multiplier) - conv0 = torch.nn.Conv2d(self.in_channels, self.mid_planes, kernel_size=1, padding=0) - self.k0 = conv0.weight - self.b0 = conv0.bias - - conv1 = torch.nn.Conv2d(self.mid_planes, self.out_channels, kernel_size=3) - self.k1 = conv1.weight - self.b1 = conv1.bias - - elif self.seq_type == 'conv1x1-sobelx': - conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0) - self.k0 = conv0.weight - self.b0 = conv0.bias - - # init scale and bias - scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3 - self.scale = nn.Parameter(scale) - bias = torch.randn(self.out_channels) * 1e-3 - bias = torch.reshape(bias, (self.out_channels, )) - self.bias = nn.Parameter(bias) - # init mask - self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32) - for i in range(self.out_channels): - self.mask[i, 0, 0, 0] = 1.0 - self.mask[i, 0, 1, 0] = 2.0 - self.mask[i, 0, 2, 0] = 1.0 - self.mask[i, 0, 0, 2] = -1.0 - self.mask[i, 0, 1, 2] = -2.0 - self.mask[i, 0, 2, 2] = -1.0 - self.mask = nn.Parameter(data=self.mask, requires_grad=False) - - elif self.seq_type == 'conv1x1-sobely': - conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0) - self.k0 = conv0.weight - self.b0 = conv0.bias - - # init scale and bias - scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3 - self.scale = nn.Parameter(torch.FloatTensor(scale)) - bias = torch.randn(self.out_channels) * 1e-3 - bias = torch.reshape(bias, (self.out_channels, )) - self.bias = nn.Parameter(torch.FloatTensor(bias)) - # init mask - self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32) - for i in range(self.out_channels): - self.mask[i, 0, 0, 0] = 1.0 - self.mask[i, 0, 0, 1] = 2.0 - self.mask[i, 0, 0, 2] = 1.0 - self.mask[i, 0, 2, 0] = -1.0 - self.mask[i, 0, 2, 1] = -2.0 - self.mask[i, 0, 2, 2] = -1.0 - self.mask = nn.Parameter(data=self.mask, requires_grad=False) - - elif self.seq_type == 'conv1x1-laplacian': - conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0) - self.k0 = conv0.weight - self.b0 = conv0.bias - - # init scale and bias - scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3 - self.scale = nn.Parameter(torch.FloatTensor(scale)) - bias = 
torch.randn(self.out_channels) * 1e-3 - bias = torch.reshape(bias, (self.out_channels, )) - self.bias = nn.Parameter(torch.FloatTensor(bias)) - # init mask - self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32) - for i in range(self.out_channels): - self.mask[i, 0, 0, 1] = 1.0 - self.mask[i, 0, 1, 0] = 1.0 - self.mask[i, 0, 1, 2] = 1.0 - self.mask[i, 0, 2, 1] = 1.0 - self.mask[i, 0, 1, 1] = -4.0 - self.mask = nn.Parameter(data=self.mask, requires_grad=False) - else: - raise ValueError('The type of seqconv is not supported!') - - def forward(self, x): - if self.seq_type == 'conv1x1-conv3x3': - # conv-1x1 - y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1) - # explicitly padding with bias - y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0) - b0_pad = self.b0.view(1, -1, 1, 1) - y0[:, :, 0:1, :] = b0_pad - y0[:, :, -1:, :] = b0_pad - y0[:, :, :, 0:1] = b0_pad - y0[:, :, :, -1:] = b0_pad - # conv-3x3 - y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1) - else: - y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1) - # explicitly padding with bias - y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0) - b0_pad = self.b0.view(1, -1, 1, 1) - y0[:, :, 0:1, :] = b0_pad - y0[:, :, -1:, :] = b0_pad - y0[:, :, :, 0:1] = b0_pad - y0[:, :, :, -1:] = b0_pad - # conv-3x3 - y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias=self.bias, stride=1, groups=self.out_channels) - return y1 - - def rep_params(self): - device = self.k0.get_device() - if device < 0: - device = None - - if self.seq_type == 'conv1x1-conv3x3': - # re-param conv kernel - rep_weight = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3)) - # re-param conv bias - rep_bias = torch.ones(1, self.mid_planes, 3, 3, device=device) * self.b0.view(1, -1, 1, 1) - rep_bias = F.conv2d(input=rep_bias, weight=self.k1).view(-1, ) + self.b1 - else: - tmp = self.scale * self.mask - k1 = torch.zeros((self.out_channels, self.out_channels, 3, 3), device=device) - for i in range(self.out_channels): - k1[i, i, :, :] = tmp[i, 0, :, :] - b1 = self.bias - # re-param conv kernel - rep_weight = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3)) - # re-param conv bias - rep_bias = torch.ones(1, self.out_channels, 3, 3, device=device) * self.b0.view(1, -1, 1, 1) - rep_bias = F.conv2d(input=rep_bias, weight=k1).view(-1, ) + b1 - return rep_weight, rep_bias - - -class ECB(nn.Module): - """The ECB block used in the ECBSR architecture. - - Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices - Ref git repo: https://github.com/xindongzhang/ECBSR - - Args: - in_channels (int): Channel number of input. - out_channels (int): Channel number of output. - depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1. - act_type (str): Activation type. Option: prelu | relu | rrelu | softplus | linear. Default: prelu. - with_idt (bool): Whether to use identity connection. Default: False. 
- """ - - def __init__(self, in_channels, out_channels, depth_multiplier, act_type='prelu', with_idt=False): - super(ECB, self).__init__() - - self.depth_multiplier = depth_multiplier - self.in_channels = in_channels - self.out_channels = out_channels - self.act_type = act_type - - if with_idt and (self.in_channels == self.out_channels): - self.with_idt = True - else: - self.with_idt = False - - self.conv3x3 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, padding=1) - self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.in_channels, self.out_channels, self.depth_multiplier) - self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.in_channels, self.out_channels) - self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.in_channels, self.out_channels) - self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.in_channels, self.out_channels) - - if self.act_type == 'prelu': - self.act = nn.PReLU(num_parameters=self.out_channels) - elif self.act_type == 'relu': - self.act = nn.ReLU(inplace=True) - elif self.act_type == 'rrelu': - self.act = nn.RReLU(lower=-0.05, upper=0.05) - elif self.act_type == 'softplus': - self.act = nn.Softplus() - elif self.act_type == 'linear': - pass - else: - raise ValueError('The type of activation if not support!') - - def forward(self, x): - if self.training: - y = self.conv3x3(x) + self.conv1x1_3x3(x) + self.conv1x1_sbx(x) + self.conv1x1_sby(x) + self.conv1x1_lpl(x) - if self.with_idt: - y += x - else: - rep_weight, rep_bias = self.rep_params() - y = F.conv2d(input=x, weight=rep_weight, bias=rep_bias, stride=1, padding=1) - if self.act_type != 'linear': - y = self.act(y) - return y - - def rep_params(self): - weight0, bias0 = self.conv3x3.weight, self.conv3x3.bias - weight1, bias1 = self.conv1x1_3x3.rep_params() - weight2, bias2 = self.conv1x1_sbx.rep_params() - weight3, bias3 = self.conv1x1_sby.rep_params() - weight4, bias4 = self.conv1x1_lpl.rep_params() - rep_weight, rep_bias = (weight0 + weight1 + weight2 + weight3 + weight4), ( - bias0 + bias1 + bias2 + bias3 + bias4) - - if self.with_idt: - device = rep_weight.get_device() - if device < 0: - device = None - weight_idt = torch.zeros(self.out_channels, self.out_channels, 3, 3, device=device) - for i in range(self.out_channels): - weight_idt[i, i, 1, 1] = 1.0 - bias_idt = 0.0 - rep_weight, rep_bias = rep_weight + weight_idt, rep_bias + bias_idt - return rep_weight, rep_bias - - -@ARCH_REGISTRY.register() -class ECBSR(nn.Module): - """ECBSR architecture. - - Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices - Ref git repo: https://github.com/xindongzhang/ECBSR - - Args: - num_in_ch (int): Channel number of inputs. - num_out_ch (int): Channel number of outputs. - num_block (int): Block number in the trunk network. - num_channel (int): Channel number. - with_idt (bool): Whether use identity in convolution layers. - act_type (str): Activation type. - scale (int): Upsampling factor. 
- """ - - def __init__(self, num_in_ch, num_out_ch, num_block, num_channel, with_idt, act_type, scale): - super(ECBSR, self).__init__() - self.num_in_ch = num_in_ch - self.scale = scale - - backbone = [] - backbone += [ECB(num_in_ch, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)] - for _ in range(num_block): - backbone += [ECB(num_channel, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)] - backbone += [ - ECB(num_channel, num_out_ch * scale * scale, depth_multiplier=2.0, act_type='linear', with_idt=with_idt) - ] - - self.backbone = nn.Sequential(*backbone) - self.upsampler = nn.PixelShuffle(scale) - - def forward(self, x): - if self.num_in_ch > 1: - shortcut = torch.repeat_interleave(x, self.scale * self.scale, dim=1) - else: - shortcut = x # will repeat the input in the channel dimension (repeat scale * scale times) - y = self.backbone(x) + shortcut - y = self.upsampler(y) - return y diff --git a/spaces/Iceclear/StableSR/StableSR/taming/util.py b/spaces/Iceclear/StableSR/StableSR/taming/util.py deleted file mode 100644 index 06053e5defb87977f9ab07e69bf4da12201de9b7..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/taming/util.py +++ /dev/null @@ -1,157 +0,0 @@ -import os, hashlib -import requests -from tqdm import tqdm - -URL_MAP = { - "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1" -} - -CKPT_MAP = { - "vgg_lpips": "vgg.pth" -} - -MD5_MAP = { - "vgg_lpips": "d507d7349b931f0638a25a48a722f98a" -} - - -def download(url, local_path, chunk_size=1024): - os.makedirs(os.path.split(local_path)[0], exist_ok=True) - with requests.get(url, stream=True) as r: - total_size = int(r.headers.get("content-length", 0)) - with tqdm(total=total_size, unit="B", unit_scale=True) as pbar: - with open(local_path, "wb") as f: - for data in r.iter_content(chunk_size=chunk_size): - if data: - f.write(data) - pbar.update(chunk_size) - - -def md5_hash(path): - with open(path, "rb") as f: - content = f.read() - return hashlib.md5(content).hexdigest() - - -def get_ckpt_path(name, root, check=False): - assert name in URL_MAP - path = os.path.join(root, CKPT_MAP[name]) - if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]): - print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path)) - download(URL_MAP[name], path) - md5 = md5_hash(path) - assert md5 == MD5_MAP[name], md5 - return path - - -class KeyNotFoundError(Exception): - def __init__(self, cause, keys=None, visited=None): - self.cause = cause - self.keys = keys - self.visited = visited - messages = list() - if keys is not None: - messages.append("Key not found: {}".format(keys)) - if visited is not None: - messages.append("Visited: {}".format(visited)) - messages.append("Cause:\n{}".format(cause)) - message = "\n".join(messages) - super().__init__(message) - - -def retrieve( - list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False -): - """Given a nested list or dict return the desired value at key expanding - callable nodes if necessary and :attr:`expand` is ``True``. The expansion - is done in-place. - - Parameters - ---------- - list_or_dict : list or dict - Possibly nested list or dictionary. - key : str - key/to/value, path like string describing all keys necessary to - consider to get to the desired value. List indices can also be - passed here. - splitval : str - String that defines the delimiter between keys of the - different depth levels in `key`. 
- default : obj - Value returned if :attr:`key` is not found. - expand : bool - Whether to expand callable nodes on the path or not. - - Returns - ------- - The desired value or if :attr:`default` is not ``None`` and the - :attr:`key` is not found returns ``default``. - - Raises - ------ - Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is - ``None``. - """ - - keys = key.split(splitval) - - success = True - try: - visited = [] - parent = None - last_key = None - for key in keys: - if callable(list_or_dict): - if not expand: - raise KeyNotFoundError( - ValueError( - "Trying to get past callable node with expand=False." - ), - keys=keys, - visited=visited, - ) - list_or_dict = list_or_dict() - parent[last_key] = list_or_dict - - last_key = key - parent = list_or_dict - - try: - if isinstance(list_or_dict, dict): - list_or_dict = list_or_dict[key] - else: - list_or_dict = list_or_dict[int(key)] - except (KeyError, IndexError, ValueError) as e: - raise KeyNotFoundError(e, keys=keys, visited=visited) - - visited += [key] - # final expansion of retrieved value - if expand and callable(list_or_dict): - list_or_dict = list_or_dict() - parent[last_key] = list_or_dict - except KeyNotFoundError as e: - if default is None: - raise e - else: - list_or_dict = default - success = False - - if not pass_success: - return list_or_dict - else: - return list_or_dict, success - - -if __name__ == "__main__": - config = {"keya": "a", - "keyb": "b", - "keyc": - {"cc1": 1, - "cc2": 2, - } - } - from omegaconf import OmegaConf - config = OmegaConf.create(config) - print(config) - retrieve(config, "keya") - diff --git a/spaces/Jamkonams/AutoGPT/tests/__init__.py b/spaces/Jamkonams/AutoGPT/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Jikiwi/sovits-models/inference/slicer.py b/spaces/Jikiwi/sovits-models/inference/slicer.py deleted file mode 100644 index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000 --- a/spaces/Jikiwi/sovits-models/inference/slicer.py +++ /dev/null @@ -1,142 +0,0 @@ -import librosa -import torch -import torchaudio - - -class Slicer: - def __init__(self, - sr: int, - threshold: float = -40., - min_length: int = 5000, - min_interval: int = 300, - hop_size: int = 20, - max_sil_kept: int = 5000): - if not min_length >= min_interval >= hop_size: - raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size') - if not max_sil_kept >= hop_size: - raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size') - min_interval = sr * min_interval / 1000 - self.threshold = 10 ** (threshold / 20.) 
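-        # threshold is given in dB; 10 ** (threshold / 20.) converts it to a linear RMS amplitude, and the following assignments convert the millisecond arguments into sample and hop-frame units.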
- self.hop_size = round(sr * hop_size / 1000) - self.win_size = min(round(min_interval), 4 * self.hop_size) - self.min_length = round(sr * min_length / 1000 / self.hop_size) - self.min_interval = round(min_interval / self.hop_size) - self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) - - def _apply_slice(self, waveform, begin, end): - if len(waveform.shape) > 1: - return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)] - else: - return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)] - - # @timeit - def slice(self, waveform): - if len(waveform.shape) > 1: - samples = librosa.to_mono(waveform) - else: - samples = waveform - if samples.shape[0] <= self.min_length: - return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}} - rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) - sil_tags = [] - silence_start = None - clip_start = 0 - for i, rms in enumerate(rms_list): - # Keep looping while frame is silent. - if rms < self.threshold: - # Record start of silent frames. - if silence_start is None: - silence_start = i - continue - # Keep looping while frame is not silent and silence start has not been recorded. - if silence_start is None: - continue - # Clear recorded silence start if interval is not enough or clip is too short - is_leading_silence = silence_start == 0 and i > self.max_sil_kept - need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length - if not is_leading_silence and not need_slice_middle: - silence_start = None - continue - # Need slicing. Record the range of silent frames to be removed. - if i - silence_start <= self.max_sil_kept: - pos = rms_list[silence_start: i + 1].argmin() + silence_start - if silence_start == 0: - sil_tags.append((0, pos)) - else: - sil_tags.append((pos, pos)) - clip_start = pos - elif i - silence_start <= self.max_sil_kept * 2: - pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin() - pos += i - self.max_sil_kept - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - clip_start = pos_r - else: - sil_tags.append((min(pos_l, pos), max(pos_r, pos))) - clip_start = max(pos_r, pos) - else: - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - else: - sil_tags.append((pos_l, pos_r)) - clip_start = pos_r - silence_start = None - # Deal with trailing silence. - total_frames = rms_list.shape[0] - if silence_start is not None and total_frames - silence_start >= self.min_interval: - silence_end = min(total_frames, silence_start + self.max_sil_kept) - pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start - sil_tags.append((pos, total_frames + 1)) - # Apply and return slices. 
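-        # Each chunk maps an index string to {"slice": bool, "split_time": "start,end"}, where start/end are sample offsets into the original waveform and "slice" marks silent segments.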
- if len(sil_tags) == 0: - return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}} - else: - chunks = [] - # 第一段静音并非从头开始,补上有声片段 - if sil_tags[0][0]: - chunks.append( - {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"}) - for i in range(0, len(sil_tags)): - # 标识有声片段(跳过第一段) - if i: - chunks.append({"slice": False, - "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"}) - # 标识所有静音片段 - chunks.append({"slice": True, - "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"}) - # 最后一段静音并非结尾,补上结尾片段 - if sil_tags[-1][1] * self.hop_size < len(waveform): - chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"}) - chunk_dict = {} - for i in range(len(chunks)): - chunk_dict[str(i)] = chunks[i] - return chunk_dict - - -def cut(audio_path, db_thresh=-30, min_len=5000): - audio, sr = librosa.load(audio_path, sr=None) - slicer = Slicer( - sr=sr, - threshold=db_thresh, - min_length=min_len - ) - chunks = slicer.slice(audio) - return chunks - - -def chunks2audio(audio_path, chunks): - chunks = dict(chunks) - audio, sr = torchaudio.load(audio_path) - if len(audio.shape) == 2 and audio.shape[1] >= 2: - audio = torch.mean(audio, dim=0).unsqueeze(0) - audio = audio.cpu().numpy()[0] - result = [] - for k, v in chunks.items(): - tag = v["split_time"].split(",") - if tag[0] != tag[1]: - result.append((v["slice"], audio[int(tag[0]):int(tag[1])])) - return result, sr diff --git a/spaces/Joeri/fabry-perot/app.py b/spaces/Joeri/fabry-perot/app.py deleted file mode 100644 index c89478fd6f4181c09473a2da486b76bf89586d9e..0000000000000000000000000000000000000000 --- a/spaces/Joeri/fabry-perot/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import gradio as gr -import numpy as np -import matplotlib.pyplot as plt -import tensorflow as tf - -# Helper functions -def cos_mat(theta, n): - return np.sqrt(1 - (np.sin(np.radians(theta))/n)**2) - -def calculate_R(theta, n): - numerator = n * cos_mat(theta,n) - np.cos(np.radians(theta)) - denominator = n * cos_mat(theta, n) + np.cos(np.radians(theta)) - return (numerator/denominator)**2 - -def calculate_phase(theta, n, l, lamb): - return (2*np.pi*n/lamb) *(2*l*cos_mat(theta, n)) - -# Calculate transmission - ground truth -def trans(lamb, n, l, theta): - R = calculate_R(theta, n) - F = 4*R/((1 - R)**2) - delta = calculate_phase(theta, n, l, lamb) - return F, delta[0] * lamb[0], (1 + F*np.sin(delta/2)**2)**(-1) - -# create swish activation function -def swish(x): - return x*tf.keras.activations.sigmoid(x) -tf.keras.utils.get_custom_objects()['swish'] = tf.keras.layers.Activation(swish) - -# Normalizations of Fd to interval [0, 1] -def Fd_normalize(double): - double = double - np.array([11.8966, 21697]) - double = np.divide(2*double, [23.8, 43394]) - return double - -def Fd_unnormalize(double): - double = np.multiply(double/2, [23.8, 43394]) - double = double + np.array([11.8966, 21697]) - return double - -# Main function -def get_transmission(n, l, theta): - - # Compute ground truth - lamb = np.arange(400, 800, 2).astype('float32') - F, delta, transmission = trans(lamb, n, l, theta) - - # Make predictions - Fulcon = tf.keras.models.load_model('network_fabry_perot.hdf5', - custom_objects={'Activation' : tf.keras.layers.Activation(swish)}) - input = Fd_normalize(np.array([[F, delta]])) - pred = Fulcon.predict(tf.constant(input)) - - # Clear figure - plt.clf() - - # Plot ground truth - 
plt.plot(lamb, transmission, label='Ground truth') - - # Plot network prediction - plt.plot(lamb, pred[0], 'r', label='Prediction') - - # Layout - plt.xlabel("Wavelength (nm)", fontsize=14) - plt.ylabel("Transmission", fontsize=14) - plt.xticks(fontsize=14) - plt.yticks(fontsize=14) - plt.ylim(0, 1.05) - plt.title(r"Transmission T($\lambda$)", fontsize=16) - plt.legend() - - return plt.gcf() - -iface = gr.Interface(fn=get_transmission, - inputs=[gr.inputs.Slider(minimum=1.05, maximum=3.55, label='Index of refraction (n)'), - gr.inputs.Slider(minimum=100, maximum=1000, label='Thickness (l)'), - gr.inputs.Slider(minimum=-70,maximum=70, label=r'Incident angle (theta)')], - outputs="plot", - title="Fabry-Pérot resonator", - description=r"Gradio demo for a neural network trained to predict the transmission spectrum " + - "of a Fabry-Pérot resonator. More info can be found on https://github.com/Joeri38/inverse-design.", - allow_flagging='never') -iface.launch() diff --git a/spaces/JohnC26/ChatGPTwithAPI/app.py b/spaces/JohnC26/ChatGPTwithAPI/app.py deleted file mode 100644 index 374bd42fdba4b848e43a0c6f2243b8e766e9a6cf..0000000000000000000000000000000000000000 --- a/spaces/JohnC26/ChatGPTwithAPI/app.py +++ /dev/null @@ -1,132 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Testing with my Open AI Key -#OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]): #repetition_penalty, top_k - - payload = { - "model": "gpt-3.5-turbo", - "messages": [{"role": "user", "content": f"{inputs}"}], - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}" - } - - print(f"chat_counter - {chat_counter}") - if chat_counter != 0 : - messages=[] - for data in chatbot: - temp1 = {} - temp1["role"] = "user" - temp1["content"] = data[0] - temp2 = {} - temp2["role"] = "assistant" - temp2["content"] = data[1] - messages.append(temp1) - messages.append(temp2) - temp3 = {} - temp3["role"] = "user" - temp3["content"] = inputs - messages.append(temp3) - #messages - payload = { - "model": "gpt-3.5-turbo", - "messages": messages, #[{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - chat_counter+=1 - - history.append(inputs) - print(f"payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - #response = requests.post(API_URL, headers=headers, json=payload, stream=True) - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - #counter+=1 - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0: - # break - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - 
history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter # resembles {chatbot: chat, state: history} - - -def reset_textbox(): - return gr.update(value='') - -title = """

        🔥ChatGPT API 🚀Streaming🚀

        """ -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: -``` -User: -Assistant: -User: -Assistant: -... -``` -In this app, you can explore the outputs of a gpt-3.5-turbo LLM. -""" - -with gr.Blocks(css = """#col_container {width: 1000px; margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""") as demo: - gr.HTML(title) - gr.HTML('''
        Duplicate the Space and run securely with your OpenAI API Key
        ''') - with gr.Column(elem_id = "col_container"): - openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here") - chatbot = gr.Chatbot(elem_id='chatbot') #c - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t - state = gr.State([]) #s - b1 = gr.Button() - - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - inputs.submit( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],) - b1.click( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],) - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #gr.Markdown(description) - demo.queue().launch(debug=True) diff --git a/spaces/Junity/TokaiTeio-SVC/inference/__init__.py b/spaces/Junity/TokaiTeio-SVC/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Junity/TokaiTeio-SVC/modules/attentions.py b/spaces/Junity/TokaiTeio-SVC/modules/attentions.py deleted file mode 100644 index f9c11ca4a3acb86bf1abc04d9dcfa82a4ed4061f..0000000000000000000000000000000000000000 --- a/spaces/Junity/TokaiTeio-SVC/modules/attentions.py +++ /dev/null @@ -1,349 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import modules.commons as commons -import modules.modules as modules -from modules.modules import LayerNorm - - -class FFT(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0., - proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, - proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, 
self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - x = x * x_mask - return x - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - 
return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
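-                # block_length limits attention to a local band: the triu/tril pair below keeps only positions within block_length steps, and everything outside the band is filled with -1e4 before the softmax.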
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Kabriske/Multilingual_Video_Subtitler/translator.py b/spaces/Kabriske/Multilingual_Video_Subtitler/translator.py deleted file mode 100644 index 6c6391611af72a00d65a12c25c6d1fb8612b3d69..0000000000000000000000000000000000000000 --- a/spaces/Kabriske/Multilingual_Video_Subtitler/translator.py +++ /dev/null @@ -1,31 +0,0 @@ -import os - -from googletrans import Translator - -from utils import log - - -class MyTranslator: - def __init__(self): - self.translator = Translator() - - def translate(self, text_file_path, source_language, target_language): - # Open the input file and read its contents - with open(text_file_path, 'r') as f: - input_text = f.read() - - filename, ext = os.path.splitext(text_file_path) - output_file_path = f"{filename}_translated{ext}" - log(f"Translating text to {target_language} and saving to {output_file_path}") - # Translate the text to the desired language - output_text = self.translator.translate(input_text, dest=target_language).text - # Write the translated text to the output file - with open(output_file_path, 'w') as f: - f.write(output_text) - - return output_file_path - - -if __name__ == '__main__': - translator = MyTranslator() - translation_path = translator.translate('sample/iPhone_14_Pro.vtt', 'en', 'es') diff --git a/spaces/Kangarroar/ApplioRVC-Inference/i18n/locale_diff.py b/spaces/Kangarroar/ApplioRVC-Inference/i18n/locale_diff.py deleted file mode 100644 index 387ddfe1b16c2f9f32b6b9682b61353837b06bd8..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/i18n/locale_diff.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import os -from collections import OrderedDict - -# Define the standard file name -standard_file = "en_US.json" - -# Find all JSON files in the directory -dir_path = "./" -languages = [ - f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file -] - -# Load 
the standard file -with open(standard_file, "r", encoding="utf-8") as f: - standard_data = json.load(f, object_pairs_hook=OrderedDict) - -# Loop through each language file -for lang_file in languages: - # Load the language file - with open(lang_file, "r", encoding="utf-8") as f: - lang_data = json.load(f, object_pairs_hook=OrderedDict) - - # Find the difference between the language file and the standard file - diff = set(standard_data.keys()) - set(lang_data.keys()) - - miss = set(lang_data.keys()) - set(standard_data.keys()) - - # Add any missing keys to the language file - for key in diff: - lang_data[key] = key - - # Del any extra keys to the language file - for key in miss: - del lang_data[key] - - # Sort the keys of the language file to match the order of the standard file - lang_data = OrderedDict( - sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) - ) - - # Save the updated language file - with open(lang_file, "w", encoding="utf-8") as f: - json.dump(lang_data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_reds.py b/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_reds.py deleted file mode 100644 index 0390aae6faecd912a66bc84f868800ad6e0cfbc5..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_reds.py +++ /dev/null @@ -1,111 +0,0 @@ -# -------------------------------------------------------- -# InstructDiffusion -# Based on instruct-pix2pix (https://github.com/timothybrooks/instruct-pix2pix) -# Modified by Chen Li (edward82@stu.xjtu.edu.cn) -# -------------------------------------------------------- - -import os -import numpy as np -from torch.utils.data import Dataset -import torch -from PIL import Image -import torchvision.transforms.functional as TF -from pdb import set_trace as stx -import random -import cv2 -from PIL import Image -import torchvision - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in ['jpeg', 'JPEG', 'jpg', 'png', 'JPG', 'PNG', 'gif']) - - -class REDS(Dataset): - def __init__(self, path, split="train", size=256, interpolation="pil_lanczos", - flip_prob=0.5, sample_weight=1.0, instruct=False): - super(REDS, self).__init__() - - inp_files = sorted(os.listdir(os.path.join(path, split, 'blur'))) - tar_files = sorted(os.listdir(os.path.join(path, split, 'sharp'))) - - if split == "train": - self.inp_filenames = [os.path.join(path, split, 'blur', d, x) for d in inp_files for x in sorted(os.listdir(os.path.join(path, split, 'blur', d))) if is_image_file(x)] - self.tar_filenames = [os.path.join(path, split, 'sharp', d, x) for d in tar_files for x in sorted(os.listdir(os.path.join(path, split, 'sharp', d))) if is_image_file(x)] - else: - self.inp_filenames = [os.path.join(path, split, 'blur', x) for x in inp_files if is_image_file(x)] - self.tar_filenames = [os.path.join(path, split, 'sharp', x) for x in tar_files if is_image_file(x)] - - self.size = size - self.flip_prob = flip_prob - self.sample_weight = sample_weight - self.instruct = instruct - assert len(self.inp_filenames) == len(self.tar_filenames) - self.sizex = len(self.tar_filenames) # get the size of target - - self.interpolation = { - "cv_nearest": cv2.INTER_NEAREST, - "cv_bilinear": cv2.INTER_LINEAR, - "cv_bicubic": cv2.INTER_CUBIC, - "cv_area": cv2.INTER_AREA, - "cv_lanczos": cv2.INTER_LANCZOS4, - "pil_nearest": Image.NEAREST, - "pil_bilinear": Image.BILINEAR, - "pil_bicubic": Image.BICUBIC, - 
"pil_box": Image.BOX, - "pil_hamming": Image.HAMMING, - "pil_lanczos": Image.LANCZOS, - }[interpolation] - - prompt_path='dataset/prompt/prompt_deblur.txt' - self.prompt_list=[] - with open(prompt_path) as f: - line=f.readline() - while line: - line=line.strip('\n') - self.prompt_list.append(line) - line=f.readline() - - print(f"REDS has {len(self)} samples!!") - - def __len__(self): - return int(self.sizex * self.sample_weight) - - def __getitem__(self, index): - if self.sample_weight >= 1: - index_ = index % self.sizex - else: - index_ = int(index / self.sample_weight) + random.randint(0, int(1 / self.sample_weight) - 1) - - inp_path = self.inp_filenames[index_] - tar_path = self.tar_filenames[index_] - - inp_img = Image.open(inp_path) - tar_img = Image.open(tar_path) - - width, height = inp_img.size - tar_width, tar_height = tar_img.size - assert tar_width == width and tar_height == height, "Input and target image mismatch" - aspect_ratio = float(width) / float(height) - if width < height: - new_width = self.size - new_height = int(self.size / aspect_ratio) - else: - new_height = self.size - new_width = int(self.size * aspect_ratio) - inp_img = inp_img.resize((new_width, new_height), self.interpolation) - tar_img = tar_img.resize((new_width, new_height), self.interpolation) - - inp_img = np.array(inp_img).astype(np.float32).transpose(2, 0, 1) - inp_img_tensor = torch.tensor((inp_img / 127.5 - 1.0).astype(np.float32)) - tar_img = np.array(tar_img).astype(np.float32).transpose(2, 0, 1) - tar_img_tensor = torch.tensor((tar_img / 127.5 - 1.0).astype(np.float32)) - crop = torchvision.transforms.RandomCrop(self.size) - flip = torchvision.transforms.RandomHorizontalFlip(float(self.flip_prob)) - image_0, image_1 = flip(crop(torch.cat((inp_img_tensor, tar_img_tensor)))).chunk(2) - - prompt = random.choice(self.prompt_list) - if self.instruct: - prompt = "Image Deblurring: " + prompt - - return dict(edited=image_1, edit=dict(c_concat=image_0, c_crossattn=prompt)) \ No newline at end of file diff --git a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/prepare_car.py b/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/prepare_car.py deleted file mode 100644 index fcf3818fedd64db65ed08f114c103824f01b6e20..0000000000000000000000000000000000000000 --- a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/prepare_car.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding:utf8 -*- - -import os -from PIL import Image -from shutil import copyfile, copytree, rmtree, move - -PATH_DATASET = './car-dataset' # 需要处理的文件夹 -PATH_NEW_DATASET = './car-reid-dataset' # 处理后的文件夹 -PATH_ALL_IMAGES = PATH_NEW_DATASET + '/all_images' -PATH_TRAIN = PATH_NEW_DATASET + '/train' -PATH_TEST = PATH_NEW_DATASET + '/test' - -# 定义创建目录函数 -def mymkdir(path): - path = path.strip() # 去除首位空格 - path = path.rstrip("\\") # 去除尾部 \ 符号 - isExists = os.path.exists(path) # 判断路径是否存在 - if not isExists: - os.makedirs(path) # 如果不存在则创建目录 - print(path + ' 创建成功') - return True - else: - # 如果目录存在则不创建,并提示目录已存在 - print(path + ' 目录已存在') - return False - -class BatchRename(): - ''' - 批量重命名文件夹中的图片文件 - ''' - - def __init__(self): - self.path = PATH_DATASET # 表示需要命名处理的文件夹 - - # 修改图像尺寸 - def resize(self): - for aroot, dirs, files in os.walk(self.path): - # aroot是self.path目录下的所有子目录(含self.path),dir是self.path下所有的文件夹的列表. 
- filelist = files # 注意此处仅是该路径下的其中一个列表 - # print('list', list) - - # filelist = os.listdir(self.path) #获取文件路径 - total_num = len(filelist) # 获取文件长度(个数) - - for item in filelist: - if item.endswith('.jpg'): # 初始的图片的格式为jpg格式的(或者源文件是png格式及其他格式,后面的转换格式就可以调整为自己需要的格式即可) - src = os.path.join(os.path.abspath(aroot), item) - - # 修改图片尺寸到128宽*256高 - im = Image.open(src) - out = im.resize((128, 256), Image.ANTIALIAS) # resize image with high-quality - out.save(src) # 原路径保存 - - def rename(self): - - for aroot, dirs, files in os.walk(self.path): - # aroot是self.path目录下的所有子目录(含self.path),dir是self.path下所有的文件夹的列表. - filelist = files # 注意此处仅是该路径下的其中一个列表 - # print('list', list) - - # filelist = os.listdir(self.path) #获取文件路径 - total_num = len(filelist) # 获取文件长度(个数) - - i = 1 # 表示文件的命名是从1开始的 - for item in filelist: - if item.endswith('.jpg'): # 初始的图片的格式为jpg格式的(或者源文件是png格式及其他格式,后面的转换格式就可以调整为自己需要的格式即可) - src = os.path.join(os.path.abspath(aroot), item) - - # 根据图片名创建图片目录 - dirname = str(item.split('_')[0]) - # 为相同车辆创建目录 - #new_dir = os.path.join(self.path, '..', 'bbox_all', dirname) - new_dir = os.path.join(PATH_ALL_IMAGES, dirname) - if not os.path.isdir(new_dir): - mymkdir(new_dir) - - # 获得new_dir中的图片数 - num_pic = len(os.listdir(new_dir)) - - dst = os.path.join(os.path.abspath(new_dir), - dirname + 'C1T0001F' + str(num_pic + 1) + '.jpg') - # 处理后的格式也为jpg格式的,当然这里可以改成png格式 C1T0001F见mars.py filenames 相机ID,跟踪指数 - # dst = os.path.join(os.path.abspath(self.path), '0000' + format(str(i), '0>3s') + '.jpg') 这种情况下的命名格式为0000000.jpg形式,可以自主定义想要的格式 - try: - copyfile(src, dst) #os.rename(src, dst) - print ('converting %s to %s ...' % (src, dst)) - i = i + 1 - except: - continue - print ('total %d to rename & converted %d jpgs' % (total_num, i)) - - def split(self): - #--------------------------------------- - #train_test - images_path = PATH_ALL_IMAGES - train_save_path = PATH_TRAIN - test_save_path = PATH_TEST - if not os.path.isdir(train_save_path): - os.mkdir(train_save_path) - os.mkdir(test_save_path) - - for _, dirs, _ in os.walk(images_path, topdown=True): - for i, dir in enumerate(dirs): - for root, _, files in os.walk(images_path + '/' + dir, topdown=True): - for j, file in enumerate(files): - if(j==0): # test dataset;每个车辆的第一幅图片 - print("序号:%s 文件夹: %s 图片:%s 归为测试集" % (i + 1, root, file)) - src_path = root + '/' + file - dst_dir = test_save_path + '/' + dir - if not os.path.isdir(dst_dir): - os.mkdir(dst_dir) - dst_path = dst_dir + '/' + file - move(src_path, dst_path) - else: - src_path = root + '/' + file - dst_dir = train_save_path + '/' + dir - if not os.path.isdir(dst_dir): - os.mkdir(dst_dir) - dst_path = dst_dir + '/' + file - move(src_path, dst_path) - rmtree(PATH_ALL_IMAGES) - -if __name__ == '__main__': - demo = BatchRename() - demo.resize() - demo.rename() - demo.split() - - diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/api/fastapi_utils.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/api/fastapi_utils.py deleted file mode 100644 index adf582a7c33c2d68ed32fb8b3382fdeb388db0d0..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/api/fastapi_utils.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Collection of utilities for FastAPI apps.""" - -import inspect -from typing import Any, Type - -from fastapi import FastAPI, Form -from pydantic import BaseModel - - -def as_form(cls: Type[BaseModel]) -> Any: - """Adds an as_form class method to decorated models. 
- - The as_form class method can be used with FastAPI endpoints - """ - new_params = [ - inspect.Parameter( - field.alias, - inspect.Parameter.POSITIONAL_ONLY, - default=(Form(field.default) if not field.required else Form(...)), - ) - for field in cls.__fields__.values() - ] - - async def _as_form(**data): # type: ignore - return cls(**data) - - sig = inspect.signature(_as_form) - sig = sig.replace(parameters=new_params) - _as_form.__signature__ = sig # type: ignore - setattr(cls, "as_form", _as_form) - return cls - - -def patch_fastapi(app: FastAPI) -> None: - """Patch function to allow relative url resolution. - - This patch is required to make fastapi fully functional with a relative url path. - This code snippet can be copy-pasted to any Fastapi application. - """ - from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html - from starlette.requests import Request - from starlette.responses import HTMLResponse - - async def redoc_ui_html(req: Request) -> HTMLResponse: - assert app.openapi_url is not None - redoc_ui = get_redoc_html( - openapi_url="./" + app.openapi_url.lstrip("/"), - title=app.title + " - Redoc UI", - ) - - return HTMLResponse(redoc_ui.body.decode("utf-8")) - - async def swagger_ui_html(req: Request) -> HTMLResponse: - assert app.openapi_url is not None - swagger_ui = get_swagger_ui_html( - openapi_url="./" + app.openapi_url.lstrip("/"), - title=app.title + " - Swagger UI", - oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url, - ) - - # insert request interceptor to have all request run on relativ path - request_interceptor = ( - "requestInterceptor: (e) => {" - "\n\t\t\tvar url = window.location.origin + window.location.pathname" - '\n\t\t\turl = url.substring( 0, url.lastIndexOf( "/" ) + 1);' - "\n\t\t\turl = e.url.replace(/http(s)?:\/\/[^/]*\//i, url);" # noqa: W605 - "\n\t\t\te.contextUrl = url" - "\n\t\t\te.url = url" - "\n\t\t\treturn e;}" - ) - - return HTMLResponse( - swagger_ui.body.decode("utf-8").replace( - "dom_id: '#swagger-ui',", - "dom_id: '#swagger-ui',\n\t\t" + request_interceptor + ",", - ) - ) - - # remove old docs route and add our patched route - routes_new = [] - for app_route in app.routes: - if app_route.path == "/docs": # type: ignore - continue - - if app_route.path == "/redoc": # type: ignore - continue - - routes_new.append(app_route) - - app.router.routes = routes_new - - assert app.docs_url is not None - app.add_route(app.docs_url, swagger_ui_html, include_in_schema=False) - assert app.redoc_url is not None - app.add_route(app.redoc_url, redoc_ui_html, include_in_schema=False) - - # Make graphql realtive - from starlette import graphql - - graphql.GRAPHIQL = graphql.GRAPHIQL.replace( - "({{REQUEST_PATH}}", '("." 
+ {{REQUEST_PATH}}' - ) diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/models/fatchord_version.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/models/fatchord_version.py deleted file mode 100644 index 6413a921651971b4859ed7de8b3a676cd6595d6b..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/models/fatchord_version.py +++ /dev/null @@ -1,434 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from vocoder.distribution import sample_from_discretized_mix_logistic -from vocoder.display import * -from vocoder.wavernn.audio import * - - -class ResBlock(nn.Module): - def __init__(self, dims): - super().__init__() - self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) - self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) - self.batch_norm1 = nn.BatchNorm1d(dims) - self.batch_norm2 = nn.BatchNorm1d(dims) - - def forward(self, x): - residual = x - x = self.conv1(x) - x = self.batch_norm1(x) - x = F.relu(x) - x = self.conv2(x) - x = self.batch_norm2(x) - return x + residual - - -class MelResNet(nn.Module): - def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad): - super().__init__() - k_size = pad * 2 + 1 - self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False) - self.batch_norm = nn.BatchNorm1d(compute_dims) - self.layers = nn.ModuleList() - for i in range(res_blocks): - self.layers.append(ResBlock(compute_dims)) - self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1) - - def forward(self, x): - x = self.conv_in(x) - x = self.batch_norm(x) - x = F.relu(x) - for f in self.layers: x = f(x) - x = self.conv_out(x) - return x - - -class Stretch2d(nn.Module): - def __init__(self, x_scale, y_scale): - super().__init__() - self.x_scale = x_scale - self.y_scale = y_scale - - def forward(self, x): - b, c, h, w = x.size() - x = x.unsqueeze(-1).unsqueeze(3) - x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale) - return x.view(b, c, h * self.y_scale, w * self.x_scale) - - -class UpsampleNetwork(nn.Module): - def __init__(self, feat_dims, upsample_scales, compute_dims, - res_blocks, res_out_dims, pad): - super().__init__() - total_scale = np.cumproduct(upsample_scales)[-1] - self.indent = pad * total_scale - self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad) - self.resnet_stretch = Stretch2d(total_scale, 1) - self.up_layers = nn.ModuleList() - for scale in upsample_scales: - k_size = (1, scale * 2 + 1) - padding = (0, scale) - stretch = Stretch2d(scale, 1) - conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False) - conv.weight.data.fill_(1. 
/ k_size[1]) - self.up_layers.append(stretch) - self.up_layers.append(conv) - - def forward(self, m): - aux = self.resnet(m).unsqueeze(1) - aux = self.resnet_stretch(aux) - aux = aux.squeeze(1) - m = m.unsqueeze(1) - for f in self.up_layers: m = f(m) - m = m.squeeze(1)[:, :, self.indent:-self.indent] - return m.transpose(1, 2), aux.transpose(1, 2) - - -class WaveRNN(nn.Module): - def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors, - feat_dims, compute_dims, res_out_dims, res_blocks, - hop_length, sample_rate, mode='RAW'): - super().__init__() - self.mode = mode - self.pad = pad - if self.mode == 'RAW' : - self.n_classes = 2 ** bits - elif self.mode == 'MOL' : - self.n_classes = 30 - else : - RuntimeError("Unknown model mode value - ", self.mode) - - self.rnn_dims = rnn_dims - self.aux_dims = res_out_dims // 4 - self.hop_length = hop_length - self.sample_rate = sample_rate - - self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad) - self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims) - self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True) - self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True) - self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims) - self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims) - self.fc3 = nn.Linear(fc_dims, self.n_classes) - - self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False) - self.num_params() - - def forward(self, x, mels): - self.step += 1 - bsize = x.size(0) - if torch.cuda.is_available(): - h1 = torch.zeros(1, bsize, self.rnn_dims).cuda() - h2 = torch.zeros(1, bsize, self.rnn_dims).cuda() - else: - h1 = torch.zeros(1, bsize, self.rnn_dims).cpu() - h2 = torch.zeros(1, bsize, self.rnn_dims).cpu() - mels, aux = self.upsample(mels) - - aux_idx = [self.aux_dims * i for i in range(5)] - a1 = aux[:, :, aux_idx[0]:aux_idx[1]] - a2 = aux[:, :, aux_idx[1]:aux_idx[2]] - a3 = aux[:, :, aux_idx[2]:aux_idx[3]] - a4 = aux[:, :, aux_idx[3]:aux_idx[4]] - - x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2) - x = self.I(x) - res = x - x, _ = self.rnn1(x, h1) - - x = x + res - res = x - x = torch.cat([x, a2], dim=2) - x, _ = self.rnn2(x, h2) - - x = x + res - x = torch.cat([x, a3], dim=2) - x = F.relu(self.fc1(x)) - - x = torch.cat([x, a4], dim=2) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - def generate(self, mels, batched, target, overlap, mu_law, progress_callback=None): - mu_law = mu_law if self.mode == 'RAW' else False - progress_callback = progress_callback or self.gen_display - - self.eval() - output = [] - start = time.time() - rnn1 = self.get_gru_cell(self.rnn1) - rnn2 = self.get_gru_cell(self.rnn2) - - with torch.no_grad(): - if torch.cuda.is_available(): - mels = mels.cuda() - else: - mels = mels.cpu() - wave_len = (mels.size(-1) - 1) * self.hop_length - mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both') - mels, aux = self.upsample(mels.transpose(1, 2)) - - if batched: - mels = self.fold_with_overlap(mels, target, overlap) - aux = self.fold_with_overlap(aux, target, overlap) - - b_size, seq_len, _ = mels.size() - - if torch.cuda.is_available(): - h1 = torch.zeros(b_size, self.rnn_dims).cuda() - h2 = torch.zeros(b_size, self.rnn_dims).cuda() - x = torch.zeros(b_size, 1).cuda() - else: - h1 = torch.zeros(b_size, self.rnn_dims).cpu() - h2 = torch.zeros(b_size, self.rnn_dims).cpu() - x = torch.zeros(b_size, 1).cpu() - - d = self.aux_dims - aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)] - - for i in range(seq_len): 
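-                # Autoregressive generation: one output sample per upsampled conditioning frame, feeding the previous sample x back in together with the mel and auxiliary features.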
- - m_t = mels[:, i, :] - - a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split) - - x = torch.cat([x, m_t, a1_t], dim=1) - x = self.I(x) - h1 = rnn1(x, h1) - - x = x + h1 - inp = torch.cat([x, a2_t], dim=1) - h2 = rnn2(inp, h2) - - x = x + h2 - x = torch.cat([x, a3_t], dim=1) - x = F.relu(self.fc1(x)) - - x = torch.cat([x, a4_t], dim=1) - x = F.relu(self.fc2(x)) - - logits = self.fc3(x) - - if self.mode == 'MOL': - sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2)) - output.append(sample.view(-1)) - if torch.cuda.is_available(): - # x = torch.FloatTensor([[sample]]).cuda() - x = sample.transpose(0, 1).cuda() - else: - x = sample.transpose(0, 1) - - elif self.mode == 'RAW' : - posterior = F.softmax(logits, dim=1) - distrib = torch.distributions.Categorical(posterior) - - sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1. - output.append(sample) - x = sample.unsqueeze(-1) - else: - raise RuntimeError("Unknown model mode value - ", self.mode) - - if i % 100 == 0: - gen_rate = (i + 1) / (time.time() - start) * b_size / 1000 - progress_callback(i, seq_len, b_size, gen_rate) - - output = torch.stack(output).transpose(0, 1) - output = output.cpu().numpy() - output = output.astype(np.float64) - - if batched: - output = self.xfade_and_unfold(output, target, overlap) - else: - output = output[0] - - if mu_law: - output = decode_mu_law(output, self.n_classes, False) - if hp.apply_preemphasis: - output = de_emphasis(output) - - # Fade-out at the end to avoid signal cutting out suddenly - fade_out = np.linspace(1, 0, 20 * self.hop_length) - output = output[:wave_len] - output[-20 * self.hop_length:] *= fade_out - - self.train() - - return output - - - def gen_display(self, i, seq_len, b_size, gen_rate): - pbar = progbar(i, seq_len) - msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | ' - stream(msg) - - def get_gru_cell(self, gru): - gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size) - gru_cell.weight_hh.data = gru.weight_hh_l0.data - gru_cell.weight_ih.data = gru.weight_ih_l0.data - gru_cell.bias_hh.data = gru.bias_hh_l0.data - gru_cell.bias_ih.data = gru.bias_ih_l0.data - return gru_cell - - def pad_tensor(self, x, pad, side='both'): - # NB - this is just a quick method i need right now - # i.e., it won't generalise to other shapes/dims - b, t, c = x.size() - total = t + 2 * pad if side == 'both' else t + pad - if torch.cuda.is_available(): - padded = torch.zeros(b, total, c).cuda() - else: - padded = torch.zeros(b, total, c).cpu() - if side == 'before' or side == 'both': - padded[:, pad:pad + t, :] = x - elif side == 'after': - padded[:, :t, :] = x - return padded - - def fold_with_overlap(self, x, target, overlap): - - ''' Fold the tensor with overlap for quick batched inference. - Overlap will be used for crossfading in xfade_and_unfold() - - Args: - x (tensor) : Upsampled conditioning features. - shape=(1, timesteps, features) - target (int) : Target timesteps for each index of batch - overlap (int) : Timesteps for both xfade and rnn warmup - - Return: - (tensor) : shape=(num_folds, target + 2 * overlap, features) - - Details: - x = [[h1, h2, ... 
hn]] - - Where each h is a vector of conditioning features - - Eg: target=2, overlap=1 with x.size(1)=10 - - folded = [[h1, h2, h3, h4], - [h4, h5, h6, h7], - [h7, h8, h9, h10]] - ''' - - _, total_len, features = x.size() - - # Calculate variables needed - num_folds = (total_len - overlap) // (target + overlap) - extended_len = num_folds * (overlap + target) + overlap - remaining = total_len - extended_len - - # Pad if some time steps poking out - if remaining != 0: - num_folds += 1 - padding = target + 2 * overlap - remaining - x = self.pad_tensor(x, padding, side='after') - - if torch.cuda.is_available(): - folded = torch.zeros(num_folds, target + 2 * overlap, features).cuda() - else: - folded = torch.zeros(num_folds, target + 2 * overlap, features).cpu() - - # Get the values for the folded tensor - for i in range(num_folds): - start = i * (target + overlap) - end = start + target + 2 * overlap - folded[i] = x[:, start:end, :] - - return folded - - def xfade_and_unfold(self, y, target, overlap): - - ''' Applies a crossfade and unfolds into a 1d array. - - Args: - y (ndarry) : Batched sequences of audio samples - shape=(num_folds, target + 2 * overlap) - dtype=np.float64 - overlap (int) : Timesteps for both xfade and rnn warmup - - Return: - (ndarry) : audio samples in a 1d array - shape=(total_len) - dtype=np.float64 - - Details: - y = [[seq1], - [seq2], - [seq3]] - - Apply a gain envelope at both ends of the sequences - - y = [[seq1_in, seq1_target, seq1_out], - [seq2_in, seq2_target, seq2_out], - [seq3_in, seq3_target, seq3_out]] - - Stagger and add up the groups of samples: - - [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...] - - ''' - - num_folds, length = y.shape - target = length - 2 * overlap - total_len = num_folds * (target + overlap) + overlap - - # Need some silence for the rnn warmup - silence_len = overlap // 2 - fade_len = overlap - silence_len - silence = np.zeros((silence_len), dtype=np.float64) - - # Equal power crossfade - t = np.linspace(-1, 1, fade_len, dtype=np.float64) - fade_in = np.sqrt(0.5 * (1 + t)) - fade_out = np.sqrt(0.5 * (1 - t)) - - # Concat the silence to the fades - fade_in = np.concatenate([silence, fade_in]) - fade_out = np.concatenate([fade_out, silence]) - - # Apply the gain to the overlap samples - y[:, :overlap] *= fade_in - y[:, -overlap:] *= fade_out - - unfolded = np.zeros((total_len), dtype=np.float64) - - # Loop to add up all the samples - for i in range(num_folds): - start = i * (target + overlap) - end = start + target + 2 * overlap - unfolded[start:end] += y[i] - - return unfolded - - def get_step(self) : - return self.step.data.item() - - def checkpoint(self, model_dir, optimizer) : - k_steps = self.get_step() // 1000 - self.save(model_dir.joinpath("checkpoint_%dk_steps.pt" % k_steps), optimizer) - - def log(self, path, msg) : - with open(path, 'a') as f: - print(msg, file=f) - - def load(self, path, optimizer) : - checkpoint = torch.load(path) - if "optimizer_state" in checkpoint: - self.load_state_dict(checkpoint["model_state"]) - optimizer.load_state_dict(checkpoint["optimizer_state"]) - else: - # Backwards compatibility - self.load_state_dict(checkpoint) - - def save(self, path, optimizer) : - torch.save({ - "model_state": self.state_dict(), - "optimizer_state": optimizer.state_dict(), - }, path) - - def num_params(self, print_out=True): - parameters = filter(lambda p: p.requires_grad, self.parameters()) - parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 - if print_out : - print('Trainable Parameters: 
%.3fM' % parameters) diff --git a/spaces/KevinQHLin/UniVTG/main/inference_mr.py b/spaces/KevinQHLin/UniVTG/main/inference_mr.py deleted file mode 100644 index 4aea2de137ac46fa91f737d49998a00165423bce..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/main/inference_mr.py +++ /dev/null @@ -1,273 +0,0 @@ -import pdb -import pprint -from tqdm import tqdm, trange -import numpy as np -import os -from collections import OrderedDict, defaultdict -from utils.basic_utils import AverageMeter - -import torch -import torch.nn.functional as F -import torch.backends.cudnn as cudnn -from torch.utils.data import DataLoader - -from main.config import TestOptions, setup_model -from main.dataset import DatasetMR, start_end_collate_mr, prepare_batch_inputs_mr -from eval.eval import eval_submission -from eval.postprocessing import PostProcessorDETR -from utils.basic_utils import save_jsonl, save_json -from utils.temporal_nms import temporal_nms -from utils.span_utils import span_cxw_to_xx - -import logging -import importlib - -logger = logging.getLogger(__name__) -logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=logging.INFO) - - -def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): - mr_res_after_nms = [] - for e in mr_res: - e["pred_relevant_windows"] = temporal_nms( - e["pred_relevant_windows"][:max_before_nms], - nms_thd=nms_thd, - max_after_nms=max_after_nms - ) - mr_res_after_nms.append(e) - return mr_res_after_nms - - -def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): - # IOU_THDS = (0.5, 0.7) - logger.info("Saving/Evaluating before nms results") - submission_path = os.path.join(opt.results_dir, save_submission_filename) - save_jsonl(submission, submission_path) - - if opt.eval_split_name in ["val", "test"]: # since test_public has no GT - metrics = eval_submission( - submission, gt_data, - verbose=opt.debug, match_number=not opt.debug, - ) - save_metrics_path = submission_path.replace(".jsonl", "_metrics.json") - save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False) - latest_file_paths = [submission_path, save_metrics_path] - else: - metrics = None - latest_file_paths = [submission_path, ] - - if opt.nms_thd != -1: - logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd)) - submission_after_nms = post_processing_mr_nms( - submission, nms_thd=opt.nms_thd, - max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms - ) - - logger.info("Saving/Evaluating nms results") - submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd)) - save_jsonl(submission_after_nms, submission_nms_path) - if opt.eval_split_name == "val": - metrics_nms = eval_submission( - submission_after_nms, gt_data, - verbose=opt.debug, match_number=not opt.debug - ) - save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json") - save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False) - latest_file_paths += [submission_nms_path, save_metrics_nms_path] - else: - metrics_nms = None - latest_file_paths = [submission_nms_path, ] - else: - metrics_nms = None - return metrics, metrics_nms, latest_file_paths - - -@torch.no_grad() -def compute_mr_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): - model.eval() - if criterion: - assert eval_loader.dataset.load_labels - criterion.eval() - - loss_meters = defaultdict(AverageMeter) - write_tb 
= tb_writer is not None and epoch_i is not None - - mr_res = [] - for batch in tqdm(eval_loader, desc="compute st ed scores"): - query_meta = batch[0] - model_inputs, targets = prepare_batch_inputs_mr(batch[1], opt.device, non_blocking=opt.pin_memory) - outputs = model(**model_inputs) - prob = outputs["pred_logits"] # the last channel may be 1 or 2. - # if opt.eval_mode == 'v1': - # prob = prob * outputs["saliency_scores"].unsqueeze(-1) # v1 - # if opt.eval_mode == 'v2': - # prob = F.softmax(prob, dim=1) * outputs["saliency_scores"].unsqueeze(-1) # v2 - # if opt.eval_mode == 'v3': - # prob = outputs["saliency_scores"].unsqueeze(-1) - if outputs["pred_logits"].shape[-1] > 1: - prob = F.softmax(outputs["pred_logits"], -1) # (batch_size, #queries, #classes=2) - if opt.span_loss_type == "l1": - scores = prob[..., 0] # * (batch_size, #queries) foreground label is 0, we directly take it - pred_spans = outputs["pred_spans"] # (bsz, #queries, 2) - - if opt.model_id not in ['moment_detr']: # dense regression. - start_spans = targets['timestamp'] - pred_spans = start_spans + pred_spans - mask = targets['timestamp_mask'].bool() - scores[~mask] = 0 - # if opt.eval_mode == 'v4': - # _mask = targets['timestamp_window'].bool() - # scores[~_mask] = 0 - - if opt.eval_mode == 'add': - # pdb.set_trace() - _saliency_scores = outputs["saliency_scores"].half() + prob.squeeze(-1) - else: - _saliency_scores = outputs["saliency_scores"].half() # (bsz, L) - - if opt.eval_mode == 'add_mr': - prob = outputs["saliency_scores"].half().unsqueeze(-1) + prob - - saliency_scores = [] - valid_vid_lengths = model_inputs["src_vid_mask"].sum(1).cpu().tolist() - for j in range(len(valid_vid_lengths)): - saliency_scores.append(_saliency_scores[j, :int(valid_vid_lengths[j])].tolist()) - else: - bsz, n_queries = outputs["pred_spans"].shape[:2] # # (bsz, #queries, max_v_l *2) - pred_spans_logits = outputs["pred_spans"].view(bsz, n_queries, 2, opt.max_v_l) - # TODO use more advanced decoding method with st_ed product - pred_span_scores, pred_spans = F.softmax(pred_spans_logits, dim=-1).max(-1) # 2 * (bsz, #queries, 2) - scores = torch.prod(pred_span_scores, 2) # (bsz, #queries) - pred_spans[:, 1] += 1 - pred_spans *= opt.clip_length - - # compose predictions - for idx, (meta, spans, score) in enumerate(zip(query_meta, pred_spans.cpu(), scores.cpu())): - if opt.span_loss_type == "l1": - if opt.model_id in ['moment_detr']: - spans = span_cxw_to_xx(spans) * meta["duration"] - else: - spans = spans * meta["duration"] - spans = torch.clamp(spans, 0, meta["duration"]) # added by Kevin, since window cannot be longer than video duration. 
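# At this point each predicted span is an absolute (start, end) pair in seconds,
# clamped to the video duration. For the moment_detr branch above, span_cxw_to_xx is
# assumed to be the usual (center, width) -> (start, end) conversion; a minimal
# sketch of such a helper (an assumption based on its name and usage here, not code
# taken from this repository) would be:
#
#   def span_cxw_to_xx(cxw):  # cxw: (..., 2) tensor of normalized (center, width)
#       x1 = cxw[..., 0] - 0.5 * cxw[..., 1]  # start = center - width / 2
#       x2 = cxw[..., 0] + 0.5 * cxw[..., 1]  # end   = center + width / 2
#       return torch.stack([x1, x2], dim=-1)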
- - # (#queries, 3), [st(float), ed(float), score(float)] - cur_ranked_preds = torch.cat([spans, score[:, None]], dim=1).tolist() - if not opt.no_sort_results: - cur_ranked_preds = sorted(cur_ranked_preds, key=lambda x: x[2], reverse=True) - cur_ranked_preds = [[float(f"{e:.4f}") for e in row] for row in cur_ranked_preds] - cur_query_pred = dict( - qid=meta["qid"], - query=meta["query"], - vid=meta["vid"], - pred_relevant_windows=cur_ranked_preds, - pred_saliency_scores=saliency_scores[idx] - ) - mr_res.append(cur_query_pred) - - if criterion: - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) - loss_dict["loss_overall"] = float(losses) # for logging only - for k, v in loss_dict.items(): - loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) - - if opt.debug: - break - - if write_tb and criterion: - for k, v in loss_meters.items(): - tb_writer.add_scalar("Eval/{}".format(k), v.avg, epoch_i + 1) - - post_processor = PostProcessorDETR( - clip_length=opt.clip_length, min_ts_val=0, max_ts_val=150, - min_w_l=2, max_w_l=150, move_window_method="left", - # process_func_names=("clip_ts", "round_multiple") - process_func_names=["round_multiple"] # have added `clamp' op on line 147, thus we do not need `clip_ts' again; - ) - # todo: are we need round_multiple? - if opt.round_multiple > 0: - mr_res = post_processor(mr_res) - return mr_res, loss_meters - -def get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer): - """compute and save query and video proposal embeddings""" - eval_res, eval_loss_meters = compute_mr_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # list(dict) - return eval_res, eval_loss_meters - -def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None): - logger.info("Generate submissions") - model.eval() - if criterion is not None and eval_dataset.load_labels: - criterion.eval() - else: - criterion = None - - eval_loader = DataLoader( - eval_dataset, - collate_fn=start_end_collate_mr, - batch_size=opt.eval_bsz, - num_workers=opt.num_workers, - shuffle=False, - pin_memory=opt.pin_memory - ) - - submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer) - if opt.no_sort_results: - save_submission_filename = save_submission_filename.replace(".jsonl", "_unsorted.jsonl") - metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing( - submission, opt, eval_dataset.data, save_submission_filename) - return metrics, metrics_nms, eval_loss_meters, latest_file_paths - -def start_inference(): - logger.info("Setup config, data and model...") - opt = TestOptions().parse() - # pdb.set_trace() - cudnn.benchmark = True - cudnn.deterministic = False - - assert opt.eval_path is not None - eval_dataset = DatasetMR( - dset_name=opt.dset_name, - data_path=opt.eval_path, - v_feat_dirs=opt.v_feat_dirs, - q_feat_dir=opt.t_feat_dir, - v_feat_dim=opt.v_feat_dim, - q_feat_dim=opt.t_feat_dim, - q_feat_type="last_hidden_state", - max_q_l=opt.max_q_l, - max_v_l=opt.max_v_l, - ctx_mode=opt.ctx_mode, - data_ratio=opt.data_ratio, - normalize_v=not opt.no_norm_vfeat, - normalize_t=not opt.no_norm_tfeat, - clip_len=opt.clip_length, - max_windows=opt.max_windows, - load_labels=True, # opt.eval_split_name == "val", - span_loss_type=opt.span_loss_type, - txt_drop_ratio=0, - use_cache=opt.use_cache, - ) - - if opt.lr_warmup > 0: - # 
total_steps = opt.n_epoch * len(train_dataset) // opt.bsz - total_steps = opt.n_epoch - warmup_steps = opt.lr_warmup if opt.lr_warmup > 1 else int(opt.lr_warmup * total_steps) - opt.lr_warmup = [warmup_steps, total_steps] - - model, criterion, _, _ = setup_model(opt) - save_submission_filename = "inference_{}_{}_{}_preds.jsonl".format( - opt.dset_name, opt.eval_split_name, opt.eval_id) - logger.info("Starting inference...") - with torch.no_grad(): - metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ - eval_epoch(model, eval_dataset, opt, save_submission_filename, criterion=criterion) - logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) - if metrics_nms is not None: - logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) - - -if __name__ == '__main__': - start_inference() diff --git a/spaces/Kvikontent/QrGen/README.md b/spaces/Kvikontent/QrGen/README.md deleted file mode 100644 index 41990b64c38e505f0daa826d573e958dd34b9fe5..0000000000000000000000000000000000000000 --- a/spaces/Kvikontent/QrGen/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QrGen -emoji: 👀 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/utils/point_sample.py b/spaces/KyanChen/RSPrompter/mmdet/models/utils/point_sample.py deleted file mode 100644 index 1afc957f3da7d1dc030c21d40311c768c6952ea4..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/utils/point_sample.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.ops import point_sample -from torch import Tensor - - -def get_uncertainty(mask_preds: Tensor, labels: Tensor) -> Tensor: - """Estimate uncertainty based on pred logits. - - We estimate uncertainty as L1 distance between 0.0 and the logits - prediction in 'mask_preds' for the foreground class in `classes`. - - Args: - mask_preds (Tensor): mask predication logits, shape (num_rois, - num_classes, mask_height, mask_width). - - labels (Tensor): Either predicted or ground truth label for - each predicted mask, of length num_rois. - - Returns: - scores (Tensor): Uncertainty scores with the most uncertain - locations having the highest uncertainty score, - shape (num_rois, 1, mask_height, mask_width) - """ - if mask_preds.shape[1] == 1: - gt_class_logits = mask_preds.clone() - else: - inds = torch.arange(mask_preds.shape[0], device=mask_preds.device) - gt_class_logits = mask_preds[inds, labels].unsqueeze(1) - return -torch.abs(gt_class_logits) - - -def get_uncertain_point_coords_with_randomness( - mask_preds: Tensor, labels: Tensor, num_points: int, - oversample_ratio: float, importance_sample_ratio: float) -> Tensor: - """Get ``num_points`` most uncertain points with random points during - train. - - Sample points in [0, 1] x [0, 1] coordinate space based on their - uncertainty. The uncertainties are calculated for each point using - 'get_uncertainty()' function that takes point's logit prediction as - input. - - Args: - mask_preds (Tensor): A tensor of shape (num_rois, num_classes, - mask_height, mask_width) for class-specific or class-agnostic - prediction. - labels (Tensor): The ground truth class for each instance. - num_points (int): The number of points to sample. 
- oversample_ratio (float): Oversampling parameter. - importance_sample_ratio (float): Ratio of points that are sampled - via importnace sampling. - - Returns: - point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) - that contains the coordinates sampled points. - """ - assert oversample_ratio >= 1 - assert 0 <= importance_sample_ratio <= 1 - batch_size = mask_preds.shape[0] - num_sampled = int(num_points * oversample_ratio) - point_coords = torch.rand( - batch_size, num_sampled, 2, device=mask_preds.device) - point_logits = point_sample(mask_preds, point_coords) - # It is crucial to calculate uncertainty based on the sampled - # prediction value for the points. Calculating uncertainties of the - # coarse predictions first and sampling them for points leads to - # incorrect results. To illustrate this: assume uncertainty func( - # logits)=-abs(logits), a sampled point between two coarse - # predictions with -1 and 1 logits has 0 logits, and therefore 0 - # uncertainty value. However, if we calculate uncertainties for the - # coarse predictions first, both will have -1 uncertainty, - # and sampled point will get -1 uncertainty. - point_uncertainties = get_uncertainty(point_logits, labels) - num_uncertain_points = int(importance_sample_ratio * num_points) - num_random_points = num_points - num_uncertain_points - idx = torch.topk( - point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] - shift = num_sampled * torch.arange( - batch_size, dtype=torch.long, device=mask_preds.device) - idx += shift[:, None] - point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( - batch_size, num_uncertain_points, 2) - if num_random_points > 0: - rand_roi_coords = torch.rand( - batch_size, num_random_points, 2, device=mask_preds.device) - point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) - return point_coords diff --git a/spaces/LUOYE-123/QQsign/Dockerfile b/spaces/LUOYE-123/QQsign/Dockerfile deleted file mode 100644 index 5b81d3b20c5bee450cf55a0ace7e5c95d58f72af..0000000000000000000000000000000000000000 --- a/spaces/LUOYE-123/QQsign/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM openjdk:11.0-jdk - -# 设置时区 -ENV TZ Asia/Shanghai - -# 设置工作目录 -WORKDIR /app - -# 复制解压包和txlib到工作目录 -COPY unidbg-fetch-qsign /app -COPY txlib /app/txlib - -# 设置命令 -CMD bash bin/unidbg-fetch-qsign --host=0.0.0.0 --port=7860 --count=$COUNT --library=txlib/$TXLIB_VERSION --android_id=$ANDROID_ID - -# 暴露端口 -EXPOSE 7860 diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/modules/ipex/hijacks.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/modules/ipex/hijacks.py deleted file mode 100644 index 855e5cb9ec4791ed771808dfa52607aae047b840..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/modules/ipex/hijacks.py +++ /dev/null @@ -1,195 +0,0 @@ -import contextlib -import importlib -import torch - -# pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return - -class CondFunc: # pylint: disable=missing-class-docstring - def __new__(cls, orig_func, sub_func, cond_func): - self = super(CondFunc, cls).__new__(cls) - if isinstance(orig_func, str): - func_path = orig_func.split('.') - for i in range(len(func_path)-1, -1, -1): - try: - resolved_obj = importlib.import_module('.'.join(func_path[:i])) - break - except ImportError: - pass - for attr_name in func_path[i:-1]: - resolved_obj = getattr(resolved_obj, attr_name) - orig_func = getattr(resolved_obj, func_path[-1]) - 
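# Monkey-patch the resolved attribute: every subsequent call is routed through this
# CondFunc instance (see __call__ below), which dispatches to sub_func whenever
# cond_func returns True for the given arguments and otherwise falls back to the
# untouched orig_func. The IPEX hijacks further down use this mechanism to redirect
# CUDA-targeted torch calls to XPU devices.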
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) - self.__init__(orig_func, sub_func, cond_func) - return lambda *args, **kwargs: self(*args, **kwargs) - def __init__(self, orig_func, sub_func, cond_func): - self.__orig_func = orig_func - self.__sub_func = sub_func - self.__cond_func = cond_func - def __call__(self, *args, **kwargs): - if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): - return self.__sub_func(self.__orig_func, *args, **kwargs) - else: - return self.__orig_func(*args, **kwargs) - -_utils = torch.utils.data._utils -def _shutdown_workers(self): - if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or torch.utils.data._utils.python_exit_status is None: - return - if hasattr(self, "_shutdown") and not self._shutdown: - self._shutdown = True - try: - if hasattr(self, '_pin_memory_thread'): - self._pin_memory_thread_done_event.set() - self._worker_result_queue.put((None, None)) - self._pin_memory_thread.join() - self._worker_result_queue.cancel_join_thread() - self._worker_result_queue.close() - self._workers_done_event.set() - for worker_id in range(len(self._workers)): - if self._persistent_workers or self._workers_status[worker_id]: - self._mark_worker_as_unavailable(worker_id, shutdown=True) - for w in self._workers: # pylint: disable=invalid-name - w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL) - for q in self._index_queues: # pylint: disable=invalid-name - q.cancel_join_thread() - q.close() - finally: - if self._worker_pids_set: - torch.utils.data._utils.signal_handling._remove_worker_pids(id(self)) - self._worker_pids_set = False - for w in self._workers: # pylint: disable=invalid-name - if w.is_alive(): - w.terminate() - -class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods - def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument - if isinstance(device_ids, list) and len(device_ids) > 1: - print("IPEX backend doesn't support DataParallel on multiple XPU devices") - return module.to("xpu") - -def return_null_context(*args, **kwargs): # pylint: disable=unused-argument - return contextlib.nullcontext() - -def check_device(device): - return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int)) - -def return_xpu(device): - return f"xpu:{device[-1]}" if isinstance(device, str) and ":" in device else f"xpu:{device}" if isinstance(device, int) else torch.device("xpu") if isinstance(device, torch.device) else "xpu" - -def ipex_no_cuda(orig_func, *args, **kwargs): - torch.cuda.is_available = lambda: False - orig_func(*args, **kwargs) - torch.cuda.is_available = torch.xpu.is_available - -original_autocast = torch.autocast -def ipex_autocast(*args, **kwargs): - if len(args) > 0 and args[0] == "cuda": - return original_autocast("xpu", *args[1:], **kwargs) - else: - return original_autocast(*args, **kwargs) - -original_torch_cat = torch.cat -def torch_cat(tensor, *args, **kwargs): - if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype): - return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs) - else: - return original_torch_cat(tensor, *args, **kwargs) - -original_interpolate = torch.nn.functional.interpolate -def interpolate(tensor, size=None, 
scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments - if antialias or align_corners is not None: - return_device = tensor.device - return_dtype = tensor.dtype - return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode, - align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype) - else: - return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode, - align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias) - -original_linalg_solve = torch.linalg.solve -def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name - if A.device != torch.device("cpu") or B.device != torch.device("cpu"): - return_device = A.device - return original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device) - else: - return original_linalg_solve(A, B, *args, **kwargs) - -def ipex_hijacks(): - CondFunc('torch.Tensor.to', - lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs), - lambda orig_func, self, device=None, *args, **kwargs: check_device(device)) - CondFunc('torch.Tensor.cuda', - lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs), - lambda orig_func, self, device=None, *args, **kwargs: check_device(device)) - CondFunc('torch.empty', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.load', - lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs), - lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location)) - CondFunc('torch.randn', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.ones', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.zeros', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.tensor', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - CondFunc('torch.linspace', - lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), - lambda orig_func, *args, device=None, **kwargs: check_device(device)) - - CondFunc('torch.Generator', - lambda orig_func, device=None: torch.xpu.Generator(device), - lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu") - - CondFunc('torch.batch_norm', - lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input, - weight if weight is not None else torch.ones(input.size()[1], device=input.device), - bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs), - lambda orig_func, input, *args, **kwargs: input.device != 
torch.device("cpu")) - CondFunc('torch.instance_norm', - lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input, - weight if weight is not None else torch.ones(input.size()[1], device=input.device), - bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs), - lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu")) - - #Functions with dtype errors: - CondFunc('torch.nn.modules.GroupNorm.forward', - lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), - lambda orig_func, self, input: input.dtype != self.weight.data.dtype) - CondFunc('torch.nn.modules.linear.Linear.forward', - lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), - lambda orig_func, self, input: input.dtype != self.weight.data.dtype) - CondFunc('torch.nn.modules.conv.Conv2d.forward', - lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), - lambda orig_func, self, input: input.dtype != self.weight.data.dtype) - CondFunc('torch.nn.functional.layer_norm', - lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: - orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs), - lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: - weight is not None and input.dtype != weight.data.dtype) - - #Diffusers Float64 (ARC GPUs doesn't support double or Float64): - if not torch.xpu.has_fp64_dtype(): - CondFunc('torch.from_numpy', - lambda orig_func, ndarray: orig_func(ndarray.astype('float32')), - lambda orig_func, ndarray: ndarray.dtype == float) - - #Broken functions when torch.cuda.is_available is True: - CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__', - lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs), - lambda orig_func, *args, **kwargs: True) - - #Functions that make compile mad with CondFunc: - torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers - torch.nn.DataParallel = DummyDataParallel - torch.autocast = ipex_autocast - torch.cat = torch_cat - torch.linalg.solve = linalg_solve - torch.nn.functional.interpolate = interpolate - torch.backends.cuda.sdp_kernel = return_null_context \ No newline at end of file diff --git a/spaces/Lianjd/stock_dashboard/backtrader/indicators/mabase.py b/spaces/Lianjd/stock_dashboard/backtrader/indicators/mabase.py deleted file mode 100644 index 3ae4df3fed92e722fe0ad4e64658bbf54d3ea349..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/indicators/mabase.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -from ..utils.py3 import with_metaclass - -from . import Indicator - - -class MovingAverage(object): - '''MovingAverage (alias MovAv) - - A placeholder to gather all Moving Average Types in a single place. - - Instantiating a SimpleMovingAverage can be achieved as follows:: - - sma = MovingAverage.Simple(self.data, period) - - Or using the shorter aliases:: - - sma = MovAv.SMA(self.data, period) - - or with the full (forwards and backwards) names: - - sma = MovAv.SimpleMovingAverage(self.data, period) - - sma = MovAv.MovingAverageSimple(self.data, period) - - ''' - _movavs = [] - - @classmethod - def register(cls, regcls): - if getattr(regcls, '_notregister', False): - return - - cls._movavs.append(regcls) - - clsname = regcls.__name__ - setattr(cls, clsname, regcls) - - clsalias = '' - if clsname.endswith('MovingAverage'): - clsalias = clsname.split('MovingAverage')[0] - elif clsname.startswith('MovingAverage'): - clsalias = clsname.split('MovingAverage')[1] - - if clsalias: - setattr(cls, clsalias, regcls) - - -class MovAv(MovingAverage): - pass # alias - - -class MetaMovAvBase(Indicator.__class__): - # Register any MovingAverage with the placeholder to allow the automatic - # creation of envelopes and oscillators - - def __new__(meta, name, bases, dct): - # Create the class - cls = super(MetaMovAvBase, meta).__new__(meta, name, bases, dct) - - MovingAverage.register(cls) - - # return the class - return cls - - -class MovingAverageBase(with_metaclass(MetaMovAvBase, Indicator)): - params = (('period', 30),) - plotinfo = dict(subplot=False) diff --git a/spaces/Lynx1221/rvc-test1/README.md b/spaces/Lynx1221/rvc-test1/README.md deleted file mode 100644 index 56936f1df15477c0ae2fdcfe59a77c175e1905d8..0000000000000000000000000000000000000000 --- a/spaces/Lynx1221/rvc-test1/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Rvc Models -emoji: 🎤 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: zomehwh/rvc-models ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MINAMONI/White-box-Cartoonization/app.py b/spaces/MINAMONI/White-box-Cartoonization/app.py deleted file mode 100644 index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000 --- a/spaces/MINAMONI/White-box-Cartoonization/app.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations -import argparse -import functools -import os -import pathlib -import sys -from typing import Callable -import uuid - -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image - -from io import BytesIO -from wbc.cartoonize import Cartoonize - -ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization' -TITLE = 'SystemErrorWang/White-box-Cartoonization' -DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}. 
- -""" -ARTICLE = """ - -""" - -SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"] -def compress_UUID(): - ''' - 根据http://www.ietf.org/rfc/rfc1738.txt,由uuid编码扩bai大字符域生成du串 - 包括:[0-9a-zA-Z\-_]共64个 - 长度:(32-2)/3*2=20 - 备注:可在地球上人zhi人都用,使用100年不重复(2^120) - :return:String - ''' - row = str(uuid.uuid4()).replace('-', '') - safe_code = '' - for i in range(10): - enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10) - safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)]) - safe_code = safe_code.replace('-', '') - return safe_code - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--theme', type=str) - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - parser.add_argument('--allow-screenshot', action='store_true') - return parser.parse_args() - -def run( - image, - cartoonize : Cartoonize -) -> tuple[PIL.Image.Image]: - - out_path = compress_UUID()+'.png' - cartoonize.run_sigle(image.name, out_path) - - return PIL.Image.open(out_path) - - -def main(): - gr.close_all() - - args = parse_args() - - cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/')) - - func = functools.partial(run, cartoonize=cartoonize) - func = functools.update_wrapper(func, run) - - gr.Interface( - func, - [ - gr.inputs.Image(type='file', label='Input Image'), - ], - [ - gr.outputs.Image( - type='pil', - label='Result'), - ], - # examples=examples, - theme=args.theme, - title=TITLE, - description=DESCRIPTION, - article=ARTICLE, - allow_screenshot=args.allow_screenshot, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/ML701G7/taim-gan/src/data/collate.py b/spaces/ML701G7/taim-gan/src/data/collate.py deleted file mode 100644 index 220060f52bc6f915875a78b3c973ae288435968e..0000000000000000000000000000000000000000 --- a/spaces/ML701G7/taim-gan/src/data/collate.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Custom collate function for the data loader.""" - -from typing import Any, List - -import torch -from torch.nn.utils.rnn import pad_sequence - - -def custom_collate(batch: List[Any], device: Any) -> Any: - """ - Custom collate function to be used in the data loader. - :param batch: list, with length equal to number of batches. 
- :return: processed batch of data [add padding to text, stack tensors in batch] - """ - img, correct_capt, curr_class, word_labels = zip(*batch) - batched_img = torch.stack(img, dim=0).to( - device - ) # shape: (batch_size, 3, height, width) - correct_capt_len = torch.tensor( - [len(capt) for capt in correct_capt], dtype=torch.int64 - ).unsqueeze( - 1 - ) # shape: (batch_size, 1) - batched_correct_capt = pad_sequence( - correct_capt, batch_first=True, padding_value=0 - ).to( - device - ) # shape: (batch_size, max_seq_len) - batched_curr_class = torch.stack(curr_class, dim=0).to( - device - ) # shape: (batch_size, 1) - batched_word_labels = pad_sequence( - word_labels, batch_first=True, padding_value=0 - ).to( - device - ) # shape: (batch_size, max_seq_len) - return ( - batched_img, - batched_correct_capt, - correct_capt_len, - batched_curr_class, - batched_word_labels, - ) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/drive.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/drive.py deleted file mode 100644 index 3cbfda8ae74bdf26c5aef197ff2866a7c7ad0cfd..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/drive.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class DRIVEDataset(CustomDataset): - """DRIVE dataset. - - In segmentation map annotation for DRIVE, 0 stands for background, which is - included in 2 categories. ``reduce_zero_label`` is fixed to False. The - ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '_manual1.png'. - """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(DRIVEDataset, self).__init__( - img_suffix='.png', - seg_map_suffix='_manual1.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/commands/__init__.py b/spaces/MetaWabbit/Auto-GPT/autogpt/commands/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MetaWabbit/Auto-GPT/main.py b/spaces/MetaWabbit/Auto-GPT/main.py deleted file mode 100644 index 160addc390b94a8b143a3a2e18991a560f9b032e..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/main.py +++ /dev/null @@ -1 +0,0 @@ -from autogpt import main diff --git a/spaces/Miyuki13242/Daily/README.md b/spaces/Miyuki13242/Daily/README.md deleted file mode 100644 index d296bae1f9e22ab9a3b653eb04e2ad1d9548c36f..0000000000000000000000000000000000000000 --- a/spaces/Miyuki13242/Daily/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Daily -emoji: 📊 -colorFrom: yellow -colorTo: yellow -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ModIA/FrenchDroneKeyword/app.py b/spaces/ModIA/FrenchDroneKeyword/app.py deleted file mode 100644 index 5dcd5e4e1232be03c1ccf0d1d3f0b02d8b1d6ed6..0000000000000000000000000000000000000000 --- a/spaces/ModIA/FrenchDroneKeyword/app.py +++ /dev/null @@ -1,73 +0,0 @@ -import numpy as np - -import skorch -import torch -import torch.nn as nn - -import gradio as gr - -import librosa - -from joblib import dump, load - -from sklearn.pipeline import Pipeline -from sklearn.preprocessing import LabelEncoder - -from resnet import ResNet -from 
gradio_utils import load_as_librosa, predict_gradio -from dataloading import uniformize, to_numpy -from preprocessing import MfccTransformer, TorchTransform - - -SEED : int = 42 -np.random.seed(SEED) -torch.manual_seed(SEED) - -model = load('./model/model.joblib') -only_mffc_transform = load('./model/only_mffc_transform.joblib') -label_encoder = load('./model/label_encoder.joblib') -SAMPLE_RATE = load("./model/SAMPLE_RATE.joblib") -METHOD = load("./model/METHOD.joblib") -MAX_TIME = load("./model/MAX_TIME.joblib") -N_MFCC = load("./model/N_MFCC.joblib") -HOP_LENGHT = load("./model/HOP_LENGHT.joblib") - -sklearn_model = Pipeline( - steps=[ - ("mfcc", only_mffc_transform), - ("model", model) - ] - ) - -uniform_lambda = lambda y, sr: uniformize(y, sr, METHOD, MAX_TIME) - -title = r"ResNet 9" - -description = r""" -
        -The resnet9 model was trained to classify drone speech command. - -
        -""" -article = r""" -- [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385) -""" - -demo_men = gr.Interface( - title = title, - description = description, - article = article, - fn=lambda data: predict_gradio( - data=data, - uniform_lambda=uniform_lambda, - sklearn_model=sklearn_model, - label_transform=label_encoder, - target_sr=SAMPLE_RATE), - inputs = gr.Audio(source="microphone", type="numpy"), - outputs = gr.Label(), - # allow_flagging = "manual", - # flagging_options = ['recule', 'tournedroite', 'arretetoi', 'tournegauche', 'gauche', 'avance', 'droite'], - # flagging_dir = "./flag/men" -) - -demo_men.launch() diff --git a/spaces/Navneet574/Kidney_Stone_Prediction/README.md b/spaces/Navneet574/Kidney_Stone_Prediction/README.md deleted file mode 100644 index 5ea3c888eaf7a44f7ca5ce9d9795064aae9afe31..0000000000000000000000000000000000000000 --- a/spaces/Navneet574/Kidney_Stone_Prediction/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Kidney Stone Prediction -emoji: 🐢 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: cc-by-nc-sa-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NbAiLab/maken-clip-image/README.md b/spaces/NbAiLab/maken-clip-image/README.md deleted file mode 100644 index 4046d14f7c444518783edfeda90d466697fa482e..0000000000000000000000000000000000000000 --- a/spaces/NbAiLab/maken-clip-image/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Maken Clip Image -emoji: 🖼️ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.11.0 -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/NiuTaipu/moe-tts-test01/text/__init__.py b/spaces/NiuTaipu/moe-tts-test01/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/NiuTaipu/moe-tts-test01/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py deleted file mode 100644 index ef618adef7c7d010f8de38fb5ebeb5a35d2d3cac..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py +++ /dev/null @@ -1,290 +0,0 @@ -import os, sys -import glob, itertools -import pandas as pd - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') - sys.exit(-1) - - -def load_langs(path): - with open(path) as fr: - langs = [l.strip() for l in fr] - return langs - - - -def load_sentences(raw_data, split, direction): - src, tgt = direction.split('-') - src_path = f"{raw_data}/{split}.{direction}.{src}" - tgt_path = f"{raw_data}/{split}.{direction}.{tgt}" - if os.path.exists(src_path) and os.path.exists(tgt_path): - return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())] - else: - return [] - -def swap_direction(d): - src, tgt = d.split('-') - return f'{tgt}-{src}' - -def get_all_test_data(raw_data, directions, split='test'): - test_data = [ - x - for dd in directions - for d in [dd, swap_direction(dd)] - for x in load_sentences(raw_data, split, d) - ] - # all_test_data = {s for _, d in test_data for s in d} - all_test_data = {} - for lang, d in test_data: - for s in d: - s = s.strip() - lgs = all_test_data.get(s, set()) - lgs.add(lang) - all_test_data[s] = lgs - return all_test_data, test_data - -def check_train_sentences(raw_data, direction, all_test_data, mess_up_train={}): - src, tgt = direction.split('-') - tgt_path = f"{raw_data}/train.{direction}.{tgt}" - src_path = f"{raw_data}/train.{direction}.{src}" - print(f'check training data in {raw_data}/train.{direction}') - size = 0 - if not os.path.exists(tgt_path) or not os.path.exists(src_path): - return mess_up_train, size - with open(src_path) as f, open(tgt_path) as g: - for src_line, tgt_line in zip(f, g): - s = src_line.strip() - t = tgt_line.strip() - size += 1 - if s in all_test_data: - langs = mess_up_train.get(s, set()) - langs.add(direction) - mess_up_train[s] = langs - if t in all_test_data: - langs = mess_up_train.get(t, set()) - langs.add(direction) - mess_up_train[t] = langs - return mess_up_train, size - -def check_train_all(raw_data, directions, all_test_data): - mess_up_train = {} - data_sizes = {} - for direction in directions: - _, size = check_train_sentences(raw_data, direction, all_test_data, mess_up_train) - data_sizes[direction] = size - return mess_up_train, data_sizes - -def 
count_train_in_other_set(mess_up_train): - train_in_others = [(direction, s) for s, directions in mess_up_train.items() for direction in directions] - counts = {} - for direction, s in train_in_others: - counts[direction] = counts.get(direction, 0) + 1 - return counts - -def train_size_if_remove_in_otherset(data_sizes, mess_up_train): - counts_in_other = count_train_in_other_set(mess_up_train) - remain_sizes = [] - for direction, count in counts_in_other.items(): - remain_sizes.append((direction, data_sizes[direction] - count, data_sizes[direction], count, 100 * count / data_sizes[direction] )) - return remain_sizes - - -def remove_messed_up_sentences(raw_data, direction, mess_up_train, mess_up_train_pairs, corrected_langs): - split = 'train' - src_lang, tgt_lang = direction.split('-') - - tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}" - src = f"{raw_data}/{split}.{direction}.{src_lang}" - print(f'working on {direction}: ', src, tgt) - if not os.path.exists(tgt) or not os.path.exists(src) : - return - - corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}" - corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}" - line_num = 0 - keep_num = 0 - with open(src, encoding='utf8',) as fsrc, \ - open(tgt, encoding='utf8',) as ftgt, \ - open(corrected_src, 'w', encoding='utf8') as fsrc_corrected, \ - open(corrected_tgt, 'w', encoding='utf8') as ftgt_corrected: - for s, t in zip(fsrc, ftgt): - s = s.strip() - t = t.strip() - if t not in mess_up_train \ - and s not in mess_up_train \ - and (s, t) not in mess_up_train_pairs \ - and (t, s) not in mess_up_train_pairs: - corrected_langs.add(direction) - print(s, file=fsrc_corrected) - print(t, file=ftgt_corrected) - keep_num += 1 - line_num += 1 - if line_num % 1000 == 0: - print(f'completed {line_num} lines', end='\r') - return line_num, keep_num - -########## - - -def merge_valid_test_messup(mess_up_train_valid, mess_up_train_test): - merged_mess = [] - for s in set(list(mess_up_train_valid.keys()) + list(mess_up_train_test.keys())): - if not s: - continue - valid = mess_up_train_valid.get(s, set()) - test = mess_up_train_test.get(s, set()) - merged_mess.append((s, valid | test)) - return dict(merged_mess) - - - -######### -def check_train_pairs(raw_data, direction, all_test_data, mess_up_train={}): - src, tgt = direction.split('-') - #a hack; TODO: check the reversed directions - path1 = f"{raw_data}/train.{src}-{tgt}.{src}" - path2 = f"{raw_data}/train.{src}-{tgt}.{tgt}" - if not os.path.exists(path1) or not os.path.exists(path2) : - return - - with open(path1) as f1, open(path2) as f2: - for src_line, tgt_line in zip(f1, f2): - s = src_line.strip() - t = tgt_line.strip() - if (s, t) in all_test_data or (t, s) in all_test_data: - langs = mess_up_train.get( (s, t), set()) - langs.add(src) - langs.add(tgt) - mess_up_train[(s, t)] = langs - - -def load_pairs(raw_data, split, direction): - src, tgt = direction.split('-') - src_f = f"{raw_data}/{split}.{direction}.{src}" - tgt_f = f"{raw_data}/{split}.{direction}.{tgt}" - if tgt != 'en_XX': - src_f, tgt_f = tgt_f, src_f - if os.path.exists(src_f) and os.path.exists(tgt_f): - return list(zip(open(src_f).read().splitlines(), - open(tgt_f).read().splitlines(), - )) - else: - return [] - -# skip_langs = ['cs_CZ', 'en_XX', 'tl_XX', 'tr_TR'] -def get_messed_up_test_pairs(split, directions): - test_pairs = [ - (d, load_pairs(raw_data, split, d)) - for d in directions - ] - # all_test_data = {s for _, d in test_data for s in d} - all_test_pairs = {} - for direction, d in test_pairs: - src, tgt 
= direction.split('-') - for s in d: - langs = all_test_pairs.get(s, set()) - langs.add(src) - langs.add(tgt) - all_test_pairs[s] = langs - mess_up_train_pairs = {} - for direction in directions: - check_train_pairs(raw_data, direction, all_test_pairs, mess_up_train_pairs) - return all_test_pairs, mess_up_train_pairs - - - -if __name__ == "__main__": - ####### - import argparse - parser = argparse.ArgumentParser() - parser.add_argument( - '--from-folder', - required=True, - type=str) - parser.add_argument( - '--to-folder', - required=True, - type=str) - parser.add_argument( - '--directions', - default=None, - type=str) - - - args = parser.parse_args() - raw_data = args.from_folder - to_folder = args.to_folder - os.makedirs(to_folder, exist_ok=True) - - if args.directions: - directions = args.directions.split(',') - else: - raw_files = itertools.chain( - glob.glob(f'{raw_data}/train*'), - glob.glob(f'{raw_data}/valid*'), - glob.glob(f'{raw_data}/test*'), - ) - directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] - print('working on directions: ', directions) - - ########## - - - - all_test_data, test_data = get_all_test_data(raw_data, directions, 'test') - print('==loaded test data==') - all_valid_data, valid_data = get_all_test_data(raw_data, directions, 'valid') - print('==loaded valid data==') - all_valid_test_data = merge_valid_test_messup(all_test_data, all_valid_data) - mess_up_train, data_sizes = check_train_all(raw_data, directions, all_valid_test_data) - print('training messing up with valid, test data:', len(mess_up_train)) - data_situation = train_size_if_remove_in_otherset(data_sizes, mess_up_train) - df = pd.DataFrame(data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent']) - df.sort_values('remove_percent', ascending=False) - df.to_csv(f'{raw_data}/clean_summary.tsv', sep='\t') - print(f'projected data clean summary in: {raw_data}/clean_summary.tsv') - - # correct the dataset: - all_test_pairs, mess_up_test_train_pairs = get_messed_up_test_pairs('test', directions) - all_valid_pairs, mess_up_valid_train_pairs = get_messed_up_test_pairs('valid', directions) - - all_messed_pairs = set(mess_up_test_train_pairs.keys()).union(set(mess_up_valid_train_pairs.keys())) - corrected_directions = set() - - real_data_situation = [] - for direction in directions: - org_size, new_size = remove_messed_up_sentences(raw_data, direction, mess_up_train, all_messed_pairs, corrected_directions) - if org_size == 0: - print(f"{direction} has size 0") - continue - real_data_situation.append( - (direction, new_size, org_size, org_size - new_size, (org_size - new_size) / org_size * 100) - ) - print('corrected directions: ', corrected_directions) - df = pd.DataFrame(real_data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent']) - df.sort_values('remove_percent', ascending=False) - df.to_csv(f'{raw_data}/actual_clean_summary.tsv', sep='\t') - print(f'actual data clean summary (which can be different from the projected one because of duplications) in: {raw_data}/actual_clean_summary.tsv') - - import shutil - for direction in directions: - src_lang, tgt_lang = direction.split('-') - for split in ['train', 'valid', 'test']: - # copying valid, test and uncorrected train - if direction in corrected_directions and split == 'train': - continue - tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}" - src = f"{raw_data}/{split}.{direction}.{src_lang}" - if not 
(os.path.exists(src) and os.path.exists(tgt)): - continue - corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}" - corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}" - print(f'copying {src} to {corrected_src}') - shutil.copyfile(src, corrected_src) - print(f'copying {tgt} to {corrected_tgt}') - shutil.copyfile(tgt, corrected_tgt) - - print('completed') \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/__init__.py deleted file mode 100644 index 06cec18183ca14cd534d14558e8b44e25f3e69d5..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .wav2vec import * # noqa -from .wav2vec2 import * # noqa -from .wav2vec2_asr import * # noqa diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py deleted file mode 100644 index 0d5f7fa818a45ecf132627d240afac653e148070..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py +++ /dev/null @@ -1,71 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -import inflect -import re - - -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = 
re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/checkpoint_utils.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/checkpoint_utils.py deleted file mode 100644 index ef5d4c9022c3c35722f0bc9150260c7a65d35e5f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/checkpoint_utils.py +++ /dev/null @@ -1,858 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import ast -import collections -import contextlib -import logging -import numpy as np -import os -import re -import time -import traceback -from collections import OrderedDict -from typing import Any, Dict, Optional, Union - -import torch -from fairseq.data import data_utils -from fairseq.dataclass.configs import CheckpointConfig -from fairseq.dataclass.utils import ( - convert_namespace_to_omegaconf, - overwrite_args_by_name, -) -from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP -from fairseq.file_io import PathManager -from fairseq.models import FairseqDecoder, FairseqEncoder -from omegaconf import DictConfig, open_dict, OmegaConf - - -logger = logging.getLogger(__name__) - - -def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): - from fairseq import meters - - # only one worker should attempt to create the required dir - if trainer.data_parallel_rank == 0: - os.makedirs(cfg.save_dir, exist_ok=True) - - prev_best = getattr(save_checkpoint, "best", val_loss) - if val_loss is not None: - best_function = max if cfg.maximize_best_checkpoint_metric else min - save_checkpoint.best = best_function(val_loss, prev_best) - - if cfg.no_save: - return - - trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state - - if not trainer.should_save_checkpoint_on_current_rank: - if trainer.always_call_state_dict_during_save_checkpoint: - trainer.state_dict() - return - - write_timer = meters.StopwatchMeter() - write_timer.start() - - epoch = epoch_itr.epoch - end_of_epoch = epoch_itr.end_of_epoch() - updates = trainer.get_num_updates() - - logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") - - def is_better(a, b): - return a >= b if cfg.maximize_best_checkpoint_metric else a <= b - - suffix = trainer.checkpoint_suffix - checkpoint_conds = collections.OrderedDict() - checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( - end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 - ) - checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( - not end_of_epoch - and cfg.save_interval_updates > 0 - and updates % cfg.save_interval_updates == 0 - ) - checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( - not hasattr(save_checkpoint, "best") - or is_better(val_loss, save_checkpoint.best) - ) - if val_loss is not None and cfg.keep_best_checkpoints > 0: - worst_best = getattr(save_checkpoint, "best", None) - chkpts = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( - cfg.best_checkpoint_metric, suffix - ), - ) - if len(chkpts) > 0: - p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0] - worst_best = 
float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), "")) - # add random digits to resolve ties - with data_utils.numpy_seed(epoch, updates, val_loss): - rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints) - - checkpoint_conds[ - "checkpoint.best_{}_{:.3f}{}{}.pt".format( - cfg.best_checkpoint_metric, - val_loss, - rand_sfx, - suffix - ) - ] = worst_best is None or is_better(val_loss, worst_best) - checkpoint_conds[ - "checkpoint_last{}.pt".format(suffix) - ] = not cfg.no_last_checkpoints - - extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} - if hasattr(save_checkpoint, "best"): - extra_state.update({"best": save_checkpoint.best}) - - checkpoints = [ - os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond - ] - if len(checkpoints) > 0: - trainer.save_checkpoint(checkpoints[0], extra_state) - for cp in checkpoints[1:]: - if cfg.write_checkpoints_asynchronously: - # TODO[ioPath]: Need to implement a delayed asynchronous - # file copying/moving feature. - logger.warning( - f"ioPath is not copying {checkpoints[0]} to {cp} " - "since async write mode is on." - ) - else: - assert PathManager.copy( - checkpoints[0], cp, overwrite=True - ), f"Failed to copy {checkpoints[0]} to {cp}" - - write_timer.stop() - logger.info( - "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( - checkpoints[0], epoch, updates, val_loss, write_timer.sum - ) - ) - - if not end_of_epoch and cfg.keep_interval_updates > 0: - # remove old checkpoints; checkpoints are sorted in descending order - if cfg.keep_interval_updates_pattern == -1: - checkpoints = checkpoint_paths( - cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix) - ) - else: - checkpoints = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix), - keep_match=True, - ) - checkpoints = [ - x[0] - for x in checkpoints - if x[1] % cfg.keep_interval_updates_pattern != 0 - ] - - for old_chk in checkpoints[cfg.keep_interval_updates :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - if cfg.keep_last_epochs > 0: - # remove old epoch checkpoints; checkpoints are sorted in descending order - checkpoints = checkpoint_paths( - cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix) - ) - for old_chk in checkpoints[cfg.keep_last_epochs :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - if cfg.keep_best_checkpoints > 0: - # only keep the best N checkpoints according to validation metric - checkpoints = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( - cfg.best_checkpoint_metric, suffix - ), - ) - if not cfg.maximize_best_checkpoint_metric: - checkpoints = checkpoints[::-1] - for old_chk in checkpoints[cfg.keep_best_checkpoints :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - -def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): - """ - Load a checkpoint and restore the training iterator. - - *passthrough_args* will be passed through to - ``trainer.get_train_iterator``. 
- """ - - reset_optimizer = cfg.reset_optimizer - reset_lr_scheduler = cfg.reset_lr_scheduler - optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) - reset_meters = cfg.reset_meters - reset_dataloader = cfg.reset_dataloader - - if cfg.finetune_from_model is not None and ( - reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader - ): - raise ValueError( - "--finetune-from-model can not be set together with either --reset-optimizer" - " or reset_lr_scheduler or reset_meters or reset_dataloader" - ) - - suffix = trainer.checkpoint_suffix - if ( - cfg.restore_file == "checkpoint_last.pt" - ): # default value of restore_file is 'checkpoint_last.pt' - checkpoint_path = os.path.join( - cfg.save_dir, "checkpoint_last{}.pt".format(suffix) - ) - first_launch = not PathManager.exists(checkpoint_path) - if cfg.finetune_from_model is not None and first_launch: - # if there is no last checkpoint to restore, start the finetune from pretrained model - # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. - if PathManager.exists(cfg.finetune_from_model): - checkpoint_path = cfg.finetune_from_model - reset_optimizer = True - reset_lr_scheduler = True - reset_meters = True - reset_dataloader = True - logger.info( - f"loading pretrained model from {checkpoint_path}: " - "optimizer, lr scheduler, meters, dataloader will be reset" - ) - else: - raise ValueError( - f"--funetune-from-model {cfg.finetune_from_model} does not exist" - ) - elif suffix is not None: - checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt") - else: - checkpoint_path = cfg.restore_file - - if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model: - raise ValueError( - "--finetune-from-model and --restore-file (non-default value) " - "can not be specified together: " + str(cfg) - ) - - extra_state = trainer.load_checkpoint( - checkpoint_path, - reset_optimizer, - reset_lr_scheduler, - optimizer_overrides, - reset_meters=reset_meters, - ) - - if ( - extra_state is not None - and "best" in extra_state - and not reset_optimizer - and not reset_meters - ): - save_checkpoint.best = extra_state["best"] - - if extra_state is not None and not reset_dataloader: - # restore iterator from checkpoint - itr_state = extra_state["train_iterator"] - epoch_itr = trainer.get_train_iterator( - epoch=itr_state["epoch"], load_dataset=True, **passthrough_args - ) - epoch_itr.load_state_dict(itr_state) - else: - epoch_itr = trainer.get_train_iterator( - epoch=1, load_dataset=True, **passthrough_args - ) - - trainer.lr_step(epoch_itr.epoch) - - return extra_state, epoch_itr - - -def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): - """Loads a checkpoint to CPU (with upgrading for backward compatibility). - - If doing single-GPU training or if the checkpoint is only being loaded by at - most one process on each node (current default behavior is for only rank 0 - to read the checkpoint from disk), load_on_all_ranks should be False to - avoid errors from torch.distributed not having been initialized or - torch.distributed.barrier() hanging. - - If all processes on each node may be loading the checkpoint - simultaneously, load_on_all_ranks should be set to True to avoid I/O - conflicts. - - There's currently no support for > 1 but < all processes loading the - checkpoint on each node. 
- """ - local_path = PathManager.get_local_path(path) - # The locally cached file returned by get_local_path() may be stale for - # remote files that are periodically updated/overwritten (ex: - # checkpoint_last.pt) - so we remove the local copy, sync across processes - # (if needed), and then download a fresh copy. - if local_path != path and PathManager.path_requires_pathmanager(path): - try: - os.remove(local_path) - except FileNotFoundError: - # With potentially multiple processes removing the same file, the - # file being missing is benign (missing_ok isn't available until - # Python 3.8). - pass - if load_on_all_ranks: - torch.distributed.barrier() - local_path = PathManager.get_local_path(path) - - with open(local_path, "rb") as f: - state = torch.load(f, map_location=torch.device("cpu")) - - if "args" in state and state["args"] is not None and arg_overrides is not None: - args = state["args"] - for arg_name, arg_val in arg_overrides.items(): - setattr(args, arg_name, arg_val) - - if "cfg" in state and state["cfg"] is not None: - - # hack to be able to set Namespace in dict config. this should be removed when we update to newer - # omegaconf version that supports object flags, or when we migrate all existing models - from omegaconf import _utils - - old_primitive = _utils.is_primitive_type - _utils.is_primitive_type = lambda _: True - - state["cfg"] = OmegaConf.create(state["cfg"]) - - _utils.is_primitive_type = old_primitive - OmegaConf.set_struct(state["cfg"], True) - - if arg_overrides is not None: - overwrite_args_by_name(state["cfg"], arg_overrides) - - state = _upgrade_state_dict(state) - return state - - -def load_model_ensemble( - filenames, - arg_overrides: Optional[Dict[str, Any]] = None, - task=None, - strict=True, - suffix="", - num_shards=1, - state=None, -): - """Loads an ensemble of models. 
- - Args: - filenames (List[str]): checkpoint files to load - arg_overrides (Dict[str,Any], optional): override model args that - were used during model training - task (fairseq.tasks.FairseqTask, optional): task to use for loading - """ - assert not ( - strict and num_shards > 1 - ), "Cannot load state dict with strict=True and checkpoint shards > 1" - ensemble, args, _task = load_model_ensemble_and_task( - filenames, - arg_overrides, - task, - strict, - suffix, - num_shards, - state, - ) - return ensemble, args - - -def get_maybe_sharded_checkpoint_filename( - filename: str, suffix: str, shard_idx: int, num_shards: int -) -> str: - orig_filename = filename - filename = filename.replace(".pt", suffix + ".pt") - fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt" - model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt" - if PathManager.exists(fsdp_filename): - return fsdp_filename - elif num_shards > 1: - return model_parallel_filename - else: - return filename - - -def load_model_ensemble_and_task( - filenames, - arg_overrides: Optional[Dict[str, Any]] = None, - task=None, - strict=True, - suffix="", - num_shards=1, - state=None, -): - assert state is None or len(filenames) == 1 - - from fairseq import tasks - - assert not ( - strict and num_shards > 1 - ), "Cannot load state dict with strict=True and checkpoint shards > 1" - ensemble = [] - cfg = None - for filename in filenames: - orig_filename = filename - model_shard_state = {"shard_weights": [], "shard_metadata": []} - assert num_shards > 0 - st = time.time() - for shard_idx in range(num_shards): - filename = get_maybe_sharded_checkpoint_filename( - orig_filename, suffix, shard_idx, num_shards - ) - - if not PathManager.exists(filename): - raise IOError("Model file not found: {}".format(filename)) - if state is None: - state = load_checkpoint_to_cpu(filename, arg_overrides) - if "args" in state and state["args"] is not None: - cfg = convert_namespace_to_omegaconf(state["args"]) - elif "cfg" in state and state["cfg"] is not None: - cfg = state["cfg"] - else: - raise RuntimeError( - f"Neither args nor cfg exist in state keys = {state.keys()}" - ) - - if task is None: - task = tasks.setup_task(cfg.task) - - if "task_state" in state: - task.load_state_dict(state["task_state"]) - - if "fsdp_metadata" in state and num_shards > 1: - model_shard_state["shard_weights"].append(state["model"]) - model_shard_state["shard_metadata"].append(state["fsdp_metadata"]) - # check FSDP import before the code goes too far - if not has_FSDP: - raise ImportError( - "Cannot find FullyShardedDataParallel. 
" - "Please install fairscale with: pip install fairscale" - ) - if shard_idx == num_shards - 1: - consolidated_model_state = FSDP.consolidate_shard_weights( - shard_weights=model_shard_state["shard_weights"], - shard_metadata=model_shard_state["shard_metadata"], - ) - model = task.build_model(cfg.model) - model.load_state_dict( - consolidated_model_state, strict=strict, model_cfg=cfg.model - ) - else: - # model parallel checkpoint or unsharded checkpoint - model = task.build_model(cfg.model) - model.load_state_dict( - state["model"], strict=strict, model_cfg=cfg.model - ) - - # reset state so it gets loaded for the next model in ensemble - state = None - if shard_idx % 10 == 0 and shard_idx > 0: - elapsed = time.time() - st - logger.info( - f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard" - ) - - # build model for ensemble - ensemble.append(model) - return ensemble, cfg, task - - -def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False): - """Retrieves all checkpoints found in `path` directory. - - Checkpoints are identified by matching filename to the specified pattern. If - the pattern contains groups, the result will be sorted by the first group in - descending order. - """ - pt_regexp = re.compile(pattern) - files = PathManager.ls(path) - - entries = [] - for i, f in enumerate(files): - m = pt_regexp.fullmatch(f) - if m is not None: - idx = float(m.group(1)) if len(m.groups()) > 0 else i - entries.append((idx, m.group(0))) - if keep_match: - return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)] - else: - return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] - - -def torch_persistent_save(obj, filename, async_write: bool = False): - if async_write: - with PathManager.opena(filename, "wb") as f: - _torch_persistent_save(obj, f) - else: - if PathManager.supports_rename(filename): - # do atomic save - with PathManager.open(filename + ".tmp", "wb") as f: - _torch_persistent_save(obj, f) - PathManager.rename(filename + ".tmp", filename) - else: - # fallback to non-atomic save - with PathManager.open(filename, "wb") as f: - _torch_persistent_save(obj, f) - - -def _torch_persistent_save(obj, f): - if isinstance(f, str): - with PathManager.open(f, "wb") as h: - torch_persistent_save(obj, h) - return - for i in range(3): - try: - return torch.save(obj, f) - except Exception: - if i == 2: - logger.error(traceback.format_exc()) - raise - - -def _upgrade_state_dict(state): - """Helper for upgrading old model checkpoints.""" - - # add optimizer_history - if "optimizer_history" not in state: - state["optimizer_history"] = [ - {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} - ] - state["last_optimizer_state"] = state["optimizer"] - del state["optimizer"] - del state["best_loss"] - # move extra_state into sub-dictionary - if "epoch" in state and "extra_state" not in state: - state["extra_state"] = { - "epoch": state["epoch"], - "batch_offset": state["batch_offset"], - "val_loss": state["val_loss"], - } - del state["epoch"] - del state["batch_offset"] - del state["val_loss"] - # reduce optimizer history's memory usage (only keep the last state) - if "optimizer" in state["optimizer_history"][-1]: - state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] - for optim_hist in state["optimizer_history"]: - del optim_hist["optimizer"] - # record the optimizer class name - if "optimizer_name" not in state["optimizer_history"][-1]: - 
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" - # move best_loss into lr_scheduler_state - if "lr_scheduler_state" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["lr_scheduler_state"] = { - "best": state["optimizer_history"][-1]["best_loss"] - } - del state["optimizer_history"][-1]["best_loss"] - # keep track of number of updates - if "num_updates" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["num_updates"] = 0 - # old model checkpoints may not have separate source/target positions - if ( - "args" in state - and hasattr(state["args"], "max_positions") - and not hasattr(state["args"], "max_source_positions") - ): - state["args"].max_source_positions = state["args"].max_positions - state["args"].max_target_positions = state["args"].max_positions - # use stateful training data iterator - if "train_iterator" not in state["extra_state"]: - state["extra_state"]["train_iterator"] = { - "epoch": state["extra_state"]["epoch"], - "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), - } - - # backward compatibility, cfg updates - if "args" in state and state["args"] is not None: - # default to translation task - if not hasattr(state["args"], "task"): - state["args"].task = "translation" - # --raw-text and --lazy-load are deprecated - if getattr(state["args"], "raw_text", False): - state["args"].dataset_impl = "raw" - elif getattr(state["args"], "lazy_load", False): - state["args"].dataset_impl = "lazy" - # epochs start at 1 - if state["extra_state"]["train_iterator"] is not None: - state["extra_state"]["train_iterator"]["epoch"] = max( - state["extra_state"]["train_iterator"].get("epoch", 1), 1 - ) - # --remove-bpe ==> --postprocess - if hasattr(state["args"], "remove_bpe"): - state["args"].post_process = state["args"].remove_bpe - # --min-lr ==> --stop-min-lr - if hasattr(state["args"], "min_lr"): - state["args"].stop_min_lr = state["args"].min_lr - del state["args"].min_lr - # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion - if ( - hasattr(state["args"], "criterion") - and state["args"].criterion in [ - "binary_cross_entropy", - "kd_binary_cross_entropy", - ] - ): - state["args"].criterion = "wav2vec" - # remove log_keys if it's None (criteria will supply a default value of []) - if hasattr(state["args"], "log_keys") and state["args"].log_keys is None: - delattr(state["args"], "log_keys") - # speech_pretraining => audio pretraining - if ( - hasattr(state["args"], "task") - and state["args"].task == "speech_pretraining" - ): - state["args"].task = "audio_pretraining" - # audio_cpc => wav2vec - if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc": - state["args"].arch = "wav2vec" - # convert legacy float learning rate to List[float] - if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float): - state["args"].lr = [state["args"].lr] - # convert task data arg to a string instead of List[string] - if ( - hasattr(state["args"], "data") - and isinstance(state["args"].data, list) - and len(state["args"].data) > 0 - ): - state["args"].data = state["args"].data[0] - # remove keys in state["args"] related to teacher-student learning - for key in [ - "static_teachers", - "static_teacher_weights", - "dynamic_teachers", - "dynamic_teacher_weights", - ]: - if key in state["args"]: - delattr(state["args"], key) - - state["cfg"] = convert_namespace_to_omegaconf(state["args"]) - - if "cfg" in state and state["cfg"] is not None: - cfg = state["cfg"] - with open_dict(cfg): - # any 
upgrades for Hydra-based configs - if ( - "task" in cfg - and "eval_wer_config" in cfg.task - and isinstance(cfg.task.eval_wer_config.print_alignment, bool) - ): - cfg.task.eval_wer_config.print_alignment = "hard" - if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool): - cfg.generation.print_alignment = "hard" if cfg.generation.print_alignment else None - if ( - "model" in cfg - and "w2v_args" in cfg.model - and cfg.model.w2v_args is not None - and ( - hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args - ) - and hasattr(cfg.model.w2v_args.task, "eval_wer_config") - and cfg.model.w2v_args.task.eval_wer_config is not None - and isinstance( - cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool - ) - ): - cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard" - - return state - - -def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]): - """Prune the given state_dict if desired for LayerDrop - (https://arxiv.org/abs/1909.11556). - - Training with LayerDrop allows models to be robust to pruning at inference - time. This function prunes state_dict to allow smaller models to be loaded - from a larger model and re-maps the existing state_dict for this to occur. - - It's called by functions that load models from checkpoints and does not - need to be called directly. - """ - arch = None - if model_cfg is not None: - arch = ( - model_cfg._name - if isinstance(model_cfg, DictConfig) - else getattr(model_cfg, "arch", None) - ) - - if not model_cfg or arch is None or arch == "ptt_transformer": - # args should not be none, but don't crash if it is. - return state_dict - - encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None) - decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None) - - if not encoder_layers_to_keep and not decoder_layers_to_keep: - return state_dict - - # apply pruning - logger.info( - "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" - ) - - def create_pruning_pass(layers_to_keep, layer_name): - keep_layers = sorted( - int(layer_string) for layer_string in layers_to_keep.split(",") - ) - mapping_dict = {} - for i in range(len(keep_layers)): - mapping_dict[str(keep_layers[i])] = str(i) - - regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) - return {"substitution_regex": regex, "mapping_dict": mapping_dict} - - pruning_passes = [] - if encoder_layers_to_keep: - pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) - if decoder_layers_to_keep: - pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) - - new_state_dict = {} - for layer_name in state_dict.keys(): - match = re.search(r"\.layers\.(\d+)\.", layer_name) - # if layer has no number in it, it is a supporting layer, such as an - # embedding - if not match: - new_state_dict[layer_name] = state_dict[layer_name] - continue - - # otherwise, layer should be pruned. 
- original_layer_number = match.group(1) - # figure out which mapping dict to replace from - for pruning_pass in pruning_passes: - if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ - "substitution_regex" - ].search(layer_name): - new_layer_number = pruning_pass["mapping_dict"][original_layer_number] - substitution_match = pruning_pass["substitution_regex"].search( - layer_name - ) - new_state_key = ( - layer_name[: substitution_match.start(1)] - + new_layer_number - + layer_name[substitution_match.end(1) :] - ) - new_state_dict[new_state_key] = state_dict[layer_name] - - # Since layers are now pruned, *_layers_to_keep are no longer needed. - # This is more of "It would make it work fix" rather than a proper fix. - if isinstance(model_cfg, DictConfig): - context = open_dict(model_cfg) - else: - context = contextlib.ExitStack() - with context: - if hasattr(model_cfg, "encoder_layers_to_keep"): - model_cfg.encoder_layers_to_keep = None - if hasattr(model_cfg, "decoder_layers_to_keep"): - model_cfg.decoder_layers_to_keep = None - - return new_state_dict - - -def load_pretrained_component_from_model( - component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str -): - """ - Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the - provided `component` object. If state_dict fails to load, there may be a - mismatch in the architecture of the corresponding `component` found in the - `checkpoint` file. - """ - if not PathManager.exists(checkpoint): - raise IOError("Model file not found: {}".format(checkpoint)) - state = load_checkpoint_to_cpu(checkpoint) - if isinstance(component, FairseqEncoder): - component_type = "encoder" - elif isinstance(component, FairseqDecoder): - component_type = "decoder" - else: - raise ValueError( - "component to load must be either a FairseqEncoder or " - "FairseqDecoder. Loading other component types are not supported." - ) - component_state_dict = OrderedDict() - for key in state["model"].keys(): - if key.startswith(component_type): - # encoder.input_layers.0.0.weight --> input_layers.0.0.weight - component_subkey = key[len(component_type) + 1 :] - component_state_dict[component_subkey] = state["model"][key] - component.load_state_dict(component_state_dict, strict=True) - return component - - -def verify_checkpoint_directory(save_dir: str) -> None: - if not os.path.exists(save_dir): - os.makedirs(save_dir, exist_ok=True) - temp_file_path = os.path.join(save_dir, "dummy") - try: - with open(temp_file_path, "w"): - pass - except OSError as e: - logger.warning( - "Unable to access checkpoint save directory: {}".format(save_dir) - ) - raise e - else: - os.remove(temp_file_path) - - -def load_ema_from_checkpoint(fpath): - """Loads exponential moving averaged (EMA) checkpoint from input and - returns a model with ema weights. - - Args: - fpath: A string path of checkpoint to load from. - - Returns: - A dict of string keys mapping to various values. The 'model' key - from the returned dict should correspond to an OrderedDict mapping - string parameter names to torch Tensors. 
- """ - params_dict = collections.OrderedDict() - new_state = None - - with PathManager.open(fpath, 'rb') as f: - new_state = torch.load( - f, - map_location=( - lambda s, _: torch.serialization.default_restore_location(s, 'cpu') - ), - ) - - # EMA model is stored in a separate "extra state" - model_params = new_state['extra_state']['ema'] - - for key in list(model_params.keys()): - p = model_params[key] - if isinstance(p, torch.HalfTensor): - p = p.float() - if key not in params_dict: - params_dict[key] = p.clone() - # NOTE: clone() is needed in case of p is a shared parameter - else: - raise ValueError("Key {} is repeated in EMA model params.".format(key)) - - if len(params_dict) == 0: - raise ValueError( - f"Input checkpoint path '{fpath}' does not contain " - "ema model weights, is this model trained with EMA?" - ) - - new_state['model'] = params_dict - return new_state diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/base_wrapper_dataset.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/base_wrapper_dataset.py deleted file mode 100644 index 134d398b47dc73c8807759188504aee205b3b34d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/base_wrapper_dataset.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from torch.utils.data.dataloader import default_collate - -from . import FairseqDataset - - -class BaseWrapperDataset(FairseqDataset): - def __init__(self, dataset): - super().__init__() - self.dataset = dataset - - def __getitem__(self, index): - return self.dataset[index] - - def __len__(self): - return len(self.dataset) - - def collater(self, samples): - if hasattr(self.dataset, "collater"): - return self.dataset.collater(samples) - else: - return default_collate(samples) - - @property - def sizes(self): - return self.dataset.sizes - - def num_tokens(self, index): - return self.dataset.num_tokens(index) - - def size(self, index): - return self.dataset.size(index) - - def ordered_indices(self): - return self.dataset.ordered_indices() - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def attr(self, attr: str, index: int): - return self.dataset.attr(attr, index) - - def prefetch(self, indices): - self.dataset.prefetch(indices) - - def get_batch_shapes(self): - return self.dataset.get_batch_shapes() - - def batch_by_size( - self, - indices, - max_tokens=None, - max_sentences=None, - required_batch_size_multiple=1, - ): - return self.dataset.batch_by_size( - indices, - max_tokens=max_tokens, - max_sentences=max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - ) - - def filter_indices_by_size(self, indices, max_sizes): - return self.dataset.filter_indices_by_size(indices, max_sizes) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return self.dataset.can_reuse_epoch_itr_across_epochs - - def set_epoch(self, epoch): - super().set_epoch(epoch) - if hasattr(self.dataset, "set_epoch"): - self.dataset.set_epoch(epoch) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py deleted file mode 100644 index 7c7890f8bec5db44098fe1a38d26eb13231f7063..0000000000000000000000000000000000000000 --- 
a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import atexit -import functools -import logging -import os -import sys -import time -from collections import Counter -import torch -from tabulate import tabulate -from termcolor import colored - -from detectron2.utils.file_io import PathManager - -__all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"] - - -class _ColorfulFormatter(logging.Formatter): - def __init__(self, *args, **kwargs): - self._root_name = kwargs.pop("root_name") + "." - self._abbrev_name = kwargs.pop("abbrev_name", "") - if len(self._abbrev_name): - self._abbrev_name = self._abbrev_name + "." - super(_ColorfulFormatter, self).__init__(*args, **kwargs) - - def formatMessage(self, record): - record.name = record.name.replace(self._root_name, self._abbrev_name) - log = super(_ColorfulFormatter, self).formatMessage(record) - if record.levelno == logging.WARNING: - prefix = colored("WARNING", "red", attrs=["blink"]) - elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: - prefix = colored("ERROR", "red", attrs=["blink", "underline"]) - else: - return log - return prefix + " " + log - - -@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers -def setup_logger( - output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None -): - """ - Initialize the detectron2 logger and set its verbosity level to "DEBUG". - - Args: - output (str): a file name or a directory to save log. If None, will not save log file. - If ends with ".txt" or ".log", assumed to be a file name. - Otherwise, logs will be saved to `output/log.txt`. - name (str): the root module name of this logger - abbrev_name (str): an abbreviation of the module, to avoid long names in logs. - Set to "" to not log the root module in logs. - By default, will abbreviate "detectron2" to "d2" and leave other - modules unchanged. - - Returns: - logging.Logger: a logger - """ - logger = logging.getLogger(name) - logger.setLevel(logging.DEBUG) - logger.propagate = False - - if abbrev_name is None: - abbrev_name = "d2" if name == "detectron2" else name - - plain_formatter = logging.Formatter( - "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" - ) - # stdout logging: master only - if distributed_rank == 0: - ch = logging.StreamHandler(stream=sys.stdout) - ch.setLevel(logging.DEBUG) - if color: - formatter = _ColorfulFormatter( - colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", - datefmt="%m/%d %H:%M:%S", - root_name=name, - abbrev_name=str(abbrev_name), - ) - else: - formatter = plain_formatter - ch.setFormatter(formatter) - logger.addHandler(ch) - - # file logging: all workers - if output is not None: - if output.endswith(".txt") or output.endswith(".log"): - filename = output - else: - filename = os.path.join(output, "log.txt") - if distributed_rank > 0: - filename = filename + ".rank{}".format(distributed_rank) - PathManager.mkdirs(os.path.dirname(filename)) - - fh = logging.StreamHandler(_cached_log_stream(filename)) - fh.setLevel(logging.DEBUG) - fh.setFormatter(plain_formatter) - logger.addHandler(fh) - - return logger - - -# cache the opened file object, so that different calls to `setup_logger` -# with the same file name can safely write to the same file. 
-@functools.lru_cache(maxsize=None) -def _cached_log_stream(filename): - # use 1K buffer if writing to cloud storage - io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1) - atexit.register(io.close) - return io - - -""" -Below are some other convenient logging methods. -They are mainly adopted from -https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py -""" - - -def _find_caller(): - """ - Returns: - str: module name of the caller - tuple: a hashable key to be used to identify different callers - """ - frame = sys._getframe(2) - while frame: - code = frame.f_code - if os.path.join("utils", "logger.") not in code.co_filename: - mod_name = frame.f_globals["__name__"] - if mod_name == "__main__": - mod_name = "detectron2" - return mod_name, (code.co_filename, frame.f_lineno, code.co_name) - frame = frame.f_back - - -_LOG_COUNTER = Counter() -_LOG_TIMER = {} - - -def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): - """ - Log only for the first n times. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - key (str or tuple[str]): the string(s) can be one of "caller" or - "message", which defines how to identify duplicated logs. - For example, if called with `n=1, key="caller"`, this function - will only log the first call from the same caller, regardless of - the message content. - If called with `n=1, key="message"`, this function will log the - same content only once, even if they are called from different places. - If called with `n=1, key=("caller", "message")`, this function - will not log only if the same caller has logged the same message before. - """ - if isinstance(key, str): - key = (key,) - assert len(key) > 0 - - caller_module, caller_key = _find_caller() - hash_key = () - if "caller" in key: - hash_key = hash_key + caller_key - if "message" in key: - hash_key = hash_key + (msg,) - - _LOG_COUNTER[hash_key] += 1 - if _LOG_COUNTER[hash_key] <= n: - logging.getLogger(name or caller_module).log(lvl, msg) - - -def log_every_n(lvl, msg, n=1, *, name=None): - """ - Log once per n times. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - """ - caller_module, key = _find_caller() - _LOG_COUNTER[key] += 1 - if n == 1 or _LOG_COUNTER[key] % n == 1: - logging.getLogger(name or caller_module).log(lvl, msg) - - -def log_every_n_seconds(lvl, msg, n=1, *, name=None): - """ - Log no more than once per n seconds. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - """ - caller_module, key = _find_caller() - last_logged = _LOG_TIMER.get(key, None) - current_time = time.time() - if last_logged is None or current_time - last_logged >= n: - logging.getLogger(name or caller_module).log(lvl, msg) - _LOG_TIMER[key] = current_time - - -def create_small_table(small_dict): - """ - Create a small table using the keys of small_dict as headers. This is only - suitable for small dictionaries. - - Args: - small_dict (dict): a result dictionary of only a few items. - - Returns: - str: the table as a string. 
- """ - keys, values = tuple(zip(*small_dict.items())) - table = tabulate( - [values], - headers=keys, - tablefmt="pipe", - floatfmt=".3f", - stralign="center", - numalign="center", - ) - return table - - -def _log_api_usage(identifier: str): - """ - Internal function used to log the usage of different detectron2 components - inside facebook's infra. - """ - torch._C._log_api_usage_once("detectron2." + identifier) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/test_scheduler.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/test_scheduler.py deleted file mode 100644 index 6cccb03f74b594c06add44a134b526e41c2974f0..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/test_scheduler.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import math -import numpy as np -from unittest import TestCase -import torch -from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler -from torch import nn - -from detectron2.solver import LRMultiplier, WarmupParamScheduler - - -class TestScheduler(TestCase): - def test_warmup_multistep(self): - p = nn.Parameter(torch.zeros(0)) - opt = torch.optim.SGD([p], lr=5) - - multiplier = WarmupParamScheduler( - MultiStepParamScheduler( - [1, 0.1, 0.01, 0.001], - milestones=[10, 15, 20], - num_updates=30, - ), - 0.001, - 5 / 30, - ) - sched = LRMultiplier(opt, multiplier, 30) - # This is an equivalent of: - # sched = WarmupMultiStepLR( - # opt, milestones=[10, 15, 20], gamma=0.1, warmup_factor=0.001, warmup_iters=5) - - p.sum().backward() - opt.step() - - lrs = [0.005] - for _ in range(30): - sched.step() - lrs.append(opt.param_groups[0]["lr"]) - self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001])) - self.assertTrue(np.allclose(lrs[5:10], 5.0)) - self.assertTrue(np.allclose(lrs[10:15], 0.5)) - self.assertTrue(np.allclose(lrs[15:20], 0.05)) - self.assertTrue(np.allclose(lrs[20:], 0.005)) - - def test_warmup_cosine(self): - p = nn.Parameter(torch.zeros(0)) - opt = torch.optim.SGD([p], lr=5) - multiplier = WarmupParamScheduler( - CosineParamScheduler(1, 0), - 0.001, - 5 / 30, - ) - sched = LRMultiplier(opt, multiplier, 30) - - p.sum().backward() - opt.step() - self.assertEqual(opt.param_groups[0]["lr"], 0.005) - lrs = [0.005] - - for _ in range(30): - sched.step() - lrs.append(opt.param_groups[0]["lr"]) - for idx, lr in enumerate(lrs): - expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30)) - if idx >= 5: - self.assertAlmostEqual(lr, expected_cosine) - else: - self.assertNotAlmostEqual(lr, expected_cosine) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tools/__init__.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tools/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OptorAI/gen/style.css b/spaces/OptorAI/gen/style.css deleted file mode 100644 index 57ac874613ad432d3129fa1757249a319a601f3e..0000000000000000000000000000000000000000 --- a/spaces/OptorAI/gen/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 
0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} \ No newline at end of file diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/pipelines/formating.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/pipelines/formating.py deleted file mode 100644 index 97db85f4f9db39fb86ba77ead7d1a8407d810adb..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/pipelines/formating.py +++ /dev/null @@ -1,288 +0,0 @@ -from collections.abc import Sequence - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -from annotator.uniformer.mmcv.parallel import DataContainer as DC - -from ..builder import PIPELINES - - -def to_tensor(data): - """Convert objects of various python types to :obj:`torch.Tensor`. - - Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, - :class:`Sequence`, :class:`int` and :class:`float`. - - Args: - data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to - be converted. - """ - - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, np.ndarray): - return torch.from_numpy(data) - elif isinstance(data, Sequence) and not mmcv.is_str(data): - return torch.tensor(data) - elif isinstance(data, int): - return torch.LongTensor([data]) - elif isinstance(data, float): - return torch.FloatTensor([data]) - else: - raise TypeError(f'type {type(data)} cannot be converted to tensor.') - - -@PIPELINES.register_module() -class ToTensor(object): - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. - """ - - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class ImageToTensor(object): - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = to_tensor(img.transpose(2, 0, 1)) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class Transpose(object): - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. 
- """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@PIPELINES.register_module() -class ToDataContainer(object): - """Convert results to :obj:`mmcv.DataContainer` by given fields. - - Args: - fields (Sequence[dict]): Each field is a dict like - ``dict(key='xxx', **kwargs)``. The ``key`` in result will - be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. - Default: ``(dict(key='img', stack=True), - dict(key='gt_semantic_seg'))``. - """ - - def __init__(self, - fields=(dict(key='img', - stack=True), dict(key='gt_semantic_seg'))): - self.fields = fields - - def __call__(self, results): - """Call function to convert data in results to - :obj:`mmcv.DataContainer`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted to - :obj:`mmcv.DataContainer`. - """ - - for field in self.fields: - field = field.copy() - key = field.pop('key') - results[key] = DC(results[key], **field) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(fields={self.fields})' - - -@PIPELINES.register_module() -class DefaultFormatBundle(object): - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img" - and "gt_semantic_seg". These fields are formatted as follows. - - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, - (3)to DataContainer (stack=True) - """ - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with - default bundle. - """ - - if 'img' in results: - img = results['img'] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - results['img'] = DC(to_tensor(img), stack=True) - if 'gt_semantic_seg' in results: - # convert to long - results['gt_semantic_seg'] = DC( - to_tensor(results['gt_semantic_seg'][None, - ...].astype(np.int64)), - stack=True) - return results - - def __repr__(self): - return self.__class__.__name__ - - -@PIPELINES.register_module() -class Collect(object): - """Collect data from the loader relevant to the specific task. - - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "gt_semantic_seg". - - The "img_meta" item is always populated. The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - - "img_shape": shape of the image input to the network as a tuple - (h, w, c). Note that images may be zero padded on the bottom/right - if the batch tensor is larger than this shape. 
- - - "scale_factor": a float indicating the preprocessing scale - - - "flip": a boolean indicating if image flip transform was used - - - "filename": path to the image file - - - "ori_shape": original shape of the image as a tuple (h, w, c) - - - "pad_shape": image shape after padding - - - "img_norm_cfg": a dict of normalization information: - - mean - per channel mean subtraction - - std - per channel std divisor - - to_rgb - bool indicating if bgr was converted to rgb - - Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. - meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', - 'pad_shape', 'scale_factor', 'flip', 'flip_direction', - 'img_norm_cfg')`` - """ - - def __init__(self, - keys, - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg')): - self.keys = keys - self.meta_keys = meta_keys - - def __call__(self, results): - """Call function to collect keys in results. The keys in ``meta_keys`` - will be converted to :obj:mmcv.DataContainer. - - Args: - results (dict): Result dict contains the data to collect. - - Returns: - dict: The result dict contains the following keys - - keys in``self.keys`` - - ``img_metas`` - """ - - data = {} - img_meta = {} - for key in self.meta_keys: - img_meta[key] = results[key] - data['img_metas'] = DC(img_meta, cpu_only=True) - for key in self.keys: - data[key] = results[key] - return data - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/train_vtoonify_d.py b/spaces/PKUWilliamYang/VToonify/vtoonify/train_vtoonify_d.py deleted file mode 100644 index 0c83e02d46097dad72b5e9f8ed239299d9da320a..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/train_vtoonify_d.py +++ /dev/null @@ -1,515 +0,0 @@ -import os -#os.environ['CUDA_VISIBLE_DEVICES'] = "0" -import argparse -import math -import random - -import numpy as np -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils import data -import torch.distributed as dist -from torchvision import transforms, utils -from tqdm import tqdm -from PIL import Image -from util import * - -from model.stylegan import lpips -from model.stylegan.model import Generator, Downsample -from model.vtoonify import VToonify, ConditionalDiscriminator -from model.bisenet.model import BiSeNet -from model.simple_augment import random_apply_affine -from model.stylegan.distributed import ( - get_rank, - synchronize, - reduce_loss_dict, - reduce_sum, - get_world_size, -) - -class TrainOptions(): - def __init__(self): - - self.parser = argparse.ArgumentParser(description="Train VToonify-D") - self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations") - self.parser.add_argument("--batch", type=int, default=8, help="batch sizes for each gpus") - self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate") - self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training") - self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration") - self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint") - self.parser.add_argument("--save_begin", 
type=int, default=30000, help="when to start saving a checkpoint") - self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving a checkpoint") - - self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss") - self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse recontruction loss") - self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss") - self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss") - self.parser.add_argument("--msk_loss", type=float, default=0.0005, help="the weight of attention mask loss") - - self.parser.add_argument("--fix_degree", action="store_true", help="use a fixed style degree") - self.parser.add_argument("--fix_style", action="store_true", help="use a fixed style image") - self.parser.add_argument("--fix_color", action="store_true", help="use the original color (no color transfer)") - self.parser.add_argument("--exstyle_path", type=str, default='./checkpoint/cartoon/refined_exstyle_code.npy', help="path of the extrinsic style code") - self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image") - self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D") - - self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model") - self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents") - self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/cartoon/generator.pt', help="path to the stylegan model") - self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") - self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder") - - self.parser.add_argument("--name", type=str, default='vtoonify_d_cartoon', help="saved model name") - self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder") - - def parse(self): - self.opt = self.parser.parse_args() - if self.opt.encoder_path is None: - self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt') - args = vars(self.opt) - if self.opt.local_rank == 0: - print('Load options') - for name, value in sorted(args.items()): - print('%s: %s' % (str(name), str(value))) - return self.opt - - -# pretrain E of vtoonify. -# We train E so that its the last-layer feature matches the original 8-th-layer input feature of G1 -# See Model initialization in Sec. 4.2.2 for the detail -def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device): - pbar = range(args.iter) - - if get_rank() == 0: - pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01) - - recon_loss = torch.tensor(0.0, device=device) - loss_dict = {} - - if args.distributed: - g_module = generator.module - else: - g_module = generator - - accum = 0.5 ** (32 / (10 * 1000)) - - requires_grad(g_module.encoder, True) - - for idx in pbar: - i = idx + args.start_iter - - if i > args.iter: - print("Done!") - break - - # during pretraining, the last 11 layers of DualStyleGAN (for color transfer) is not used. - # so args.fix_color is not used. the last 11 elements in weight are not used. 
- if args.fix_degree: - d_s = args.style_degree - else: - d_s = 0 if i <= args.iter / 4.0 else np.random.rand(1)[0] - weight = [d_s] * 18 - - # sample pre-saved w''=E_s(s) - if args.fix_style: - style = styles[args.style_id:args.style_id+1].repeat(args.batch,1,1) - else: - style = styles[torch.randint(0, styles.size(0), (args.batch,))] - - with torch.no_grad(): - # during pretraining, no geometric transformations are applied. - noise_sample = torch.randn(args.batch, 512).cuda() - ws_ = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w - ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n - img_gen, _ = g_ema.stylegan()([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0) - img_gen = torch.clamp(img_gen, -1, 1).detach() # x'' - img_gen512 = down(img_gen.detach()) - img_gen256 = down(img_gen512.detach()) # image part of x''_down - mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0] - real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1) # x''_down - # f_G1^(8)(w', w'', d_s) - real_feat, real_skip = g_ema.generator([ws_], style, input_is_latent=True, return_feat=True, - truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight) - - real_input = real_input.detach() - real_feat = real_feat.detach() - real_skip = real_skip.detach() - - # f_E^(last)(x''_down, w'', d_s) - fake_feat, fake_skip = generator(real_input, style, d_s, return_feat=True) - - # L_E in Eq.(8) - recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip) - - loss_dict["emse"] = recon_loss - - generator.zero_grad() - recon_loss.backward() - g_optim.step() - - accumulate(g_ema.encoder, g_module.encoder, accum) - - loss_reduced = reduce_loss_dict(loss_dict) - - emse_loss_val = loss_reduced["emse"].mean().item() - - if get_rank() == 0: - pbar.set_description( - ( - f"iter: {i:d}; emse: {emse_loss_val:.3f}" - ) - ) - - if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: - if (i+1) == args.iter: - savename = f"checkpoint/%s/pretrain.pt"%(args.name) - else: - savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1) - torch.save( - { - #"g": g_module.encoder.state_dict(), - "g_ema": g_ema.encoder.state_dict(), - }, - savename, - ) - - -# generate paired data and train vtoonify, see Sec. 
4.2.2 for the detail -def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device): - pbar = range(args.iter) - - if get_rank() == 0: - pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=130, dynamic_ncols=False) - - d_loss = torch.tensor(0.0, device=device) - g_loss = torch.tensor(0.0, device=device) - grec_loss = torch.tensor(0.0, device=device) - gfeat_loss = torch.tensor(0.0, device=device) - temporal_loss = torch.tensor(0.0, device=device) - gmask_loss = torch.tensor(0.0, device=device) - loss_dict = {} - - surffix = '_s' - if args.fix_style: - surffix += '%03d'%(args.style_id) - surffix += '_d' - if args.fix_degree: - surffix += '%1.1f'%(args.style_degree) - if not args.fix_color: - surffix += '_c' - - if args.distributed: - g_module = generator.module - d_module = discriminator.module - - else: - g_module = generator - d_module = discriminator - - accum = 0.5 ** (32 / (10 * 1000)) - - for idx in pbar: - i = idx + args.start_iter - - if i > args.iter: - print("Done!") - break - - # sample style degree - if args.fix_degree or idx == 0 or i == 0: - d_s = args.style_degree - else: - d_s = np.random.randint(0,6) / 5.0 - if args.fix_color: - weight = [d_s] * 7 + [0] * 11 - else: - weight = [d_s] * 7 + [1] * 11 - # style degree condition for discriminator - degree_label = torch.zeros(args.batch, 1).to(device) + d_s - - # style index condition for discriminator - style_ind = torch.randint(0, styles.size(0), (args.batch,)) - if args.fix_style or idx == 0 or i == 0: - style_ind = style_ind * 0 + args.style_id - # sample pre-saved E_s(s) - style = styles[style_ind] - - with torch.no_grad(): - noise_sample = torch.randn(args.batch, 512).cuda() - wc = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w - wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n - wc = wc.detach() - xc, _ = g_ema.stylegan()([wc], input_is_latent=True, truncation=0.5, truncation_latent=0) - xc = torch.clamp(xc, -1, 1).detach() # x'' - if not args.fix_color and args.fix_style: # only transfer this fixed style's color - xl = style.clone() - else: - xl = pspencoder(F.adaptive_avg_pool2d(xc, 256)) - xl = g_ema.zplus2wplus(xl) # E_s(x''_down) - xl = torch.cat((style[:,0:7], xl[:,7:18]), dim=1).detach() # w'' = concatenate E_s(s) and E_s(x''_down) - xs, _ = g_ema.generator([wc], xl, input_is_latent=True, - truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight) - xs = torch.clamp(xs, -1, 1).detach() # y'=G1(w', w'', d_s, d_c) - # apply color jitter to w'. we fuse w' of the current iteration with w' of the last iteration - if idx > 0 and i >= (args.iter/2.0) and (not args.fix_color and not args.fix_style): - wcfuse = wc.clone() - wcfuse[:,7:] = wc_[:,7:] * (i/(args.iter/2.0)-1) + wcfuse[:,7:] * (2-i/(args.iter/2.0)) - xc, _ = g_ema.stylegan()([wcfuse], input_is_latent=True, truncation=0.5, truncation_latent=0) - xc = torch.clamp(xc, -1, 1).detach() # x' - wc_ = wc.clone() # wc_ is the w' in the last iteration - # during training, random geometric transformations are applied. 
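As a standalone illustration of the paired-augmentation idea noted in the comment above: the source and target are concatenated along channels so a single random geometric transform keeps them pixel-aligned. torchvision's functional affine is used here purely as a stand-in for the repository's random_apply_affine, and every value is illustrative.

```python
import torch
import torchvision.transforms.functional as TF

x = torch.randn(1, 3, 256, 256)   # input image (x)
y = torch.randn(1, 3, 256, 256)   # stylized target (y)
pair = torch.cat((x, y), dim=1)   # (1, 6, H, W): one tensor, one shared transform

# One affine transform applied to the stacked pair keeps input and target aligned
pair = TF.affine(pair, angle=10.0, translate=[8, 8], scale=1.1, shear=[0.0, 0.0])

x_aug, y_aug = pair[:, 0:3], pair[:, 3:6]  # still pixel-aligned after augmentation
```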
- imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None) - real_input1024 = imgs[:,0:3].detach() # image part of x - real_input512 = down(real_input1024).detach() - real_input256 = down(real_input512).detach() - mask512 = parsingpredictor(2*real_input512)[0] - mask256 = down(mask512).detach() - mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x - real_output = imgs[:,3:].detach() # y - real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down - # for log, sample a fixed input-output pair (x_down, y, w'', d_s) - if idx == 0 or i == 0: - samplein = real_input.clone().detach() - sampleout = real_output.clone().detach() - samplexl = xl.clone().detach() - sampleds = d_s - - ###### This part is for training discriminator - - requires_grad(g_module.encoder, False) - requires_grad(g_module.fusion_out, False) - requires_grad(g_module.fusion_skip, False) - requires_grad(discriminator, True) - - fake_output = generator(real_input, xl, d_s) - fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind) - real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256), degree_label, style_ind) - - # L_adv in Eq.(3) - d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss - loss_dict["d"] = d_loss - - discriminator.zero_grad() - d_loss.backward() - d_optim.step() - - ###### This part is for training generator (encoder and fusion modules) - - requires_grad(g_module.encoder, True) - requires_grad(g_module.fusion_out, True) - requires_grad(g_module.fusion_skip, True) - requires_grad(discriminator, False) - - fake_output, m_Es = generator(real_input, xl, d_s, return_mask=True) - fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind) - - # L_adv in Eq.(3) - g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss - # L_rec in Eq.(2) - grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss - gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 will out of memory - F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 will get blurry output - - # L_msk in Eq.(9) - gmask_loss = torch.tensor(0.0, device=device) - if not args.fix_degree or args.msk_loss > 0: - for jj, m_E in enumerate(m_Es): - gd_s = (1 - d_s) ** 2 * 0.9 + 0.1 - gmask_loss += F.relu(torch.mean(m_E)-gd_s) * args.msk_loss - - loss_dict["g"] = g_loss - loss_dict["gr"] = grec_loss - loss_dict["gf"] = gfeat_loss - loss_dict["msk"] = gmask_loss - - w = random.randint(0,1024-896) - h = random.randint(0,1024-896) - crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach() - crop_input = down(down(crop_input)) - crop_fake_output = fake_output[:,:,w:w+896,h:h+896] - fake_crop_output = generator(crop_input, xl, d_s) - # L_tmp in Eq.(4), gradually increase the weight of L_tmp - temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss - loss_dict["tp"] = temporal_loss - - generator.zero_grad() - (g_loss + grec_loss + gfeat_loss + temporal_loss + gmask_loss).backward() - g_optim.step() - - accumulate(g_ema.encoder, g_module.encoder, accum) - accumulate(g_ema.fusion_out, g_module.fusion_out, accum) - accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum) - - loss_reduced = reduce_loss_dict(loss_dict) - - d_loss_val = loss_reduced["d"].mean().item() - g_loss_val = loss_reduced["g"].mean().item() - gr_loss_val = loss_reduced["gr"].mean().item() - gf_loss_val = 
loss_reduced["gf"].mean().item() - tmp_loss_val = loss_reduced["tp"].mean().item() - msk_loss_val = loss_reduced["msk"].mean().item() - - if get_rank() == 0: - pbar.set_description( - ( - f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; " - f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}; msk: {msk_loss_val:.3f}" - ) - ) - - if i == 0 or (i+1) % args.log_every == 0 or (i+1) == args.iter: - with torch.no_grad(): - g_ema.eval() - sample1 = g_ema(samplein, samplexl, sampleds) - if args.fix_degree: - sample = F.interpolate(torch.cat((sampleout, sample1), dim=0), 256) - else: - sample2 = g_ema(samplein, samplexl, d_s) - sample = F.interpolate(torch.cat((sampleout, sample1, sample2), dim=0), 256) - utils.save_image( - sample, - f"log/%s/%05d.jpg"%(args.name, (i+1)), - nrow=int(args.batch), - normalize=True, - range=(-1, 1), - ) - - if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: - if (i+1) == args.iter: - savename = f"checkpoint/%s/vtoonify%s.pt"%(args.name, surffix) - else: - savename = f"checkpoint/%s/vtoonify%s_%05d.pt"%(args.name, surffix, i+1) - torch.save( - { - #"g": g_module.state_dict(), - #"d": d_module.state_dict(), - "g_ema": g_ema.state_dict(), - }, - savename, - ) - - - -if __name__ == "__main__": - - device = "cuda" - parser = TrainOptions() - args = parser.parse() - if args.local_rank == 0: - print('*'*98) - if not os.path.exists("log/%s/"%(args.name)): - os.makedirs("log/%s/"%(args.name)) - if not os.path.exists("checkpoint/%s/"%(args.name)): - os.makedirs("checkpoint/%s/"%(args.name)) - - n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 - args.distributed = n_gpu > 1 - - if args.distributed: - torch.cuda.set_device(args.local_rank) - torch.distributed.init_process_group(backend="nccl", init_method="env://") - synchronize() - - generator = VToonify(backbone = 'dualstylegan').to(device) - generator.apply(weights_init) - g_ema = VToonify(backbone = 'dualstylegan').to(device) - g_ema.eval() - - ckpt = torch.load(args.stylegan_path, map_location=lambda storage, loc: storage) - generator.generator.load_state_dict(ckpt["g_ema"], strict=False) - # load ModRes blocks of DualStyleGAN into the modified ModRes blocks (with dilation) - generator.res.load_state_dict(generator.generator.res.state_dict(), strict=False) - g_ema.generator.load_state_dict(ckpt["g_ema"], strict=False) - g_ema.res.load_state_dict(g_ema.generator.res.state_dict(), strict=False) - requires_grad(generator.generator, False) - requires_grad(generator.res, False) - requires_grad(g_ema.generator, False) - requires_grad(g_ema.res, False) - - if not args.pretrain: - generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"]) - # we initialize the fusion modules to map f_G \otimes f_E to f_G. 
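The loop just below applies the near-identity initialization described in this comment to every fusion layer. As a standalone illustration, the same trick on a single convolution (channel sizes are assumptions made only for this sketch): damp the random kernel, then add an identity mapping over the first out_ch input channels at the kernel centre, so the concatenated [f_G, f_E] features initially map straight back to f_G.

```python
import torch
import torch.nn as nn

out_ch, in_ch = 64, 128  # illustrative: in_ch spans both the f_G and f_E channels
fusion = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)

with torch.no_grad():
    fusion.weight.mul_(0.01)                               # shrink the random weights
    fusion.weight[:, 0:out_ch, 1, 1] += torch.eye(out_ch)  # identity on the f_G channels at the kernel centre
```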
- for k in generator.fusion_out: - k.conv.weight.data *= 0.01 - k.conv.weight[:,0:k.conv.weight.shape[0],1,1].data += torch.eye(k.conv.weight.shape[0]).cuda() - for k in generator.fusion_skip: - k.weight.data *= 0.01 - k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda() - - accumulate(g_ema.encoder, generator.encoder, 0) - accumulate(g_ema.fusion_out, generator.fusion_out, 0) - accumulate(g_ema.fusion_skip, generator.fusion_skip, 0) - - g_parameters = list(generator.encoder.parameters()) - if not args.pretrain: - g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters()) - - g_optim = optim.Adam( - g_parameters, - lr=args.lr, - betas=(0.9, 0.99), - ) - - if args.distributed: - generator = nn.parallel.DistributedDataParallel( - generator, - device_ids=[args.local_rank], - output_device=args.local_rank, - broadcast_buffers=False, - find_unused_parameters=True, - ) - - parsingpredictor = BiSeNet(n_classes=19) - parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) - parsingpredictor.to(device).eval() - requires_grad(parsingpredictor, False) - - # we apply gaussian blur to the images to avoid flickers caused during downsampling - down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device) - requires_grad(down, False) - - directions = torch.tensor(np.load(args.direction_path)).to(device) - - # load style codes of DualStyleGAN - exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item() - if args.local_rank == 0 and not os.path.exists('checkpoint/%s/exstyle_code.npy'%(args.name)): - np.save('checkpoint/%s/exstyle_code.npy'%(args.name), exstyles, allow_pickle=True) - styles = [] - with torch.no_grad(): - for stylename in exstyles.keys(): - exstyle = torch.tensor(exstyles[stylename]).to(device) - exstyle = g_ema.zplus2wplus(exstyle) - styles += [exstyle] - styles = torch.cat(styles, dim=0) - - if not args.pretrain: - discriminator = ConditionalDiscriminator(256, use_condition=True, style_num = styles.size(0)).to(device) - - d_optim = optim.Adam( - discriminator.parameters(), - lr=args.lr, - betas=(0.9, 0.99), - ) - - if args.distributed: - discriminator = nn.parallel.DistributedDataParallel( - discriminator, - device_ids=[args.local_rank], - output_device=args.local_rank, - broadcast_buffers=False, - find_unused_parameters=True, - ) - - percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank]) - requires_grad(percept.model.net, False) - - pspencoder = load_psp_standalone(args.style_encoder_path, device) - - if args.local_rank == 0: - print('Load models and data successfully loaded!') - - if args.pretrain: - pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device) - else: - train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device) diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-41.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-41.go deleted file mode 100644 index 569a69df3b61de094570a40904dd1de8aedf160e..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-41.go and /dev/null differ diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/optimizer/builder.py 
b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/optimizer/builder.py deleted file mode 100644 index f9234eed8f1f186d9d8dfda34562157ee39bdb3a..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/optimizer/builder.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import inspect - -import torch - -from ...utils import Registry, build_from_cfg - -OPTIMIZERS = Registry('optimizer') -OPTIMIZER_BUILDERS = Registry('optimizer builder') - - -def register_torch_optimizers(): - torch_optimizers = [] - for module_name in dir(torch.optim): - if module_name.startswith('__'): - continue - _optim = getattr(torch.optim, module_name) - if inspect.isclass(_optim) and issubclass(_optim, - torch.optim.Optimizer): - OPTIMIZERS.register_module()(_optim) - torch_optimizers.append(module_name) - return torch_optimizers - - -TORCH_OPTIMIZERS = register_torch_optimizers() - - -def build_optimizer_constructor(cfg): - return build_from_cfg(cfg, OPTIMIZER_BUILDERS) - - -def build_optimizer(model, cfg): - optimizer_cfg = copy.deepcopy(cfg) - constructor_type = optimizer_cfg.pop('constructor', - 'DefaultOptimizerConstructor') - paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) - optim_constructor = build_optimizer_constructor( - dict( - type=constructor_type, - optimizer_cfg=optimizer_cfg, - paramwise_cfg=paramwise_cfg)) - optimizer = optim_constructor(model) - return optimizer diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/__init__.py deleted file mode 100644 index ac66d3cfe0ea04af45c0f3594bf135841c3812e3..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from .ann_head import ANNHead -from .apc_head import APCHead -from .aspp_head import ASPPHead -from .cc_head import CCHead -from .da_head import DAHead -from .dm_head import DMHead -from .dnl_head import DNLHead -from .ema_head import EMAHead -from .enc_head import EncHead -from .fcn_head import FCNHead -from .fpn_head import FPNHead -from .gc_head import GCHead -from .lraspp_head import LRASPPHead -from .nl_head import NLHead -from .ocr_head import OCRHead -# from .point_head import PointHead -from .psa_head import PSAHead -from .psp_head import PSPHead -from .sep_aspp_head import DepthwiseSeparableASPPHead -from .sep_fcn_head import DepthwiseSeparableFCNHead -from .uper_head import UPerHead - -__all__ = [ - 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead', - 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead', - 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead', - 'APCHead', 'DMHead', 'LRASPPHead' -] diff --git a/spaces/Pravincoder/Loan_Approval_Predictor/README.md b/spaces/Pravincoder/Loan_Approval_Predictor/README.md deleted file mode 100644 index da4387628c76126f5bf0dee362ccbf8c79ae09ce..0000000000000000000000000000000000000000 --- a/spaces/Pravincoder/Loan_Approval_Predictor/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Loan Approval Predictor -emoji: 🐨 -colorFrom: pink -colorTo: yellow -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/RMXK/RVC_HFF/infer_uvr5.py b/spaces/RMXK/RVC_HFF/infer_uvr5.py deleted file mode 100644 index 8c8c05429a1d65dd8b198f16a8ea8c6e68991c07..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/infer_uvr5.py +++ /dev/null @@ -1,363 +0,0 @@ -import os, sys, torch, warnings, pdb - -now_dir = os.getcwd() -sys.path.append(now_dir) -from json import load as ll - -warnings.filterwarnings("ignore") -import librosa -import importlib -import numpy as np -import hashlib, math -from tqdm import tqdm -from lib.uvr5_pack.lib_v5 import spec_utils -from lib.uvr5_pack.utils import _get_name_params, inference -from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters -import soundfile as sf -from lib.uvr5_pack.lib_v5.nets_new import CascadedNet -from lib.uvr5_pack.lib_v5 import nets_61968KB as nets - - -class _audio_pre_: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json") - model = nets.CascadedASPPNet(mp.param["bins"] * 2) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"): - if ins_root is None and vocal_root is None: - return "No save root." - name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - 
input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - print("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - print("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - - -class _audio_pre_new: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v3.json") - nout = 64 if "DeReverb" in model_path else 48 - model = CascadedNet(mp.param["bins"] * 2, nout) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_( - self, music_file, vocal_root=None, ins_root=None, format="flac" - ): # 3个VR模型vocal和ins是反的 - if ins_root is None and vocal_root is None: - return "No save root." 
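(The inline Chinese comment on this method notes, roughly, that for these three VR models the vocal and instrumental outputs are swapped.) As a standalone illustration of the mask-based split both _path_audio_ methods perform: the network predicts a magnitude for one stem, which is recombined with the mixture phase, and the other stem is the complex residual. Shapes and the fake "prediction" below are illustrative only.

```python
import numpy as np

rng = np.random.default_rng(0)
X_mag = np.abs(rng.standard_normal((2, 513, 100)))               # mixture magnitude
X_phase = np.exp(1j * rng.uniform(-np.pi, np.pi, X_mag.shape))   # mixture phase
X_spec = X_mag * X_phase                                         # complex mixture spectrogram

pred = np.clip(X_mag * 0.6, 0, None)  # stand-in for the model's magnitude estimate
y_spec = pred * X_phase               # estimated stem (instrumental in the code above)
v_spec = X_spec - y_spec              # residual stem (vocals in the code above)
```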
- name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - print("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - print("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - 
sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - - -if __name__ == "__main__": - device = "cuda" - is_half = True - # model_path = "uvr5_weights/2_HP-UVR.pth" - # model_path = "uvr5_weights/VR-DeEchoDeReverb.pth" - # model_path = "uvr5_weights/VR-DeEchoNormal.pth" - model_path = "uvr5_weights/DeEchoNormal.pth" - # pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10) - pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=True, agg=10) - audio_path = "雪雪伴奏对消HP5.wav" - save_path = "opt" - pre_fun._path_audio_(audio_path, save_path, save_path) diff --git a/spaces/RVVY/test01/Dockerfile b/spaces/RVVY/test01/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/RVVY/test01/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/RamAnanth1/T2I-Adapter/gradio_sketch.py b/spaces/RamAnanth1/T2I-Adapter/gradio_sketch.py deleted file mode 100644 index 64cf265c680259c4cf16496e84bdd3c20f085f2a..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/T2I-Adapter/gradio_sketch.py +++ /dev/null @@ -1,28 +0,0 @@ - -import gradio as gr - -def create_demo(process): - block = gr.Blocks().queue() - with block: - with gr.Row(): - with gr.Column(): - input_img = gr.Image(source='upload', type="numpy") - prompt = gr.Textbox(label="Prompt") - neg_prompt = gr.Textbox(label="Negative Prompt", - value='ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face') - with gr.Row(): - type_in = gr.inputs.Radio(['Sketch', 'Image'], type="value", default='Image', label='Input Types\n (You can input an image or a sketch)') - color_back = gr.inputs.Radio(['White', 'Black'], type="value", default='Black', label='Color of the sketch background\n (Only work for sketch input)') - run_button = gr.Button(label="Run") - with gr.Accordion("Advanced options", open=False): - con_strength = gr.Slider(label="Controling Strength (The guidance strength of the sketch to the result)", minimum=0, maximum=1, value=0.4, step=0.1) - scale = gr.Slider(label="Guidance Scale (Classifier free guidance)", minimum=0.1, maximum=30.0, value=7.5, step=0.1) - fix_sample = gr.inputs.Radio(['True', 'False'], type="value", default='False', label='Fix Sampling\n (Fix the random seed)') - base_model = gr.inputs.Radio(['sd-v1-4.ckpt', 'anything-v4.0-pruned.ckpt'], type="value", default='sd-v1-4.ckpt', label='The base model you want to use') - with gr.Column(): - result = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto') - ips = [input_img, type_in, color_back, prompt, neg_prompt, fix_sample, scale, con_strength, base_model] - run_button.click(fn=process, inputs=ips, outputs=[result]) - - return block - \ No newline at end of file diff --git 
a/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/matchers/__init__.py b/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/matchers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Ricecake123/RVC-demo/lib/uvr5_pack/lib_v5/nets_123821KB.py b/spaces/Ricecake123/RVC-demo/lib/uvr5_pack/lib_v5/nets_123821KB.py deleted file mode 100644 index becbfae85683a13bbb19d3ea6c840da24e61e01e..0000000000000000000000000000000000000000 --- a/spaces/Ricecake123/RVC-demo/lib/uvr5_pack/lib_v5/nets_123821KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + 
aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Rishabh055/Movie_recommendation_System/app.py b/spaces/Rishabh055/Movie_recommendation_System/app.py deleted file mode 100644 index 81d41c8f9e3200e34e7d179da1a185517488371e..0000000000000000000000000000000000000000 --- a/spaces/Rishabh055/Movie_recommendation_System/app.py +++ /dev/null @@ -1,96 +0,0 @@ -import streamlit as st -import pandas as pd -import pickle -import requests - -def fetch_poster(movie_id): - url = "https://api.themoviedb.org/3/movie/{}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US".format(movie_id) - data = requests.get(url) - data = data.json() - poster_path = data['poster_path'] - full_path = "https://image.tmdb.org/t/p/w500/" + poster_path - return full_path - -def recommend(movie): - movie_index = movies[movies['title'] == movie].index[0] - distances = similarity[movie_index] - movies_list = sorted(list(enumerate(distances)), reverse =True,key=lambda x: x[1])[1:6] - - recommended_movies = [] - recommended_movies_posters = [] - for i in movies_list: - movie_id = movies.iloc[i[0]].movie_id - # Help to fetch poster from api - recommended_movies.append(movies.iloc[i[0]]['title']) - recommended_movies_posters.append(fetch_poster(movie_id)) - return recommended_movies, recommended_movies_posters - - -movies = pickle.load(open('movies.pkl','rb')) -similarity = pickle.load(open('similarity.pkl', 'rb')) -st.title('Movie Recommendation App') - -selected_movie = st.selectbox( -'Select a movie to get recommendations', -movies['title'].values) - -if st.button('Get Recommendations'): - names,posters = recommend(selected_movie) - - col1, col2, col3,col4,col5 = st.columns(5) - with col1: - st.text(names[0]) - st.image(posters[0]) - - with col2: - st.text(names[1]) - st.image(posters[1]) - - with col3: - st.text(names[2]) - st.image(posters[2]) - - with col4: - st.text(names[3]) - st.image(posters[3]) - - with col5: - st.text(names[4]) - st.image(posters[4]) - - -hide_streamlit_style = """ - - """ -st.markdown(hide_streamlit_style, unsafe_allow_html=True) -footer=""" - -""" -st.markdown(footer,unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py deleted file mode 100644 index 85aaa2f0600afbdfc8b0917cb5f341740776a603..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py +++ /dev/null @@ -1,582 +0,0 @@ -import torch -import torch.nn.functional as F - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, - merge_aug_masks, multiclass_nms) -from ..builder import HEADS, build_head, build_roi_extractor -from .cascade_roi_head import CascadeRoIHead - - -@HEADS.register_module() -class SCNetRoIHead(CascadeRoIHead): - """RoIHead for `SCNet `_. - - Args: - num_stages (int): number of cascade stages. - stage_loss_weights (list): loss weight of cascade stages. - semantic_roi_extractor (dict): config to init semantic roi extractor. - semantic_head (dict): config to init semantic head. - feat_relay_head (dict): config to init feature_relay_head. 
- glbctx_head (dict): config to init global context head. - """ - - def __init__(self, - num_stages, - stage_loss_weights, - semantic_roi_extractor=None, - semantic_head=None, - feat_relay_head=None, - glbctx_head=None, - **kwargs): - super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, - **kwargs) - assert self.with_bbox and self.with_mask - assert not self.with_shared_head # shared head is not supported - - if semantic_head is not None: - self.semantic_roi_extractor = build_roi_extractor( - semantic_roi_extractor) - self.semantic_head = build_head(semantic_head) - - if feat_relay_head is not None: - self.feat_relay_head = build_head(feat_relay_head) - - if glbctx_head is not None: - self.glbctx_head = build_head(glbctx_head) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize ``mask_head``""" - if mask_roi_extractor is not None: - self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) - self.mask_head = build_head(mask_head) - - def init_weights(self, pretrained): - """Initialize the weights in head. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - for i in range(self.num_stages): - if self.with_bbox: - self.bbox_roi_extractor[i].init_weights() - self.bbox_head[i].init_weights() - if self.with_mask: - self.mask_roi_extractor.init_weights() - self.mask_head.init_weights() - if self.with_semantic: - self.semantic_head.init_weights() - if self.with_glbctx: - self.glbctx_head.init_weights() - if self.with_feat_relay: - self.feat_relay_head.init_weights() - - @property - def with_semantic(self): - """bool: whether the head has semantic head""" - return hasattr(self, - 'semantic_head') and self.semantic_head is not None - - @property - def with_feat_relay(self): - """bool: whether the head has feature relay head""" - return (hasattr(self, 'feat_relay_head') - and self.feat_relay_head is not None) - - @property - def with_glbctx(self): - """bool: whether the head has global context head""" - return hasattr(self, 'glbctx_head') and self.glbctx_head is not None - - def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): - """Fuse global context feats with roi feats.""" - assert roi_feats.size(0) == rois.size(0) - img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() - fused_feats = torch.zeros_like(roi_feats) - for img_id in img_inds: - inds = (rois[:, 0] == img_id.item()) - fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] - return fused_feats - - def _slice_pos_feats(self, feats, sampling_results): - """Get features from pos rois.""" - num_rois = [res.bboxes.size(0) for res in sampling_results] - num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] - inds = torch.zeros(sum(num_rois), dtype=torch.bool) - start = 0 - for i in range(len(num_rois)): - start = 0 if i == 0 else start + num_rois[i - 1] - stop = start + num_pos_rois[i] - inds[start:stop] = 1 - sliced_feats = feats[inds] - return sliced_feats - - def _bbox_forward(self, - stage, - x, - rois, - semantic_feat=None, - glbctx_feat=None): - """Box head forward function used in both training and testing.""" - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor( - x[:len(bbox_roi_extractor.featmap_strides)], rois) - if self.with_semantic and semantic_feat is not None: - bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: - bbox_semantic_feat = F.adaptive_avg_pool2d( - 
bbox_semantic_feat, bbox_feats.shape[-2:]) - bbox_feats += bbox_semantic_feat - if self.with_glbctx and glbctx_feat is not None: - bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) - cls_score, bbox_pred, relayed_feat = bbox_head( - bbox_feats, return_shared_feat=True) - - bbox_results = dict( - cls_score=cls_score, - bbox_pred=bbox_pred, - relayed_feat=relayed_feat) - return bbox_results - - def _mask_forward(self, - x, - rois, - semantic_feat=None, - glbctx_feat=None, - relayed_feat=None): - """Mask head forward function used in both training and testing.""" - mask_feats = self.mask_roi_extractor( - x[:self.mask_roi_extractor.num_inputs], rois) - if self.with_semantic and semantic_feat is not None: - mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: - mask_semantic_feat = F.adaptive_avg_pool2d( - mask_semantic_feat, mask_feats.shape[-2:]) - mask_feats += mask_semantic_feat - if self.with_glbctx and glbctx_feat is not None: - mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) - if self.with_feat_relay and relayed_feat is not None: - mask_feats = mask_feats + relayed_feat - mask_pred = self.mask_head(mask_feats) - mask_results = dict(mask_pred=mask_pred) - - return mask_results - - def _bbox_forward_train(self, - stage, - x, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - semantic_feat=None, - glbctx_feat=None): - """Run forward function and calculate loss for box head in training.""" - bbox_head = self.bbox_head[stage] - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward( - stage, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - - bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, rcnn_train_cfg) - loss_bbox = bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update( - loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) - return bbox_results - - def _mask_forward_train(self, - x, - sampling_results, - gt_masks, - rcnn_train_cfg, - semantic_feat=None, - glbctx_feat=None, - relayed_feat=None): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward( - x, - pos_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - - mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, - rcnn_train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head.loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results = loss_mask - return mask_results - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - gt_semantic_seg=None): - """ - Args: - x (list[Tensor]): list of multi-level img features. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - proposal_list (list[Tensors]): list of region proposals. - - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
- - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None, list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None, Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - gt_semantic_seg (None, list[Tensor]): semantic segmentation masks - used if the architecture supports semantic segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - losses = dict() - - # semantic segmentation branch - if self.with_semantic: - semantic_pred, semantic_feat = self.semantic_head(x) - loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) - losses['loss_semantic_seg'] = loss_seg - else: - semantic_feat = None - - # global context branch - if self.with_glbctx: - mc_pred, glbctx_feat = self.glbctx_head(x) - loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) - losses['loss_glbctx'] = loss_glbctx - else: - glbctx_feat = None - - for i in range(self.num_stages): - self.current_stage = i - rcnn_train_cfg = self.train_cfg[i] - lw = self.stage_loss_weights[i] - - # assign gts and sample proposals - sampling_results = [] - bbox_assigner = self.bbox_assigner[i] - bbox_sampler = self.bbox_sampler[i] - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - - for j in range(num_imgs): - assign_result = bbox_assigner.assign(proposal_list[j], - gt_bboxes[j], - gt_bboxes_ignore[j], - gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - bbox_results = \ - self._bbox_forward_train( - i, x, sampling_results, gt_bboxes, gt_labels, - rcnn_train_cfg, semantic_feat, glbctx_feat) - roi_labels = bbox_results['bbox_targets'][0] - - for name, value in bbox_results['loss_bbox'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # refine boxes - if i < self.num_stages - 1: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - with torch.no_grad(): - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, img_metas) - - if self.with_feat_relay: - relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], - sampling_results) - relayed_feat = self.feat_relay_head(relayed_feat) - else: - relayed_feat = None - - mask_results = self._mask_forward_train(x, sampling_results, gt_masks, - rcnn_train_cfg, semantic_feat, - glbctx_feat, relayed_feat) - mask_lw = sum(self.stage_loss_weights) - losses['loss_mask'] = mask_lw * mask_results['loss_mask'] - - return losses - - def simple_test(self, x, proposal_list, img_metas, rescale=False): - """Test without augmentation.""" - if self.with_semantic: - _, semantic_feat = self.semantic_head(x) - else: - semantic_feat = None - - if self.with_glbctx: - mc_pred, glbctx_feat = self.glbctx_head(x) - else: - glbctx_feat = None - - num_imgs = len(proposal_list) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # "ms" in variable names means multi-stage - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - rois = bbox2roi(proposal_list) - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, - 
x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple(len(p) for p in proposal_list) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - ms_scores.append(cls_score) - - if i < self.num_stages - 1: - bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score] - rois = torch.cat([ - bbox_head.regress_by_class(rois[i], bbox_label[i], - bbox_pred[i], img_metas[i]) - for i in range(num_imgs) - ]) - - # average scores of each image by stages - cls_score = [ - sum([score[i] for score in ms_scores]) / float(len(ms_scores)) - for i in range(num_imgs) - ] - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(num_imgs): - det_bbox, det_label = self.bbox_head[-1].get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - det_bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head[-1].num_classes) - for i in range(num_imgs) - ] - - if self.with_mask: - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - mask_classes = self.mask_head.num_classes - det_segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - else: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i] - for i in range(num_imgs) - ] - mask_rois = bbox2roi(_bboxes) - - # get relay feature on mask_rois - bbox_results = self._bbox_forward( - -1, - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - relayed_feat = bbox_results['relayed_feat'] - relayed_feat = self.feat_relay_head(relayed_feat) - - mask_results = self._mask_forward( - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - mask_pred = mask_results['mask_pred'] - - # split batch mask prediction back to each image - num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) - mask_preds = mask_pred.split(num_bbox_per_img, 0) - - # apply mask post-processing to each image individually - det_segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - det_segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_preds[i], _bboxes[i], det_labels[i], - self.test_cfg, ori_shapes[i], scale_factors[i], - rescale) - det_segm_results.append(segm_result) - - # return results - if self.with_mask: - return list(zip(det_bbox_results, det_segm_results)) - else: - return det_bbox_results - - def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): - if self.with_semantic: - semantic_feats = [ - self.semantic_head(feat)[1] for feat in img_feats - ] - else: - semantic_feats = [None] * len(img_metas) - - if self.with_glbctx: - glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] - else: - glbctx_feats = [None] * len(img_metas) - - rcnn_test_cfg = self.test_cfg - aug_bboxes = [] - aug_scores = [] - for x, img_meta, semantic_feat, glbctx_feat in zip( - img_feats, img_metas, 
semantic_feats, glbctx_feats): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip) - # "ms" in variable names means multi-stage - ms_scores = [] - - rois = bbox2roi([proposals]) - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - ms_scores.append(bbox_results['cls_score']) - if i < self.num_stages - 1: - bbox_label = bbox_results['cls_score'].argmax(dim=1) - rois = bbox_head.regress_by_class( - rois, bbox_label, bbox_results['bbox_pred'], - img_meta[0]) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bboxes, scores = self.bbox_head[-1].get_bboxes( - rois, - cls_score, - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - - det_bbox_results = bbox2result(det_bboxes, det_labels, - self.bbox_head[-1].num_classes) - - if self.with_mask: - if det_bboxes.shape[0] == 0: - det_segm_results = [[] - for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta, semantic_feat, glbctx_feat in zip( - img_feats, img_metas, semantic_feats, glbctx_feats): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip) - mask_rois = bbox2roi([_bboxes]) - # get relay feature on mask_rois - bbox_results = self._bbox_forward( - -1, - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - relayed_feat = bbox_results['relayed_feat'] - relayed_feat = self.feat_relay_head(relayed_feat) - mask_results = self._mask_forward( - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - mask_pred = mask_results['mask_pred'] - aug_masks.append(mask_pred.sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, - self.test_cfg) - ori_shape = img_metas[0][0]['ori_shape'] - det_segm_results = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - rcnn_test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return [(det_bbox_results, det_segm_results)] - else: - return [det_bbox_results] diff --git a/spaces/Rongjiehuang/ProDiff/data_gen/tts/bin/binarize.py b/spaces/Rongjiehuang/ProDiff/data_gen/tts/bin/binarize.py deleted file mode 100644 index 4bd3c1f69fa59ed52fdd32eb80e746dedbae7535..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/data_gen/tts/bin/binarize.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -os.environ["OMP_NUM_THREADS"] = "1" - -import importlib -from utils.hparams import set_hparams, hparams - - -def binarize(): - binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer') - pkg = ".".join(binarizer_cls.split(".")[:-1]) - cls_name = binarizer_cls.split(".")[-1] - binarizer_cls = getattr(importlib.import_module(pkg), cls_name) - print("| Binarizer: ", binarizer_cls) - 
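A standalone sketch of the dotted-path class lookup that binarize() performs above: split the configured string into a module path and a class name, import the module, and fetch the attribute. The example resolves a stdlib class so it runs anywhere; the real config would point at something like data_gen.tts.base_binarizer.BaseBinarizer.

```python
import importlib

def resolve_class(dotted_path: str):
    # "package.module.ClassName" -> the class object
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

decoder_cls = resolve_class("json.decoder.JSONDecoder")
print(decoder_cls().decode('{"ok": true}'))  # {'ok': True}
```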
binarizer_cls().process() - - -if __name__ == '__main__': - set_hparams() - binarize() diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/networks/mat.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/networks/mat.py deleted file mode 100644 index c640dc45ed5df64ae0eaa5d1f277618ff3791d6b..0000000000000000000000000000000000000000 --- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/networks/mat.py +++ /dev/null @@ -1,996 +0,0 @@ -import numpy as np -import math -import sys -sys.path.insert(0, '../') - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from torch_utils import misc -from torch_utils import persistence -from networks.basic_module import FullyConnectedLayer, Conv2dLayer, MappingNet, MinibatchStdLayer, DisFromRGB, DisBlock, StyleConv, ToRGB, get_style_code - - -@misc.profiled_function -def nf(stage, channel_base=32768, channel_decay=1.0, channel_max=512): - NF = {512: 64, 256: 128, 128: 256, 64: 512, 32: 512, 16: 512, 8: 512, 4: 512} - return NF[2 ** stage] - - -@persistence.persistent_class -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = FullyConnectedLayer(in_features=in_features, out_features=hidden_features, activation='lrelu') - self.fc2 = FullyConnectedLayer(in_features=hidden_features, out_features=out_features) - - def forward(self, x): - x = self.fc1(x) - x = self.fc2(x) - return x - - -@misc.profiled_function -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -@misc.profiled_function -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -@persistence.persistent_class -class Conv2dLayerPartial(nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - kernel_size, # Width and height of the convolution kernel. - bias = True, # Apply additive bias before the activation function? - activation = 'linear', # Activation function: 'relu', 'lrelu', etc. - up = 1, # Integer upsampling factor. - down = 1, # Integer downsampling factor. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output to +-X, None = disable clamping. - trainable = True, # Update the weights of this layer during training? 
- ): - super().__init__() - self.conv = Conv2dLayer(in_channels, out_channels, kernel_size, bias, activation, up, down, resample_filter, - conv_clamp, trainable) - - self.weight_maskUpdater = torch.ones(1, 1, kernel_size, kernel_size) - self.slide_winsize = kernel_size ** 2 - self.stride = down - self.padding = kernel_size // 2 if kernel_size % 2 == 1 else 0 - - def forward(self, x, mask=None): - if mask is not None: - with torch.no_grad(): - if self.weight_maskUpdater.type() != x.type(): - self.weight_maskUpdater = self.weight_maskUpdater.to(x) - update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding) - mask_ratio = self.slide_winsize / (update_mask + 1e-8) - update_mask = torch.clamp(update_mask, 0, 1) # 0 or 1 - mask_ratio = torch.mul(mask_ratio, update_mask) - x = self.conv(x) - x = torch.mul(x, mask_ratio) - return x, update_mask - else: - x = self.conv(x) - return x, None - - -@persistence.persistent_class -class WindowAttention(nn.Module): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, down_ratio=1, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - self.q = FullyConnectedLayer(in_features=dim, out_features=dim) - self.k = FullyConnectedLayer(in_features=dim, out_features=dim) - self.v = FullyConnectedLayer(in_features=dim, out_features=dim) - self.proj = FullyConnectedLayer(in_features=dim, out_features=dim) - - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask_windows=None, mask=None): - """ - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - norm_x = F.normalize(x, p=2.0, dim=-1) - q = self.q(norm_x).reshape(B_, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) - k = self.k(norm_x).view(B_, -1, self.num_heads, C // self.num_heads).permute(0, 2, 3, 1) - v = self.v(x).view(B_, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) - - attn = (q @ k) * self.scale - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - - if mask_windows is not None: - attn_mask_windows = mask_windows.squeeze(-1).unsqueeze(1).unsqueeze(1) - attn = attn + attn_mask_windows.masked_fill(attn_mask_windows == 0, float(-100.0)).masked_fill( - attn_mask_windows == 1, float(0.0)) - with torch.no_grad(): - mask_windows = torch.clamp(torch.sum(mask_windows, dim=1, keepdim=True), 0, 1).repeat(1, N, 1) - - attn = self.softmax(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - return x, mask_windows - - -@persistence.persistent_class -class 
SwinTransformerBlock(nn.Module): - r""" Swin Transformer Block. - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resulotion. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, input_resolution, num_heads, down_ratio=1, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - if min(self.input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - if self.shift_size > 0: - down_ratio = 1 - self.attn = WindowAttention(dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - down_ratio=down_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, - proj_drop=drop) - - self.fuse = FullyConnectedLayer(in_features=dim * 2, out_features=dim, activation='lrelu') - - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if self.shift_size > 0: - attn_mask = self.calculate_mask(self.input_resolution) - else: - attn_mask = None - - self.register_buffer("attn_mask", attn_mask) - - def calculate_mask(self, x_size): - # calculate attention mask for SW-MSA - H, W = x_size - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x, x_size, mask=None): - # H, W = self.input_resolution - H, W = x_size - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = x.view(B, H, W, C) - if mask is not None: - mask = mask.view(B, H, W, 1) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - if mask is not None: - shifted_mask = torch.roll(mask, 
shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - if mask is not None: - shifted_mask = mask - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - if mask is not None: - mask_windows = window_partition(shifted_mask, self.window_size) - mask_windows = mask_windows.view(-1, self.window_size * self.window_size, 1) - else: - mask_windows = None - - # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size - if self.input_resolution == x_size: - attn_windows, mask_windows = self.attn(x_windows, mask_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C - else: - attn_windows, mask_windows = self.attn(x_windows, mask_windows, mask=self.calculate_mask(x_size).to(x.device)) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - if mask is not None: - mask_windows = mask_windows.view(-1, self.window_size, self.window_size, 1) - shifted_mask = window_reverse(mask_windows, self.window_size, H, W) - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - if mask is not None: - mask = torch.roll(shifted_mask, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - if mask is not None: - mask = shifted_mask - x = x.view(B, H * W, C) - if mask is not None: - mask = mask.view(B, H * W, 1) - - # FFN - x = self.fuse(torch.cat([shortcut, x], dim=-1)) - x = self.mlp(x) - - return x, mask - - -@persistence.persistent_class -class PatchMerging(nn.Module): - def __init__(self, in_channels, out_channels, down=2): - super().__init__() - self.conv = Conv2dLayerPartial(in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - activation='lrelu', - down=down, - ) - self.down = down - - def forward(self, x, x_size, mask=None): - x = token2feature(x, x_size) - if mask is not None: - mask = token2feature(mask, x_size) - x, mask = self.conv(x, mask) - if self.down != 1: - ratio = 1 / self.down - x_size = (int(x_size[0] * ratio), int(x_size[1] * ratio)) - x = feature2token(x) - if mask is not None: - mask = feature2token(mask) - return x, x_size, mask - - -@persistence.persistent_class -class PatchUpsampling(nn.Module): - def __init__(self, in_channels, out_channels, up=2): - super().__init__() - self.conv = Conv2dLayerPartial(in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - activation='lrelu', - up=up, - ) - self.up = up - - def forward(self, x, x_size, mask=None): - x = token2feature(x, x_size) - if mask is not None: - mask = token2feature(mask, x_size) - x, mask = self.conv(x, mask) - if self.up != 1: - x_size = (int(x_size[0] * self.up), int(x_size[1] * self.up)) - x = feature2token(x) - if mask is not None: - mask = feature2token(mask) - return x, x_size, mask - - - -@persistence.persistent_class -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__(self, dim, input_resolution, depth, num_heads, window_size, down_ratio=1, - mlp_ratio=2., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): - - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.depth = depth - self.use_checkpoint = use_checkpoint - - # patch merging layer - if downsample is not None: - # self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) - self.downsample = downsample - else: - self.downsample = None - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock(dim=dim, input_resolution=input_resolution, - num_heads=num_heads, down_ratio=down_ratio, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop, attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) - for i in range(depth)]) - - self.conv = Conv2dLayerPartial(in_channels=dim, out_channels=dim, kernel_size=3, activation='lrelu') - - def forward(self, x, x_size, mask=None): - if self.downsample is not None: - x, x_size, mask = self.downsample(x, x_size, mask) - identity = x - for blk in self.blocks: - if self.use_checkpoint: - x, mask = checkpoint.checkpoint(blk, x, x_size, mask) - else: - x, mask = blk(x, x_size, mask) - if mask is not None: - mask = token2feature(mask, x_size) - x, mask = self.conv(token2feature(x, x_size), mask) - x = feature2token(x) + identity - if mask is not None: - mask = feature2token(mask) - return x, x_size, mask - - -@persistence.persistent_class -class ToToken(nn.Module): - def __init__(self, in_channels=3, dim=128, kernel_size=5, stride=1): - super().__init__() - - self.proj = Conv2dLayerPartial(in_channels=in_channels, out_channels=dim, kernel_size=kernel_size, activation='lrelu') - - def forward(self, x, mask): - x, mask = self.proj(x, mask) - - return x, mask - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class EncFromRGB(nn.Module): - def __init__(self, in_channels, out_channels, activation): # res = 2, ..., resolution_log2 - super().__init__() - self.conv0 = Conv2dLayer(in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - activation=activation, - ) - self.conv1 = Conv2dLayer(in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - activation=activation, - ) - - def forward(self, x): - x = self.conv0(x) - x = self.conv1(x) - - return x - -@persistence.persistent_class -class ConvBlockDown(nn.Module): - def __init__(self, in_channels, out_channels, activation): # res = 2, ..., resolution_log - super().__init__() - - self.conv0 = Conv2dLayer(in_channels=in_channels, - out_channels=out_channels, - 
kernel_size=3, - activation=activation, - down=2, - ) - self.conv1 = Conv2dLayer(in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - activation=activation, - ) - - def forward(self, x): - x = self.conv0(x) - x = self.conv1(x) - - return x - - -def token2feature(x, x_size): - B, N, C = x.shape - h, w = x_size - x = x.permute(0, 2, 1).reshape(B, C, h, w) - return x - - -def feature2token(x): - B, C, H, W = x.shape - x = x.view(B, C, -1).transpose(1, 2) - return x - - -@persistence.persistent_class -class Encoder(nn.Module): - def __init__(self, res_log2, img_channels, activation, patch_size=5, channels=16, drop_path_rate=0.1): - super().__init__() - - self.resolution = [] - - for idx, i in enumerate(range(res_log2, 3, -1)): # from input size to 16x16 - res = 2 ** i - self.resolution.append(res) - if i == res_log2: - block = EncFromRGB(img_channels * 2 + 1, nf(i), activation) - else: - block = ConvBlockDown(nf(i+1), nf(i), activation) - setattr(self, 'EncConv_Block_%dx%d' % (res, res), block) - - def forward(self, x): - out = {} - for res in self.resolution: - res_log2 = int(np.log2(res)) - x = getattr(self, 'EncConv_Block_%dx%d' % (res, res))(x) - out[res_log2] = x - - return out - - -@persistence.persistent_class -class ToStyle(nn.Module): - def __init__(self, in_channels, out_channels, activation, drop_rate): - super().__init__() - self.conv = nn.Sequential( - Conv2dLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, activation=activation, down=2), - Conv2dLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, activation=activation, down=2), - Conv2dLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, activation=activation, down=2), - ) - - self.pool = nn.AdaptiveAvgPool2d(1) - self.fc = FullyConnectedLayer(in_features=in_channels, - out_features=out_channels, - activation=activation) - # self.dropout = nn.Dropout(drop_rate) - - def forward(self, x): - x = self.conv(x) - x = self.pool(x) - x = self.fc(x.flatten(start_dim=1)) - # x = self.dropout(x) - - return x - - -@persistence.persistent_class -class DecBlockFirstV2(nn.Module): - def __init__(self, res, in_channels, out_channels, activation, style_dim, use_noise, demodulate, img_channels): - super().__init__() - self.res = res - - self.conv0 = Conv2dLayer(in_channels=in_channels, - out_channels=in_channels, - kernel_size=3, - activation=activation, - ) - self.conv1 = StyleConv(in_channels=in_channels, - out_channels=out_channels, - style_dim=style_dim, - resolution=2**res, - kernel_size=3, - use_noise=use_noise, - activation=activation, - demodulate=demodulate, - ) - self.toRGB = ToRGB(in_channels=out_channels, - out_channels=img_channels, - style_dim=style_dim, - kernel_size=1, - demodulate=False, - ) - - def forward(self, x, ws, gs, E_features, noise_mode='random'): - # x = self.fc(x).view(x.shape[0], -1, 4, 4) - x = self.conv0(x) - x = x + E_features[self.res] - style = get_style_code(ws[:, 0], gs) - x = self.conv1(x, style, noise_mode=noise_mode) - style = get_style_code(ws[:, 1], gs) - img = self.toRGB(x, style, skip=None) - - return x, img - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class DecBlock(nn.Module): - def __init__(self, res, in_channels, out_channels, activation, style_dim, use_noise, demodulate, img_channels): # res = 4, ..., resolution_log2 - super().__init__() - self.res = res - - self.conv0 = StyleConv(in_channels=in_channels, - out_channels=out_channels, - 
style_dim=style_dim, - resolution=2**res, - kernel_size=3, - up=2, - use_noise=use_noise, - activation=activation, - demodulate=demodulate, - ) - self.conv1 = StyleConv(in_channels=out_channels, - out_channels=out_channels, - style_dim=style_dim, - resolution=2**res, - kernel_size=3, - use_noise=use_noise, - activation=activation, - demodulate=demodulate, - ) - self.toRGB = ToRGB(in_channels=out_channels, - out_channels=img_channels, - style_dim=style_dim, - kernel_size=1, - demodulate=False, - ) - - def forward(self, x, img, ws, gs, E_features, noise_mode='random'): - style = get_style_code(ws[:, self.res * 2 - 9], gs) - x = self.conv0(x, style, noise_mode=noise_mode) - x = x + E_features[self.res] - style = get_style_code(ws[:, self.res * 2 - 8], gs) - x = self.conv1(x, style, noise_mode=noise_mode) - style = get_style_code(ws[:, self.res * 2 - 7], gs) - img = self.toRGB(x, style, skip=img) - - return x, img - - -@persistence.persistent_class -class Decoder(nn.Module): - def __init__(self, res_log2, activation, style_dim, use_noise, demodulate, img_channels): - super().__init__() - self.Dec_16x16 = DecBlockFirstV2(4, nf(4), nf(4), activation, style_dim, use_noise, demodulate, img_channels) - for res in range(5, res_log2 + 1): - setattr(self, 'Dec_%dx%d' % (2 ** res, 2 ** res), - DecBlock(res, nf(res - 1), nf(res), activation, style_dim, use_noise, demodulate, img_channels)) - self.res_log2 = res_log2 - - def forward(self, x, ws, gs, E_features, noise_mode='random'): - x, img = self.Dec_16x16(x, ws, gs, E_features, noise_mode=noise_mode) - for res in range(5, self.res_log2 + 1): - block = getattr(self, 'Dec_%dx%d' % (2 ** res, 2 ** res)) - x, img = block(x, img, ws, gs, E_features, noise_mode=noise_mode) - - return img - - -@persistence.persistent_class -class DecStyleBlock(nn.Module): - def __init__(self, res, in_channels, out_channels, activation, style_dim, use_noise, demodulate, img_channels): - super().__init__() - self.res = res - - self.conv0 = StyleConv(in_channels=in_channels, - out_channels=out_channels, - style_dim=style_dim, - resolution=2**res, - kernel_size=3, - up=2, - use_noise=use_noise, - activation=activation, - demodulate=demodulate, - ) - self.conv1 = StyleConv(in_channels=out_channels, - out_channels=out_channels, - style_dim=style_dim, - resolution=2**res, - kernel_size=3, - use_noise=use_noise, - activation=activation, - demodulate=demodulate, - ) - self.toRGB = ToRGB(in_channels=out_channels, - out_channels=img_channels, - style_dim=style_dim, - kernel_size=1, - demodulate=False, - ) - - def forward(self, x, img, style, skip, noise_mode='random'): - x = self.conv0(x, style, noise_mode=noise_mode) - x = x + skip - x = self.conv1(x, style, noise_mode=noise_mode) - img = self.toRGB(x, style, skip=img) - - return x, img - - -@persistence.persistent_class -class FirstStage(nn.Module): - def __init__(self, img_channels, img_resolution=256, dim=180, w_dim=512, use_noise=False, demodulate=True, activation='lrelu'): - super().__init__() - res = 64 - - self.conv_first = Conv2dLayerPartial(in_channels=img_channels+1, out_channels=dim, kernel_size=3, activation=activation) - self.enc_conv = nn.ModuleList() - down_time = int(np.log2(img_resolution // res)) - for i in range(down_time): # from input size to 64 - self.enc_conv.append( - Conv2dLayerPartial(in_channels=dim, out_channels=dim, kernel_size=3, down=2, activation=activation) - ) - - # from 64 -> 16 -> 64 - depths = [2, 3, 4, 3, 2] - ratios = [1, 1/2, 1/2, 2, 2] - num_heads = 6 - window_sizes = [8, 16, 16, 16, 8] - 
drop_path_rate = 0.1 - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] - - self.tran = nn.ModuleList() - for i, depth in enumerate(depths): - res = int(res * ratios[i]) - if ratios[i] < 1: - merge = PatchMerging(dim, dim, down=int(1/ratios[i])) - elif ratios[i] > 1: - merge = PatchUpsampling(dim, dim, up=ratios[i]) - else: - merge = None - self.tran.append( - BasicLayer(dim=dim, input_resolution=[res, res], depth=depth, num_heads=num_heads, - window_size=window_sizes[i], drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])], - downsample=merge) - ) - - # global style - down_conv = [] - for i in range(int(np.log2(16))): - down_conv.append(Conv2dLayer(in_channels=dim, out_channels=dim, kernel_size=3, down=2, activation=activation)) - down_conv.append(nn.AdaptiveAvgPool2d((1, 1))) - self.down_conv = nn.Sequential(*down_conv) - self.to_style = FullyConnectedLayer(in_features=dim, out_features=dim*2, activation=activation) - self.ws_style = FullyConnectedLayer(in_features=w_dim, out_features=dim, activation=activation) - self.to_square = FullyConnectedLayer(in_features=dim, out_features=16*16, activation=activation) - - style_dim = dim * 3 - self.dec_conv = nn.ModuleList() - for i in range(down_time): # from 64 to input size - res = res * 2 - self.dec_conv.append(DecStyleBlock(res, dim, dim, activation, style_dim, use_noise, demodulate, img_channels)) - - def forward(self, images_in, masks_in, ws, noise_mode='random'): - x = torch.cat([masks_in - 0.5, images_in * masks_in], dim=1) - - skips = [] - x, mask = self.conv_first(x, masks_in) # input size - skips.append(x) - for i, block in enumerate(self.enc_conv): # input size to 64 - x, mask = block(x, mask) - if i != len(self.enc_conv) - 1: - skips.append(x) - - x_size = x.size()[-2:] - x = feature2token(x) - mask = feature2token(mask) - mid = len(self.tran) // 2 - for i, block in enumerate(self.tran): # 64 to 16 - if i < mid: - x, x_size, mask = block(x, x_size, mask) - skips.append(x) - elif i > mid: - x, x_size, mask = block(x, x_size, None) - x = x + skips[mid - i] - else: - x, x_size, mask = block(x, x_size, None) - - mul_map = torch.ones_like(x) * 0.5 - mul_map = F.dropout(mul_map, training=True) - ws = self.ws_style(ws[:, -1]) - add_n = self.to_square(ws).unsqueeze(1) - add_n = F.interpolate(add_n, size=x.size(1), mode='linear', align_corners=False).squeeze(1).unsqueeze(-1) - x = x * mul_map + add_n * (1 - mul_map) - gs = self.to_style(self.down_conv(token2feature(x, x_size)).flatten(start_dim=1)) - style = torch.cat([gs, ws], dim=1) - - x = token2feature(x, x_size).contiguous() - img = None - for i, block in enumerate(self.dec_conv): - x, img = block(x, img, style, skips[len(self.dec_conv)-i-1], noise_mode=noise_mode) - - # ensemble - img = img * (1 - masks_in) + images_in * masks_in - - return img - - -@persistence.persistent_class -class SynthesisNet(nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output image resolution. - img_channels = 3, # Number of color channels. - channel_base = 32768, # Overall multiplier for the number of channels. - channel_decay = 1.0, - channel_max = 512, # Maximum number of channels in any layer. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. 
- drop_rate = 0.5, - use_noise = True, - demodulate = True, - ): - super().__init__() - resolution_log2 = int(np.log2(img_resolution)) - assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4 - - self.num_layers = resolution_log2 * 2 - 3 * 2 - self.img_resolution = img_resolution - self.resolution_log2 = resolution_log2 - - # first stage - self.first_stage = FirstStage(img_channels, img_resolution=img_resolution, w_dim=w_dim, use_noise=False, demodulate=demodulate) - - # second stage - self.enc = Encoder(resolution_log2, img_channels, activation, patch_size=5, channels=16) - self.to_square = FullyConnectedLayer(in_features=w_dim, out_features=16*16, activation=activation) - self.to_style = ToStyle(in_channels=nf(4), out_channels=nf(2) * 2, activation=activation, drop_rate=drop_rate) - style_dim = w_dim + nf(2) * 2 - self.dec = Decoder(resolution_log2, activation, style_dim, use_noise, demodulate, img_channels) - - def forward(self, images_in, masks_in, ws, noise_mode='random', return_stg1=False): - out_stg1 = self.first_stage(images_in, masks_in, ws, noise_mode=noise_mode) - - # encoder - x = images_in * masks_in + out_stg1 * (1 - masks_in) - x = torch.cat([masks_in - 0.5, x, images_in * masks_in], dim=1) - E_features = self.enc(x) - - fea_16 = E_features[4] - mul_map = torch.ones_like(fea_16) * 0.5 - mul_map = F.dropout(mul_map, training=True) - add_n = self.to_square(ws[:, 0]).view(-1, 16, 16).unsqueeze(1) - add_n = F.interpolate(add_n, size=fea_16.size()[-2:], mode='bilinear', align_corners=False) - fea_16 = fea_16 * mul_map + add_n * (1 - mul_map) - E_features[4] = fea_16 - - # style - gs = self.to_style(fea_16) - - # decoder - img = self.dec(fea_16, ws, gs, E_features, noise_mode=noise_mode) - - # ensemble - img = img * (1 - masks_in) + images_in * masks_in - - if not return_stg1: - return img - else: - return img, out_stg1 - - -@persistence.persistent_class -class Generator(nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality, 0 = no latent. - c_dim, # Conditioning label (C) dimensionality, 0 = no label. - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # resolution of generated image - img_channels, # Number of input color channels. - synthesis_kwargs = {}, # Arguments for SynthesisNetwork. - mapping_kwargs = {}, # Arguments for MappingNetwork. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_channels = img_channels - - self.synthesis = SynthesisNet(w_dim=w_dim, - img_resolution=img_resolution, - img_channels=img_channels, - **synthesis_kwargs) - self.mapping = MappingNet(z_dim=z_dim, - c_dim=c_dim, - w_dim=w_dim, - num_ws=self.synthesis.num_layers, - **mapping_kwargs) - - def forward(self, images_in, masks_in, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False, - noise_mode='random', return_stg1=False): - ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, - skip_w_avg_update=skip_w_avg_update) - - if not return_stg1: - img = self.synthesis(images_in, masks_in, ws, noise_mode=noise_mode) - return img - else: - img, out_stg1 = self.synthesis(images_in, masks_in, ws, noise_mode=noise_mode, return_stg1=True) - return img, out_stg1 - - -@persistence.persistent_class -class Discriminator(torch.nn.Module): - def __init__(self, - c_dim, # Conditioning label (C) dimensionality. - img_resolution, # Input resolution. - img_channels, # Number of input color channels. 
- channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - channel_decay = 1, - cmap_dim = None, # Dimensionality of mapped conditioning label, None = default. - activation = 'lrelu', - mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch. - mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable. - ): - super().__init__() - self.c_dim = c_dim - self.img_resolution = img_resolution - self.img_channels = img_channels - - resolution_log2 = int(np.log2(img_resolution)) - assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4 - self.resolution_log2 = resolution_log2 - - if cmap_dim == None: - cmap_dim = nf(2) - if c_dim == 0: - cmap_dim = 0 - self.cmap_dim = cmap_dim - - if c_dim > 0: - self.mapping = MappingNet(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None) - - Dis = [DisFromRGB(img_channels+1, nf(resolution_log2), activation)] - for res in range(resolution_log2, 2, -1): - Dis.append(DisBlock(nf(res), nf(res-1), activation)) - - if mbstd_num_channels > 0: - Dis.append(MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels)) - Dis.append(Conv2dLayer(nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation)) - self.Dis = nn.Sequential(*Dis) - - self.fc0 = FullyConnectedLayer(nf(2)*4**2, nf(2), activation=activation) - self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim) - - # for 64x64 - Dis_stg1 = [DisFromRGB(img_channels+1, nf(resolution_log2) // 2, activation)] - for res in range(resolution_log2, 2, -1): - Dis_stg1.append(DisBlock(nf(res) // 2, nf(res - 1) // 2, activation)) - - if mbstd_num_channels > 0: - Dis_stg1.append(MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels)) - Dis_stg1.append(Conv2dLayer(nf(2) // 2 + mbstd_num_channels, nf(2) // 2, kernel_size=3, activation=activation)) - self.Dis_stg1 = nn.Sequential(*Dis_stg1) - - self.fc0_stg1 = FullyConnectedLayer(nf(2) // 2 * 4 ** 2, nf(2) // 2, activation=activation) - self.fc1_stg1 = FullyConnectedLayer(nf(2) // 2, 1 if cmap_dim == 0 else cmap_dim) - - def forward(self, images_in, masks_in, images_stg1, c): - x = self.Dis(torch.cat([masks_in - 0.5, images_in], dim=1)) - x = self.fc1(self.fc0(x.flatten(start_dim=1))) - - x_stg1 = self.Dis_stg1(torch.cat([masks_in - 0.5, images_stg1], dim=1)) - x_stg1 = self.fc1_stg1(self.fc0_stg1(x_stg1.flatten(start_dim=1))) - - if self.c_dim > 0: - cmap = self.mapping(None, c) - - if self.cmap_dim > 0: - x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) - x_stg1 = (x_stg1 * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) - - return x, x_stg1 - - -if __name__ == '__main__': - device = torch.device('cuda:0') - batch = 1 - res = 512 - G = Generator(z_dim=512, c_dim=0, w_dim=512, img_resolution=512, img_channels=3).to(device) - D = Discriminator(c_dim=0, img_resolution=res, img_channels=3).to(device) - img = torch.randn(batch, 3, res, res).to(device) - mask = torch.randn(batch, 1, res, res).to(device) - z = torch.randn(batch, 512).to(device) - G.eval() - - # def count(block): - # return sum(p.numel() for p in block.parameters()) / 10 ** 6 - # print('Generator', count(G)) - # print('discriminator', count(D)) - - with torch.no_grad(): - img, img_stg1 = G(img, mask, z, None, return_stg1=True) - print('output of G:', img.shape, img_stg1.shape) - score, score_stg1 = D(img, mask, 
img_stg1, None) - print('output of D:', score.shape, score_stg1.shape) diff --git a/spaces/S0h9l/Coherent_Speech/app.py b/spaces/S0h9l/Coherent_Speech/app.py deleted file mode 100644 index d9acec4a125589fc8319e19c1b9f05d3e408cb15..0000000000000000000000000000000000000000 --- a/spaces/S0h9l/Coherent_Speech/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import gradio as gr -import whisper -import cohere -from deep_translator import GoogleTranslator -from gtts import gTTS -import gtts.langs -#from dotenv import load_dotenv - -#load_dotenv() - -model = whisper.load_model("base") - -LANGUAGES = list(gtts.lang.tts_langs()) - -def transcribe(api,audio,language): - co = cohere.Client(api) - - #time.sleep(3) - # load audio and pad/trim it to fit 30 seconds - audio = whisper.load_audio(audio) - audio = whisper.pad_or_trim(audio) - - # make log-Mel spectrogram and move to the same device as the model - mel = whisper.log_mel_spectrogram(audio).to(model.device) - - # detect the spoken language - _, probs = model.detect_language(mel) - print(f"Detected language: {max(probs, key=probs.get)}") - - # decode the audio - options = whisper.DecodingOptions(fp16 = False) - result = whisper.decode(model, mel, options) - - #cohere - response = co.generate( - model='xlarge', - prompt=f'This program will generate an introductory paragraph to a blog post given a blog title, audience, and tone of voice.\n--\nBlog Title: Best Activities in Toronto\nAudience: Millennials\nTone of Voice: Lighthearted\nFirst Paragraph: Looking for fun things to do in Toronto? When it comes to exploring Canada\'s largest city, there\'s an ever-evolving set of activities to choose from. Whether you\'re looking to visit a local museum or sample the city\'s varied cuisine, there is plenty to fill any itinerary. In this blog post, I\'ll share some of my favorite recommendations\n--\nBlog Title: Mastering Dynamic Programming\nAudience: Developers\nTone: Informative\nFirst Paragraph: In this piece, we\'ll help you understand the fundamentals of dynamic programming, and when to apply this optimization technique. We\'ll break down bottom-up and top-down approaches to solve dynamic programming problems.\n--\nBlog Title: How to Get Started with Rock Climbing\nAudience: Athletes\nTone: Enthusiastic\nFirst Paragraph:If you\'re an athlete who\'s looking to learn how to rock climb, then you\'ve come to the right place! This blog post will give you all the information you need to know about how to get started in the sport. Rock climbing is a great way to stay active and challenge yourself in a new way. It\'s also a great way to make new friends and explore new places. So, what are you waiting for? Get out there and start climbing!\n--\nBlog Title: {result.text}\nAudience: Engineers\nTone: Enthusiastic\nFirst Paragraph:', - max_tokens=200, - temperature=0.8, - k=0, - p=1, - frequency_penalty=0, - presence_penalty=0, - stop_sequences=["--"], - return_likelihoods='NONE') - #result.text - reptxt = response.generations[0].text.strip("--") - - #Google models - translated = GoogleTranslator(source='auto', target=language).translate(reptxt) - filename = 'result.mp3' - tts = gTTS(text=translated, lang=language) - tts.save(filename) - return filename, translated - - - -gr.Interface( - title = 'Coherent Speech', - description = 'Enter the API key, then start recording give your input, stop recording, select language;language can also be selected after the output. 
Do not worry about error message in the output section', - fn=transcribe, - inputs=[ - gr.inputs.Textbox(lines=1, label="Enter your Cohere API Key"), - gr.inputs.Audio(source="microphone", type="filepath"), - gr.Radio(label="Language", choices=LANGUAGES, value="en") - ], - outputs=[gr.Audio(label="Output",type="filepath"),gr.outputs.Textbox(label="Generated Text")], - live=True).launch() \ No newline at end of file diff --git a/spaces/SAAZIZI/SummarizeAV/summarization_service/__init__.py b/spaces/SAAZIZI/SummarizeAV/summarization_service/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Sapphire-356/Video2MC/model/block/refine.py b/spaces/Sapphire-356/Video2MC/model/block/refine.py deleted file mode 100644 index 407ba5d63290f3bf2143ba7dc85e020267dd71b9..0000000000000000000000000000000000000000 --- a/spaces/Sapphire-356/Video2MC/model/block/refine.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -import torch.nn as nn -from torch.autograd import Variable - -fc_out = 256 -fc_unit = 1024 - -class refine(nn.Module): - def __init__(self, opt): - super().__init__() - - out_seqlen = 1 - fc_in = opt.out_channels*2*out_seqlen*opt.n_joints - fc_out = opt.in_channels * opt.n_joints - - self.post_refine = nn.Sequential( - nn.Linear(fc_in, fc_unit), - nn.ReLU(), - nn.Dropout(0.5,inplace=True), - nn.Linear(fc_unit, fc_out), - nn.Sigmoid() - ) - - def forward(self, x, x_1): - N, T, V,_ = x.size() - x_in = torch.cat((x, x_1), -1) - x_in = x_in.view(N, -1) - - score = self.post_refine(x_in).view(N,T,V,2) - score_cm = Variable(torch.ones(score.size()), requires_grad=False) - score - x_out = x.clone() - x_out[:, :, :, :2] = score * x[:, :, :, :2] + score_cm * x_1[:, :, :, :2] - - return x_out - - diff --git a/spaces/Shredder/CONBERT/fin_readability_sustainability.py b/spaces/Shredder/CONBERT/fin_readability_sustainability.py deleted file mode 100644 index 53ea0c60eab0dd27868f9bdc6d4652ea0ddc71b9..0000000000000000000000000000000000000000 --- a/spaces/Shredder/CONBERT/fin_readability_sustainability.py +++ /dev/null @@ -1,110 +0,0 @@ -import torch -import transformers -from torch.utils.data import Dataset, DataLoader -from transformers import RobertaModel, RobertaTokenizer, BertModel, BertTokenizer -import pandas as pd - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -MAX_LEN = 128 -BATCH_SIZE = 20 -text_col_name = 'sentence' - -def scoring_data_prep(dataset): - out = [] - target = [] - mask = [] - - for i in range(len(dataset)): - rec = dataset[i] - out.append(rec['ids'].reshape(-1,MAX_LEN)) - mask.append(rec['mask'].reshape(-1,MAX_LEN)) - - out_stack = torch.cat(out, dim = 0) - mask_stack = torch.cat(mask, dim =0 ) - out_stack = out_stack.to(device, dtype = torch.long) - mask_stack = mask_stack.to(device, dtype = torch.long) - - return out_stack, mask_stack - -class Triage(Dataset): - """ - This is a subclass of torch packages Dataset class. It processes input to create ids, masks and targets required for model training. 
- """ - - def __init__(self, dataframe, tokenizer, max_len, text_col_name): - self.len = len(dataframe) - self.data = dataframe - self.tokenizer = tokenizer - self.max_len = max_len - self.text_col_name = text_col_name - - - def __getitem__(self, index): - title = str(self.data[self.text_col_name][index]) - title = " ".join(title.split()) - inputs = self.tokenizer.encode_plus( - title, - None, - add_special_tokens=True, - max_length=self.max_len, - pad_to_max_length=True, #padding='max_length' #For future version use `padding='max_length'` - return_token_type_ids=True, - truncation=True, - ) - ids = inputs["input_ids"] - mask = inputs["attention_mask"] - - return { - "ids": torch.tensor(ids, dtype=torch.long), - "mask": torch.tensor(mask, dtype=torch.long), - - } - - def __len__(self): - return self.len - -class BERTClass(torch.nn.Module): - def __init__(self, num_class, task): - super(BERTClass, self).__init__() - self.num_class = num_class - if task =="sustanability": - self.l1 = RobertaModel.from_pretrained("roberta-base") - else: - self.l1 = BertModel.from_pretrained("ProsusAI/finbert") - self.pre_classifier = torch.nn.Linear(768, 768) - self.dropout = torch.nn.Dropout(0.3) - self.classifier = torch.nn.Linear(768, self.num_class) - self.history = dict() - - def forward(self, input_ids, attention_mask): - output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask) - hidden_state = output_1[0] - pooler = hidden_state[:, 0] - pooler = self.pre_classifier(pooler) - pooler = torch.nn.ReLU()(pooler) - pooler = self.dropout(pooler) - output = self.classifier(pooler) - return output - -def do_predict(model, tokenizer, test_df): - test_set = Triage(test_df, tokenizer, MAX_LEN, text_col_name) - test_params = {'batch_size' : BATCH_SIZE, 'shuffle': False, 'num_workers':0} - test_loader = DataLoader(test_set, **test_params) - out_stack, mask_stack = scoring_data_prep(dataset = test_set) - n = 0 - combined_output = [] - model.eval() - with torch.no_grad(): - while n < test_df.shape[0]: - output = model(out_stack[n:n+BATCH_SIZE,:],mask_stack[n:n+BATCH_SIZE,:]) - n = n + BATCH_SIZE - combined_output.append(output) - combined_output = torch.cat(combined_output, dim = 0) - preds = torch.argsort(combined_output, axis = 1, descending = True) - preds = preds.to('cpu') - actual_predictions = [i[0] for i in preds.tolist()] - combined_output = combined_output.to('cpu') - prob_predictions= [i[1] for i in combined_output.tolist()] - return (actual_predictions, prob_predictions) - \ No newline at end of file diff --git a/spaces/StatsByZach/app/games.py b/spaces/StatsByZach/app/games.py deleted file mode 100644 index cd04533a8f926b11b72b27d1a2698e0cd71b0c7c..0000000000000000000000000000000000000000 --- a/spaces/StatsByZach/app/games.py +++ /dev/null @@ -1,174 +0,0 @@ -##### games.,py ##### - -# Import modules -from shiny import * -import shinyswatch -import plotly.express as px -from shinywidgets import output_widget, render_widget -import pandas as pd -from configure import base_url -import math -import datetime - - -# Paths to data -gsaxt = "data/game_list.csv" -data = pd.read_csv(gsaxt) -data = data[['Home','Away','Game_Id','Date','Link']] -game_dates = ['All'] -game_dates_temp = data['Date'].value_counts().keys().tolist() -game_dates_temp=game_dates_temp[::-1] -dates = [datetime.datetime.strptime(ts, "%Y-%m-%d") for ts in game_dates_temp] -dates.sort() -sorteddates = [datetime.datetime.strftime(ts, "%Y-%m-%d") for ts in dates] -sorteddates = sorteddates[::-1] -game_dates.extend(sorteddates) 
-print(game_dates) -default=game_dates[1] -def server(input,output,session): - @output - @render.text - def text(): - t= 'Vi' - return t - - @output - @render.table - def table(): - df = pd.read_csv(gsaxt) - df = df[['Home','Away','Date','Link']] - if input.team() =="All": - df = df - else: - df = df[(df['Home']==input.team())|(df['Away']==input.team())] - if input.date() == "All": - df = df - else: - df = df[df['Date']==input.date()] - #return df.style.set_table_attributes('escape=False class="dataframe shiny-table table w-auto"').hide_index() - return df.style.set_table_attributes( - 'class="dataframe shiny-table table w-auto"' - ).set_properties(**{'border': '1.3px #222222'},).hide().set_table_styles( - [dict(selector="th", props=[("text-align", "right"),('font-size','25px')]), - dict(selector="tr", props=[('font-size','21px')]),] - ) - -games = App(ui.page_fluid( - ui.tags.base(href=base_url), - ui.tags.div( - {"style": "width:75%;margin: 0 auto"}, - ui.tags.style( - """ - h4 { - margin-top: 1em;font-size:35px; - } - h2{ - font-size:25px; - } - """ - ), - shinyswatch.theme.darkly(), - ui.tags.h4("Stats By Zach"), - ui.tags.i("A website for hockey analytics"), - ui.navset_tab( - ui.nav_control( - ui.a( - "Home", - href="home/" - ), - ), - ui.nav_menu( - "Skater Charts", - ui.nav_control( - ui.a( - "On-Ice xG Rates", - href="skater-xg-rates/" - ), - ui.a( - "On-Ice xGF%", - href="skater-xg-percentages/" - ), - ), - ), - ui.nav_menu( - "Goalie Charts", - ui.nav_control( - ui.a( - "GSAx Timeline", - href="gsax-timeline/" - ), - ui.a( - "GSAx Leaderboard", - href="gsax-leaderboard/" - ), - ui.a( - "GSAx Comparison", - href="gsax-comparison/" - ) - ), - ),ui.nav_menu( - "Team Charts", - ui.nav_control( - ui.a( - "Team xG Rates", - href="team-xg-rates/" - ), - ), - ),ui.nav_control( - ui.a( - "Games", - href="games/" - ), - ),ui.nav_control( - ui.a( - "About", - href="about/" - ), - )),ui.row( - ui.column(5,ui.tags.br(),ui.tags.h2("Games"),ui.input_select( - "team", - "Filter by Team:", - { - "All":"All", - "ANA": "Anaheim Ducks", - "ARI": "Arizona Coyotes", - "BOS": "Boston Bruins", - "BUF": "Buffalo Sabres", - "CGY": "Calgary Flames", - "CAR": "Carolina Hurricanes", - "CHI": "Chicago Blackhawks", - "COL": "Colorado Avalanche", - "CBJ": "Columbus Blue Jackets", - "DAL": "Dallas Stars", - "DET": "Detroit Red Wings", - "EDM": "Edmonton Oilers", - "FLA": "Florida Panthers", - "L.A": "Los Angeles Kings", - "MIN": "Minnesota Wild", - "MTL": "Montreal Canadiens", - "NSH": "Nashville Predators", - "N.J": "New Jersey Devils", - "NYI": "New York Islanders", - "NYR": "New York Rangers", - "OTT": "Ottawa Senators", - "PHI": "Philadelphia Flyers", - "PIT": "Pittsburgh Penguins", - "S.J": "San Jose Sharks", - "SEA":"Seattle Kraken", - "STL": "St. 
Louis Blues", - "T.B": "Tampa Bay Lightning", - "TOR": "Toronto Maple Leafs", - "VAN": "Vancouver Canucks", - "VGK": "Vegas Golden Knights", - "WSH": "Washington Capitals", - "WPG": "Winnipeg Jets" - }, - ), - ui.input_select( - "date", - "Filter by Date:", - game_dates, - selected=default - ),),ui.column(7,ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(), - ui.output_table("table"), - )),)),server) \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/shimmodule.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/shimmodule.py deleted file mode 100644 index 8af44caa98b2ef51a7e557f8a8930e37a27857de..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/shimmodule.py +++ /dev/null @@ -1,89 +0,0 @@ -"""A shim module for deprecated imports -""" -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import importlib.abc -import importlib.util -import sys -import types -from importlib import import_module - -from .importstring import import_item - - -class ShimWarning(Warning): - """A warning to show when a module has moved, and a shim is in its place.""" - - -class ShimImporter(importlib.abc.MetaPathFinder): - """Import hook for a shim. - - This ensures that submodule imports return the real target module, - not a clone that will confuse `is` and `isinstance` checks. - """ - def __init__(self, src, mirror): - self.src = src - self.mirror = mirror - - def _mirror_name(self, fullname): - """get the name of the mirrored module""" - - return self.mirror + fullname[len(self.src) :] - - def find_spec(self, fullname, path, target=None): - if fullname.startswith(self.src + "."): - mirror_name = self._mirror_name(fullname) - return importlib.util.find_spec(mirror_name) - - -class ShimModule(types.ModuleType): - - def __init__(self, *args, **kwargs): - self._mirror = kwargs.pop("mirror") - src = kwargs.pop("src", None) - if src: - kwargs['name'] = src.rsplit('.', 1)[-1] - super(ShimModule, self).__init__(*args, **kwargs) - # add import hook for descendent modules - if src: - sys.meta_path.append( - ShimImporter(src=src, mirror=self._mirror) - ) - - @property - def __path__(self): - return [] - - @property - def __spec__(self): - """Don't produce __spec__ until requested""" - return import_module(self._mirror).__spec__ - - def __dir__(self): - return dir(import_module(self._mirror)) - - @property - def __all__(self): - """Ensure __all__ is always defined""" - mod = import_module(self._mirror) - try: - return mod.__all__ - except AttributeError: - return [name for name in dir(mod) if not name.startswith('_')] - - def __getattr__(self, key): - # Use the equivalent of import_item(name), see below - name = "%s.%s" % (self._mirror, key) - try: - return import_item(name) - except ImportError as e: - raise AttributeError(key) from e - - def __repr__(self): - # repr on a module can be called during error handling; make sure - # it does not fail, even if the import fails - try: - return self.__getattr__("__repr__")() - except AttributeError: - return f"" diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py deleted file mode 100644 index 
b7eee4ad7db99c447732e3f3ebf2e8c108fe93a8..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py +++ /dev/null @@ -1,40 +0,0 @@ -from sqlalchemy.sql.ddl import DDL -from sqlalchemy.exc import ArgumentError - -from clickhouse_connect.driver.query import quote_identifier - - -# pylint: disable=too-many-ancestors,abstract-method -class CreateDatabase(DDL): - """ - SqlAlchemy DDL statement that is essentially an alternative to the built in CreateSchema DDL class - """ - # pylint: disable-msg=too-many-arguments - def __init__(self, name: str, engine: str = None, zoo_path: str = None, shard_name: str = '{shard}', - replica_name: str = '{replica}'): - """ - :param name: Database name - :param engine: Database ClickHouse engine type - :param zoo_path: ClickHouse zookeeper path for Replicated database engine - :param shard_name: Clickhouse shard name for Replicated database engine - :param replica_name: Replica name for Replicated database engine - """ - if engine and engine not in ('Ordinary', 'Atomic', 'Lazy', 'Replicated'): - raise ArgumentError(f'Unrecognized engine type {engine}') - stmt = f'CREATE DATABASE {quote_identifier(name)}' - if engine: - stmt += f' Engine {engine}' - if engine == 'Replicated': - if not zoo_path: - raise ArgumentError('zoo_path is required for Replicated Database Engine') - stmt += f" ('{zoo_path}', '{shard_name}', '{replica_name}'" - super().__init__(stmt) - - -# pylint: disable=too-many-ancestors,abstract-method -class DropDatabase(DDL): - """ - Alternative DDL statement for built in SqlAlchemy DropSchema DDL class - """ - def __init__(self, name: str): - super().__init__(f'DROP DATABASE {quote_identifier(name)}') diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fpn_uniformer.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fpn_uniformer.py deleted file mode 100644 index 8aae98c5991055bfcc08e82ccdc09f8b1d9f8a8d..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fpn_uniformer.py +++ /dev/null @@ -1,35 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - mlp_ratio=4., - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1), - neck=dict( - type='FPN', - in_channels=[64, 128, 320, 512], - out_channels=256, - num_outs=4), - decode_head=dict( - type='FPNHead', - in_channels=[256, 256, 256, 256], - in_index=[0, 1, 2, 3], - feature_strides=[4, 8, 16, 32], - channels=128, - dropout_ratio=0.1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole') -) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/io.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/io.py deleted file mode 100644 index aaefde58aa3ea5b58f86249ce7e1c40c186eb8dd..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/io.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from io import BytesIO, StringIO -from pathlib import Path - -from ..utils import is_list_of, is_str -from .file_client import FileClient -from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler - -file_handlers = { - 'json': JsonHandler(), - 'yaml': YamlHandler(), - 'yml': YamlHandler(), - 'pickle': PickleHandler(), - 'pkl': PickleHandler() -} - - -def load(file, file_format=None, file_client_args=None, **kwargs): - """Load data from json/yaml/pickle files. - - This method provides a unified api for loading data from serialized files. - - Note: - In v1.3.16 and later, ``load`` supports loading data from serialized - files those can be storaged in different backends. - - Args: - file (str or :obj:`Path` or file-like object): Filename or a file-like - object. - file_format (str, optional): If not specified, the file format will be - inferred from the file extension, otherwise use the specified one. - Currently supported formats include "json", "yaml/yml" and - "pickle/pkl". - file_client_args (dict, optional): Arguments to instantiate a - FileClient. See :class:`mmcv.fileio.FileClient` for details. - Default: None. - - Examples: - >>> load('/path/of/your/file') # file is storaged in disk - >>> load('https://path/of/your/file') # file is storaged in Internet - >>> load('s3://path/of/your/file') # file is storaged in petrel - - Returns: - The content from the file. - """ - if isinstance(file, Path): - file = str(file) - if file_format is None and is_str(file): - file_format = file.split('.')[-1] - if file_format not in file_handlers: - raise TypeError(f'Unsupported format: {file_format}') - - handler = file_handlers[file_format] - if is_str(file): - file_client = FileClient.infer_client(file_client_args, file) - if handler.str_like: - with StringIO(file_client.get_text(file)) as f: - obj = handler.load_from_fileobj(f, **kwargs) - else: - with BytesIO(file_client.get(file)) as f: - obj = handler.load_from_fileobj(f, **kwargs) - elif hasattr(file, 'read'): - obj = handler.load_from_fileobj(file, **kwargs) - else: - raise TypeError('"file" must be a filepath str or a file-object') - return obj - - -def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs): - """Dump data to json/yaml/pickle strings or files. - - This method provides a unified api for dumping data as strings or to files, - and also supports custom arguments for each file format. - - Note: - In v1.3.16 and later, ``dump`` supports dumping data as strings or to - files which is saved to different backends. - - Args: - obj (any): The python object to be dumped. - file (str or :obj:`Path` or file-like object, optional): If not - specified, then the object is dumped to a str, otherwise to a file - specified by the filename or file-like object. - file_format (str, optional): Same as :func:`load`. - file_client_args (dict, optional): Arguments to instantiate a - FileClient. See :class:`mmcv.fileio.FileClient` for details. - Default: None. - - Examples: - >>> dump('hello world', '/path/of/your/file') # disk - >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel - - Returns: - bool: True for success, False otherwise. 
- """ - if isinstance(file, Path): - file = str(file) - if file_format is None: - if is_str(file): - file_format = file.split('.')[-1] - elif file is None: - raise ValueError( - 'file_format must be specified since file is None') - if file_format not in file_handlers: - raise TypeError(f'Unsupported format: {file_format}') - - handler = file_handlers[file_format] - if file is None: - return handler.dump_to_str(obj, **kwargs) - elif is_str(file): - file_client = FileClient.infer_client(file_client_args, file) - if handler.str_like: - with StringIO() as f: - handler.dump_to_fileobj(obj, f, **kwargs) - file_client.put_text(f.getvalue(), file) - else: - with BytesIO() as f: - handler.dump_to_fileobj(obj, f, **kwargs) - file_client.put(f.getvalue(), file) - elif hasattr(file, 'write'): - handler.dump_to_fileobj(obj, file, **kwargs) - else: - raise TypeError('"file" must be a filename str or a file-object') - - -def _register_handler(handler, file_formats): - """Register a handler for some file extensions. - - Args: - handler (:obj:`BaseFileHandler`): Handler to be registered. - file_formats (str or list[str]): File formats to be handled by this - handler. - """ - if not isinstance(handler, BaseFileHandler): - raise TypeError( - f'handler must be a child of BaseFileHandler, not {type(handler)}') - if isinstance(file_formats, str): - file_formats = [file_formats] - if not is_list_of(file_formats, str): - raise TypeError('file_formats must be a str or a list of str') - for ext in file_formats: - file_handlers[ext] = handler - - -def register_handler(file_formats, **kwargs): - - def wrap(cls): - _register_handler(cls(**kwargs), file_formats) - return cls - - return wrap diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/extend.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/extend.md deleted file mode 100644 index a6af550fdb2aa79c818cef54b009f2fe816d46a9..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/extend.md +++ /dev/null @@ -1,141 +0,0 @@ -# Extend Detectron2's Defaults - -__Research is about doing things in new ways__. -This brings a tension in how to create abstractions in code, -which is a challenge for any research engineering project of a significant size: - -1. On one hand, it needs to have very thin abstractions to allow for the possibility of doing - everything in new ways. It should be reasonably easy to break existing - abstractions and replace them with new ones. - -2. On the other hand, such a project also needs reasonably high-level - abstractions, so that users can easily do things in standard ways, - without worrying too much about the details that only certain researchers care about. - -In detectron2, there are two types of interfaces that address this tension together: - -1. Functions and classes that take a config (`cfg`) argument - created from a yaml file - (sometimes with few extra arguments). - - Such functions and classes implement - the "standard default" behavior: it will read what it needs from a given - config and do the "standard" thing. - Users only need to load an expert-made config and pass it around, without having to worry about - which arguments are used and what they all mean. - - See [Yacs Configs](configs.md) for a detailed tutorial. - -2. Functions and classes that have well-defined explicit arguments. - - Each of these is a small building block of the entire system. 
- They require users' expertise to understand what each argument should be, - and require more effort to stitch together to a larger system. - But they can be stitched together in more flexible ways. - - When you need to implement something not supported by the "standard defaults" - included in detectron2, these well-defined components can be reused. - - The [LazyConfig system](lazyconfigs.md) relies on such functions and classes. - -3. A few functions and classes are implemented with the - [@configurable](../modules/config.html#detectron2.config.configurable) - decorator - they can be called with either a config, or with explicit arguments, or a mixture of both. - Their explicit argument interfaces are currently experimental. - - As an example, a Mask R-CNN model can be built in the following ways: - - 1. Config-only: - ```python - # load proper yaml config file, then - model = build_model(cfg) - ``` - - 2. Mixture of config and additional argument overrides: - ```python - model = GeneralizedRCNN( - cfg, - roi_heads=StandardROIHeads(cfg, batch_size_per_image=666), - pixel_std=[57.0, 57.0, 57.0]) - ``` - - 3. Full explicit arguments: -
        - - (click to expand) - - - ```python - model = GeneralizedRCNN( - backbone=FPN( - ResNet( - BasicStem(3, 64, norm="FrozenBN"), - ResNet.make_default_stages(50, stride_in_1x1=True, norm="FrozenBN"), - out_features=["res2", "res3", "res4", "res5"], - ).freeze(2), - ["res2", "res3", "res4", "res5"], - 256, - top_block=LastLevelMaxPool(), - ), - proposal_generator=RPN( - in_features=["p2", "p3", "p4", "p5", "p6"], - head=StandardRPNHead(in_channels=256, num_anchors=3), - anchor_generator=DefaultAnchorGenerator( - sizes=[[32], [64], [128], [256], [512]], - aspect_ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64], - offset=0.0, - ), - anchor_matcher=Matcher([0.3, 0.7], [0, -1, 1], allow_low_quality_matches=True), - box2box_transform=Box2BoxTransform([1.0, 1.0, 1.0, 1.0]), - batch_size_per_image=256, - positive_fraction=0.5, - pre_nms_topk=(2000, 1000), - post_nms_topk=(1000, 1000), - nms_thresh=0.7, - ), - roi_heads=StandardROIHeads( - num_classes=80, - batch_size_per_image=512, - positive_fraction=0.25, - proposal_matcher=Matcher([0.5], [0, 1], allow_low_quality_matches=False), - box_in_features=["p2", "p3", "p4", "p5"], - box_pooler=ROIPooler(7, (1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 0, "ROIAlignV2"), - box_head=FastRCNNConvFCHead( - ShapeSpec(channels=256, height=7, width=7), conv_dims=[], fc_dims=[1024, 1024] - ), - box_predictor=FastRCNNOutputLayers( - ShapeSpec(channels=1024), - test_score_thresh=0.05, - box2box_transform=Box2BoxTransform((10, 10, 5, 5)), - num_classes=80, - ), - mask_in_features=["p2", "p3", "p4", "p5"], - mask_pooler=ROIPooler(14, (1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 0, "ROIAlignV2"), - mask_head=MaskRCNNConvUpsampleHead( - ShapeSpec(channels=256, width=14, height=14), - num_classes=80, - conv_dims=[256, 256, 256, 256, 256], - ), - ), - pixel_mean=[103.530, 116.280, 123.675], - pixel_std=[1.0, 1.0, 1.0], - input_format="BGR", - ) - ``` - -
        - - -If you only need the standard behavior, the [Beginner's Tutorial](./getting_started.md) -should suffice. If you need to extend detectron2 to your own needs, -see the following tutorials for more details: - -* Detectron2 includes a few standard datasets. To use custom ones, see - [Use Custom Datasets](./datasets.md). -* Detectron2 contains the standard logic that creates a data loader for training/testing from a - dataset, but you can write your own as well. See [Use Custom Data Loaders](./data_loading.md). -* Detectron2 implements many standard detection models, and provide ways for you - to overwrite their behaviors. See [Use Models](./models.md) and [Write Models](./write-models.md). -* Detectron2 provides a default training loop that is good for common training tasks. - You can customize it with hooks, or write your own loop instead. See [training](./training.md). diff --git a/spaces/Testys/diabetes-app/model.py b/spaces/Testys/diabetes-app/model.py deleted file mode 100644 index d41dc36072b60c1852bab49e2e592e523ac0e730..0000000000000000000000000000000000000000 --- a/spaces/Testys/diabetes-app/model.py +++ /dev/null @@ -1,60 +0,0 @@ -# importing python libraries -import pandas as pd -import pickle as pkl -from lightgbm.sklearn import LGBMClassifier -from sklearn.model_selection import StratifiedShuffleSplit -from sklearn.preprocessing import RobustScaler, OrdinalEncoder -from sklearn.metrics import f1_score - -import warnings -warnings.filterwarnings("ignore") - -# loading diabetes data into variable data -data = pd.read_csv("./dataset/diabetes.csv") - -# wrangling dataset. -data.chol_hdl_ratio = round(data.cholesterol / data.hdl_chol, 2) -data.waist_hip_ratio = round(data.waist / data.hip, 2) - -# correcting comma separated number to decimal separated number. -data.bmi = pd.to_numeric(data.bmi.str.replace(",", ".")) - -print(data.head()) -# encoding columns with object values using Ordinal Encoding -s = (data.dtypes == "object") -obj_col = s[s].index - -print("Ordinal Encoding") -orde = OrdinalEncoder() -data[obj_col] = orde.fit_transform(data[obj_col]) - -print("Splitting features and target.") -# dropping off target and unnecessary columns (diabetes and patient number columns) -X = data.drop(["patient_number", "diabetes"], axis=1) -y = data.diabetes - -print("Robust Scaling on X, y.") -# scaling data using RobustScaler -scale = RobustScaler() -scaled_X = scale.fit_transform(X, y) - -print("Stratified Split.") -# StratifiedShuffleSplit on Data -split = StratifiedShuffleSplit(n_splits=4, random_state=42) - -for train_index, test_index in split.split(scaled_X, y): - X_train, X_test = scaled_X[train_index], scaled_X[test_index] - y_train, y_test = y[train_index], y[test_index] - -# Loading LightGBM classifier to be used for training model -lgbm = LGBMClassifier(n_estimators=200, max_depth=-2, random_state=42) -lgbm.fit(X_train, y_train) -pred = lgbm.predict(X_test) - -f1 = f1_score(pred, y_test) -print(f"F1 Score for LightGBM: {f1}.") - -# Using pickle to save model -lightgbm = open("./lightgbm.pickle", "wb") -pkl.dump(lgbm, lightgbm) -lightgbm.close() diff --git a/spaces/Vision-CAIR/minigpt4/minigpt4/__init__.py b/spaces/Vision-CAIR/minigpt4/minigpt4/__init__.py deleted file mode 100644 index ec06cef0e2e4e39e450746b0f3136776f6bcf143..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/minigpt4/minigpt4/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import os -import sys - -from omegaconf import OmegaConf - -from minigpt4.common.registry import registry - -from minigpt4.datasets.builders import * -from minigpt4.models import * -from minigpt4.processors import * -from minigpt4.tasks import * - - -root_dir = os.path.dirname(os.path.abspath(__file__)) -default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) - -registry.register_path("library_root", root_dir) -repo_root = os.path.join(root_dir, "..") -registry.register_path("repo_root", repo_root) -cache_root = os.path.join(repo_root, default_cfg.env.cache_root) -registry.register_path("cache_root", cache_root) - -registry.register("MAX_INT", sys.maxsize) -registry.register("SPLIT_NAMES", ["train", "val", "test"]) diff --git a/spaces/Wootang01/chatbot_three/app.py b/spaces/Wootang01/chatbot_three/app.py deleted file mode 100644 index 3a728850f4d53121b27c94716f14bcf472672285..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/chatbot_three/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr - -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration - -tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-1B-distill') -model = BlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-1B-distill') - -def func (message): - inputs = tokenizer(message, return_tensors='pt') - result = model.generate(**inputs) - return tokenizer.decode(result[0]) - -app = gr.Interface(fn=func, inputs = 'textbox', outputs = 'textbox', title='Chatbot Three') -app.launch() \ No newline at end of file diff --git a/spaces/Xhaheen/stable-diffusion-21/README.md b/spaces/Xhaheen/stable-diffusion-21/README.md deleted file mode 100644 index eb51073361e38a23d6f9e75c6b9acca08b45fb92..0000000000000000000000000000000000000000 --- a/spaces/Xhaheen/stable-diffusion-21/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion 2 -emoji: 💩 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.11.0 -app_file: app.py -pinned: false -duplicated_from: anzorq/stable-diffusion-2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/XzJosh/Lumi-Bert-VITS2/text/cleaner.py b/spaces/XzJosh/Lumi-Bert-VITS2/text/cleaner.py deleted file mode 100644 index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Lumi-Bert-VITS2/text/cleaner.py +++ /dev/null @@ -1,27 +0,0 @@ -from text import chinese, cleaned_text_to_sequence - - -language_module_map = { - 'ZH': chinese -} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - -if __name__ == '__main__': - pass diff --git a/spaces/XzJosh/nine2-Bert-VITS2/text/cleaner.py 
b/spaces/XzJosh/nine2-Bert-VITS2/text/cleaner.py deleted file mode 100644 index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine2-Bert-VITS2/text/cleaner.py +++ /dev/null @@ -1,27 +0,0 @@ -from text import chinese, cleaned_text_to_sequence - - -language_module_map = { - 'ZH': chinese -} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - -if __name__ == '__main__': - pass diff --git a/spaces/Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS/app.py b/spaces/Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS/app.py deleted file mode 100644 index ba16188d14a7ce8728e1d2ce4322a2cfa2d3afcd..0000000000000000000000000000000000000000 --- a/spaces/Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS/app.py +++ /dev/null @@ -1,212 +0,0 @@ -import streamlit as st -import requests -import cloudinary -import cloudinary.uploader -from PIL import Image -import io -from google_auth_oauthlib.flow import InstalledAppFlow -from googleapiclient.discovery import build -import os - -# Configure Cloudinary with your credentials -cloudinary.config( - cloud_name="dvuowbmrz", - api_key="177664162661619", - api_secret="qVMYel17N_C5QUUUuBIuatB5tq0" -) -# -# # Set up OAuth2 client details -# CLIENT_SECRET_FILE = 'client_secret.json' -# SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly'] # Adjust scopes as needed -# -# # Set up Streamlit app -# #st.title("Google Authentication Demo") -# -# # Check if the user is authenticated -# if 'credentials' not in st.session_state: -# #st.write("WELCOME") -# flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPES) -# credentials = flow.run_local_server(port=8501, authorization_prompt_message='') -# -# # Save credentials to a file for future use (optional) -# with open('token.json', 'w') as token_file: -# token_file.write(credentials.to_json()) -# -# st.session_state.credentials = credentials -# st.success("Authentication successful. You can now use the app.") -# -# # Use authenticated credentials to interact with Google API -# credentials = st.session_state.credentials -# service = build('drive', 'v3', credentials=credentials) -# -# # Fetch user's name from Google API -# try: -# user_info = service.about().get(fields="user").execute() -# user_name = user_info["user"]["displayName"] -# #st.header("Google Profile Information") -# st.markdown(f"

        Username: {user_name.upper()}

        ", unsafe_allow_html=True) -# except Exception as e: -# st.error(f"Error fetching user profile: {str(e)}") -# -# # Your app's functionality goes here -# # # Display Google Drive contents -# # st.header("Google Drive Contents") -# # results = service.files().list(pageSize=10).execute() -# # files = results.get('files', []) -# # if not files: -# # st.write('No files found in Google Drive.') -# # else: -# # st.write('Files in Google Drive:') -# # for file in files: -# # st.write(f"- {file['name']} ({file['mimeType']})") -# -# # Logout button -# if st.button("Logout"): -# del st.session_state.credentials -# os.remove("token_dir/token.json") # Remove the token file -# - - -# Set up Hugging Face API endpoint -API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4" -headers = {"Authorization": "Bearer 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"} - - -def query_model_with_image(image_description): - payload = { - "inputs": image_description - } - response = requests.post(API_URL, headers=headers, json=payload) - image_bytes = response.content - - image = Image.open(io.BytesIO(image_bytes)) - return image - -def upload_to_cloudinary(image, prompt_text): - image_data = io.BytesIO() - image.save(image_data, format="JPEG") - image_data.seek(0) - - upload_result = cloudinary.uploader.upload( - image_data, - folder="compvis_app", - public_id=prompt_text - ) - return upload_result["secure_url"] - - -def fetch_latest_images_from_cloudinary(num_images=9): - # Use the Cloudinary Admin API to list resources - url = f"https://api.cloudinary.com/v1_1/{cloudinary.config().cloud_name}/resources/image" - params = { - "max_results": num_images, - "type": "upload" - } - response = requests.get(url, params=params, auth=(cloudinary.config().api_key, cloudinary.config().api_secret)) - - if response.status_code == 200: - images = response.json()["resources"] - else: - images = [] - - return images - -# Streamlit app -st.markdown("""""", unsafe_allow_html=True) - -st.title("Text to Image Generator") - -image_description = st.text_input("Enter the image description") - -if st.button("Generate Image"): - processed_image = query_model_with_image(image_description) - st.image(processed_image, use_column_width=True, output_format="JPEG") # Use use_column_width=True - st.session_state.processed_image = processed_image - st.session_state.image_description = image_description - st.write("Image generated.") - -if st.button("Upload"): - if 'processed_image' in st.session_state: - uploaded_url = upload_to_cloudinary(st.session_state.processed_image, st.session_state.image_description) - st.write("Image uploaded to Cloudinary. Prompt Text:", st.session_state.image_description) - st.write("Image URL on Cloudinary:", uploaded_url) - else: - st.write("Generate an image first before uploading.") - -# Fetch and display the latest images from Cloudinary -st.header("Latest Images created") - -# Use the 'fetch_latest_images_from_cloudinary' function to get the latest images -latest_images = fetch_latest_images_from_cloudinary() - -# Define the number of columns in the grid -num_columns = 3 # You can adjust this number as needed - -# Calculate the width for each column -column_width = f"calc(33.33% - {10}px)" # Adjust the width and margin as needed - -# Add CSS styling for the grid and rounded images -st.markdown( - f""" - - """, - unsafe_allow_html=True, -) - -# Create the responsive grid layout -st.markdown('
        ', unsafe_allow_html=True) - -for i, image in enumerate(latest_images): - image_url = image.get('secure_url', '') # Get the image URL - public_id = image.get('public_id', '') # Get the full public_id - - # Extract just the filename (without the folder) - filename = public_id.split('/')[-1] - - # Add some spacing around the image and its name - st.markdown(f'
        ', unsafe_allow_html=True) - st.markdown(f'

        {filename}

        ', unsafe_allow_html=True) - - # Add rounded corners to the image using HTML - st.markdown(f'', unsafe_allow_html=True) - - # Add an arrow icon instead of "Download" button with black color - download_link = f'' - st.markdown(download_link, unsafe_allow_html=True) - - st.write("") # Add empty spaces for separation - st.markdown('
        ', unsafe_allow_html=True) - -# Close the responsive grid layout -st.markdown('
        ', unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/score_sde_ve/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/score_sde_ve/__init__.py deleted file mode 100644 index 000d61f6e9b183728cb6fc137e7180cac3a616df..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/score_sde_ve/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# flake8: noqa -from .pipeline_score_sde_ve import ScoreSdeVePipeline diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py deleted file mode 100644 index 40cf18131810307157a9a7d1f6d5922b00fd73d5..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py +++ /dev/null @@ -1,8 +0,0 @@ -from ..common.optim import SGD as optimizer -from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier -from ..common.data.coco_panoptic_separated import dataloader -from ..common.models.panoptic_fpn import model -from ..common.train import train - -model.backbone.bottom_up.freeze_at = 2 -train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py deleted file mode 100644 index a8714f7990f11e146a01e03d108518e0356b50c4..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import numpy as np -from typing import List, Optional, Union -import torch - -from detectron2.config import configurable - -from . import detection_utils as utils -from . import transforms as T - -""" -This file contains the default mapping that's applied to "dataset dicts". -""" - -__all__ = ["DatasetMapper"] - - -class DatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by the model. - - This is the default callable to be used to map your dataset dict into training data. - You may need to follow it to implement your own one for customized logic, - such as a different way to read or transform images. - See :doc:`/tutorials/data_loading` for details. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. Applies cropping/geometric transforms to the image and annotations - 3. Prepare data and annotations to Tensor and :class:`Instances` - """ - - @configurable - def __init__( - self, - is_train: bool, - *, - augmentations: List[Union[T.Augmentation, T.Transform]], - image_format: str, - use_instance_mask: bool = False, - use_keypoint: bool = False, - instance_mask_format: str = "polygon", - keypoint_hflip_indices: Optional[np.ndarray] = None, - precomputed_proposal_topk: Optional[int] = None, - recompute_boxes: bool = False, - ): - """ - NOTE: this interface is experimental. 
- - Args: - is_train: whether it's used in training or inference - augmentations: a list of augmentations or deterministic transforms to apply - image_format: an image format supported by :func:`detection_utils.read_image`. - use_instance_mask: whether to process instance segmentation annotations, if available - use_keypoint: whether to process keypoint annotations if available - instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation - masks into this format. - keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` - precomputed_proposal_topk: if given, will load pre-computed - proposals from dataset_dict and keep the top k proposals for each image. - recompute_boxes: whether to overwrite bounding box annotations - by computing tight bounding boxes from instance mask annotations. - """ - if recompute_boxes: - assert use_instance_mask, "recompute_boxes requires instance masks" - # fmt: off - self.is_train = is_train - self.augmentations = T.AugmentationList(augmentations) - self.image_format = image_format - self.use_instance_mask = use_instance_mask - self.instance_mask_format = instance_mask_format - self.use_keypoint = use_keypoint - self.keypoint_hflip_indices = keypoint_hflip_indices - self.proposal_topk = precomputed_proposal_topk - self.recompute_boxes = recompute_boxes - # fmt: on - logger = logging.getLogger(__name__) - mode = "training" if is_train else "inference" - logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") - - @classmethod - def from_config(cls, cfg, is_train: bool = True): - augs = utils.build_augmentation(cfg, is_train) - if cfg.INPUT.CROP.ENABLED and is_train: - augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) - recompute_boxes = cfg.MODEL.MASK_ON - else: - recompute_boxes = False - - ret = { - "is_train": is_train, - "augmentations": augs, - "image_format": cfg.INPUT.FORMAT, - "use_instance_mask": cfg.MODEL.MASK_ON, - "instance_mask_format": cfg.INPUT.MASK_FORMAT, - "use_keypoint": cfg.MODEL.KEYPOINT_ON, - "recompute_boxes": recompute_boxes, - } - - if cfg.MODEL.KEYPOINT_ON: - ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) - - if cfg.MODEL.LOAD_PROPOSALS: - ret["precomputed_proposal_topk"] = ( - cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN - if is_train - else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST - ) - return ret - - def _transform_annotations(self, dataset_dict, transforms, image_shape): - # USER: Modify this if you want to keep them for some reason. - for anno in dataset_dict["annotations"]: - if not self.use_instance_mask: - anno.pop("segmentation", None) - if not self.use_keypoint: - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - annos = [ - utils.transform_instance_annotations( - obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices - ) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances( - annos, image_shape, mask_format=self.instance_mask_format - ) - - # After transforms such as cropping are applied, the bounding box may no longer - # tightly bound the object. As an example, imagine a triangle object - # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight - # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to - # the intersection of original bounding box and the cropping box. 
- if self.recompute_boxes: - instances.gt_boxes = instances.gt_masks.get_bounding_boxes() - dataset_dict["instances"] = utils.filter_empty_instances(instances) - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # USER: Write your own image loading if it's not from a file - image = utils.read_image(dataset_dict["file_name"], format=self.image_format) - utils.check_image_size(dataset_dict, image) - - # USER: Remove if you don't do semantic/panoptic segmentation. - if "sem_seg_file_name" in dataset_dict: - sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) - else: - sem_seg_gt = None - - aug_input = T.AugInput(image, sem_seg=sem_seg_gt) - transforms = self.augmentations(aug_input) - image, sem_seg_gt = aug_input.image, aug_input.sem_seg - - image_shape = image.shape[:2] # h, w - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - if sem_seg_gt is not None: - dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) - - # USER: Remove if you don't use pre-computed proposals. - # Most users would not need this feature. - if self.proposal_topk is not None: - utils.transform_proposals( - dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk - ) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. 
- dataset_dict.pop("annotations", None) - dataset_dict.pop("sem_seg_file_name", None) - return dataset_dict - - if "annotations" in dataset_dict: - self._transform_annotations(dataset_dict, transforms, image_shape) - - return dataset_dict diff --git a/spaces/Yuankai/ChatReviewer/get_paper_from_pdf.py b/spaces/Yuankai/ChatReviewer/get_paper_from_pdf.py deleted file mode 100644 index 7bae3b4b7c64e691208c221c869d6a06c3023652..0000000000000000000000000000000000000000 --- a/spaces/Yuankai/ChatReviewer/get_paper_from_pdf.py +++ /dev/null @@ -1,193 +0,0 @@ -import fitz, io, os -from PIL import Image -from collections import Counter -import json -import re - -class Paper: - def __init__(self, path, title='', url='', abs='', authors=[]): - # 初始化函数,根据pdf路径初始化Paper对象 - self.url = url # 文章链接 - self.path = path # pdf路径 - self.section_names = [] # 段落标题 - self.section_texts = {} # 段落内容 - self.abs = abs - self.title_page = 0 - if title == '': - self.pdf = fitz.open(self.path) # pdf文档 - self.title = self.get_title() - self.parse_pdf() - else: - self.title = title - self.authors = authors - self.roman_num = ["I", "II", 'III', "IV", "V", "VI", "VII", "VIII", "IIX", "IX", "X"] - self.digit_num = [str(d + 1) for d in range(10)] - self.first_image = '' - - def parse_pdf(self): - self.pdf = fitz.open(self.path) # pdf文档 - self.text_list = [page.get_text() for page in self.pdf] - self.all_text = ' '.join(self.text_list) - self.extract_section_infomation() - self.section_texts.update({"title": self.title}) - self.pdf.close() - - # 定义一个函数,根据字体的大小,识别每个章节名称,并返回一个列表 - def get_chapter_names(self, ): - # # 打开一个pdf文件 - doc = fitz.open(self.path) # pdf文档 - text_list = [page.get_text() for page in doc] - all_text = '' - for text in text_list: - all_text += text - # # 创建一个空列表,用于存储章节名称 - chapter_names = [] - for line in all_text.split('\n'): - line_list = line.split(' ') - if '.' 
in line: - point_split_list = line.split('.') - space_split_list = line.split(' ') - if 1 < len(space_split_list) < 5: - if 1 < len(point_split_list) < 5 and ( - point_split_list[0] in self.roman_num or point_split_list[0] in self.digit_num): - # print("line:", line) - chapter_names.append(line) - - return chapter_names - - def get_title(self): - doc = self.pdf # 打开pdf文件 - max_font_size = 0 # 初始化最大字体大小为0 - max_string = "" # 初始化最大字体大小对应的字符串为空 - max_font_sizes = [0] - for page_index, page in enumerate(doc): # 遍历每一页 - text = page.get_text("dict") # 获取页面上的文本信息 - blocks = text["blocks"] # 获取文本块列表 - for block in blocks: # 遍历每个文本块 - if block["type"] == 0 and len(block['lines']): # 如果是文字类型 - if len(block["lines"][0]["spans"]): - font_size = block["lines"][0]["spans"][0]["size"] # 获取第一行第一段文字的字体大小 - max_font_sizes.append(font_size) - if font_size > max_font_size: # 如果字体大小大于当前最大值 - max_font_size = font_size # 更新最大值 - max_string = block["lines"][0]["spans"][0]["text"] # 更新最大值对应的字符串 - max_font_sizes.sort() - # print("max_font_sizes", max_font_sizes[-10:]) - cur_title = '' - for page_index, page in enumerate(doc): # 遍历每一页 - text = page.get_text("dict") # 获取页面上的文本信息 - blocks = text["blocks"] # 获取文本块列表 - for block in blocks: # 遍历每个文本块 - if block["type"] == 0 and len(block['lines']): # 如果是文字类型 - if len(block["lines"][0]["spans"]): - cur_string = block["lines"][0]["spans"][0]["text"] # 更新最大值对应的字符串 - font_flags = block["lines"][0]["spans"][0]["flags"] # 获取第一行第一段文字的字体特征 - font_size = block["lines"][0]["spans"][0]["size"] # 获取第一行第一段文字的字体大小 - # print(font_size) - if abs(font_size - max_font_sizes[-1]) < 0.3 or abs(font_size - max_font_sizes[-2]) < 0.3: - # print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags) - if len(cur_string) > 4 and "arXiv" not in cur_string: - # print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags) - if cur_title == '': - cur_title += cur_string - else: - cur_title += ' ' + cur_string - self.title_page = page_index - # break - title = cur_title.replace('\n', ' ') - return title - - def extract_section_infomation(self): - doc = fitz.open(self.path) - - # 获取文档中所有字体大小 - font_sizes = [] - for page in doc: - blocks = page.get_text("dict")["blocks"] - for block in blocks: - if 'lines' not in block: - continue - lines = block["lines"] - for line in lines: - for span in line["spans"]: - font_sizes.append(span["size"]) - most_common_size, _ = Counter(font_sizes).most_common(1)[0] - - # 按照最频繁的字体大小确定标题字体大小的阈值 - threshold = most_common_size * 1 - - section_dict = {} - last_heading = None - subheadings = [] - heading_font = -1 - # 遍历每一页并查找子标题 - found_abstract = False - upper_heading = False - font_heading = False - for page in doc: - blocks = page.get_text("dict")["blocks"] - for block in blocks: - if not found_abstract: - try: - text = json.dumps(block) - except: - continue - if re.search(r"\bAbstract\b", text, re.IGNORECASE): - found_abstract = True - last_heading = "Abstract" - section_dict["Abstract"] = "" - if found_abstract: - if 'lines' not in block: - continue - lines = block["lines"] - for line in lines: - for span in line["spans"]: - # 如果当前文本是子标题 - if not font_heading and span["text"].isupper() and sum(1 for c in span["text"] if c.isupper() and ('A' <= c <='Z')) > 4: # 针对一些标题大小一样,但是全大写的论文 - upper_heading = True - heading = span["text"].strip() - if "References" in heading: # reference 以后的内容不考虑 - self.section_names = subheadings - self.section_texts = section_dict - return - subheadings.append(heading) - if 
last_heading is not None: - section_dict[last_heading] = section_dict[last_heading].strip() - section_dict[heading] = "" - last_heading = heading - if not upper_heading and span["size"] > threshold and re.match( # 正常情况下,通过字体大小判断 - r"[A-Z][a-z]+(?:\s[A-Z][a-z]+)*", - span["text"].strip()): - font_heading = True - if heading_font == -1: - heading_font = span["size"] - elif heading_font != span["size"]: - continue - heading = span["text"].strip() - if "References" in heading: # reference 以后的内容不考虑 - self.section_names = subheadings - self.section_texts = section_dict - return - subheadings.append(heading) - if last_heading is not None: - section_dict[last_heading] = section_dict[last_heading].strip() - section_dict[heading] = "" - last_heading = heading - # 否则将当前文本添加到上一个子标题的文本中 - elif last_heading is not None: - section_dict[last_heading] += " " + span["text"].strip() - self.section_names = subheadings - self.section_texts = section_dict - - -def main(): - path = r'demo.pdf' - paper = Paper(path=path) - paper.parse_pdf() - # for key, value in paper.section_text_dict.items(): - # print(key, value) - # print("*"*40) - - -if __name__ == '__main__': - main() diff --git a/spaces/Yuelili/RealNagrse/realesrgan/train.py b/spaces/Yuelili/RealNagrse/realesrgan/train.py deleted file mode 100644 index 8a9cec9ed80d9f362984779548dcec921a636a04..0000000000000000000000000000000000000000 --- a/spaces/Yuelili/RealNagrse/realesrgan/train.py +++ /dev/null @@ -1,11 +0,0 @@ -# flake8: noqa -import os.path as osp -from basicsr.train import train_pipeline - -import realesrgan.archs -import realesrgan.data -import realesrgan.models - -if __name__ == '__main__': - root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - train_pipeline(root_path) diff --git a/spaces/Yukki-Yui/moe-tts/export_model.py b/spaces/Yukki-Yui/moe-tts/export_model.py deleted file mode 100644 index 98a49835df5a7a2486e76ddf94fbbb4444b52203..0000000000000000000000000000000000000000 --- a/spaces/Yukki-Yui/moe-tts/export_model.py +++ /dev/null @@ -1,13 +0,0 @@ -import torch - -if __name__ == '__main__': - model_path = "saved_model/11/model.pth" - output_path = "saved_model/11/model1.pth" - checkpoint_dict = torch.load(model_path, map_location='cpu') - checkpoint_dict_new = {} - for k, v in checkpoint_dict.items(): - if k == "optimizer": - print("remove optimizer") - continue - checkpoint_dict_new[k] = v - torch.save(checkpoint_dict_new, output_path) diff --git a/spaces/Zaid/whisper-large-v2-ar/README.md b/spaces/Zaid/whisper-large-v2-ar/README.md deleted file mode 100644 index 69c9369e306b8bba2cf6e99fdba85897526cf4bb..0000000000000000000000000000000000000000 --- a/spaces/Zaid/whisper-large-v2-ar/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Whisper large Arabic -emoji: 🐠 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: arbml/whisper-small-ar-1000 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_72.md b/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_72.md deleted file mode 100644 index 1881308b69f355cd645e594b8db7ab1d20367324..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_72.md +++ /dev/null @@ -1,17 +0,0 @@ -# v0.1.72 ---- - -Release Availability Date ---- -18 Jan 2023 - - -## Release Changlog ---- -- Since `v0.1.70` 
these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/43c566ee4ff2ee950a4f845c2fd8a1c690c1d607...afaee58ded40dc4cf39f94f1b4331ceb0a4d93eb have been pulled in -- add GZip compression to lineage cache -- Make browse paths upgrade non-blocking - -## Special Notes ---- -- If anyone faces issues with login please clear your cookies. Some security updates are part of this release. That may cause login issues until cookies are cleared. \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/fp16_utils.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/fp16_utils.py deleted file mode 100644 index 1981011d6859192e3e663e29d13500d56ba47f6c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/fp16_utils.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools -import warnings -from collections import abc -from inspect import getfullargspec - -import numpy as np -import torch -import torch.nn as nn - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version -from .dist_utils import allreduce_grads as _allreduce_grads - -try: - # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported - # and used; otherwise, auto fp16 will adopt mmcv's implementation. - # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16 - # manually, so the behavior may not be consistent with real amp. - from torch.cuda.amp import autocast -except ImportError: - pass - - -def cast_tensor_type(inputs, src_type, dst_type): - """Recursively convert Tensor in inputs from src_type to dst_type. - - Args: - inputs: Inputs that to be casted. - src_type (torch.dtype): Source type.. - dst_type (torch.dtype): Destination type. - - Returns: - The same type with inputs, but all contained Tensors have been cast. - """ - if isinstance(inputs, nn.Module): - return inputs - elif isinstance(inputs, torch.Tensor): - return inputs.to(dst_type) - elif isinstance(inputs, str): - return inputs - elif isinstance(inputs, np.ndarray): - return inputs - elif isinstance(inputs, abc.Mapping): - return type(inputs)({ - k: cast_tensor_type(v, src_type, dst_type) - for k, v in inputs.items() - }) - elif isinstance(inputs, abc.Iterable): - return type(inputs)( - cast_tensor_type(item, src_type, dst_type) for item in inputs) - else: - return inputs - - -def auto_fp16(apply_to=None, out_fp32=False): - """Decorator to enable fp16 training automatically. - - This decorator is useful when you write custom modules and want to support - mixed precision training. If inputs arguments are fp32 tensors, they will - be converted to fp16 automatically. Arguments other than fp32 tensors are - ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the - backend, otherwise, original mmcv implementation will be adopted. - - Args: - apply_to (Iterable, optional): The argument names to be converted. - `None` indicates all arguments. - out_fp32 (bool): Whether to convert the output back to fp32. 
- - Example: - - >>> import torch.nn as nn - >>> class MyModule1(nn.Module): - >>> - >>> # Convert x and y to fp16 - >>> @auto_fp16() - >>> def forward(self, x, y): - >>> pass - - >>> import torch.nn as nn - >>> class MyModule2(nn.Module): - >>> - >>> # convert pred to fp16 - >>> @auto_fp16(apply_to=('pred', )) - >>> def do_something(self, pred, others): - >>> pass - """ - - def auto_fp16_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # check if the module has set the attribute `fp16_enabled`, if not, - # just fallback to the original method. - if not isinstance(args[0], torch.nn.Module): - raise TypeError('@auto_fp16 can only be used to decorate the ' - 'method of nn.Module') - if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): - return old_func(*args, **kwargs) - - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get the argument names to be casted - args_to_cast = args_info.args if apply_to is None else apply_to - # convert the args that need to be processed - new_args = [] - # NOTE: default args are not taken into consideration - if args: - arg_names = args_info.args[:len(args)] - for i, arg_name in enumerate(arg_names): - if arg_name in args_to_cast: - new_args.append( - cast_tensor_type(args[i], torch.float, torch.half)) - else: - new_args.append(args[i]) - # convert the kwargs that need to be processed - new_kwargs = {} - if kwargs: - for arg_name, arg_value in kwargs.items(): - if arg_name in args_to_cast: - new_kwargs[arg_name] = cast_tensor_type( - arg_value, torch.float, torch.half) - else: - new_kwargs[arg_name] = arg_value - # apply converted arguments to the decorated method - if (TORCH_VERSION != 'parrots' and - digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - with autocast(enabled=True): - output = old_func(*new_args, **new_kwargs) - else: - output = old_func(*new_args, **new_kwargs) - # cast the results back to fp32 if necessary - if out_fp32: - output = cast_tensor_type(output, torch.half, torch.float) - return output - - return new_func - - return auto_fp16_wrapper - - -def force_fp32(apply_to=None, out_fp16=False): - """Decorator to convert input arguments to fp32 in force. - - This decorator is useful when you write custom modules and want to support - mixed precision training. If there are some inputs that must be processed - in fp32 mode, then this decorator can handle it. If inputs arguments are - fp16 tensors, they will be converted to fp32 automatically. Arguments other - than fp16 tensors are ignored. If you are using PyTorch >= 1.6, - torch.cuda.amp is used as the backend, otherwise, original mmcv - implementation will be adopted. - - Args: - apply_to (Iterable, optional): The argument names to be converted. - `None` indicates all arguments. - out_fp16 (bool): Whether to convert the output back to fp16. - - Example: - - >>> import torch.nn as nn - >>> class MyModule1(nn.Module): - >>> - >>> # Convert x and y to fp32 - >>> @force_fp32() - >>> def loss(self, x, y): - >>> pass - - >>> import torch.nn as nn - >>> class MyModule2(nn.Module): - >>> - >>> # convert pred to fp32 - >>> @force_fp32(apply_to=('pred', )) - >>> def post_process(self, pred, others): - >>> pass - """ - - def force_fp32_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # check if the module has set the attribute `fp16_enabled`, if not, - # just fallback to the original method. 
- if not isinstance(args[0], torch.nn.Module): - raise TypeError('@force_fp32 can only be used to decorate the ' - 'method of nn.Module') - if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): - return old_func(*args, **kwargs) - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get the argument names to be casted - args_to_cast = args_info.args if apply_to is None else apply_to - # convert the args that need to be processed - new_args = [] - if args: - arg_names = args_info.args[:len(args)] - for i, arg_name in enumerate(arg_names): - if arg_name in args_to_cast: - new_args.append( - cast_tensor_type(args[i], torch.half, torch.float)) - else: - new_args.append(args[i]) - # convert the kwargs that need to be processed - new_kwargs = dict() - if kwargs: - for arg_name, arg_value in kwargs.items(): - if arg_name in args_to_cast: - new_kwargs[arg_name] = cast_tensor_type( - arg_value, torch.half, torch.float) - else: - new_kwargs[arg_name] = arg_value - # apply converted arguments to the decorated method - if (TORCH_VERSION != 'parrots' and - digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - with autocast(enabled=False): - output = old_func(*new_args, **new_kwargs) - else: - output = old_func(*new_args, **new_kwargs) - # cast the results back to fp32 if necessary - if out_fp16: - output = cast_tensor_type(output, torch.float, torch.half) - return output - - return new_func - - return force_fp32_wrapper - - -def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): - warnings.warning( - '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be ' - 'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads') - _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb) - - -def wrap_fp16_model(model): - """Wrap the FP32 model to FP16. - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the - backend, otherwise, original mmcv implementation will be adopted. - - For PyTorch >= 1.6, this function will - 1. Set fp16 flag inside the model to True. - - Otherwise: - 1. Convert FP32 model to FP16. - 2. Remain some necessary layers to be FP32, e.g., normalization layers. - 3. Set `fp16_enabled` flag inside the model to True. - - Args: - model (nn.Module): Model in FP32. - """ - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.6.0')): - # convert model to fp16 - model.half() - # patch the normalization layers to make it work in fp32 mode - patch_norm_fp32(model) - # set `fp16_enabled` flag - for m in model.modules(): - if hasattr(m, 'fp16_enabled'): - m.fp16_enabled = True - - -def patch_norm_fp32(module): - """Recursively convert normalization layers from FP16 to FP32. - - Args: - module (nn.Module): The modules to be converted in FP16. - - Returns: - nn.Module: The converted module, the normalization layers have been - converted to FP32. - """ - if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): - module.float() - if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3': - module.forward = patch_forward_method(module.forward, torch.half, - torch.float) - for child in module.children(): - patch_norm_fp32(child) - return module - - -def patch_forward_method(func, src_type, dst_type, convert_output=True): - """Patch the forward method of a module. - - Args: - func (callable): The original forward method. - src_type (torch.dtype): Type of input arguments to be converted from. 
- dst_type (torch.dtype): Type of input arguments to be converted to. - convert_output (bool): Whether to convert the output back to src_type. - - Returns: - callable: The patched forward method. - """ - - def new_forward(*args, **kwargs): - output = func(*cast_tensor_type(args, src_type, dst_type), - **cast_tensor_type(kwargs, src_type, dst_type)) - if convert_output: - output = cast_tensor_type(output, dst_type, src_type) - return output - - return new_forward - - -class LossScaler: - """Class that manages loss scaling in mixed precision training which - supports both dynamic or static mode. - - The implementation refers to - https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. - Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. - It's important to understand how :class:`LossScaler` operates. - Loss scaling is designed to combat the problem of underflowing - gradients encountered at long times when training fp16 networks. - Dynamic loss scaling begins by attempting a very high loss - scale. Ironically, this may result in OVERflowing gradients. - If overflowing gradients are encountered, :class:`FP16_Optimizer` then - skips the update step for this particular iteration/minibatch, - and :class:`LossScaler` adjusts the loss scale to a lower value. - If a certain number of iterations occur without overflowing gradients - detected,:class:`LossScaler` increases the loss scale once more. - In this way :class:`LossScaler` attempts to "ride the edge" of always - using the highest loss scale possible without incurring overflow. - - Args: - init_scale (float): Initial loss scale value, default: 2**32. - scale_factor (float): Factor used when adjusting the loss scale. - Default: 2. - mode (str): Loss scaling mode. 'dynamic' or 'static' - scale_window (int): Number of consecutive iterations without an - overflow to wait before increasing the loss scale. Default: 1000. 
- """ - - def __init__(self, - init_scale=2**32, - mode='dynamic', - scale_factor=2., - scale_window=1000): - self.cur_scale = init_scale - self.cur_iter = 0 - assert mode in ('dynamic', - 'static'), 'mode can only be dynamic or static' - self.mode = mode - self.last_overflow_iter = -1 - self.scale_factor = scale_factor - self.scale_window = scale_window - - def has_overflow(self, params): - """Check if params contain overflow.""" - if self.mode != 'dynamic': - return False - for p in params: - if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): - return True - return False - - def _has_inf_or_nan(x): - """Check if params contain NaN.""" - try: - cpu_sum = float(x.float().sum()) - except RuntimeError as instance: - if 'value cannot be converted' not in instance.args[0]: - raise - return True - else: - if cpu_sum == float('inf') or cpu_sum == -float('inf') \ - or cpu_sum != cpu_sum: - return True - return False - - def update_scale(self, overflow): - """update the current loss scale value when overflow happens.""" - if self.mode != 'dynamic': - return - if overflow: - self.cur_scale = max(self.cur_scale / self.scale_factor, 1) - self.last_overflow_iter = self.cur_iter - else: - if (self.cur_iter - self.last_overflow_iter) % \ - self.scale_window == 0: - self.cur_scale *= self.scale_factor - self.cur_iter += 1 - - def state_dict(self): - """Returns the state of the scaler as a :class:`dict`.""" - return dict( - cur_scale=self.cur_scale, - cur_iter=self.cur_iter, - mode=self.mode, - last_overflow_iter=self.last_overflow_iter, - scale_factor=self.scale_factor, - scale_window=self.scale_window) - - def load_state_dict(self, state_dict): - """Loads the loss_scaler state dict. - - Args: - state_dict (dict): scaler state. - """ - self.cur_scale = state_dict['cur_scale'] - self.cur_iter = state_dict['cur_iter'] - self.mode = state_dict['mode'] - self.last_overflow_iter = state_dict['last_overflow_iter'] - self.scale_factor = state_dict['scale_factor'] - self.scale_window = state_dict['scale_window'] - - @property - def loss_scale(self): - return self.cur_scale diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/cornernet.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/cornernet.py deleted file mode 100644 index bb8ccc1465ab66d1615ca16701a533a22b156295..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/cornernet.py +++ /dev/null @@ -1,95 +0,0 @@ -import torch - -from mmdet.core import bbox2result, bbox_mapping_back -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class CornerNet(SingleStageDetector): - """CornerNet. - - This detector is the implementation of the paper `CornerNet: Detecting - Objects as Paired Keypoints `_ . - """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) - - def merge_aug_results(self, aug_results, img_metas): - """Merge augmented detection bboxes and score. - - Args: - aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each - image. - img_metas (list[list[dict]]): Meta information of each image, e.g., - image size, scaling factor, etc. 
- - Returns: - tuple: (bboxes, labels) - """ - recovered_bboxes, aug_labels = [], [] - for bboxes_labels, img_info in zip(aug_results, img_metas): - img_shape = img_info[0]['img_shape'] # using shape before padding - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - bboxes, labels = bboxes_labels - bboxes, scores = bboxes[:, :4], bboxes[:, -1:] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) - recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1)) - aug_labels.append(labels) - - bboxes = torch.cat(recovered_bboxes, dim=0) - labels = torch.cat(aug_labels) - - if bboxes.shape[0] > 0: - out_bboxes, out_labels = self.bbox_head._bboxes_nms( - bboxes, labels, self.bbox_head.test_cfg) - else: - out_bboxes, out_labels = bboxes, labels - - return out_bboxes, out_labels - - def aug_test(self, imgs, img_metas, rescale=False): - """Augment testing of CornerNet. - - Args: - imgs (list[Tensor]): Augmented images. - img_metas (list[list[dict]]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - - Note: - ``imgs`` must including flipped image pairs. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - img_inds = list(range(len(imgs))) - - assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( - 'aug test must have flipped image pair') - aug_results = [] - for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): - img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) - x = self.extract_feat(img_pair) - outs = self.bbox_head(x) - bbox_list = self.bbox_head.get_bboxes( - *outs, [img_metas[ind], img_metas[flip_ind]], False, False) - aug_results.append(bbox_list[0]) - aug_results.append(bbox_list[1]) - - bboxes, labels = self.merge_aug_results(aug_results, img_metas) - bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes) - - return [bbox_results] diff --git a/spaces/abidlabs/keras-image-classifier/app.py b/spaces/abidlabs/keras-image-classifier/app.py deleted file mode 100644 index 704062af215e339b654b0eca4b874155768f863e..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/keras-image-classifier/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import requests -import tensorflow as tf - -inception_net = tf.keras.applications.MobileNetV2() - -import requests - -# Download human-readable labels for ImageNet. 
-response = requests.get("https://git.io/JJkYN") -labels = response.text.split("\n") - - -def classify_image(inp): - inp = inp.reshape((-1, 224, 224, 3)) - inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp) - prediction = inception_net.predict(inp).flatten() - confidences = {labels[i]: float(prediction[i]) for i in range(1000)} - return confidences - - -import gradio as gr - -gr.Interface(fn=classify_image, - inputs=gr.inputs.Image(shape=(224, 224)), - outputs=gr.outputs.Label(num_top_classes=3), - examples=["banana.jpg", "car.jpg"], - theme="default", - css=".footer{display:none !important}").launch() diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/app/base.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/app/base.py deleted file mode 100644 index be2c641580d80f0868452ad4a127456b4deccf30..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/app/base.py +++ /dev/null @@ -1,314 +0,0 @@ -import sys -import queue -import threading - -from pyglet import app -from pyglet import clock -from pyglet import event - -_is_pyglet_doc_run = hasattr(sys, "is_pyglet_doc_run") and sys.is_pyglet_doc_run - - -class PlatformEventLoop: - """ Abstract class, implementation depends on platform. - - .. versionadded:: 1.2 - """ - def __init__(self): - self._event_queue = queue.Queue() - self._is_running = threading.Event() - - def is_running(self): - """Return True if the event loop is currently processing, or False - if it is blocked or not activated. - - :rtype: bool - """ - return self._is_running.is_set() - - def post_event(self, dispatcher, event, *args): - """Post an event into the main application thread. - - The event is queued internally until the :py:meth:`run` method's thread - is able to dispatch the event. This method can be safely called - from any thread. - - If the method is called from the :py:meth:`run` method's thread (for - example, from within an event handler), the event may be dispatched - within the same runloop iteration or the next one; the choice is - nondeterministic. - - :Parameters: - `dispatcher` : EventDispatcher - Dispatcher to process the event. - `event` : str - Event name. - `args` : sequence - Arguments to pass to the event handlers. - - """ - self._event_queue.put((dispatcher, event, args)) - self.notify() - - def dispatch_posted_events(self): - """Immediately dispatch all pending events. - - Normally this is called automatically by the runloop iteration. - """ - while True: - try: - dispatcher, evnt, args = self._event_queue.get(False) - dispatcher.dispatch_event(evnt, *args) - except queue.Empty: - break - except ReferenceError: - # weakly-referenced object no longer exists - pass - - def notify(self): - """Notify the event loop that something needs processing. - - If the event loop is blocked, it will unblock and perform an iteration - immediately. If the event loop is running, another iteration is - scheduled for immediate execution afterwards. - """ - raise NotImplementedError('abstract') - - def start(self): - pass - - def step(self, timeout=None): - raise NotImplementedError('abstract') - - def set_timer(self, func, interval): - pass - - def stop(self): - pass - - -class EventLoop(event.EventDispatcher): - """The main run loop of the application. 
- - Calling `run` begins the application event loop, which processes - operating system events, calls :py:func:`pyglet.clock.tick` to call - scheduled functions and calls :py:meth:`pyglet.window.Window.on_draw` and - :py:meth:`pyglet.window.Window.flip` to update window contents. - - Applications can subclass :py:class:`EventLoop` and override certain methods - to integrate another framework's run loop, or to customise processing - in some other way. You should not in general override :py:meth:`run`, as - this method contains platform-specific code that ensures the application - remains responsive to the user while keeping CPU usage to a minimum. - """ - - _has_exit_condition = None - _has_exit = False - - def __init__(self): - self._has_exit_condition = threading.Condition() - self.clock = clock.get_default() - self.is_running = False - - @staticmethod - def _redraw_windows(dt): - # Redraw all windows - for window in app.windows: - window.switch_to() - window.dispatch_event('on_draw') - window.dispatch_event('on_refresh', dt) - window.flip() - - def run(self, interval=1/60): - """Begin processing events, scheduled functions and window updates. - - This method returns when :py:attr:`has_exit` is set to True. - - Developers are discouraged from overriding this method, as the - implementation is platform-specific. - """ - if not interval: - self.clock.schedule(self._redraw_windows) - else: - self.clock.schedule_interval(self._redraw_windows, interval) - - self.has_exit = False - - from pyglet.window import Window - Window._enable_event_queue = False - - # Dispatch pending events - for window in app.windows: - window.switch_to() - window.dispatch_pending_events() - - platform_event_loop = app.platform_event_loop - platform_event_loop.start() - self.dispatch_event('on_enter') - self.is_running = True - - while not self.has_exit: - timeout = self.idle() - platform_event_loop.step(timeout) - - self.is_running = False - self.dispatch_event('on_exit') - platform_event_loop.stop() - - def enter_blocking(self): - """Called by pyglet internal processes when the operating system - is about to block due to a user interaction. For example, this - is common when the user begins resizing or moving a window. - - This method provides the event loop with an opportunity to set up - an OS timer on the platform event loop, which will continue to - be invoked during the blocking operation. - - The default implementation ensures that :py:meth:`idle` continues to be - called as documented. - - .. versionadded:: 1.2 - """ - timeout = self.idle() - app.platform_event_loop.set_timer(self._blocking_timer, timeout) - - @staticmethod - def exit_blocking(): - """Called by pyglet internal processes when the blocking operation - completes. See :py:meth:`enter_blocking`. - """ - app.platform_event_loop.set_timer(None, None) - - def _blocking_timer(self): - timeout = self.idle() - app.platform_event_loop.set_timer(self._blocking_timer, timeout) - - def idle(self): - """Called during each iteration of the event loop. - - The method is called immediately after any window events (i.e., after - any user input). The method can return a duration after which - the idle method will be called again. The method may be called - earlier if the user creates more input events. The method - can return `None` to only wait for user events. - - For example, return ``1.0`` to have the idle method called every - second, or immediately after any user events. 
- - The default implementation dispatches the - :py:meth:`pyglet.window.Window.on_draw` event for all windows and uses - :py:func:`pyglet.clock.tick` and :py:func:`pyglet.clock.get_sleep_time` - on the default clock to determine the return value. - - This method should be overridden by advanced users only. To have - code execute at regular intervals, use the - :py:func:`pyglet.clock.schedule` methods. - - :rtype: float - :return: The number of seconds before the idle method should - be called again, or `None` to block for user input. - """ - dt = self.clock.update_time() - self.clock.call_scheduled_functions(dt) - - # Update timout - return self.clock.get_sleep_time(True) - - @property - def has_exit(self): - """Flag indicating if the event loop will exit in - the next iteration. When set, all waiting threads are interrupted (see - :py:meth:`sleep`). - - Thread-safe since pyglet 1.2. - - :see: `exit` - :type: bool - """ - self._has_exit_condition.acquire() - result = self._has_exit - self._has_exit_condition.release() - return result - - @has_exit.setter - def has_exit(self, value): - self._has_exit_condition.acquire() - self._has_exit = value - self._has_exit_condition.notify() - self._has_exit_condition.release() - - def exit(self): - """Safely exit the event loop at the end of the current iteration. - - This method is a thread-safe equivalent for setting - :py:attr:`has_exit` to ``True``. All waiting threads will be - interrupted (see :py:meth:`sleep`). - """ - self.has_exit = True - app.platform_event_loop.notify() - - def sleep(self, timeout): - """Wait for some amount of time, or until the :py:attr:`has_exit` flag - is set or :py:meth:`exit` is called. - - This method is thread-safe. - - :Parameters: - `timeout` : float - Time to wait, in seconds. - - .. versionadded:: 1.2 - - :rtype: bool - :return: ``True`` if the `has_exit` flag is set, otherwise ``False``. - """ - self._has_exit_condition.acquire() - self._has_exit_condition.wait(timeout) - result = self._has_exit - self._has_exit_condition.release() - return result - - def on_window_close(self, window): - """Default window close handler.""" - if len(app.windows) == 0: - self.exit() - - if _is_pyglet_doc_run: - def on_window_close(self, window): - """A window was closed. - - This event is dispatched when a window is closed. It is not - dispatched if the window's close button was pressed but the - window did not close. - - The default handler calls :py:meth:`exit` if no more windows are - open. You can override this handler to base your application exit - on some other policy. - - :event: - """ - - def on_enter(self): - """The event loop is about to begin. - - This is dispatched when the event loop is prepared to enter - the main run loop, and represents the last chance for an - application to initialise itself. - - :event: - """ - - def on_exit(self): - """The event loop is about to exit. - - After dispatching this event, the :py:meth:`run` method returns (the - application may not actually exit if you have more code - following the :py:meth:`run` invocation). 
- - :event: - """ - - -EventLoop.register_event_type('on_window_close') -EventLoop.register_event_type('on_enter') -EventLoop.register_event_type('on_exit') diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/graphics/allocation.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/graphics/allocation.py deleted file mode 100644 index 39b8965ae33abb99bbdb100409da041d72608556..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/graphics/allocation.py +++ /dev/null @@ -1,365 +0,0 @@ -"""Memory allocation algorithm for vertex arrays and buffers. - -The region allocator is used to allocate vertex indices within a vertex -domain's multiple buffers. ("Buffer" refers to any abstract buffer presented -by :py:mod:`pyglet.graphics.vertexbuffer`. - -The allocator will at times request more space from the buffers. The current -policy is to double the buffer size when there is not enough room to fulfil an -allocation. The buffer is never resized smaller. - -The allocator maintains references to free space only; it is the caller's -responsibility to maintain the allocated regions. -""" - -# Common cases: -# -regions will be the same size (instances of same object, e.g. sprites) -# -regions will not usually be resized (only exception is text) -# -alignment of 4 vertices (glyphs, sprites, images, ...) -# -# Optimise for: -# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays -# -finding large blocks of allocated regions quickly (for drawing) -# -finding block of unallocated space is the _uncommon_ case! -# -# Decisions: -# -don't over-allocate regions to any alignment -- this would require more -# work in finding the allocated spaces (for drawing) and would result in -# more entries in glMultiDrawArrays -# -don't move blocks when they truncate themselves. try not to allocate the -# space they freed too soon (they will likely need grow back into it later, -# and growing will usually require a reallocation). -# -allocator does not track individual allocated regions. Trusts caller -# to provide accurate (start, size) tuple, which completely describes -# a region from the allocator's point of view. -# -this means that compacting is probably not feasible, or would be hideously -# expensive - - -class AllocatorMemoryException(Exception): - """The buffer is not large enough to fulfil an allocation. - - Raised by `Allocator` methods when the operation failed due to - lack of buffer space. The buffer should be increased to at least - requested_capacity and then the operation retried (guaranteed to - pass second time). - """ - - def __init__(self, requested_capacity): - self.requested_capacity = requested_capacity - - -class Allocator: - """Buffer space allocation implementation.""" - - __slots__ = 'capacity', 'starts', 'sizes' - - def __init__(self, capacity): - """Create an allocator for a buffer of the specified capacity. - - :Parameters: - `capacity` : int - Maximum size of the buffer. - - """ - self.capacity = capacity - - # Allocated blocks. Start index and size in parallel lists. 
- # - # # = allocated, - = free - # - # 0 3 5 15 20 24 40 - # |###--##########-----####----------------------| - # - # starts = [0, 5, 20] - # sizes = [3, 10, 4] - # - # To calculate free blocks: - # for i in range(0, len(starts)): - # free_start[i] = starts[i] + sizes[i] - # free_size[i] = starts[i+1] - free_start[i] - # free_size[i+1] = self.capacity - free_start[-1] - - self.starts = [] - self.sizes = [] - - def set_capacity(self, size): - """Resize the maximum buffer size. - - The capaity cannot be reduced. - - :Parameters: - `size` : int - New maximum size of the buffer. - - """ - assert size > self.capacity - self.capacity = size - - def alloc(self, size): - """Allocate memory in the buffer. - - Raises `AllocatorMemoryException` if the allocation cannot be - fulfilled. - - :Parameters: - `size` : int - Size of region to allocate. - - :rtype: int - :return: Starting index of the allocated region. - """ - assert size >= 0 - - if size == 0: - return 0 - - # Return start, or raise AllocatorMemoryException - if not self.starts: - if size <= self.capacity: - self.starts.append(0) - self.sizes.append(size) - return 0 - else: - raise AllocatorMemoryException(size) - - # Restart from zero if space exists - if self.starts[0] > size: - self.starts.insert(0, 0) - self.sizes.insert(0, size) - return 0 - - # Allocate in a free space - free_start = self.starts[0] + self.sizes[0] - for i, (alloc_start, alloc_size) in enumerate(zip(self.starts[1:], self.sizes[1:])): - # Danger! - # i is actually index - 1 because of slicing above... - # starts[i] points to the block before this free space - # starts[i+1] points to the block after this free space, and is always valid. - free_size = alloc_start - free_start - if free_size == size: - # Merge previous block with this one (removing this free space) - self.sizes[i] += free_size + alloc_size - del self.starts[i+1] - del self.sizes[i+1] - return free_start - elif free_size > size: - # Increase size of previous block to intrude into this free - # space. - self.sizes[i] += size - return free_start - free_start = alloc_start + alloc_size - - # Allocate at end of capacity - free_size = self.capacity - free_start - if free_size >= size: - self.sizes[-1] += size - return free_start - - raise AllocatorMemoryException(self.capacity + size - free_size) - - def realloc(self, start, size, new_size): - """Reallocate a region of the buffer. - - This is more efficient than separate `dealloc` and `alloc` calls, as - the region can often be resized in-place. - - Raises `AllocatorMemoryException` if the allocation cannot be - fulfilled. - - :Parameters: - `start` : int - Current starting index of the region. - `size` : int - Current size of the region. - `new_size` : int - New size of the region. 
- - """ - assert size >= 0 and new_size >= 0 - - if new_size == 0: - if size != 0: - self.dealloc(start, size) - return 0 - elif size == 0: - return self.alloc(new_size) - - # return start, or raise AllocatorMemoryException - - # Truncation is the same as deallocating the tail cruft - if new_size < size: - self.dealloc(start + new_size, size - new_size) - return start - - # Find which block it lives in - for i, (alloc_start, alloc_size) in enumerate(zip(*(self.starts, self.sizes))): - p = start - alloc_start - if p >= 0 and size <= alloc_size - p: - break - if not (p >= 0 and size <= alloc_size - p): - print(list(zip(self.starts, self.sizes))) - print(start, size, new_size) - print(p, alloc_start, alloc_size) - assert p >= 0 and size <= alloc_size - p, 'Region not allocated' - - if size == alloc_size - p: - # Region is at end of block. Find how much free space is after it. - is_final_block = i == len(self.starts) - 1 - if not is_final_block: - free_size = self.starts[i + 1] - (start + size) - else: - free_size = self.capacity - (start + size) - - # TODO If region is an entire block being an island in free space, - # can possibly extend in both directions. - - if free_size == new_size - size and not is_final_block: - # Merge block with next (region is expanded in place to - # exactly fill the free space) - self.sizes[i] += free_size + self.sizes[i + 1] - del self.starts[i + 1] - del self.sizes[i + 1] - return start - elif free_size > new_size - size: - # Expand region in place - self.sizes[i] += new_size - size - return start - - # The block must be repositioned. Dealloc then alloc. - - # But don't do this! If alloc fails, we've already silently dealloc'd - # the original block. - # self.dealloc(start, size) - # return self.alloc(new_size) - - # It must be alloc'd first. We're not missing an optimisation - # here, because if freeing the block would've allowed for the block to - # be placed in the resulting free space, one of the above in-place - # checks would've found it. - result = self.alloc(new_size) - self.dealloc(start, size) - return result - - def dealloc(self, start, size): - """Free a region of the buffer. - - :Parameters: - `start` : int - Starting index of the region. - `size` : int - Size of the region. - - """ - assert size >= 0 - - if size == 0: - return - - assert self.starts - - # Find which block needs to be split - for i, (alloc_start, alloc_size) in enumerate(zip(*(self.starts, self.sizes))): - p = start - alloc_start - if p >= 0 and size <= alloc_size - p: - break - - # Assert we left via the break - assert p >= 0 and size <= alloc_size - p, 'Region not allocated' - - if p == 0 and size == alloc_size: - # Remove entire block - del self.starts[i] - del self.sizes[i] - elif p == 0: - # Truncate beginning of block - self.starts[i] += size - self.sizes[i] -= size - elif size == alloc_size - p: - # Truncate end of block - self.sizes[i] -= size - else: - # Reduce size of left side, insert block at right side - # $ = dealloc'd block, # = alloc'd region from same block - # - # <------8------> - # <-5-><-6-><-7-> - # 1 2 3 4 - # #####$$$$$##### - # - # 1 = alloc_start - # 2 = start - # 3 = start + size - # 4 = alloc_start + alloc_size - # 5 = start - alloc_start = p - # 6 = size - # 7 = {8} - ({5} + {6}) = alloc_size - (p + size) - # 8 = alloc_size - # - self.sizes[i] = p - self.starts.insert(i + 1, start + size) - self.sizes.insert(i + 1, alloc_size - (p + size)) - - def get_allocated_regions(self): - """Get a list of (aggregate) allocated regions. 
- - The result of this method is ``(starts, sizes)``, where ``starts`` is - a list of starting indices of the regions and ``sizes`` their - corresponding lengths. - - :rtype: (list, list) - """ - # return (starts, sizes); len(starts) == len(sizes) - return self.starts, self.sizes - - def get_fragmented_free_size(self): - """Returns the amount of space unused, not including the final - free block. - - :rtype: int - """ - if not self.starts: - return 0 - - # Variation of search for free block. - total_free = 0 - free_start = self.starts[0] + self.sizes[0] - for i, (alloc_start, alloc_size) in enumerate(zip(self.starts[1:], self.sizes[1:])): - total_free += alloc_start - free_start - free_start = alloc_start + alloc_size - - return total_free - - def get_free_size(self): - """Return the amount of space unused. - - :rtype: int - """ - if not self.starts: - return self.capacity - - free_end = self.capacity - (self.starts[-1] + self.sizes[-1]) - return self.get_fragmented_free_size() + free_end - - def get_usage(self): - """Return fraction of capacity currently allocated. - - :rtype: float - """ - return 1. - self.get_free_size() / float(self.capacity) - - def get_fragmentation(self): - """Return fraction of free space that is not expandable. - - :rtype: float - """ - free_size = self.get_free_size() - if free_size == 0: - return 0. - return self.get_fragmented_free_size() / float(self.get_free_size()) - - def __str__(self): - return 'allocs=' + repr(list(zip(self.starts, self.sizes))) - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, str(self)) diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/silent/adaptation.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/silent/adaptation.py deleted file mode 100644 index 538f1081ae1a75142872495f508b00969c1a3c6d..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/silent/adaptation.py +++ /dev/null @@ -1,83 +0,0 @@ -from pyglet.media.drivers.base import AbstractAudioDriver, AbstractAudioPlayer -from pyglet.media.drivers.listener import AbstractListener - - -class SilentAudioPlayer(AbstractAudioPlayer): - - def delete(self): - pass - - def play(self): - pass - - def stop(self): - pass - - def clear(self): - pass - - def write(self, audio_data, length): - pass - - def get_time(self): - return 0 - - def set_volume(self, volume): - pass - - def set_position(self, position): - pass - - def set_min_distance(self, min_distance): - pass - - def set_max_distance(self, max_distance): - pass - - def set_pitch(self, pitch): - pass - - def set_cone_orientation(self, cone_orientation): - pass - - def set_cone_inner_angle(self, cone_inner_angle): - pass - - def set_cone_outer_angle(self, cone_outer_angle): - pass - - def set_cone_outer_gain(self, cone_outer_gain): - pass - - def prefill_audio(self): - pass - - -class SilentDriver(AbstractAudioDriver): - - def create_audio_player(self, source, player): - return SilentAudioPlayer(source, player) - - def get_listener(self): - return SilentListener() - - def delete(self): - pass - - -class SilentListener(AbstractListener): - - def _set_volume(self, volume): - pass - - def _set_position(self, position): - pass - - def _set_forward_orientation(self, orientation): - pass - - def _set_up_orientation(self, orientation): - pass - - def _set_orientation(self): - pass diff --git 
a/spaces/ai-danger/hot-or-not/app.py b/spaces/ai-danger/hot-or-not/app.py deleted file mode 100644 index 582dc64d7723b96221afcb9ecf0f457b12e400b6..0000000000000000000000000000000000000000 --- a/spaces/ai-danger/hot-or-not/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -import clip -from PIL import Image -import gradio as gr - -device = "cuda" if torch.cuda.is_available() else "cpu" -model, preprocess = clip.load("ViT-B/32", device=device) - -def hotornot(image, gender): - image = Image.fromarray(image.astype("uint8"), "RGB") - - image = preprocess(image).unsqueeze(0).to(device) - positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an attractive {gender}'] - negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}'] - - pairs = list(zip(positive_terms, negative_terms)) - - def evaluate(terms): - text = clip.tokenize(terms).to(device) - - with torch.no_grad(): - logits_per_image, logits_per_text = model(image, text) - probs = logits_per_image.softmax(dim=-1).cpu().numpy() - return probs[0] - - probs = [evaluate(pair) for pair in pairs] - - positive_probs = [prob[0] for prob in probs] - negative_probs = [prob[1] for prob in probs] - - hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2) - beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2) - attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2) - - hot_score = sum(positive_probs)/len(positive_probs) - ugly_score = sum(negative_probs)/len(negative_probs) - composite = ((hot_score - ugly_score)+1) * 50 - composite = round(composite, 2) - return composite, hotness_score, beauty_score, attractiveness_score - -iface = gr.Interface( - fn=hotornot, - inputs=[ - gr.inputs.Image(label="Image"), - gr.inputs.Dropdown( - [ - 'person', 'man', 'woman' - ], - default='person', - ) - ], - outputs=[ - gr.Textbox(label="Total Hot or Not™ Score"), - gr.Textbox(label="Hotness Score"), - gr.Textbox(label="Beauty Score"), - gr.Textbox(label="Attractiveness Score"), - ], - title="Hot or Not", - description="A simple hot or not app using OpenAI's CLIP model. How it works: the input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.", -) -iface.launch() diff --git a/spaces/akhaliq/Detic/detic/data/custom_dataset_dataloader.py b/spaces/akhaliq/Detic/detic/data/custom_dataset_dataloader.py deleted file mode 100644 index 8f8d6817704026796d2c2f457fe2624800693267..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Detic/detic/data/custom_dataset_dataloader.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/multi_dataset_dataloader.py (Apache-2.0 License) -import copy -import logging -import numpy as np -import operator -import torch -import torch.utils.data -import json -from detectron2.utils.comm import get_world_size -from detectron2.utils.logger import _log_api_usage, log_first_n - -from detectron2.config import configurable -from detectron2.data import samplers -from torch.utils.data.sampler import BatchSampler, Sampler -from detectron2.data.common import DatasetFromList, MapDataset -from detectron2.data.dataset_mapper import DatasetMapper -from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader -from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler -from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram -from detectron2.data.build import filter_images_with_only_crowd_annotations -from detectron2.data.build import filter_images_with_few_keypoints -from detectron2.data.build import check_metadata_consistency -from detectron2.data.catalog import MetadataCatalog, DatasetCatalog -from detectron2.utils import comm -import itertools -import math -from collections import defaultdict -from typing import Optional - - -def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): - sampler_name = cfg.DATALOADER.SAMPLER_TRAIN - if 'MultiDataset' in sampler_name: - dataset_dicts = get_detection_dataset_dicts_with_source( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - else: - dataset_dicts = get_detection_dataset_dicts( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - - if mapper is None: - mapper = DatasetMapper(cfg, True) - - if sampler is not None: - pass - elif sampler_name == "TrainingSampler": - sampler = TrainingSampler(len(dataset)) - elif sampler_name == "MultiDatasetSampler": - sampler = MultiDatasetSampler( - dataset_dicts, - dataset_ratio = cfg.DATALOADER.DATASET_RATIO, - use_rfs = cfg.DATALOADER.USE_RFS, - dataset_ann = cfg.DATALOADER.DATASET_ANN, - repeat_threshold = cfg.DATALOADER.REPEAT_THRESHOLD, - ) - elif sampler_name == "RepeatFactorTrainingSampler": - repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( - dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD - ) - sampler = RepeatFactorTrainingSampler(repeat_factors) - else: - raise ValueError("Unknown training sampler: {}".format(sampler_name)) - - return { - "dataset": dataset_dicts, - "sampler": sampler, - "mapper": mapper, - "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, - "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, - "num_workers": cfg.DATALOADER.NUM_WORKERS, - 'multi_dataset_grouping': cfg.DATALOADER.MULTI_DATASET_GROUPING, - 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE, - 'dataset_bs': cfg.DATALOADER.DATASET_BS, - 'num_datasets': len(cfg.DATASETS.TRAIN) - } - - -@configurable(from_config=_custom_train_loader_from_config) -def build_custom_train_loader( - dataset, *, mapper, sampler, - total_batch_size=16, 
- aspect_ratio_grouping=True, - num_workers=0, - num_datasets=1, - multi_dataset_grouping=False, - use_diff_bs_size=False, - dataset_bs=[] - ): - """ - Modified from detectron2.data.build.build_custom_train_loader, but supports - different samplers - """ - if isinstance(dataset, list): - dataset = DatasetFromList(dataset, copy=False) - if mapper is not None: - dataset = MapDataset(dataset, mapper) - if sampler is None: - sampler = TrainingSampler(len(dataset)) - assert isinstance(sampler, torch.utils.data.sampler.Sampler) - if multi_dataset_grouping: - return build_multi_dataset_batch_data_loader( - use_diff_bs_size, - dataset_bs, - dataset, - sampler, - total_batch_size, - num_datasets=num_datasets, - num_workers=num_workers, - ) - else: - return build_batch_data_loader( - dataset, - sampler, - total_batch_size, - aspect_ratio_grouping=aspect_ratio_grouping, - num_workers=num_workers, - ) - - -def build_multi_dataset_batch_data_loader( - use_diff_bs_size, dataset_bs, - dataset, sampler, total_batch_size, num_datasets, num_workers=0 -): - """ - """ - world_size = get_world_size() - assert ( - total_batch_size > 0 and total_batch_size % world_size == 0 - ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( - total_batch_size, world_size - ) - - batch_size = total_batch_size // world_size - data_loader = torch.utils.data.DataLoader( - dataset, - sampler=sampler, - num_workers=num_workers, - batch_sampler=None, - collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements - worker_init_fn=worker_init_reset_seed, - ) # yield individual mapped dict - if use_diff_bs_size: - return DIFFMDAspectRatioGroupedDataset( - data_loader, dataset_bs, num_datasets) - else: - return MDAspectRatioGroupedDataset( - data_loader, batch_size, num_datasets) - - -def get_detection_dataset_dicts_with_source( - dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None -): - assert len(dataset_names) - dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names] - for dataset_name, dicts in zip(dataset_names, dataset_dicts): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - - for source_id, (dataset_name, dicts) in \ - enumerate(zip(dataset_names, dataset_dicts)): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - for d in dicts: - d['dataset_source'] = source_id - - if "annotations" in dicts[0]: - try: - class_names = MetadataCatalog.get(dataset_name).thing_classes - check_metadata_consistency("thing_classes", dataset_name) - print_instances_class_histogram(dicts, class_names) - except AttributeError: # class names are not available for this dataset - pass - - assert proposal_files is None - - dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) - - has_instances = "annotations" in dataset_dicts[0] - if filter_empty and has_instances: - dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) - if min_keypoints > 0 and has_instances: - dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) - - return dataset_dicts - - -class MultiDatasetSampler(Sampler): - def __init__( - self, - dataset_dicts, - dataset_ratio, - use_rfs, - dataset_ann, - repeat_threshold=0.001, - seed: Optional[int] = None, - ): - """ - """ - sizes = [0 for _ in range(len(dataset_ratio))] - for d in dataset_dicts: - sizes[d['dataset_source']] += 1 - print('dataset sizes', sizes) - self.sizes = sizes - assert len(dataset_ratio) == len(sizes), \ - 'length of dataset ratio {} 
should be equal to number if dataset {}'.format( - len(dataset_ratio), len(sizes) - ) - if seed is None: - seed = comm.shared_random_seed() - self._seed = int(seed) - self._rank = comm.get_rank() - self._world_size = comm.get_world_size() - - self.dataset_ids = torch.tensor( - [d['dataset_source'] for d in dataset_dicts], dtype=torch.long) - - dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \ - for i, (r, s) in enumerate(zip(dataset_ratio, sizes))] - dataset_weight = torch.cat(dataset_weight) - - rfs_factors = [] - st = 0 - for i, s in enumerate(sizes): - if use_rfs[i]: - if dataset_ann[i] == 'box': - rfs_func = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency - else: - rfs_func = repeat_factors_from_tag_frequency - rfs_factor = rfs_func( - dataset_dicts[st: st + s], - repeat_thresh=repeat_threshold) - rfs_factor = rfs_factor * (s / rfs_factor.sum()) - else: - rfs_factor = torch.ones(s) - rfs_factors.append(rfs_factor) - st = st + s - rfs_factors = torch.cat(rfs_factors) - - self.weights = dataset_weight * rfs_factors - self.sample_epoch_size = len(self.weights) - - def __iter__(self): - start = self._rank - yield from itertools.islice( - self._infinite_indices(), start, None, self._world_size) - - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self._seed) - while True: - ids = torch.multinomial( - self.weights, self.sample_epoch_size, generator=g, - replacement=True) - nums = [(self.dataset_ids[ids] == i).sum().int().item() \ - for i in range(len(self.sizes))] - yield from ids - - -class MDAspectRatioGroupedDataset(torch.utils.data.IterableDataset): - def __init__(self, dataset, batch_size, num_datasets): - """ - """ - self.dataset = dataset - self.batch_size = batch_size - self._buckets = [[] for _ in range(2 * num_datasets)] - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - aspect_ratio_bucket_id = 0 if w > h else 1 - bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_size: - yield bucket[:] - del bucket[:] - - -class DIFFMDAspectRatioGroupedDataset(torch.utils.data.IterableDataset): - def __init__(self, dataset, batch_sizes, num_datasets): - """ - """ - self.dataset = dataset - self.batch_sizes = batch_sizes - self._buckets = [[] for _ in range(2 * num_datasets)] - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - aspect_ratio_bucket_id = 0 if w > h else 1 - bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_sizes[d['dataset_source']]: - yield bucket[:] - del bucket[:] - - -def repeat_factors_from_tag_frequency(dataset_dicts, repeat_thresh): - """ - """ - category_freq = defaultdict(int) - for dataset_dict in dataset_dicts: - cat_ids = dataset_dict['pos_category_ids'] - for cat_id in cat_ids: - category_freq[cat_id] += 1 - num_images = len(dataset_dicts) - for k, v in category_freq.items(): - category_freq[k] = v / num_images - - category_rep = { - cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) - for cat_id, cat_freq in category_freq.items() - } - - rep_factors = [] - for dataset_dict in dataset_dicts: - cat_ids = dataset_dict['pos_category_ids'] - rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0) - rep_factors.append(rep_factor) - - return torch.tensor(rep_factors, dtype=torch.float32) \ No newline at end of file diff --git 
a/spaces/akhaliq/Pop_Music_Transformer/app.py b/spaces/akhaliq/Pop_Music_Transformer/app.py deleted file mode 100644 index ba07eb97452da655c78459d36741ea0d1b34b503..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Pop_Music_Transformer/app.py +++ /dev/null @@ -1,55 +0,0 @@ -from model import PopMusicTransformer -import os -os.environ['CUDA_VISIBLE_DEVICES'] = '-1' -import tensorflow as tf -tf.compat.v1.disable_eager_execution() -import gradio as gr -import requests -import torchtext -import zipfile - -torchtext.utils.download_from_url("https://drive.google.com/uc?id=1gxuTSkF51NP04JZgTE46Pg4KQsbHQKGo", root=".") -torchtext.utils.download_from_url("https://drive.google.com/uc?id=1nAKjaeahlzpVAX0F9wjQEG_hL4UosSbo", root=".") - -with zipfile.ZipFile("REMI-tempo-checkpoint.zip","r") as zip_ref: - zip_ref.extractall(".") -with zipfile.ZipFile("REMI-tempo-chord-checkpoint.zip","r") as zip_ref: - zip_ref.extractall(".") - -url = 'https://github.com/AK391/remi/blob/master/input.midi?raw=true' -r = requests.get(url, allow_redirects=True) -open("input.midi", 'wb').write(r.content) - - -# declare model -model = PopMusicTransformer( - checkpoint='REMI-tempo-checkpoint', - is_training=False) - -def inference(midi): - # generate continuation - model.generate( - n_target_bar=4, - temperature=1.2, - topk=5, - output_path='./result/continuation.midi', - prompt=midi.name) - return './result/continuation.midi' - - -title = "Pop Music Transformer" -description = "demo for Pop Music Transformer. To use it, simply upload your midi file, or click one of the examples to load them. Read more at the links below." -article = "

        Pop Music Transformer: Beat-based Modeling and Generation of Expressive Pop Piano Compositions | Github Repo
        " - -examples = [ - ['input.midi'] -] -gr.Interface( - inference, - gr.inputs.File(label="Input Midi"), - gr.outputs.File(label="Output Midi"), - title=title, - description=description, - article=article, - examples=examples - ).launch() diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/arctic/voc1/local/data_download.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/arctic/voc1/local/data_download.sh deleted file mode 100644 index d9bff0f9606dcb8a210ee610509bd86a4e352716..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/arctic/voc1/local/data_download.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -download_dir=$1 -spk=$2 - -available_spks=( - "slt" "clb" "bdl" "rms" "jmk" "awb" "ksp" -) - -# check arguments -if [ $# != 2 ]; then - echo "Usage: $0 " - echo "Available speakers: ${available_spks[*]}" - exit 1 -fi - -set -euo pipefail - -# check speakers -if ! echo "${available_spks[*]}" | grep -q "${spk}"; then - echo "Specified spk (${spk}) is not available or not supported." >&2 - exit 1 -fi - -# download dataset -cwd=$(pwd) -if [ ! -e "${download_dir}/cmu_us_${spk}_arctic" ]; then - mkdir -p "${download_dir}" - cd "${download_dir}" - wget "http://festvox.org/cmu_arctic/cmu_arctic/packed/cmu_us_${spk}_arctic-0.95-release.tar.bz2" - tar xf "cmu_us_${spk}_arctic-0.95-release.tar.bz2" - rm "cmu_us_${spk}_arctic-0.95-release.tar.bz2" - cd "${cwd}" - echo "Successfully finished download." -else - echo "Already exists. Skip download." -fi diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/check.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/check.py deleted file mode 100644 index fb3ac8b9c9ea57ec1bb667cb8e904a8b5b2f9df2..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/check.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Validation of dependencies of packages -""" - -import logging -from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple - -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name - -from pip._internal.distributions import make_distribution_for_install_requirement -from pip._internal.metadata import get_default_environment -from pip._internal.metadata.base import DistributionVersion -from pip._internal.req.req_install import InstallRequirement - -logger = logging.getLogger(__name__) - - -class PackageDetails(NamedTuple): - version: DistributionVersion - dependencies: List[Requirement] - - -# Shorthands -PackageSet = Dict[NormalizedName, PackageDetails] -Missing = Tuple[NormalizedName, Requirement] -Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement] - -MissingDict = Dict[NormalizedName, List[Missing]] -ConflictingDict = Dict[NormalizedName, List[Conflicting]] -CheckResult = Tuple[MissingDict, ConflictingDict] -ConflictDetails = Tuple[PackageSet, CheckResult] - - -def create_package_set_from_installed() -> Tuple[PackageSet, bool]: - """Converts a list of distributions into a PackageSet.""" - package_set = {} - problems = False - env = get_default_environment() - for dist in env.iter_installed_distributions(local_only=False, skip=()): - name = dist.canonical_name - try: - dependencies = list(dist.iter_dependencies()) - package_set[name] = 
PackageDetails(dist.version, dependencies) - except (OSError, ValueError) as e: - # Don't crash on unreadable or broken metadata. - logger.warning("Error parsing requirements for %s: %s", name, e) - problems = True - return package_set, problems - - -def check_package_set( - package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None -) -> CheckResult: - """Check if a package set is consistent - - If should_ignore is passed, it should be a callable that takes a - package name and returns a boolean. - """ - - missing = {} - conflicting = {} - - for package_name, package_detail in package_set.items(): - # Info about dependencies of package_name - missing_deps: Set[Missing] = set() - conflicting_deps: Set[Conflicting] = set() - - if should_ignore and should_ignore(package_name): - continue - - for req in package_detail.dependencies: - name = canonicalize_name(req.name) - - # Check if it's missing - if name not in package_set: - missed = True - if req.marker is not None: - missed = req.marker.evaluate() - if missed: - missing_deps.add((name, req)) - continue - - # Check if there's a conflict - version = package_set[name].version - if not req.specifier.contains(version, prereleases=True): - conflicting_deps.add((name, version, req)) - - if missing_deps: - missing[package_name] = sorted(missing_deps, key=str) - if conflicting_deps: - conflicting[package_name] = sorted(conflicting_deps, key=str) - - return missing, conflicting - - -def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails: - """For checking if the dependency graph would be consistent after \ - installing given requirements - """ - # Start from the current state - package_set, _ = create_package_set_from_installed() - # Install packages - would_be_installed = _simulate_installation_of(to_install, package_set) - - # Only warn about directly-dependent packages; create a whitelist of them - whitelist = _create_whitelist(would_be_installed, package_set) - - return ( - package_set, - check_package_set( - package_set, should_ignore=lambda name: name not in whitelist - ), - ) - - -def _simulate_installation_of( - to_install: List[InstallRequirement], package_set: PackageSet -) -> Set[NormalizedName]: - """Computes the version of packages after installing to_install.""" - # Keep track of packages that were installed - installed = set() - - # Modify it as installing requirement_set would (assuming no errors) - for inst_req in to_install: - abstract_dist = make_distribution_for_install_requirement(inst_req) - dist = abstract_dist.get_metadata_distribution() - name = dist.canonical_name - package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies())) - - installed.add(name) - - return installed - - -def _create_whitelist( - would_be_installed: Set[NormalizedName], package_set: PackageSet -) -> Set[NormalizedName]: - packages_affected = set(would_be_installed) - - for package_name in package_set: - if package_name in packages_affected: - continue - - for req in package_set[package_name].dependencies: - if canonicalize_name(req.name) in packages_affected: - packages_affected.add(package_name) - break - - return packages_affected diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/PerlSAX.pm b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/PerlSAX.pm deleted file mode 100644 index f025cce0afdeb00a79a7c1d72cb522e1131062c0..0000000000000000000000000000000000000000 --- 
a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/PerlSAX.pm +++ /dev/null @@ -1,47 +0,0 @@ -package XML::DOM::PerlSAX; -use strict; - -BEGIN -{ - if ($^W) - { - warn "XML::DOM::PerlSAX has been renamed to XML::Handler::BuildDOM, please modify your code accordingly."; - } -} - -use XML::Handler::BuildDOM; -use vars qw{ @ISA }; -@ISA = qw{ XML::Handler::BuildDOM }; - -1; # package return code - -__END__ - -=head1 NAME - -XML::DOM::PerlSAX - Old name of L - -=head1 SYNOPSIS - - See L - -=head1 DESCRIPTION - -XML::DOM::PerlSAX was renamed to L to comply -with naming conventions for PerlSAX filters/handlers. - -For backward compatibility, this package will remain in existence -(it simply includes XML::Handler::BuildDOM), but it will print a warning when -running with I<'perl -w'>. - -=head1 AUTHOR - -Enno Derksen is the original author. - -Send bug reports, hints, tips, suggestions to T.J Mather at ->. - -=head1 SEE ALSO - -L, L - diff --git a/spaces/aliabid94/AutoGPT/run.sh b/spaces/aliabid94/AutoGPT/run.sh deleted file mode 100644 index edcbc44155b9ca9df83e283fdf976472c13e6492..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/AutoGPT/run.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -python scripts/check_requirements.py requirements.txt -if [ $? -eq 1 ] -then - echo Installing missing packages... - pip install -r requirements.txt -fi -python -m autogpt $@ -read -p "Press any key to continue..." diff --git a/spaces/allandclive/Uganda_MMS/vits/monotonic_align/setup.py b/spaces/allandclive/Uganda_MMS/vits/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/allandclive/Uganda_MMS/vits/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/allknowingroger/Image-Models-Test167/README.md b/spaces/allknowingroger/Image-Models-Test167/README.md deleted file mode 100644 index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test167/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test177/README.md b/spaces/allknowingroger/Image-Models-Test177/README.md deleted file mode 100644 index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test177/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/huggingface/assets/index-28811a6d.js b/spaces/allknowingroger/huggingface/assets/index-28811a6d.js deleted file mode 100644 index c6b9922854c2de7dc26081fc7f474d5e3cd7e70f..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/huggingface/assets/index-28811a6d.js +++ /dev/null @@ -1,41 +0,0 @@ -var hc=Object.defineProperty;var yc=(e,t,n)=>t in 
e?hc(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var Et=(e,t,n)=>(yc(e,typeof t!="symbol"?t+"":t,n),n);(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const i of l)if(i.type==="childList")for(const o of i.addedNodes)o.tagName==="LINK"&&o.rel==="modulepreload"&&r(o)}).observe(document,{childList:!0,subtree:!0});function n(l){const i={};return l.integrity&&(i.integrity=l.integrity),l.referrerPolicy&&(i.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?i.credentials="include":l.crossOrigin==="anonymous"?i.credentials="omit":i.credentials="same-origin",i}function r(l){if(l.ep)return;l.ep=!0;const i=n(l);fetch(l.href,i)}})();var Mr={},vc={get exports(){return Mr},set exports(e){Mr=e}},ul={},ne={},gc={get exports(){return ne},set exports(e){ne=e}},T={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var bn=Symbol.for("react.element"),wc=Symbol.for("react.portal"),kc=Symbol.for("react.fragment"),Sc=Symbol.for("react.strict_mode"),Ec=Symbol.for("react.profiler"),xc=Symbol.for("react.provider"),_c=Symbol.for("react.context"),Cc=Symbol.for("react.forward_ref"),Nc=Symbol.for("react.suspense"),Pc=Symbol.for("react.memo"),zc=Symbol.for("react.lazy"),Qo=Symbol.iterator;function Oc(e){return e===null||typeof e!="object"?null:(e=Qo&&e[Qo]||e["@@iterator"],typeof e=="function"?e:null)}var ns={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},rs=Object.assign,ls={};function cn(e,t,n){this.props=e,this.context=t,this.refs=ls,this.updater=n||ns}cn.prototype.isReactComponent={};cn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};cn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function is(){}is.prototype=cn.prototype;function Xi(e,t,n){this.props=e,this.context=t,this.refs=ls,this.updater=n||ns}var Yi=Xi.prototype=new is;Yi.constructor=Xi;rs(Yi,cn.prototype);Yi.isPureReactComponent=!0;var Ko=Array.isArray,os=Object.prototype.hasOwnProperty,Gi={current:null},us={key:!0,ref:!0,__self:!0,__source:!0};function ss(e,t,n){var r,l={},i=null,o=null;if(t!=null)for(r in t.ref!==void 0&&(o=t.ref),t.key!==void 0&&(i=""+t.key),t)os.call(t,r)&&!us.hasOwnProperty(r)&&(l[r]=t[r]);var u=arguments.length-2;if(u===1)l.children=n;else if(1]+)>;\s+rel="([^"]+)"/g;return Object.fromEntries([...e.matchAll(t)].map(([,n,r])=>[r,n]))}var Qc=["pipeline_tag","private","gated","downloads","likes"];async function*Kc(e){var r,l;Hc(e==null?void 0:e.credentials);const t=new URLSearchParams([...Object.entries({limit:"500",...(r=e==null?void 0:e.search)!=null&&r.owner?{author:e.search.owner}:void 0,...(l=e==null?void 0:e.search)!=null&&l.task?{pipeline_tag:e.search.task}:void 0}),...Qc.map(i=>["expand",i])]).toString();let n=`${(e==null?void 0:e.hubUrl)||$c}/api/models?${t}`;for(;n;){const i=await fetch(n,{headers:{accept:"application/json",...e!=null&&e.credentials?{Authorization:`Bearer 
${e.credentials.accessToken}`}:void 0}});if(!i.ok)throw Vc(i);const o=await i.json();for(const s of o)yield{id:s._id,name:s.id,private:s.private,task:s.pipeline_tag,downloads:s.downloads,gated:s.gated,likes:s.likes,updatedAt:new Date(s.lastModified)};const u=i.headers.get("Link");n=u?Wc(u).next:void 0}}var Xc=Object.defineProperty,Yc=(e,t)=>{for(var n in t)Xc(e,n,{get:t[n],enumerable:!0})},Ji={};Yc(Ji,{audioClassification:()=>bc,automaticSpeechRecognition:()=>ef,conversational:()=>uf,featureExtraction:()=>sf,fillMask:()=>af,imageClassification:()=>tf,imageSegmentation:()=>nf,imageToText:()=>rf,objectDetection:()=>lf,questionAnswering:()=>cf,request:()=>K,sentenceSimilarity:()=>ff,streamingRequest:()=>qi,summarization:()=>df,tableQuestionAnswering:()=>pf,textClassification:()=>mf,textGeneration:()=>hf,textGenerationStream:()=>yf,textToImage:()=>of,tokenClassification:()=>vf,translation:()=>gf,zeroShotClassification:()=>wf});var Gc="https://api-inference.huggingface.co/models/";function cs(e,t){const{model:n,accessToken:r,...l}=e,i={};r&&(i.Authorization=`Bearer ${r}`);const o="data"in e&&!!e.data;o?(t!=null&&t.wait_for_model&&(i["X-Wait-For-Model"]="true"),(t==null?void 0:t.use_cache)===!1&&(i["X-Use-Cache"]="false"),t!=null&&t.dont_load_model&&(i["X-Load-Model"]="0")):i["Content-Type"]="application/json";const u=/^http(s?):/.test(n)||n.startsWith("/")?n:`${Gc}${n}`,s={headers:i,method:"POST",body:o?e.data:JSON.stringify({...l,options:t}),credentials:t!=null&&t.includeCredentials?"include":"same-origin"};return{url:u,info:s}}async function K(e,t){var i,o;const{url:n,info:r}=cs(e,t),l=await fetch(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return K(e,{...t,wait_for_model:!0});if(!l.ok){if((i=l.headers.get("Content-Type"))!=null&&i.startsWith("application/json")){const u=await l.json();if(u.error)throw new Error(u.error)}throw new Error("An error occurred while fetching the blob")}return(o=l.headers.get("Content-Type"))!=null&&o.startsWith("application/json")?await l.json():await l.blob()}function Zc(e){let t,n,r,l=!1;return function(o){t===void 0?(t=o,n=0,r=-1):t=qc(t,o);const u=t.length;let s=0;for(;n0){const s=l.decode(o.subarray(0,u)),c=u+(o[u+1]===32?2:1),m=l.decode(o.subarray(c));switch(s){case"data":r.data=r.data?r.data+` -`+m:m;break;case"event":r.event=m;break;case"id":e(r.id=m);break;case"retry":const h=parseInt(m,10);isNaN(h)||t(r.retry=h);break}}}}function qc(e,t){const n=new Uint8Array(e.length+t.length);return n.set(e),n.set(t,e.length),n}function Yo(){return{data:"",event:"",id:"",retry:void 0}}async function*qi(e,t){var c;const{url:n,info:r}=cs({...e,stream:!0},t),l=await fetch(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return qi(e,{...t,wait_for_model:!0});if(!l.ok){if((c=l.headers.get("Content-Type"))!=null&&c.startsWith("application/json")){const m=await l.json();if(m.error)throw new Error(m.error)}throw new Error(`Server response contains error: ${l.status}`)}if(l.headers.get("content-type")!=="text/event-stream")throw new Error("Server does not support event stream content type, it returned "+l.headers.get("content-type"));if(!l.body)return;const i=l.body.getReader();let o=[];const s=Zc(Jc(()=>{},()=>{},m=>{o.push(m)}));try{for(;;){const{done:m,value:h}=await i.read();if(m)return;s(h);for(const p of o)p.data.length>0&&(yield JSON.parse(p.data));o=[]}}finally{i.releaseLock()}}var Z=class extends TypeError{constructor(e){super(`Invalid inference output: ${e}. 
Use the 'request' method with the same parameters to do a custom call with no type checking.`),this.name="InferenceOutputError"}};async function bc(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Z("Expected Array<{label: string, score: number}>");return n}async function ef(e,t){const n=await K(e,t);if(!(typeof(n==null?void 0:n.text)=="string"))throw new Z("Expected {text: string}");return n}async function tf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Z("Expected Array<{label: string, score: number}>");return n}async function nf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.mask=="string"&&typeof l.score=="number")))throw new Z("Expected Array<{label: string, mask: string, score: number}>");return n}async function rf(e,t){var r;const n=(r=await K(e,t))==null?void 0:r[0];if(typeof(n==null?void 0:n.generated_text)!="string")throw new Z("Expected {generated_text: string}");return n}async function lf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number"&&typeof l.box.xmin=="number"&&typeof l.box.ymin=="number"&&typeof l.box.xmax=="number"&&typeof l.box.ymax=="number")))throw new Z("Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>");return n}async function of(e,t){const n=await K(e,t);if(!(n&&n instanceof Blob))throw new Z("Expected Blob");return n}async function uf(e,t){const n=await K(e,t);if(!(Array.isArray(n.conversation.generated_responses)&&n.conversation.generated_responses.every(l=>typeof l=="string")&&Array.isArray(n.conversation.past_user_inputs)&&n.conversation.past_user_inputs.every(l=>typeof l=="string")&&typeof n.generated_text=="string"&&Array.isArray(n.warnings)&&n.warnings.every(l=>typeof l=="string")))throw new Z("Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}");return n}async function sf(e,t){const n=await K(e,t);let r=!0;if(Array.isArray(n)){for(const l of n)if(Array.isArray(l)){if(r=l.every(i=>typeof i=="number"),!r)break}else if(typeof l!="number"){r=!1;break}}else r=!1;if(!r)throw new Z("Expected Array");return n}async function af(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.score=="number"&&typeof l.sequence=="string"&&typeof l.token=="number"&&typeof l.token_str=="string")))throw new Z("Expected Array<{score: number, sequence: string, token: number, token_str: string}>");return n}async function cf(e,t){const n=await K(e,t);if(!(typeof(n==null?void 0:n.answer)=="string"&&typeof n.end=="number"&&typeof n.score=="number"&&typeof n.start=="number"))throw new Z("Expected {answer: string, end: number, score: number, start: number}");return n}async function ff(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l=="number")))throw new Z("Expected number[]");return n}async function df(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.summary_text)=="string")))throw new Z("Expected Array<{summary_text: string}>");return n==null?void 0:n[0]}async function pf(e,t){const n=await K(e,t);if(!(typeof(n==null?void 0:n.aggregator)=="string"&&typeof n.answer=="string"&&Array.isArray(n.cells)&&n.cells.every(l=>typeof l=="string")&&Array.isArray(n.coordinates)&&n.coordinates.every(l=>Array.isArray(l)&&l.every(i=>typeof 
i=="number"))))throw new Z("Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}");return n}async function mf(e,t){var l;const n=(l=await K(e,t))==null?void 0:l[0];if(!(Array.isArray(n)&&n.every(i=>typeof(i==null?void 0:i.label)=="string"&&typeof i.score=="number")))throw new Z("Expected Array<{label: string, score: number}>");return n}async function hf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.generated_text)=="string")))throw new Z("Expected Array<{generated_text: string}>");return n==null?void 0:n[0]}async function*yf(e,t){yield*qi(e,t)}function fs(e){return Array.isArray(e)?e:[e]}async function vf(e,t){const n=fs(await K(e,t));if(!(Array.isArray(n)&&n.every(l=>typeof l.end=="number"&&typeof l.entity_group=="string"&&typeof l.score=="number"&&typeof l.start=="number"&&typeof l.word=="string")))throw new Z("Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>");return n}async function gf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.translation_text)=="string")))throw new Z("Expected type Array<{translation_text: string}>");return n==null?void 0:n[0]}async function wf(e,t){const n=fs(await K(e,t));if(!(Array.isArray(n)&&n.every(l=>Array.isArray(l.labels)&&l.labels.every(i=>typeof i=="string")&&Array.isArray(l.scores)&&l.scores.every(i=>typeof i=="number")&&typeof l.sequence=="string")))throw new Z("Expected Array<{labels: string[], scores: number[], sequence: string}>");return n}var kf=class{constructor(e="",t={}){Et(this,"accessToken");Et(this,"defaultOptions");this.accessToken=e,this.defaultOptions=t;for(const[n,r]of Object.entries(Ji))Object.defineProperty(this,n,{enumerable:!1,value:(l,i)=>r({...l,accessToken:e},{...t,...i})})}endpoint(e){return new Sf(e,this.accessToken,this.defaultOptions)}},Sf=class{constructor(e,t="",n={}){for(const[r,l]of Object.entries(Ji))Object.defineProperty(this,r,{enumerable:!1,value:(i,o)=>l({...i,accessToken:t,model:e},{...n,...o})})}},jr=function(){return jr=Object.assign||function(t){for(var n,r=1,l=arguments.length;r0&&n>="0"&&n<="9"?"_"+n+r:""+n.toUpperCase()+r}function Nf(e,t){return t===void 0&&(t={}),Cf(e,jr({delimiter:"",transform:ds},t))}function Pf(e,t){return t===0?e.toLowerCase():ds(e,t)}function zf(e,t){return t===void 0&&(t={}),Nf(e,jr({transform:Pf},t))}var bl={},Of={get exports(){return bl},set exports(e){bl=e}},Ee={},ei={},Tf={get exports(){return ei},set exports(e){ei=e}},ps={};/** - * @license React - * scheduler.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */(function(e){function t(x,z){var O=x.length;x.push(z);e:for(;0>>1,J=x[W];if(0>>1;Wl(Cl,O))Stl(ir,Cl)?(x[W]=ir,x[St]=O,W=St):(x[W]=Cl,x[kt]=O,W=kt);else if(Stl(ir,O))x[W]=ir,x[St]=O,W=St;else break e}}return z}function l(x,z){var O=x.sortIndex-z.sortIndex;return O!==0?O:x.id-z.id}if(typeof performance=="object"&&typeof performance.now=="function"){var i=performance;e.unstable_now=function(){return i.now()}}else{var o=Date,u=o.now();e.unstable_now=function(){return o.now()-u}}var s=[],c=[],m=1,h=null,p=3,g=!1,w=!1,k=!1,D=typeof setTimeout=="function"?setTimeout:null,f=typeof clearTimeout=="function"?clearTimeout:null,a=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function d(x){for(var z=n(c);z!==null;){if(z.callback===null)r(c);else if(z.startTime<=x)r(c),z.sortIndex=z.expirationTime,t(s,z);else break;z=n(c)}}function y(x){if(k=!1,d(x),!w)if(n(s)!==null)w=!0,xl(E);else{var z=n(c);z!==null&&_l(y,z.startTime-x)}}function E(x,z){w=!1,k&&(k=!1,f(N),N=-1),g=!0;var O=p;try{for(d(z),h=n(s);h!==null&&(!(h.expirationTime>z)||x&&!Le());){var W=h.callback;if(typeof W=="function"){h.callback=null,p=h.priorityLevel;var J=W(h.expirationTime<=z);z=e.unstable_now(),typeof J=="function"?h.callback=J:h===n(s)&&r(s),d(z)}else r(s);h=n(s)}if(h!==null)var lr=!0;else{var kt=n(c);kt!==null&&_l(y,kt.startTime-z),lr=!1}return lr}finally{h=null,p=O,g=!1}}var _=!1,C=null,N=-1,H=5,L=-1;function Le(){return!(e.unstable_now()-Lx||125W?(x.sortIndex=O,t(c,x),n(s)===null&&x===n(c)&&(k?(f(N),N=-1):k=!0,_l(y,O-W))):(x.sortIndex=J,t(s,x),w||g||(w=!0,xl(E))),x},e.unstable_shouldYield=Le,e.unstable_wrapCallback=function(x){var z=p;return function(){var O=p;p=z;try{return x.apply(this,arguments)}finally{p=O}}}})(ps);(function(e){e.exports=ps})(Tf);/** - * @license React - * react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var ms=ne,Se=ei;function v(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),ti=Object.prototype.hasOwnProperty,Lf=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Zo={},Jo={};function Rf(e){return ti.call(Jo,e)?!0:ti.call(Zo,e)?!1:Lf.test(e)?Jo[e]=!0:(Zo[e]=!0,!1)}function If(e,t,n,r){if(n!==null&&n.type===0)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return r?!1:n!==null?!n.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function Af(e,t,n,r){if(t===null||typeof t>"u"||If(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case 3:return!t;case 4:return t===!1;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}function de(e,t,n,r,l,i,o){this.acceptsBooleans=t===2||t===3||t===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=i,this.removeEmptyString=o}var le={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){le[e]=new de(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];le[t]=new de(t,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){le[e]=new de(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){le[e]=new de(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){le[e]=new de(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){le[e]=new de(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){le[e]=new de(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){le[e]=new de(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){le[e]=new de(e,5,!1,e.toLowerCase(),null,!1,!1)});var bi=/[\-:]([a-z])/g;function eo(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering 
underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(bi,eo);le[t]=new de(t,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(bi,eo);le[t]=new de(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(bi,eo);le[t]=new de(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){le[e]=new de(e,1,!1,e.toLowerCase(),null,!1,!1)});le.xlinkHref=new de("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){le[e]=new de(e,1,!1,e.toLowerCase(),null,!0,!0)});function to(e,t,n,r){var l=le.hasOwnProperty(t)?le[t]:null;(l!==null?l.type!==0:r||!(2u||l[o]!==i[u]){var s=` -`+l[o].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=o&&0<=u);break}}}finally{zl=!1,Error.prepareStackTrace=n}return(e=e?e.displayName||e.name:"")?xn(e):""}function Mf(e){switch(e.tag){case 5:return xn(e.type);case 16:return xn("Lazy");case 13:return xn("Suspense");case 19:return xn("SuspenseList");case 0:case 2:case 15:return e=Ol(e.type,!1),e;case 11:return e=Ol(e.type.render,!1),e;case 1:return e=Ol(e.type,!0),e;default:return""}}function ii(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case Ut:return"Fragment";case Ft:return"Portal";case ni:return"Profiler";case no:return"StrictMode";case ri:return"Suspense";case li:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case vs:return(e.displayName||"Context")+".Consumer";case ys:return(e._context.displayName||"Context")+".Provider";case ro:var t=e.render;return e=e.displayName,e||(e=t.displayName||t.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case lo:return t=e.displayName||null,t!==null?t:ii(e.type)||"Memo";case tt:t=e._payload,e=e._init;try{return ii(e(t))}catch{}}return null}function jf(e){var t=e.type;switch(e.tag){case 24:return"Cache";case 9:return(t.displayName||"Context")+".Consumer";case 10:return(t._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=t.render,e=e.displayName||e.name||"",t.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return t;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return ii(t);case 8:return t===no?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof t=="function")return t.displayName||t.name||null;if(typeof t=="string")return t}return null}function ht(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function ws(e){var t=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function Df(e){var t=ws(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&typeof n<"u"&&typeof n.get=="function"&&typeof n.set=="function"){var l=n.get,i=n.set;return 
Object.defineProperty(e,t,{configurable:!0,get:function(){return l.call(this)},set:function(o){r=""+o,i.call(this,o)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(o){r=""+o},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}function sr(e){e._valueTracker||(e._valueTracker=Df(e))}function ks(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=ws(e)?e.checked?"true":"false":e.value),e=r,e!==n?(t.setValue(e),!0):!1}function Dr(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function oi(e,t){var n=t.checked;return V({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:n??e._wrapperState.initialChecked})}function bo(e,t){var n=t.defaultValue==null?"":t.defaultValue,r=t.checked!=null?t.checked:t.defaultChecked;n=ht(t.value!=null?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:t.type==="checkbox"||t.type==="radio"?t.checked!=null:t.value!=null}}function Ss(e,t){t=t.checked,t!=null&&to(e,"checked",t,!1)}function ui(e,t){Ss(e,t);var n=ht(t.value),r=t.type;if(n!=null)r==="number"?(n===0&&e.value===""||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}t.hasOwnProperty("value")?si(e,t.type,n):t.hasOwnProperty("defaultValue")&&si(e,t.type,ht(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(e.defaultChecked=!!t.defaultChecked)}function eu(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!(r!=="submit"&&r!=="reset"||t.value!==void 0&&t.value!==null))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}n=e.name,n!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,n!==""&&(e.name=n)}function si(e,t,n){(t!=="number"||Dr(e.ownerDocument)!==e)&&(n==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}var _n=Array.isArray;function Zt(e,t,n,r){if(e=e.options,t){t={};for(var l=0;l"+t.valueOf().toString()+"",t=ar.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function Dn(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&n.nodeType===3){n.nodeValue=t;return}}e.textContent=t}var Pn={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},Ff=["Webkit","ms","Moz","O"];Object.keys(Pn).forEach(function(e){Ff.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),Pn[t]=Pn[e]})});function Cs(e,t,n){return t==null||typeof t=="boolean"||t===""?"":n||typeof t!="number"||t===0||Pn.hasOwnProperty(e)&&Pn[e]?(""+t).trim():t+"px"}function Ns(e,t){e=e.style;for(var n in t)if(t.hasOwnProperty(n)){var r=n.indexOf("--")===0,l=Cs(n,t[n],r);n==="float"&&(n="cssFloat"),r?e.setProperty(n,l):e[n]=l}}var 
Uf=V({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function fi(e,t){if(t){if(Uf[e]&&(t.children!=null||t.dangerouslySetInnerHTML!=null))throw Error(v(137,e));if(t.dangerouslySetInnerHTML!=null){if(t.children!=null)throw Error(v(60));if(typeof t.dangerouslySetInnerHTML!="object"||!("__html"in t.dangerouslySetInnerHTML))throw Error(v(61))}if(t.style!=null&&typeof t.style!="object")throw Error(v(62))}}function di(e,t){if(e.indexOf("-")===-1)return typeof t.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var pi=null;function io(e){return e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var mi=null,Jt=null,qt=null;function ru(e){if(e=nr(e)){if(typeof mi!="function")throw Error(v(280));var t=e.stateNode;t&&(t=dl(t),mi(e.stateNode,e.type,t))}}function Ps(e){Jt?qt?qt.push(e):qt=[e]:Jt=e}function zs(){if(Jt){var e=Jt,t=qt;if(qt=Jt=null,ru(e),t)for(e=0;e>>=0,e===0?32:31-(Zf(e)/Jf|0)|0}var cr=64,fr=4194304;function Cn(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function Vr(e,t){var n=e.pendingLanes;if(n===0)return 0;var r=0,l=e.suspendedLanes,i=e.pingedLanes,o=n&268435455;if(o!==0){var u=o&~l;u!==0?r=Cn(u):(i&=o,i!==0&&(r=Cn(i)))}else o=n&~l,o!==0?r=Cn(o):i!==0&&(r=Cn(i));if(r===0)return 0;if(t!==0&&t!==r&&!(t&l)&&(l=r&-r,i=t&-t,l>=i||l===16&&(i&4194240)!==0))return t;if(r&4&&(r|=n&16),t=e.entangledLanes,t!==0)for(e=e.entanglements,t&=r;0n;n++)t.push(e);return t}function er(e,t,n){e.pendingLanes|=t,t!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,t=31-je(t),e[t]=n}function td(e,t){var n=e.pendingLanes&~t;e.pendingLanes=t,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=t,e.mutableReadLanes&=t,e.entangledLanes&=t,t=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=On),du=String.fromCharCode(32),pu=!1;function Ys(e,t){switch(e){case"keyup":return Od.indexOf(t.keyCode)!==-1;case"keydown":return t.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Gs(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var $t=!1;function Ld(e,t){switch(e){case"compositionend":return Gs(t);case"keypress":return t.which!==32?null:(pu=!0,du);case"textInput":return e=t.data,e===du&&pu?null:e;default:return null}}function Rd(e,t){if($t)return e==="compositionend"||!mo&&Ys(e,t)?(e=Ks(),Nr=co=it=null,$t=!1,e):null;switch(e){case"paste":return null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=t)return{node:n,offset:t-e};e=r}e:{for(;n;){if(n.nextSibling){n=n.nextSibling;break e}n=n.parentNode}n=void 0}n=vu(n)}}function bs(e,t){return e&&t?e===t?!0:e&&e.nodeType===3?!1:t&&t.nodeType===3?bs(e,t.parentNode):"contains"in 
e?e.contains(t):e.compareDocumentPosition?!!(e.compareDocumentPosition(t)&16):!1:!1}function ea(){for(var e=window,t=Dr();t instanceof e.HTMLIFrameElement;){try{var n=typeof t.contentWindow.location.href=="string"}catch{n=!1}if(n)e=t.contentWindow;else break;t=Dr(e.document)}return t}function ho(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&(t==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||t==="textarea"||e.contentEditable==="true")}function Vd(e){var t=ea(),n=e.focusedElem,r=e.selectionRange;if(t!==n&&n&&n.ownerDocument&&bs(n.ownerDocument.documentElement,n)){if(r!==null&&ho(n)){if(t=r.start,e=r.end,e===void 0&&(e=t),"selectionStart"in n)n.selectionStart=t,n.selectionEnd=Math.min(e,n.value.length);else if(e=(t=n.ownerDocument||document)&&t.defaultView||window,e.getSelection){e=e.getSelection();var l=n.textContent.length,i=Math.min(r.start,l);r=r.end===void 0?i:Math.min(r.end,l),!e.extend&&i>r&&(l=r,r=i,i=l),l=gu(n,i);var o=gu(n,r);l&&o&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==o.node||e.focusOffset!==o.offset)&&(t=t.createRange(),t.setStart(l.node,l.offset),e.removeAllRanges(),i>r?(e.addRange(t),e.extend(o.node,o.offset)):(t.setEnd(o.node,o.offset),e.addRange(t)))}}for(t=[],e=n;e=e.parentNode;)e.nodeType===1&&t.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof n.focus=="function"&&n.focus(),n=0;n=document.documentMode,Vt=null,ki=null,Ln=null,Si=!1;function wu(e,t,n){var r=n.window===n?n.document:n.nodeType===9?n:n.ownerDocument;Si||Vt==null||Vt!==Dr(r)||(r=Vt,"selectionStart"in r&&ho(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),Ln&&Hn(Ln,r)||(Ln=r,r=Wr(ki,"onSelect"),0Wt||(e.current=Pi[Wt],Pi[Wt]=null,Wt--)}function A(e,t){Wt++,Pi[Wt]=e.current,e.current=t}var yt={},se=gt(yt),he=gt(!1),Tt=yt;function rn(e,t){var n=e.type.contextTypes;if(!n)return yt;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var l={},i;for(i in n)l[i]=t[i];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=l),l}function ye(e){return e=e.childContextTypes,e!=null}function Kr(){j(he),j(se)}function Nu(e,t,n){if(se.current!==yt)throw Error(v(168));A(se,t),A(he,n)}function aa(e,t,n){var r=e.stateNode;if(t=t.childContextTypes,typeof r.getChildContext!="function")return n;r=r.getChildContext();for(var l in r)if(!(l in t))throw Error(v(108,jf(e)||"Unknown",l));return V({},n,r)}function Xr(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||yt,Tt=se.current,A(se,e),A(he,he.current),!0}function Pu(e,t,n){var r=e.stateNode;if(!r)throw Error(v(169));n?(e=aa(e,t,Tt),r.__reactInternalMemoizedMergedChildContext=e,j(he),j(se),A(se,e)):j(he),A(he,n)}var Qe=null,pl=!1,Hl=!1;function ca(e){Qe===null?Qe=[e]:Qe.push(e)}function bd(e){pl=!0,ca(e)}function wt(){if(!Hl&&Qe!==null){Hl=!0;var e=0,t=I;try{var n=Qe;for(I=1;e>=o,l-=o,Ke=1<<32-je(t)+l|n<N?(H=C,C=null):H=C.sibling;var L=p(f,C,d[N],y);if(L===null){C===null&&(C=H);break}e&&C&&L.alternate===null&&t(f,C),a=i(L,a,N),_===null?E=L:_.sibling=L,_=L,C=H}if(N===d.length)return n(f,C),F&&xt(f,N),E;if(C===null){for(;NN?(H=C,C=null):H=C.sibling;var 
Le=p(f,C,L.value,y);if(Le===null){C===null&&(C=H);break}e&&C&&Le.alternate===null&&t(f,C),a=i(Le,a,N),_===null?E=Le:_.sibling=Le,_=Le,C=H}if(L.done)return n(f,C),F&&xt(f,N),E;if(C===null){for(;!L.done;N++,L=d.next())L=h(f,L.value,y),L!==null&&(a=i(L,a,N),_===null?E=L:_.sibling=L,_=L);return F&&xt(f,N),E}for(C=r(f,C);!L.done;N++,L=d.next())L=g(C,f,N,L.value,y),L!==null&&(e&&L.alternate!==null&&C.delete(L.key===null?N:L.key),a=i(L,a,N),_===null?E=L:_.sibling=L,_=L);return e&&C.forEach(function(pn){return t(f,pn)}),F&&xt(f,N),E}function D(f,a,d,y){if(typeof d=="object"&&d!==null&&d.type===Ut&&d.key===null&&(d=d.props.children),typeof d=="object"&&d!==null){switch(d.$$typeof){case ur:e:{for(var E=d.key,_=a;_!==null;){if(_.key===E){if(E=d.type,E===Ut){if(_.tag===7){n(f,_.sibling),a=l(_,d.props.children),a.return=f,f=a;break e}}else if(_.elementType===E||typeof E=="object"&&E!==null&&E.$$typeof===tt&&Au(E)===_.type){n(f,_.sibling),a=l(_,d.props),a.ref=kn(f,_,d),a.return=f,f=a;break e}n(f,_);break}else t(f,_);_=_.sibling}d.type===Ut?(a=Ot(d.props.children,f.mode,y,d.key),a.return=f,f=a):(y=Ar(d.type,d.key,d.props,null,f.mode,y),y.ref=kn(f,a,d),y.return=f,f=y)}return o(f);case Ft:e:{for(_=d.key;a!==null;){if(a.key===_)if(a.tag===4&&a.stateNode.containerInfo===d.containerInfo&&a.stateNode.implementation===d.implementation){n(f,a.sibling),a=l(a,d.children||[]),a.return=f,f=a;break e}else{n(f,a);break}else t(f,a);a=a.sibling}a=Jl(d,f.mode,y),a.return=f,f=a}return o(f);case tt:return _=d._init,D(f,a,_(d._payload),y)}if(_n(d))return w(f,a,d,y);if(hn(d))return k(f,a,d,y);gr(f,d)}return typeof d=="string"&&d!==""||typeof d=="number"?(d=""+d,a!==null&&a.tag===6?(n(f,a.sibling),a=l(a,d),a.return=f,f=a):(n(f,a),a=Zl(d,f.mode,y),a.return=f,f=a),o(f)):n(f,a)}return D}var on=ga(!0),wa=ga(!1),rr={},He=gt(rr),Xn=gt(rr),Yn=gt(rr);function Pt(e){if(e===rr)throw Error(v(174));return e}function _o(e,t){switch(A(Yn,t),A(Xn,e),A(He,rr),e=t.nodeType,e){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:ci(null,"");break;default:e=e===8?t.parentNode:t,t=e.namespaceURI||null,e=e.tagName,t=ci(t,e)}j(He),A(He,t)}function un(){j(He),j(Xn),j(Yn)}function ka(e){Pt(Yn.current);var t=Pt(He.current),n=ci(t,e.type);t!==n&&(A(Xn,e),A(He,n))}function Co(e){Xn.current===e&&(j(He),j(Xn))}var U=gt(0);function br(e){for(var t=e;t!==null;){if(t.tag===13){var n=t.memoizedState;if(n!==null&&(n=n.dehydrated,n===null||n.data==="$?"||n.data==="$!"))return t}else if(t.tag===19&&t.memoizedProps.revealOrder!==void 0){if(t.flags&128)return t}else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===e)break;for(;t.sibling===null;){if(t.return===null||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}var Wl=[];function No(){for(var e=0;en?n:4,e(!0);var r=Ql.transition;Ql.transition={};try{e(!1),t()}finally{I=n,Ql.transition=r}}function ja(){return Te().memoizedState}function rp(e,t,n){var r=pt(e);if(n={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null},Da(e))Fa(t,n);else if(n=ma(e,t,n,r),n!==null){var l=ce();De(n,e,r,l),Ua(n,t,r)}}function lp(e,t,n){var r=pt(e),l={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null};if(Da(e))Fa(t,l);else{var i=e.alternate;if(e.lanes===0&&(i===null||i.lanes===0)&&(i=t.lastRenderedReducer,i!==null))try{var o=t.lastRenderedState,u=i(o,n);if(l.hasEagerState=!0,l.eagerState=u,Fe(u,o)){var 
s=t.interleaved;s===null?(l.next=l,Eo(t)):(l.next=s.next,s.next=l),t.interleaved=l;return}}catch{}finally{}n=ma(e,t,l,r),n!==null&&(l=ce(),De(n,e,r,l),Ua(n,t,r))}}function Da(e){var t=e.alternate;return e===$||t!==null&&t===$}function Fa(e,t){Rn=el=!0;var n=e.pending;n===null?t.next=t:(t.next=n.next,n.next=t),e.pending=t}function Ua(e,t,n){if(n&4194240){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,uo(e,n)}}var tl={readContext:Oe,useCallback:ie,useContext:ie,useEffect:ie,useImperativeHandle:ie,useInsertionEffect:ie,useLayoutEffect:ie,useMemo:ie,useReducer:ie,useRef:ie,useState:ie,useDebugValue:ie,useDeferredValue:ie,useTransition:ie,useMutableSource:ie,useSyncExternalStore:ie,useId:ie,unstable_isNewReconciler:!1},ip={readContext:Oe,useCallback:function(e,t){return $e().memoizedState=[e,t===void 0?null:t],e},useContext:Oe,useEffect:ju,useImperativeHandle:function(e,t,n){return n=n!=null?n.concat([e]):null,Tr(4194308,4,La.bind(null,t,e),n)},useLayoutEffect:function(e,t){return Tr(4194308,4,e,t)},useInsertionEffect:function(e,t){return Tr(4,2,e,t)},useMemo:function(e,t){var n=$e();return t=t===void 0?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=$e();return t=n!==void 0?n(t):t,r.memoizedState=r.baseState=t,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:t},r.queue=e,e=e.dispatch=rp.bind(null,$,e),[r.memoizedState,e]},useRef:function(e){var t=$e();return e={current:e},t.memoizedState=e},useState:Mu,useDebugValue:Lo,useDeferredValue:function(e){return $e().memoizedState=e},useTransition:function(){var e=Mu(!1),t=e[0];return e=np.bind(null,e[1]),$e().memoizedState=e,[t,e]},useMutableSource:function(){},useSyncExternalStore:function(e,t,n){var r=$,l=$e();if(F){if(n===void 0)throw Error(v(407));n=n()}else{if(n=t(),ee===null)throw Error(v(349));Rt&30||xa(r,t,n)}l.memoizedState=n;var i={value:n,getSnapshot:t};return l.queue=i,ju(Ca.bind(null,r,i,e),[e]),r.flags|=2048,Jn(9,_a.bind(null,r,i,n,t),void 0,null),n},useId:function(){var e=$e(),t=ee.identifierPrefix;if(F){var n=Xe,r=Ke;n=(r&~(1<<32-je(r)-1)).toString(32)+n,t=":"+t+"R"+n,n=Gn++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=o.createElement(n,{is:r.is}):(e=o.createElement(n),n==="select"&&(o=e,r.multiple?o.multiple=!0:r.size&&(o.size=r.size))):e=o.createElementNS(e,n),e[Ve]=t,e[Kn]=r,Ya(e,t,!1,!1),t.stateNode=e;e:{switch(o=di(n,r),n){case"dialog":M("cancel",e),M("close",e),l=r;break;case"iframe":case"object":case"embed":M("load",e),l=r;break;case"video":case"audio":for(l=0;lan&&(t.flags|=128,r=!0,Sn(i,!1),t.lanes=4194304)}else{if(!r)if(e=br(o),e!==null){if(t.flags|=128,r=!0,n=e.updateQueue,n!==null&&(t.updateQueue=n,t.flags|=4),Sn(i,!0),i.tail===null&&i.tailMode==="hidden"&&!o.alternate&&!F)return oe(t),null}else 2*Q()-i.renderingStartTime>an&&n!==1073741824&&(t.flags|=128,r=!0,Sn(i,!1),t.lanes=4194304);i.isBackwards?(o.sibling=t.child,t.child=o):(n=i.last,n!==null?n.sibling=o:t.child=o,i.last=o)}return i.tail!==null?(t=i.tail,i.rendering=t,i.tail=t.sibling,i.renderingStartTime=Q(),t.sibling=null,n=U.current,A(U,r?n&1|2:n&1),t):(oe(t),null);case 22:case 23:return Do(),r=t.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(t.flags|=8192),r&&t.mode&1?ge&1073741824&&(oe(t),t.subtreeFlags&6&&(t.flags|=8192)):oe(t),null;case 24:return null;case 25:return null}throw Error(v(156,t.tag))}function pp(e,t){switch(vo(t),t.tag){case 1:return ye(t.type)&&Kr(),e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 3:return 
un(),j(he),j(se),No(),e=t.flags,e&65536&&!(e&128)?(t.flags=e&-65537|128,t):null;case 5:return Co(t),null;case 13:if(j(U),e=t.memoizedState,e!==null&&e.dehydrated!==null){if(t.alternate===null)throw Error(v(340));ln()}return e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 19:return j(U),null;case 4:return un(),null;case 10:return So(t.type._context),null;case 22:case 23:return Do(),null;case 24:return null;default:return null}}var kr=!1,ue=!1,mp=typeof WeakSet=="function"?WeakSet:Set,S=null;function Yt(e,t){var n=e.ref;if(n!==null)if(typeof n=="function")try{n(null)}catch(r){B(e,t,r)}else n.current=null}function Ui(e,t,n){try{n()}catch(r){B(e,t,r)}}var Qu=!1;function hp(e,t){if(Ei=Br,e=ea(),ho(e)){if("selectionStart"in e)var n={start:e.selectionStart,end:e.selectionEnd};else e:{n=(n=e.ownerDocument)&&n.defaultView||window;var r=n.getSelection&&n.getSelection();if(r&&r.rangeCount!==0){n=r.anchorNode;var l=r.anchorOffset,i=r.focusNode;r=r.focusOffset;try{n.nodeType,i.nodeType}catch{n=null;break e}var o=0,u=-1,s=-1,c=0,m=0,h=e,p=null;t:for(;;){for(var g;h!==n||l!==0&&h.nodeType!==3||(u=o+l),h!==i||r!==0&&h.nodeType!==3||(s=o+r),h.nodeType===3&&(o+=h.nodeValue.length),(g=h.firstChild)!==null;)p=h,h=g;for(;;){if(h===e)break t;if(p===n&&++c===l&&(u=o),p===i&&++m===r&&(s=o),(g=h.nextSibling)!==null)break;h=p,p=h.parentNode}h=g}n=u===-1||s===-1?null:{start:u,end:s}}else n=null}n=n||{start:0,end:0}}else n=null;for(xi={focusedElem:e,selectionRange:n},Br=!1,S=t;S!==null;)if(t=S,e=t.child,(t.subtreeFlags&1028)!==0&&e!==null)e.return=t,S=e;else for(;S!==null;){t=S;try{var w=t.alternate;if(t.flags&1024)switch(t.tag){case 0:case 11:case 15:break;case 1:if(w!==null){var k=w.memoizedProps,D=w.memoizedState,f=t.stateNode,a=f.getSnapshotBeforeUpdate(t.elementType===t.type?k:Ie(t.type,k),D);f.__reactInternalSnapshotBeforeUpdate=a}break;case 3:var d=t.stateNode.containerInfo;d.nodeType===1?d.textContent="":d.nodeType===9&&d.documentElement&&d.removeChild(d.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(v(163))}}catch(y){B(t,t.return,y)}if(e=t.sibling,e!==null){e.return=t.return,S=e;break}S=t.return}return w=Qu,Qu=!1,w}function In(e,t,n){var r=t.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var i=l.destroy;l.destroy=void 0,i!==void 0&&Ui(t,n,i)}l=l.next}while(l!==r)}}function yl(e,t){if(t=t.updateQueue,t=t!==null?t.lastEffect:null,t!==null){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function $i(e){var t=e.ref;if(t!==null){var n=e.stateNode;switch(e.tag){case 5:e=n;break;default:e=n}typeof t=="function"?t(e):t.current=e}}function Ja(e){var t=e.alternate;t!==null&&(e.alternate=null,Ja(t)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(t=e.stateNode,t!==null&&(delete t[Ve],delete t[Kn],delete t[Ni],delete t[Jd],delete t[qd])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function qa(e){return e.tag===5||e.tag===3||e.tag===4}function Ku(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||qa(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function Vi(e,t,n){var 
r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.nodeType===8?n.parentNode.insertBefore(e,t):n.insertBefore(e,t):(n.nodeType===8?(t=n.parentNode,t.insertBefore(e,n)):(t=n,t.appendChild(e)),n=n._reactRootContainer,n!=null||t.onclick!==null||(t.onclick=Qr));else if(r!==4&&(e=e.child,e!==null))for(Vi(e,t,n),e=e.sibling;e!==null;)Vi(e,t,n),e=e.sibling}function Bi(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.insertBefore(e,t):n.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(Bi(e,t,n),e=e.sibling;e!==null;)Bi(e,t,n),e=e.sibling}var te=null,Ae=!1;function et(e,t,n){for(n=n.child;n!==null;)ba(e,t,n),n=n.sibling}function ba(e,t,n){if(Be&&typeof Be.onCommitFiberUnmount=="function")try{Be.onCommitFiberUnmount(sl,n)}catch{}switch(n.tag){case 5:ue||Yt(n,t);case 6:var r=te,l=Ae;te=null,et(e,t,n),te=r,Ae=l,te!==null&&(Ae?(e=te,n=n.stateNode,e.nodeType===8?e.parentNode.removeChild(n):e.removeChild(n)):te.removeChild(n.stateNode));break;case 18:te!==null&&(Ae?(e=te,n=n.stateNode,e.nodeType===8?Bl(e.parentNode,n):e.nodeType===1&&Bl(e,n),Vn(e)):Bl(te,n.stateNode));break;case 4:r=te,l=Ae,te=n.stateNode.containerInfo,Ae=!0,et(e,t,n),te=r,Ae=l;break;case 0:case 11:case 14:case 15:if(!ue&&(r=n.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var i=l,o=i.destroy;i=i.tag,o!==void 0&&(i&2||i&4)&&Ui(n,t,o),l=l.next}while(l!==r)}et(e,t,n);break;case 1:if(!ue&&(Yt(n,t),r=n.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=n.memoizedProps,r.state=n.memoizedState,r.componentWillUnmount()}catch(u){B(n,t,u)}et(e,t,n);break;case 21:et(e,t,n);break;case 22:n.mode&1?(ue=(r=ue)||n.memoizedState!==null,et(e,t,n),ue=r):et(e,t,n);break;default:et(e,t,n)}}function Xu(e){var t=e.updateQueue;if(t!==null){e.updateQueue=null;var n=e.stateNode;n===null&&(n=e.stateNode=new mp),t.forEach(function(r){var l=_p.bind(null,e,r);n.has(r)||(n.add(r),r.then(l,l))})}}function Re(e,t){var n=t.deletions;if(n!==null)for(var r=0;rl&&(l=o),r&=~i}if(r=l,r=Q()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*vp(r/1960))-r,10e?16:e,ot===null)var r=!1;else{if(e=ot,ot=null,ll=0,R&6)throw Error(v(331));var l=R;for(R|=4,S=e.current;S!==null;){var i=S,o=i.child;if(S.flags&16){var u=i.deletions;if(u!==null){for(var s=0;sQ()-Mo?zt(e,0):Ao|=n),ve(e,t)}function uc(e,t){t===0&&(e.mode&1?(t=fr,fr<<=1,!(fr&130023424)&&(fr=4194304)):t=1);var n=ce();e=Je(e,t),e!==null&&(er(e,t,n),ve(e,n))}function xp(e){var t=e.memoizedState,n=0;t!==null&&(n=t.retryLane),uc(e,n)}function _p(e,t){var n=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(n=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(v(314))}r!==null&&r.delete(t),uc(e,n)}var sc;sc=function(e,t,n){if(e!==null)if(e.memoizedProps!==t.pendingProps||he.current)me=!0;else{if(!(e.lanes&n)&&!(t.flags&128))return me=!1,fp(e,t,n);me=!!(e.flags&131072)}else me=!1,F&&t.flags&1048576&&fa(t,Gr,t.index);switch(t.lanes=0,t.tag){case 2:var r=t.type;Lr(e,t),e=t.pendingProps;var l=rn(t,se.current);en(t,n),l=zo(null,t,r,e,l,n);var i=Oo();return t.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(t.tag=1,t.memoizedState=null,t.updateQueue=null,ye(r)?(i=!0,Xr(t)):i=!1,t.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,xo(t),l.updater=ml,t.stateNode=l,l._reactInternals=t,Ri(t,r,e,n),t=Mi(null,t,r,!0,i,n)):(t.tag=0,F&&i&&yo(t),ae(null,t,l,n),t=t.child),t;case 16:r=t.elementType;e:{switch(Lr(e,t),e=t.pendingProps,l=r._init,r=l(r._payload),t.type=r,l=t.tag=Np(r),e=Ie(r,e),l){case 
0:t=Ai(null,t,r,e,n);break e;case 1:t=Bu(null,t,r,e,n);break e;case 11:t=$u(null,t,r,e,n);break e;case 14:t=Vu(null,t,r,Ie(r.type,e),n);break e}throw Error(v(306,r,""))}return t;case 0:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),Ai(e,t,r,l,n);case 1:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),Bu(e,t,r,l,n);case 3:e:{if(Qa(t),e===null)throw Error(v(387));r=t.pendingProps,i=t.memoizedState,l=i.element,ha(e,t),qr(t,r,null,n);var o=t.memoizedState;if(r=o.element,i.isDehydrated)if(i={element:r,isDehydrated:!1,cache:o.cache,pendingSuspenseBoundaries:o.pendingSuspenseBoundaries,transitions:o.transitions},t.updateQueue.baseState=i,t.memoizedState=i,t.flags&256){l=sn(Error(v(423)),t),t=Hu(e,t,r,n,l);break e}else if(r!==l){l=sn(Error(v(424)),t),t=Hu(e,t,r,n,l);break e}else for(we=ct(t.stateNode.containerInfo.firstChild),ke=t,F=!0,Me=null,n=wa(t,null,r,n),t.child=n;n;)n.flags=n.flags&-3|4096,n=n.sibling;else{if(ln(),r===l){t=qe(e,t,n);break e}ae(e,t,r,n)}t=t.child}return t;case 5:return ka(t),e===null&&Oi(t),r=t.type,l=t.pendingProps,i=e!==null?e.memoizedProps:null,o=l.children,_i(r,l)?o=null:i!==null&&_i(r,i)&&(t.flags|=32),Wa(e,t),ae(e,t,o,n),t.child;case 6:return e===null&&Oi(t),null;case 13:return Ka(e,t,n);case 4:return _o(t,t.stateNode.containerInfo),r=t.pendingProps,e===null?t.child=on(t,null,r,n):ae(e,t,r,n),t.child;case 11:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),$u(e,t,r,l,n);case 7:return ae(e,t,t.pendingProps,n),t.child;case 8:return ae(e,t,t.pendingProps.children,n),t.child;case 12:return ae(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,l=t.pendingProps,i=t.memoizedProps,o=l.value,A(Zr,r._currentValue),r._currentValue=o,i!==null)if(Fe(i.value,o)){if(i.children===l.children&&!he.current){t=qe(e,t,n);break e}}else for(i=t.child,i!==null&&(i.return=t);i!==null;){var u=i.dependencies;if(u!==null){o=i.child;for(var s=u.firstContext;s!==null;){if(s.context===r){if(i.tag===1){s=Ye(-1,n&-n),s.tag=2;var c=i.updateQueue;if(c!==null){c=c.shared;var m=c.pending;m===null?s.next=s:(s.next=m.next,m.next=s),c.pending=s}}i.lanes|=n,s=i.alternate,s!==null&&(s.lanes|=n),Ti(i.return,n,t),u.lanes|=n;break}s=s.next}}else if(i.tag===10)o=i.type===t.type?null:i.child;else if(i.tag===18){if(o=i.return,o===null)throw Error(v(341));o.lanes|=n,u=o.alternate,u!==null&&(u.lanes|=n),Ti(o,n,t),o=i.sibling}else o=i.child;if(o!==null)o.return=i;else for(o=i;o!==null;){if(o===t){o=null;break}if(i=o.sibling,i!==null){i.return=o.return,o=i;break}o=o.return}i=o}ae(e,t,l.children,n),t=t.child}return t;case 9:return l=t.type,r=t.pendingProps.children,en(t,n),l=Oe(l),r=r(l),t.flags|=1,ae(e,t,r,n),t.child;case 14:return r=t.type,l=Ie(r,t.pendingProps),l=Ie(r.type,l),Vu(e,t,r,l,n);case 15:return Ba(e,t,t.type,t.pendingProps,n);case 17:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),Lr(e,t),t.tag=1,ye(r)?(e=!0,Xr(t)):e=!1,en(t,n),va(t,r,l),Ri(t,r,l,n),Mi(null,t,r,!0,e,n);case 19:return Xa(e,t,n);case 22:return Ha(e,t,n)}throw Error(v(156,t.tag))};function ac(e,t){return Ms(e,t)}function Cp(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Ne(e,t,n,r){return new Cp(e,t,n,r)}function Uo(e){return 
e=e.prototype,!(!e||!e.isReactComponent)}function Np(e){if(typeof e=="function")return Uo(e)?1:0;if(e!=null){if(e=e.$$typeof,e===ro)return 11;if(e===lo)return 14}return 2}function mt(e,t){var n=e.alternate;return n===null?(n=Ne(e.tag,t,e.key,e.mode),n.elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.type=e.type,n.flags=0,n.subtreeFlags=0,n.deletions=null),n.flags=e.flags&14680064,n.childLanes=e.childLanes,n.lanes=e.lanes,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=t===null?null:{lanes:t.lanes,firstContext:t.firstContext},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function Ar(e,t,n,r,l,i){var o=2;if(r=e,typeof e=="function")Uo(e)&&(o=1);else if(typeof e=="string")o=5;else e:switch(e){case Ut:return Ot(n.children,l,i,t);case no:o=8,l|=8;break;case ni:return e=Ne(12,n,t,l|2),e.elementType=ni,e.lanes=i,e;case ri:return e=Ne(13,n,t,l),e.elementType=ri,e.lanes=i,e;case li:return e=Ne(19,n,t,l),e.elementType=li,e.lanes=i,e;case gs:return gl(n,l,i,t);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case ys:o=10;break e;case vs:o=9;break e;case ro:o=11;break e;case lo:o=14;break e;case tt:o=16,r=null;break e}throw Error(v(130,e==null?e:typeof e,""))}return t=Ne(o,n,t,l),t.elementType=e,t.type=r,t.lanes=i,t}function Ot(e,t,n,r){return e=Ne(7,e,r,t),e.lanes=n,e}function gl(e,t,n,r){return e=Ne(22,e,r,t),e.elementType=gs,e.lanes=n,e.stateNode={isHidden:!1},e}function Zl(e,t,n){return e=Ne(6,e,null,t),e.lanes=n,e}function Jl(e,t,n){return t=Ne(4,e.children!==null?e.children:[],e.key,t),t.lanes=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function Pp(e,t,n,r,l){this.tag=t,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=Ll(0),this.expirationTimes=Ll(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=Ll(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function $o(e,t,n,r,l,i,o,u,s){return e=new Pp(e,t,n,u,s),t===1?(t=1,i===!0&&(t|=8)):t=0,i=Ne(3,null,null,t),e.current=i,i.stateNode=e,i.memoizedState={element:r,isDehydrated:n,cache:null,transitions:null,pendingSuspenseBoundaries:null},xo(i),e}function zp(e,t,n){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(t)}catch(n){console.error(n)}}t(),e.exports=Ee})(Of);var pc,ts=bl;pc=ts.createRoot,ts.hydrateRoot;const q=new 
kf,Ip=["audio-classification","audio-to-audio","automatic-speech-recognition","conversational","depth-estimation","document-question-answering","feature-extraction","fill-mask","graph-ml","image-classification","image-segmentation","image-to-image","image-to-text","multiple-choice","object-detection","other","question-answering","reinforcement-learning","robotics","sentence-similarity","summarization","table-question-answering","table-to-text","tabular-classification","tabular-regression","tabular-to-text","text-classification","text-generation","text-retrieval","text-to-image","text-to-speech","text2text-generation","time-series-forecasting","token-classification","translation","unconditional-image-generation","video-classification","visual-question-answering","voice-activity-detection","zero-shot-classification","zero-shot-image-classification"].filter(e=>Object.getOwnPropertyNames(q).includes(zf(e))),ql={},Ap=async e=>{if(ql[e])return ql[e];const t=[];for await(const n of Kc({search:{task:e}}))t.push(n);return t.sort((n,r)=>n.downloads>r.downloads?-1:n.downloadsr.likes?-1:n.likesr.name?-1:n.nameze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Task"}),ze("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:t=>e.setTask(t.target.value),placeholder:"Select a task",value:e.task,children:[P("option",{children:"Select a task"}),Ip.map(t=>P("option",{value:t,children:t},t))]})]}),jp=e=>{const[t,n]=ne.useState(!1),[r,l]=ne.useState([]);return ne.useEffect(()=>{e.task&&(n(!0),Ap(e.task).then(i=>l(i)).finally(()=>n(!1)))},[e.task]),r.length>0?ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Model"}),ze("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:i=>e.setModel(i.target.value),placeholder:"Select a model",value:e.model,children:[P("option",{children:"Select a model"}),r.map(i=>P("option",{value:i.name,children:i.name},i.name))]})]}):P("p",{className:"text-center w-full",children:e.task?t?"Loading models for this task":"No models available for this task":"Select a task to view available models"})},Dp=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Inputs"}),e.inputs?P("audio",{className:"w-full",controls:!0,src:URL.createObjectURL(e.inputs)}):ze("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",P("input",{accept:"audio/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInputs(t.target.files[0])},type:"file"})]})]}),Fp=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Inputs"}),e.inputs?P("img",{className:"w-full",src:URL.createObjectURL(e.inputs)}):ze("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",P("input",{accept:"image/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInputs(t.target.files[0])},type:"file"})]})]}),Up=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Inputs"}),P("input",{className:"bg-yellow-200 py-6 text-center 
w-full",onChange:t=>{t.target.value?e.setInputs(t.target.value):e.setInputs("")},type:"text",value:e.inputs??""})]}),$p=e=>e.model&&e.task?["audio-classification","automatic-speech-recognition"].includes(e.task)?P(Dp,{inputs:e.inputs,model:e.model,setInputs:e.setInputs,task:e.task}):["image-classification","image-segmentation","object-detection"].includes(e.task)?P(Fp,{inputs:e.inputs,model:e.model,setInputs:e.setInputs,task:e.task}):["conversational","feature-extraction","fill-mask","question-answering","summarization","table-question-answering","text-classification","text-generation","text-to-image","token-classification","translation","zero-shot-classification"].includes(e.task)?P(Up,{inputs:e.inputs,model:e.model,setInputs:e.setInputs,task:e.task}):P("div",{className:"w-full",children:P("p",{className:"text-center",children:"Inference for this task is not yet supported."})}):P(ne.Fragment,{}),Vp=e=>{if(e.inputs&&e.model&&e.task){const t=()=>{e.setInputs(void 0),e.setOutput(void 0)};return P("button",{className:`border-4 border-yellow-200 py-6 text-center w-full ${e.loading?"cursor-not-allowed opacity-50":""}`,disabled:e.loading,onClick:t,children:"Clear"})}return P(ne.Fragment,{})},Bp=e=>{if(e.inputs&&e.model&&e.task){const t=async()=>{if(e.inputs&&e.model&&e.task){e.setLoading(!0);try{switch(e.task){case"audio-classification":{const n=await q.audioClassification({data:e.inputs,model:e.model});e.setOutput(n);break}case"automatic-speech-recognition":{const n=await q.automaticSpeechRecognition({data:e.inputs,model:e.model});e.setOutput(n);break}case"conversational":{const n=await q.conversational({inputs:{text:e.inputs},model:e.model});e.setOutput(n);break}case"feature-extraction":{const n=await q.featureExtraction({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"fill-mask":{const n=await q.fillMask({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"image-classification":{const n=await q.imageClassification({data:e.inputs,model:e.model});e.setOutput(n);break}case"image-segmentation":{const n=await q.imageSegmentation({data:e.inputs,model:e.model});e.setOutput(n);break}case"object-detection":{const n=await q.objectDetection({data:e.inputs,model:e.model});e.setOutput(n);break}case"question-answering":{const n=await q.questionAnswering({inputs:{context:e.inputs,question:e.inputs},model:e.model});e.setOutput(n);break}case"summarization":{const n=await q.summarization({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"table-question-answering":{const n=await q.tableQuestionAnswering({inputs:{query:e.inputs,table:{[e.inputs]:[e.inputs]}},model:e.model});e.setOutput(n);break}case"text-classification":{const n=await q.textClassification({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"text-generation":{const n=await q.textGeneration({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"text-to-image":{const n=await q.textToImage({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"token-classification":{const n=await q.tokenClassification({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"translation":{const n=await q.translation({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"zero-shot-classification":{const n=await q.zeroShotClassification({inputs:e.inputs,model:e.model,parameters:{candidate_labels:[e.inputs]}});e.setOutput(n);break}}}catch(n){n instanceof Error&&e.setOutput(n.message)}e.setLoading(!1)}};return P("button",{className:`bg-yellow-200 py-6 text-center w-full ${e.loading?"cursor-not-allowed 
opacity-50":""}`,disabled:e.loading,onClick:t,children:e.loading?"Submitting":"Submit"})}return P(ne.Fragment,{})},Hp=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Output"}),P("img",{className:`w-full ${e.loading?"cursor-wait opacity-50":""}`,src:URL.createObjectURL(e.output)})]}),Wp=e=>{const t=(()=>{try{return JSON.stringify(e.output,void 0,2)}catch(n){if(n instanceof Error)return`Error during JSON.stringify: ${n.message}`}})();return ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Output"}),P("pre",{className:`bg-yellow-200 p-6 select-text w-full whitespace-pre-wrap ${e.loading?"cursor-wait opacity-50":""}`,children:t})]})},Qp=e=>e.output&&e.task?["text-to-image"].includes(e.task)?P(Hp,{loading:e.loading,output:e.output}):P(Wp,{loading:e.loading,output:e.output}):P(ne.Fragment,{}),Kp=()=>{const[e,t]=ne.useState(),[n,r]=ne.useState(),[l,i]=ne.useState(),[o,u]=ne.useState(!1),[s,c]=ne.useState();return console.log("App",{task:e,model:n,inputs:l,loading:o,output:s}),P("div",{className:"bg-yellow-500 flex flex-col h-full items-center min-h-screen min-w-screen overflow-auto w-full",children:ze("div",{className:"flex flex-col items-center justify-center py-24 space-y-12 w-2/3 lg:w-1/3",children:[P("header",{className:"text-center text-6xl",children:"🤗"}),P(Mp,{setTask:t,task:e}),P(jp,{model:n,setModel:r,task:e}),P($p,{inputs:l,model:n,setInputs:i,task:e}),P(Vp,{inputs:l,loading:o,model:n,setInputs:i,setOutput:c,task:e}),P(Bp,{inputs:l,loading:o,model:n,setLoading:u,setOutput:c,task:e}),P(Qp,{loading:o,output:s,task:e})]})})},Xp=()=>{const e="root",t=document.getElementById(e);if(t){const n=pc(t),r=P(ne.StrictMode,{children:P(Kp,{})});n.render(r)}};Xp(); diff --git a/spaces/almakedon/faster-whisper-webui/src/whisper/fasterWhisperContainer.py b/spaces/almakedon/faster-whisper-webui/src/whisper/fasterWhisperContainer.py deleted file mode 100644 index 5bd640eeba90f7ad2c6a2795ed14e40d30e90c4c..0000000000000000000000000000000000000000 --- a/spaces/almakedon/faster-whisper-webui/src/whisper/fasterWhisperContainer.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -from typing import List, Union - -from faster_whisper import WhisperModel, download_model -from src.config import ModelConfig, VadInitialPromptMode -from src.hooks.progressListener import ProgressListener -from src.languages import get_language_from_name -from src.modelCache import ModelCache -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback, AbstractWhisperContainer -from src.utils import format_timestamp - -class FasterWhisperContainer(AbstractWhisperContainer): - def __init__(self, model_name: str, device: str = None, compute_type: str = "float16", - download_root: str = None, - cache: ModelCache = None, models: List[ModelConfig] = []): - super().__init__(model_name, device, compute_type, download_root, cache, models) - - def ensure_downloaded(self): - """ - Ensure that the model is downloaded. This is useful if you want to ensure that the model is downloaded before - passing the container to a subprocess. - """ - model_config = self._get_model_config() - - if os.path.isdir(model_config.url): - model_config.path = model_config.url - else: - model_config.path = download_model(model_config.url, output_dir=self.download_root) - - def _get_model_config(self) -> ModelConfig: - """ - Get the model configuration for the model. 
- """ - for model in self.models: - if model.name == self.model_name: - return model - return None - - def _create_model(self): - print("Loading faster whisper model " + self.model_name + " for device " + str(self.device)) - model_config = self._get_model_config() - model_url = model_config.url - - if model_config.type == "whisper": - if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]: - raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.") - if model_url == "large": - # large is an alias for large-v1 - model_url = "large-v1" - - device = self.device - - if (device is None): - device = "auto" - - model = WhisperModel(model_url, device=device, compute_type=self.compute_type) - return model - - def create_callback(self, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict) -> AbstractWhisperCallback: - """ - Create a WhisperCallback object that can be used to transcript audio files. - - Parameters - ---------- - language: str - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - prompt_strategy: AbstractPromptStrategy - The prompt strategy to use. If not specified, the prompt from Whisper will be used. - decodeOptions: dict - Additional options to pass to the decoder. Must be pickleable. - - Returns - ------- - A WhisperCallback object. - """ - return FasterWhisperCallback(self, language=language, task=task, prompt_strategy=prompt_strategy, **decodeOptions) - -class FasterWhisperCallback(AbstractWhisperCallback): - def __init__(self, model_container: FasterWhisperContainer, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict): - self.model_container = model_container - self.language = language - self.task = task - self.prompt_strategy = prompt_strategy - self.decodeOptions = decodeOptions - - self._printed_warning = False - - def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None): - """ - Peform the transcription of the given audio file or data. - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor] - The audio file to transcribe, or the audio data as a numpy array or torch tensor. - segment_index: int - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - progress_listener: ProgressListener - A callback to receive progress updates. 
- """ - model: WhisperModel = self.model_container.get_model() - language_code = self._lookup_language_code(self.language) if self.language else None - - # Copy decode options and remove options that are not supported by faster-whisper - decodeOptions = self.decodeOptions.copy() - verbose = decodeOptions.pop("verbose", None) - - logprob_threshold = decodeOptions.pop("logprob_threshold", None) - - patience = decodeOptions.pop("patience", None) - length_penalty = decodeOptions.pop("length_penalty", None) - suppress_tokens = decodeOptions.pop("suppress_tokens", None) - - if (decodeOptions.pop("fp16", None) is not None): - if not self._printed_warning: - print("WARNING: fp16 option is ignored by faster-whisper - use compute_type instead.") - self._printed_warning = True - - # Fix up decode options - if (logprob_threshold is not None): - decodeOptions["log_prob_threshold"] = logprob_threshold - - decodeOptions["patience"] = float(patience) if patience is not None else 1.0 - decodeOptions["length_penalty"] = float(length_penalty) if length_penalty is not None else 1.0 - - # See if supress_tokens is a string - if so, convert it to a list of ints - decodeOptions["suppress_tokens"] = self._split_suppress_tokens(suppress_tokens) - - initial_prompt = self.prompt_strategy.get_segment_prompt(segment_index, prompt, detected_language) \ - if self.prompt_strategy else prompt - - segments_generator, info = model.transcribe(audio, \ - language=language_code if language_code else detected_language, task=self.task, \ - initial_prompt=initial_prompt, \ - **decodeOptions - ) - - segments = [] - - for segment in segments_generator: - segments.append(segment) - - if progress_listener is not None: - progress_listener.on_progress(segment.end, info.duration) - if verbose: - print("[{}->{}] {}".format(format_timestamp(segment.start, True), format_timestamp(segment.end, True), - segment.text)) - - text = " ".join([segment.text for segment in segments]) - - # Convert the segments to a format that is easier to serialize - whisper_segments = [{ - "text": segment.text, - "start": segment.start, - "end": segment.end, - - # Extra fields added by faster-whisper - "words": [{ - "start": word.start, - "end": word.end, - "word": word.word, - "probability": word.probability - } for word in (segment.words if segment.words is not None else []) ] - } for segment in segments] - - result = { - "segments": whisper_segments, - "text": text, - "language": info.language if info else None, - - # Extra fields added by faster-whisper - "language_probability": info.language_probability if info else None, - "duration": info.duration if info else None - } - - # If we have a prompt strategy, we need to increment the current prompt - if self.prompt_strategy: - self.prompt_strategy.on_segment_finished(segment_index, prompt, detected_language, result) - - if progress_listener is not None: - progress_listener.on_finished() - return result - - def _split_suppress_tokens(self, suppress_tokens: Union[str, List[int]]): - if (suppress_tokens is None): - return None - if (isinstance(suppress_tokens, list)): - return suppress_tokens - - return [int(token) for token in suppress_tokens.split(",")] - - def _lookup_language_code(self, language: str): - language = get_language_from_name(language) - - if language is None: - raise ValueError("Invalid language: " + language) - - return language.code diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/examples/paex_wmme_surround.c b/spaces/amarchheda/ChordDuplicate/portaudio/examples/paex_wmme_surround.c 
deleted file mode 100644 index 55fc2551316fc00effee811c5c9ddf0ba8f49d76..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/examples/paex_wmme_surround.c +++ /dev/null @@ -1,210 +0,0 @@ -/** @file paex_wmme_surround.c - @ingroup examples_src - @brief Use WMME-specific channelMask to request 5.1 surround sound output. - @author Ross Bencina -*/ -/* - * $Id: $ - * Portable Audio I/O Library - * Windows MME surround sound output test - * - * Copyright (c) 2007 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include <stdio.h> -#include <math.h> - -#include <windows.h> /* required when using pa_win_wmme.h */ -#include <mmsystem.h> /* required when using pa_win_wmme.h */ - -#include "portaudio.h" -#include "pa_win_wmme.h" - -#define NUM_SECONDS (12) -#define SAMPLE_RATE (44100) -#define FRAMES_PER_BUFFER (64) - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (100) - -#define CHANNEL_COUNT (6) - - - -typedef struct -{ - float sine[TABLE_SIZE]; - int phase; - int currentChannel; - int cycleCount; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may be called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i,j; - - (void) timeInfo; /* Prevent unused variable warnings. 
*/ - (void) statusFlags; - (void) inputBuffer; - - for( i=0; icurrentChannel && data->cycleCount < 4410 ){ - *out++ = data->sine[data->phase]; - data->phase += 1 + j; // play each channel at a different pitch so they can be distinguished - if( data->phase >= TABLE_SIZE ){ - data->phase -= TABLE_SIZE; - } - }else{ - *out++ = 0; - } - } - - data->cycleCount++; - if( data->cycleCount > 44100 ){ - data->cycleCount = 0; - - ++data->currentChannel; - if( data->currentChannel >= CHANNEL_COUNT ) - data->currentChannel -= CHANNEL_COUNT; - } - } - - return paContinue; -} - -/*******************************************************************/ -int main(int argc, char* argv[]) -{ - PaStreamParameters outputParameters; - PaWinMmeStreamInfo wmmeStreamInfo; - PaStream *stream; - PaError err; - paTestData data; - int i; - int deviceIndex; - - printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT); - - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paMME ) )->defaultOutputDevice; - if( argc == 2 ){ - sscanf( argv[1], "%d", &deviceIndex ); - } - - printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name ); - - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowOutputLatency; - outputParameters.hostApiSpecificStreamInfo = NULL; - - /* it's not strictly necessary to provide a channelMask for surround sound - output. But if you want to be sure which channel mask PortAudio will use - then you should supply one */ - wmmeStreamInfo.size = sizeof(PaWinMmeStreamInfo); - wmmeStreamInfo.hostApiType = paMME; - wmmeStreamInfo.version = 1; - wmmeStreamInfo.flags = paWinMmeUseChannelMask; - wmmeStreamInfo.channelMask = PAWIN_SPEAKER_5POINT1; /* request 5.1 output format */ - outputParameters.hostApiSpecificStreamInfo = &wmmeStreamInfo; - - - if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported ){ - printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT ); - }else{ - printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT ); - } - - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("Play for %d seconds.\n", NUM_SECONDS ); - Pa_Sleep( NUM_SECONDS * 1000 ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("Test finished.\n"); - - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/ardha27/rvc-models/infer_pack/transforms.py b/spaces/ardha27/rvc-models/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/ardha27/rvc-models/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 
-DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", 
value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/arixiii/open-reverse-proxy/Dockerfile b/spaces/arixiii/open-reverse-proxy/Dockerfile deleted file mode 100644 index 6953fc05439efb70991552cf56f28365b5b6c15b..0000000000000000000000000000000000000000 --- a/spaces/arixiii/open-reverse-proxy/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18 - -WORKDIR /app - -RUN npm install express express-http-proxy - -COPY . . 
- -EXPOSE 7860 - -CMD [ "node", "server.js" ] \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/phonemizer.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/phonemizer.py deleted file mode 100644 index 727c881e1062badc57df7418aa07e7434d57335c..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/phonemizer.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import List - -import jieba -import pypinyin - -from .pinyinToPhonemes import PINYIN_DICT - - -def _chinese_character_to_pinyin(text: str) -> List[str]: - pinyins = pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True) - pinyins_flat_list = [item for sublist in pinyins for item in sublist] - return pinyins_flat_list - - -def _chinese_pinyin_to_phoneme(pinyin: str) -> str: - segment = pinyin[:-1] - tone = pinyin[-1] - phoneme = PINYIN_DICT.get(segment, [""])[0] - return phoneme + tone - - -def chinese_text_to_phonemes(text: str, seperator: str = "|") -> str: - tokenized_text = jieba.cut(text, HMM=False) - tokenized_text = " ".join(tokenized_text) - pinyined_text: List[str] = _chinese_character_to_pinyin(tokenized_text) - - results: List[str] = [] - - for token in pinyined_text: - if token[-1] in "12345": # TODO transform to is_pinyin() - pinyin_phonemes = _chinese_pinyin_to_phoneme(token) - - results += list(pinyin_phonemes) - else: # is ponctuation or other - results += list(token) - - return seperator.join(results) diff --git a/spaces/artificialimagination/ai_detect_v0.1/app.py b/spaces/artificialimagination/ai_detect_v0.1/app.py deleted file mode 100644 index a2bc4e6f8578c06c53ab50a9483e9ce9227e10d1..0000000000000000000000000000000000000000 --- a/spaces/artificialimagination/ai_detect_v0.1/app.py +++ /dev/null @@ -1,331 +0,0 @@ -import datetime -import os -import subprocess -import pathlib -import time -import torch -import torchaudio -import traceback - -import xgboost as xgb -import numpy as np - -# from pydub import AudioSegment -# from pydub.silence import split_on_silence -from spleeter.separator import Separator -from spleeter.audio.adapter import AudioAdapter - -from transformers import ( - Wav2Vec2Processor, - HubertForCTC, - AutoProcessor, - HubertModel, - HubertForSequenceClassification, - Wav2Vec2FeatureExtractor - ) - - -# TARGET_SR = 22050 -TARGET_SR = 16_000 -NUM_SAMPLES = 5 * TARGET_SR - -MODELS_OUTPUT_DIR = os.getcwd() - - -# https://web.stanford.edu/~nanbhas/blog/forward-hooks-pytorch/#extracting-activations-from-a-layer -class SaveOutputHook: - def __init__(self): - self.outputs = None - - def __call__(self, module, module_in, module_out): - self.outputs = module_out.detach().numpy() - - def clear(self): - self.outputs = None - - - -def copy_from_cloud(cloud_model_path, verbose): - local_path = os.path.join(MODELS_OUTPUT_DIR, - os.path.split(cloud_model_path)[-1]) - subprocess.run( - ["gsutil","-m", "cp", "-r", cloud_model_path, local_path], - check=True - ) - - if verbose: - print(f'Copied from {cloud_model_path} to {local_path}') - - return local_path - - -def model_from_cloud(cloud_path, verbose=False): - local_path = copy_from_cloud(cloud_path, verbose) - return xgb.Booster(model_file=local_path) - - -class XGB_AIDetect: - - def __init__( - self, - xgb_model_filename, # path to the model file - number_of_samples=NUM_SAMPLES, - target_sample_rate=TARGET_SR, - ): - self.xgb_model_filename = xgb_model_filename - 
self.target_sample_rate = target_sample_rate - # self.transform_name = transform_name - self.number_of_samples = number_of_samples - # self.vocal_extractor = None - self.model = None - self.initialized = False - - if torch.cuda.is_available(): - self.device = "cuda" - else: - self.device = "cpu" - return - - def initialize(self): - # initialize the model - # self.model = model_from_cloud(self.xgb_model_filename, verbose=True) - self.model = xgb.Booster(model_file=self.xgb_model_filename) - - # initial vocal extractor - self.vocal_extractor = Separator('spleeter:2stems') - - self.sid_model = HubertForSequenceClassification.from_pretrained( - "superb/hubert-base-superb-sid") - self.sid_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( - "superb/hubert-base-superb-sid") - self.hook = SaveOutputHook() # for grabbing the penultimate layer of the SID model - - if self.model and self.sid_model and self.sid_feature_extractor: - self.initialized = True - else: - raise Exception("Ai Detection improperly initialized. Please try again.") - - return - - def normalize_audio( - self, - audio, - sample_rate, - target_sample_rate=TARGET_SR, - number_samples=NUM_SAMPLES, - ): - # TODO: sort this out, working ok with out because we load the audio and set the sample rate - # resample to make sure it is what the model expects - # resampler = torchaudio.transforms.Resample(sample_rate, target_sample_rate) - # audio = resampler(audio) - - # # mix down to mono 2 -> 1 channel - # if audio.shape[0] > 1: - # audio = torch.mean(audio, dim=0, keepdim=True) - - # clip - if audio.shape[1] > self.number_of_samples: - audio = audio[:, :self.number_of_samples] - - # pad - length_audio = audio.shape[1] - if length_audio < self.number_of_samples: - pad = self.number_of_samples - length_audio - padding = (0, pad) - audio = torch.nn.functional.pad(audio, padding) - - return audio - - def make_slices(self, audio_squeeze): - duration = self.number_of_samples - output = [] - - # handle stereo - if audio_squeeze.shape[0] == 2: - for i in range(0, len(audio_squeeze[1]), duration): - if (i + duration) <= len(audio_squeeze[1]): - output.append(audio_squeeze[:2, i:i + duration]) - else: - print("yo") - print(f"{len(audio_squeeze[1]) - duration}") - output.append(audio_squeeze[:2, len(audio_squeeze[1]) - duration:len(audio_squeeze[1])]) - else: - for i in range(0, len(audio_squeeze), duration): - if (i + duration) <= len(audio_squeeze): - output.append(audio_squeeze[i:i + duration]) - else: - output.append(audio_squeeze[len(audio_squeeze) - duration:len(audio_squeeze)]) - return output - - def extract_vox(self, audio): - try: - prediction = self.vocal_extractor.separate(audio) - except: - print('audio shape:', audio.shape) - raise - vox = prediction["vocals"] - return vox - - # Not in use for inference - # def strip_silences(self, data): - # sound = AudioSegment.from_raw(data) - # chunks = split_on_silence( - # sound, - - # # split on silences longer than 1000ms (1 sec) - # min_silence_len=100, - - # # anything under -16 dBFS is considered silence - # silence_thresh=-16, - - # # keep 200 ms of leading/trailing silence - # keep_silence=200 - # ) - # output = AudioSegment.empty() - # for c in chunks: - # output += c - # return output - - def shape_audio(self, audio): - # made for LFCC or MFCC - pad = torch.zeros(1, 64 - 13, 214) - pad = pad.to(self.device) - audio = torch.cat([audio, pad], dim=1) - pad_2 = torch.zeros(1, 64, 216 - 214) - pad_2 = pad_2.to(self.device) - audio = torch.cat([audio, pad_2], dim=2) - return audio - 
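The `make_embedding` method that follows relies on the `SaveOutputHook` defined near the top of this file: the hook is registered on the SID model's `projector` layer, a forward pass fills it with the penultimate activations, and the handle is removed afterwards. Below is a minimal, self-contained sketch of that forward-hook pattern, with a toy module standing in for the HuBERT classifier (every name in the sketch is illustrative, not taken from the original file):

```python
import torch
import torch.nn as nn

class CaptureHook:
    """Keep the detached output of whichever module it is registered on."""
    def __init__(self):
        self.outputs = None
    def __call__(self, module, module_in, module_out):
        self.outputs = module_out.detach()

toy = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
hook = CaptureHook()
handle = toy[0].register_forward_hook(hook)  # attach to the layer of interest
_ = toy(torch.randn(3, 8))                   # forward pass populates hook.outputs
features = hook.outputs                      # (3, 4) activations of the first Linear
handle.remove()                              # detach the hook once features are captured
```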
- def make_embedding(self, audio): - inputs = self.sid_feature_extractor( - audio, sampling_rate=self.target_sample_rate, padding=True, - return_tensors="pt") - # print() - handle = self.sid_model.projector.register_forward_hook(self.hook) - - model_output = self.sid_model( - input_values=inputs['input_values'].squeeze(), - attention_mask=inputs['attention_mask'].squeeze(), - ) - - penultimate_outputs = self.hook.outputs - # print('penultimates shape:', penultimate_outputs.shape) - handle.remove() - self.hook.clear() - return penultimate_outputs.mean(axis=0).flatten() - - - def prepare(self, input_file): - audio_loader = AudioAdapter.default() - waveform, sr = audio_loader.load( - input_file, - sample_rate=self.target_sample_rate) - - # waveform, sr = torchaudio.load(input_file) - - print(f"Loaded audio at sr: {sr}") - - # extract vocals - vox = self.extract_vox(waveform) - - # TODO - # vox = self.strip_silences(vox) - - # process data - # convert vox to tensor - audio = torch.from_numpy(vox) - - # swap dims - audio = audio.permute(1, 0) - - # # make mono. # jarfa: commented this out, the embeddings run on stereo - # # if audio.shape[0] > 1: - # audio = torch.mean(audio, dim=0, keepdim=True) - - # # get rid of one dim - # audio_squeeze = torch.squeeze(audio) - - # audio_squeeze = torchaudio.functional.vad( - # audio_squeeze, - # self.target_sample_rate) - - # make slices here - audio_slices = self.make_slices(audio) #(audio_squeeze) - - # # apply transform. # not applicable for this one - # input_list = [self.transform(i) for i in audio_slices] - - # # unsqueeze to add dimension - # input_list = [torch.unsqueeze(i, 0) for i in input_list] - - # if self.transform_name in ["MFCC", "LFCC"]: - # input_list = [self.shape_audio(i) for i in input_list] - - # TODO: make embedding - input_list = [ - self.make_embedding(aslice.to(self.device)) - for aslice in audio_slices - ] - return input_list - - - def predict(self, input_list): - batch = xgb.DMatrix( - np.stack(input_list, axis=0) - ) - probs = self.model.predict(batch) - return list(probs) - - - def run_prediction(self, input_file): - start_time = time.process_time() - input_list = self.prepare(input_file) - predictions = self.predict(input_list) - elapsed_time = time.process_time() - start_time - output = {} - output["file_name"] = input_file - output["model"] = self.xgb_model_filename - output["elapsed_time"] = str(datetime.timedelta(seconds=elapsed_time)) - output["predictions"] = self.pretty_response(predictions) - print(predictions) - return output - - def pretty_response(self, predictions): - output = {} - for i, pred in enumerate(predictions): - secs = i * 5 - t = str(datetime.timedelta(seconds=secs)) - output[t] = str(pred) - return output - - -if __name__ == '__main__': - # just for testing I literally downloaded this from gs://artificial_imagination/models/xgb_v0_epoch010_20230829_07_30.bin - # and uploaded it to the local colab storage - detect = XGB_AIDetect( - 'models_xgb_v0_epoch010_20230829_07_30.bin') - - # intitialize everything needed to detect from mp3 to prediction - detect.initialize() - - import json - - import gradio as gr - - - def inter_predict(local_file): - try: - response = detect.run_prediction(local_file) - except Exception as e: - traceback.print_exc(e) - raise e - - return json.dumps(response, indent=2) - - - demo = gr.Interface(fn=inter_predict, - inputs=gr.Audio(source="upload", type="filepath"), - outputs="text", - cache_examples=True, - ) - demo.launch() \ No newline at end of file diff --git 
a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/stacked_bar_chart_with_text.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/stacked_bar_chart_with_text.py deleted file mode 100644 index 180d20871a3f8ab9161ab1cfa7ded80f7b6f1431..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/stacked_bar_chart_with_text.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -Stacked Bar Chart with Text Overlay -=================================== -This example shows how to overlay text on a stacked bar chart. For both the -bar and text marks, we use the ``stack`` argument in the ``x`` encoding to -cause the values to be stacked horizontally. -""" -# category: bar charts -import altair as alt -from vega_datasets import data - -source=data.barley() - -bars = alt.Chart(source).mark_bar().encode( - x=alt.X('sum(yield):Q', stack='zero'), - y=alt.Y('variety:N'), - color=alt.Color('site') -) - -text = alt.Chart(source).mark_text(dx=-15, dy=3, color='white').encode( - x=alt.X('sum(yield):Q', stack='zero'), - y=alt.Y('variety:N'), - detail='site:N', - text=alt.Text('sum(yield):Q', format='.1f') -) - -bars + text diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/Token.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/Token.py deleted file mode 100644 index 6f4d5e2629e544b5e75eec65f9e2fd64a9588984..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/Token.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -# Use of this file is governed by the BSD 3-clause license that -# can be found in the LICENSE.txt file in the project root. -# - -# A token has properties: text, type, line, character position in the line -# (so we can ignore tabs), token channel, index, and source from which -# we obtained this token. -from io import StringIO - - -class Token (object): - - INVALID_TYPE = 0 - - # During lookahead operations, this "token" signifies we hit rule end ATN state - # and did not follow it despite needing to. - EPSILON = -2 - - MIN_USER_TOKEN_TYPE = 1 - - EOF = -1 - - # All tokens go to the parser (unless skip() is called in that rule) - # on a particular "channel". The parser tunes to a particular channel - # so that whitespace etc... can go to the parser on a "hidden" channel. - - DEFAULT_CHANNEL = 0 - - # Anything on different channel than DEFAULT_CHANNEL is not parsed - # by parser. - - HIDDEN_CHANNEL = 1 - - def __init__(self): - self.source = None - self.type = None # token type of the token - self.channel = None # The parser ignores everything not on DEFAULT_CHANNEL - self.start = None # optional; return -1 if not implemented. - self.stop = None # optional; return -1 if not implemented. - self.tokenIndex = None # from 0..n-1 of the token object in the input stream - self.line = None # line=1..n of the 1st character - self.column = None # beginning of the line at which it occurs, 0..n-1 - self._text = None # text of the token. - - @property - def text(self): - return self._text - - # Explicitly set the text for this token. If {code text} is not - # {@code null}, then {@link #getText} will return this value rather than - # extracting the text from the input. - # - # @param text The explicit text of the token, or {@code null} if the text - # should be obtained from the input along with the start and stop indexes - # of the token. 
- - @text.setter - def text(self, text:str): - self._text = text - - - def getTokenSource(self): - return self.source[0] - - def getInputStream(self): - return self.source[1] - -class CommonToken(Token): - - - # An empty {@link Pair} which is used as the default value of - # {@link #source} for tokens that do not have a source. - EMPTY_SOURCE = (None, None) - - def __init__(self, source:tuple = EMPTY_SOURCE, type:int = None, channel:int=Token.DEFAULT_CHANNEL, start:int=-1, stop:int=-1): - super().__init__() - self.source = source - self.type = type - self.channel = channel - self.start = start - self.stop = stop - self.tokenIndex = -1 - if source[0] is not None: - self.line = source[0].line - self.column = source[0].column - else: - self.column = -1 - - # Constructs a new {@link CommonToken} as a copy of another {@link Token}. - # - #

        - # If {@code oldToken} is also a {@link CommonToken} instance, the newly - # constructed token will share a reference to the {@link #text} field and - # the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will - # be assigned the result of calling {@link #getText}, and {@link #source} - # will be constructed from the result of {@link Token#getTokenSource} and - # {@link Token#getInputStream}.

        - # - # @param oldToken The token to copy. - # - def clone(self): - t = CommonToken(self.source, self.type, self.channel, self.start, self.stop) - t.tokenIndex = self.tokenIndex - t.line = self.line - t.column = self.column - t.text = self.text - return t - - @property - def text(self): - if self._text is not None: - return self._text - input = self.getInputStream() - if input is None: - return None - n = input.size - if self.start < n and self.stop < n: - return input.getText(self.start, self.stop) - else: - return "" - - @text.setter - def text(self, text:str): - self._text = text - - def __str__(self): - with StringIO() as buf: - buf.write("[@") - buf.write(str(self.tokenIndex)) - buf.write(",") - buf.write(str(self.start)) - buf.write(":") - buf.write(str(self.stop)) - buf.write("='") - txt = self.text - if txt is not None: - txt = txt.replace("\n","\\n") - txt = txt.replace("\r","\\r") - txt = txt.replace("\t","\\t") - else: - txt = "" - buf.write(txt) - buf.write("',<") - buf.write(str(self.type)) - buf.write(">") - if self.channel > 0: - buf.write(",channel=") - buf.write(str(self.channel)) - buf.write(",") - buf.write(str(self.line)) - buf.write(":") - buf.write(str(self.column)) - buf.write("]") - return buf.getvalue() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/contourpy/util/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/contourpy/util/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/util.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/util.py deleted file mode 100644 index 753ddfbdd20fdfbf9ce72d960fadf76abfbca6d7..0000000000000000000000000000000000000000 --- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/util.py +++ /dev/null @@ -1,277 +0,0 @@ -import os -import math -import torch -import torch.nn as nn -import numpy as np -from einops import repeat - -from ldm.util import instantiate_from_config - - - -class FourierEmbedder(): - def __init__(self, num_freqs=64, temperature=100): - - self.num_freqs = num_freqs - self.temperature = temperature - self.freq_bands = temperature ** ( torch.arange(num_freqs) / num_freqs ) - - @ torch.no_grad() - def __call__(self, x, cat_dim=-1): - "x: arbitrary shape of tensor. 
dim: cat dim" - out = [] - for freq in self.freq_bands: - out.append( torch.sin( freq*x ) ) - out.append( torch.cos( freq*x ) ) - return torch.cat(out, cat_dim) - - - -def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if schedule == "linear": - betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - - elif schedule == "cosine": - timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s - ) - alphas = timesteps / (1 + cosine_s) * np.pi / 2 - alphas = torch.cos(alphas).pow(2) - alphas = alphas / alphas[0] - betas = 1 - alphas[1:] / alphas[:-1] - betas = np.clip(betas, a_min=0, a_max=0.999) - - elif schedule == "sqrt_linear": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) - elif schedule == "sqrt": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 - else: - raise ValueError(f"schedule '{schedule}' unknown.") - return betas.numpy() - - -def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): - if ddim_discr_method == 'uniform': - c = num_ddpm_timesteps // num_ddim_timesteps - ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) - elif ddim_discr_method == 'quad': - ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) - else: - raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') - - # assert ddim_timesteps.shape[0] == num_ddim_timesteps - # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps + 1 - if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') - return steps_out - - -def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): - # select alphas for computing the variance schedule - alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - - # according the the formula provided in https://arxiv.org/abs/2010.02502 - sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) - if verbose: - print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') - print(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') - return sigmas, alphas, alphas_prev - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. 
- """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. 
- """ - return GroupNorm32(32, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - #return super().forward(x).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/spaces/avivdm1/AutoGPT/autogpt/config/singleton.py b/spaces/avivdm1/AutoGPT/autogpt/config/singleton.py deleted file mode 100644 index 55b2aeea120bbe51ca837265fcb7fbff467e55f2..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/autogpt/config/singleton.py +++ /dev/null @@ -1,24 +0,0 @@ -"""The singleton metaclass for ensuring only one instance of a class.""" -import abc - - -class Singleton(abc.ABCMeta, type): - """ - Singleton metaclass for ensuring only one instance of a class. - """ - - _instances = {} - - def __call__(cls, *args, **kwargs): - """Call method for the singleton metaclass.""" - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class AbstractSingleton(abc.ABC, metaclass=Singleton): - """ - Abstract singleton class for ensuring only one instance of a class. 
- """ - - pass diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/inpaint_gradio.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/inpaint_gradio.py deleted file mode 100644 index f547c0f9789bb29c1b016edb28426b18f78f259b..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/inpaint_gradio.py +++ /dev/null @@ -1,328 +0,0 @@ -import argparse -import os -import re -import time -from contextlib import nullcontext -from itertools import islice -from random import randint - -import gradio as gr -import numpy as np -import torch -from PIL import Image -from einops import rearrange, repeat -from omegaconf import OmegaConf -from pytorch_lightning import seed_everything -from torch import autocast -from torchvision.utils import make_grid -from tqdm import tqdm, trange -from transformers import logging - -from ldmlib.util import instantiate_from_config -from optimUtils import split_weighted_subprompts, logger - -logging.set_verbosity_error() -import mimetypes - -mimetypes.init() -mimetypes.add_type("application/javascript", ".js") - - -def chunk(it, size): - it = iter(it) - return iter(lambda: tuple(islice(it, size)), ()) - - -def load_model_from_config(ckpt, verbose=False): - print(f"Loading model from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if "global_step" in pl_sd: - print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] - return sd - - -def load_img(image, h0, w0): - image = image.convert("RGB") - w, h = image.size - print(f"loaded input image of size ({w}, {h})") - if h0 is not None and w0 is not None: - h, w = h0, w0 - - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 32 - - print(f"New image size ({w}, {h})") - image = image.resize((w, h), resample=Image.LANCZOS) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return 2.0 * image - 1.0 - - -def load_mask(mask, h0, w0, newH, newW, invert=False): - image = mask.convert("RGB") - w, h = image.size - print(f"loaded input mask of size ({w}, {h})") - if h0 is not None and w0 is not None: - h, w = h0, w0 - - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 32 - - print(f"New mask size ({w}, {h})") - image = image.resize((newW, newH), resample=Image.LANCZOS) - # image = image.resize((64, 64), resample=Image.LANCZOS) - image = np.array(image) - - if invert: - print("inverted") - where_0, where_1 = np.where(image == 0), np.where(image == 255) - image[where_0], image[where_1] = 255, 0 - image = image.astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return image - - -def generate( - image, - mask_image, - prompt, - strength, - ddim_steps, - n_iter, - batch_size, - Height, - Width, - scale, - ddim_eta, - unet_bs, - device, - seed, - outdir, - img_format, - turbo, - full_precision, -): - if seed == "": - seed = randint(0, 1000000) - seed = int(seed) - seed_everything(seed) - sampler = "ddim" - - # Logging - logger(locals(), log_csv="logs/inpaint_gradio_logs.csv") - - init_image = load_img(image['image'], Height, Width).to(device) - - model.unet_bs = unet_bs - model.turbo = turbo - model.cdevice = device - modelCS.cond_stage_model.device = device - - if device != "cpu" and full_precision == False: - model.half() - modelCS.half() - modelFS.half() - init_image = init_image.half() - # mask.half() - - tic = time.time() - os.makedirs(outdir, exist_ok=True) - outpath = outdir - 
sample_path = os.path.join(outpath, "_".join(re.split(":| ", prompt)))[:150] - os.makedirs(sample_path, exist_ok=True) - base_count = len(os.listdir(sample_path)) - - # n_rows = opt.n_rows if opt.n_rows > 0 else batch_size - assert prompt is not None - data = [batch_size * [prompt]] - - modelFS.to(device) - - init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space - init_latent = repeat(init_latent, "1 ... -> b ...", b=batch_size) - if mask_image is None: - mask = load_mask(image['mask'], Height, Width, init_latent.shape[2], init_latent.shape[3], True).to(device) - else: - image['mask']=mask_image - mask = load_mask(mask_image, Height, Width, init_latent.shape[2], init_latent.shape[3], True).to(device) - - mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0) - mask = repeat(mask, '1 ... -> b ...', b=batch_size) - - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelFS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - - if strength == 1: - print("strength should be less than 1, setting it to 0.999") - strength = 0.999 - assert 0.0 <= strength < 1.0, "can only work with strength in [0.0, 1.0]" - t_enc = int(strength * ddim_steps) - print(f"target t_enc is {t_enc} steps") - - if full_precision == False and device != "cpu": - precision_scope = autocast - else: - precision_scope = nullcontext - - all_samples = [] - seeds = "" - with torch.no_grad(): - all_samples = list() - for _ in trange(n_iter, desc="Sampling"): - for prompts in tqdm(data, desc="data"): - with precision_scope("cuda"): - modelCS.to(device) - uc = None - if scale != 1.0: - uc = modelCS.get_learned_conditioning(batch_size * [""]) - if isinstance(prompts, tuple): - prompts = list(prompts) - - subprompts, weights = split_weighted_subprompts(prompts[0]) - if len(subprompts) > 1: - c = torch.zeros_like(uc) - totalWeight = sum(weights) - # normalize each "sub prompt" and add it - for i in range(len(subprompts)): - weight = weights[i] - # if not skip_normalize: - weight = weight / totalWeight - c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight) - else: - c = modelCS.get_learned_conditioning(prompts) - - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelCS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - - # encode (scaled latent) - z_enc = model.stochastic_encode( - init_latent, torch.tensor([t_enc] * batch_size).to(device), - seed, ddim_eta, ddim_steps) - - # decode it - samples_ddim = model.sample( - t_enc, - c, - z_enc, - unconditional_guidance_scale=scale, - unconditional_conditioning=uc, - mask=mask, - x_T=init_latent, - sampler=sampler, - ) - - modelFS.to(device) - print("saving images") - for i in range(batch_size): - x_samples_ddim = modelFS.decode_first_stage(samples_ddim[i].unsqueeze(0)) - x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - all_samples.append(x_sample.to("cpu")) - x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c") - Image.fromarray(x_sample.astype(np.uint8)).save( - os.path.join(sample_path, "seed_" + str(seed) + "_" + f"{base_count:05}.{img_format}") - ) - seeds += str(seed) + "," - seed += 1 - base_count += 1 - - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelFS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - - del samples_ddim - del x_sample - del x_samples_ddim - print("memory_final = ", torch.cuda.memory_allocated() 
/ 1e6) - - toc = time.time() - - time_taken = (toc - tic) / 60.0 - grid = torch.cat(all_samples, 0) - grid = make_grid(grid, nrow=n_iter) - grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy() - - txt = ( - "Samples finished in " - + str(round(time_taken, 3)) - + " minutes and exported to \n" - + sample_path - + "\nSeeds used = " - + seeds[:-1] - ) - return Image.fromarray(grid.astype(np.uint8)), image['mask'], txt - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='txt2img using gradio') - parser.add_argument('--config_path', default="optimizedSD/v1-inference.yaml", type=str, help='config path') - parser.add_argument('--ckpt_path', default="models/ldm/stable-diffusion-v1/model.ckpt", type=str, help='ckpt path') - args = parser.parse_args() - config = args.config_path - ckpt = args.ckpt_path - sd = load_model_from_config(f"{ckpt}") - li, lo = [], [] - for key, v_ in sd.items(): - sp = key.split(".") - if (sp[0]) == "model": - if "input_blocks" in sp: - li.append(key) - elif "middle_block" in sp: - li.append(key) - elif "time_embed" in sp: - li.append(key) - else: - lo.append(key) - for key in li: - sd["model1." + key[6:]] = sd.pop(key) - for key in lo: - sd["model2." + key[6:]] = sd.pop(key) - - config = OmegaConf.load(f"{config}") - - model = instantiate_from_config(config.modelUNet) - _, _ = model.load_state_dict(sd, strict=False) - model.eval() - - modelCS = instantiate_from_config(config.modelCondStage) - _, _ = modelCS.load_state_dict(sd, strict=False) - modelCS.eval() - - modelFS = instantiate_from_config(config.modelFirstStage) - _, _ = modelFS.load_state_dict(sd, strict=False) - modelFS.eval() - del sd - - demo = gr.Interface( - fn=generate, - inputs=[ - gr.Image(tool="sketch", type="pil"), - gr.Image(tool="editor", type="pil"), - "text", - gr.Slider(0, 0.99, value=0.99, step=0.01), - gr.Slider(1, 1000, value=50), - gr.Slider(1, 100, step=1), - gr.Slider(1, 100, step=1), - gr.Slider(64, 4096, value=512, step=64), - gr.Slider(64, 4096, value=512, step=64), - gr.Slider(0, 50, value=7.5, step=0.1), - gr.Slider(0, 1, step=0.01), - gr.Slider(1, 2, value=1, step=1), - gr.Text(value="cuda"), - "text", - gr.Text(value="outputs/inpaint-samples"), - gr.Radio(["png", "jpg"], value='png'), - "checkbox", - "checkbox", - ], - outputs=["image", "image", "text"], - ) - demo.launch() diff --git a/spaces/awacke1/AI.Dashboard.Maps/index.html b/spaces/awacke1/AI.Dashboard.Maps/index.html deleted file mode 100644 index 019df6f46613420f023433b7ad23953f3a770059..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AI.Dashboard.Maps/index.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - My static Space - - - -
        - - diff --git a/spaces/awacke1/AIandSmartTools/style.css b/spaces/awacke1/AIandSmartTools/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AIandSmartTools/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/README.md b/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/README.md deleted file mode 100644 index a2b3f2f229acb333b5d3094e7648914b622061bb..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ActingGameMechanicsForSocialIntelligence -emoji: 🐠 -colorFrom: red -colorTo: indigo -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/static/style.css b/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/static/style.css deleted file mode 100644 index 7b50df8f6904c75f560224034d8aadd76656c6f8..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/static/style.css +++ /dev/null @@ -1,45 +0,0 @@ -body { - --text: hsl(0 0% 15%); - padding: 2.5rem; - font-family: sans-serif; - color: var(--text); -} - -body.dark-theme { - --text: hsl(0 0% 90%); - background-color: hsl(223 39% 7%); -} - -main { - max-width: 80rem; - text-align: center; -} - -section { - display: flex; - flex-direction: column; - align-items: center; -} - -a { - color: var(--text); -} - -form { - width: 30rem; - margin: 0 auto; -} - -input { - width: 100%; -} - -button { - cursor: pointer; -} - -.text-gen-output { - min-height: 1.2rem; - margin: 1rem; - border: 0.5px solid grey; -} diff --git a/spaces/awacke1/chatGPT/utils.py b/spaces/awacke1/chatGPT/utils.py deleted file mode 100644 index b09b072410049e2aa6f82cdd775084d8c0f7064e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/chatGPT/utils.py +++ /dev/null @@ -1,54 +0,0 @@ -import json, os -from tencentcloud.common import credential -from tencentcloud.common.profile.client_profile import ClientProfile -from tencentcloud.common.profile.http_profile import HttpProfile -from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException -from tencentcloud.tmt.v20180321 import tmt_client, models - -def get_tmt_client(): - try: - # 实例化一个认证对象,入参需要传入腾讯云账户 SecretId 和 SecretKey,此处还需注意密钥对的保密 - # 代码泄露可能会导致 SecretId 和 SecretKey 泄露,并威胁账号下所有资源的安全性。以下代码示例仅供参考,建议采用更安全的方式来使用密钥,请参见:https://cloud.tencent.com/document/product/1278/85305 - # 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取 - SecretId = os.environ.get("TENCENTCLOUD_SECRET_ID") - SecretKey = os.environ.get("TENCENTCLOUD_SECRET_KEY") - cred = credential.Credential(SecretId, SecretKey) - # 实例化一个http选项,可选的,没有特殊需求可以跳过 - httpProfile = HttpProfile() - httpProfile.endpoint = "tmt.tencentcloudapi.com" - - # 实例化一个client选项,可选的,没有特殊需求可以跳过 - clientProfile = ClientProfile() - 
clientProfile.httpProfile = httpProfile - # 实例化要请求产品的client对象,clientProfile是可选的 - client = tmt_client.TmtClient(cred, "ap-shanghai", clientProfile) - print(f'client_{client}') - return client - except TencentCloudSDKException as err: - print(f'client_err_{err}') - return None - -def getTextTrans_tmt(tmt_client, text, source='zh', target='en'): - def is_chinese(string): - for ch in string: - if u'\u4e00' <= ch <= u'\u9fff': - return True - return False - - if tmt_client is None: - return text - if not is_chinese(text) and target == 'en': - return text - try: - req = models.TextTranslateRequest() - params = { - "SourceText": text, - "Source": source, - "Target": target, - "ProjectId": 0 - } - req.from_json_string(json.dumps(params)) - resp = tmt_client.TextTranslate(req) - return resp.TargetText - except Exception as e: - return text \ No newline at end of file diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/inference/infer_tool.py b/spaces/azusarang/so-vits-svc-models-ba_P/inference/infer_tool.py deleted file mode 100644 index 985eea3ab5ad86dfcb98472a9bd17456fe8d5763..0000000000000000000000000000000000000000 --- a/spaces/azusarang/so-vits-svc-models-ba_P/inference/infer_tool.py +++ /dev/null @@ -1,407 +0,0 @@ -import hashlib -import io -import json -import logging -import os -import time -from pathlib import Path -from inference import slicer -import gc - -import librosa -import numpy as np -# import onnxruntime -import soundfile -import torch -import torchaudio - -import cluster -import utils -from models import SynthesizerTrn - -from diffusion.unit2mel import load_model_vocoder -import yaml - -logging.getLogger('matplotlib').setLevel(logging.WARNING) - - -def read_temp(file_name): - if not os.path.exists(file_name): - with open(file_name, "w") as f: - f.write(json.dumps({"info": "temp_dict"})) - return {} - else: - try: - with open(file_name, "r") as f: - data = f.read() - data_dict = json.loads(data) - if os.path.getsize(file_name) > 50 * 1024 * 1024: - f_name = file_name.replace("\\", "/").split("/")[-1] - print(f"clean {f_name}") - for wav_hash in list(data_dict.keys()): - if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600: - del data_dict[wav_hash] - except Exception as e: - print(e) - print(f"{file_name} error,auto rebuild file") - data_dict = {"info": "temp_dict"} - return data_dict - - -def write_temp(file_name, data): - with open(file_name, "w") as f: - f.write(json.dumps(data)) - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -def format_wav(audio_path): - if Path(audio_path).suffix == '.wav': - return - raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None) - soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate) - - -def get_end_file(dir_path, end): - file_lists = [] - for root, dirs, files in os.walk(dir_path): - files = [f for f in files if f[0] != '.'] - dirs[:] = [d for d in dirs if d[0] != '.'] - for f_file in files: - if f_file.endswith(end): - file_lists.append(os.path.join(root, f_file).replace("\\", "/")) - return file_lists - - -def get_md5(content): - return hashlib.new("md5", content).hexdigest() - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - -def pad_array(arr, target_length): - 
current_length = arr.shape[0] - if current_length >= target_length: - return arr - else: - pad_width = target_length - current_length - pad_left = pad_width // 2 - pad_right = pad_width - pad_left - padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0)) - return padded_arr - -def split_list_by_n(list_collection, n, pre=0): - for i in range(0, len(list_collection), n): - yield list_collection[i-pre if i-pre>=0 else i: i + n] - - -class F0FilterException(Exception): - pass - -class Svc(object): - def __init__(self, net_g_path, config_path, - device=None, - cluster_model_path="logs/44k/kmeans_10000.pt", - nsf_hifigan_enhance = False, - diffusion_model_path="logs/44k/diffusion/model_0.pt", - diffusion_config_path="configs/diffusion.yaml", - shallow_diffusion = False, - only_diffusion = False, - ): - self.net_g_path = net_g_path - self.only_diffusion = only_diffusion - self.shallow_diffusion = shallow_diffusion - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - # self.dev = torch.device("cpu") - else: - self.dev = torch.device(device) - self.net_g_ms = None - if not self.only_diffusion: - self.hps_ms = utils.get_hparams_from_file(config_path) - self.target_sample = self.hps_ms.data.sampling_rate - self.hop_size = self.hps_ms.data.hop_length - self.spk2id = self.hps_ms.spk - try: - self.speech_encoder = self.hps_ms.model.speech_encoder - except Exception as e: - self.speech_encoder = 'vec768l12' - - self.nsf_hifigan_enhance = nsf_hifigan_enhance - if self.shallow_diffusion or self.only_diffusion: - if os.path.exists(diffusion_model_path) and os.path.exists(diffusion_model_path): - self.diffusion_model,self.vocoder,self.diffusion_args = load_model_vocoder(diffusion_model_path,self.dev,config_path=diffusion_config_path) - if self.only_diffusion: - self.target_sample = self.diffusion_args.data.sampling_rate - self.hop_size = self.diffusion_args.data.block_size - self.spk2id = self.diffusion_args.spk - self.speech_encoder = self.diffusion_args.data.encoder - else: - print("No diffusion model or config found. 
Shallow diffusion mode will False") - self.shallow_diffusion = self.only_diffusion = False - - # load hubert and model - if not self.only_diffusion: - self.load_model() - self.hubert_model = utils.get_speech_encoder(self.speech_encoder,device=self.dev) - self.volume_extractor = utils.Volume_Extractor(self.hop_size) - else: - self.hubert_model = utils.get_speech_encoder(self.diffusion_args.data.encoder,device=self.dev) - self.volume_extractor = utils.Volume_Extractor(self.diffusion_args.data.block_size) - - if os.path.exists(cluster_model_path): - self.cluster_model = cluster.get_cluster_model(cluster_model_path) - if self.shallow_diffusion : self.nsf_hifigan_enhance = False - if self.nsf_hifigan_enhance: - from modules.enhancer import Enhancer - self.enhancer = Enhancer('nsf-hifigan', 'pretrain/nsf_hifigan/model',device=self.dev) - - def load_model(self): - # get model configuration - self.net_g_ms = SynthesizerTrn( - self.hps_ms.data.filter_length // 2 + 1, - self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - **self.hps_ms.model) - _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - if "half" in self.net_g_path and torch.cuda.is_available(): - _ = self.net_g_ms.half().eval().to(self.dev) - else: - _ = self.net_g_ms.eval().to(self.dev) - - - - def get_unit_f0(self, wav, tran, cluster_infer_ratio, speaker, f0_filter ,f0_predictor,cr_threshold=0.05): - - f0_predictor_object = utils.get_f0_predictor(f0_predictor,hop_length=self.hop_size,sampling_rate=self.target_sample,device=self.dev,threshold=cr_threshold) - - f0, uv = f0_predictor_object.compute_f0_uv(wav) - if f0_filter and sum(f0) == 0: - raise F0FilterException("No voice detected") - f0 = torch.FloatTensor(f0).to(self.dev) - uv = torch.FloatTensor(uv).to(self.dev) - - f0 = f0 * 2 ** (tran / 12) - f0 = f0.unsqueeze(0) - uv = uv.unsqueeze(0) - - wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000) - wav16k = torch.from_numpy(wav16k).to(self.dev) - c = self.hubert_model.encoder(wav16k) - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1]) - - if cluster_infer_ratio !=0: - cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T - cluster_c = torch.FloatTensor(cluster_c).to(self.dev) - c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c - - c = c.unsqueeze(0) - return c, f0, uv - - def infer(self, speaker, tran, raw_path, - cluster_infer_ratio=0, - auto_predict_f0=False, - noice_scale=0.4, - f0_filter=False, - f0_predictor='pm', - enhancer_adaptive_key = 0, - cr_threshold = 0.05, - k_step = 100 - ): - - speaker_id = self.spk2id.get(speaker) - if not speaker_id and type(speaker) is int: - if len(self.spk2id.__dict__) >= speaker: - speaker_id = speaker - sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0) - wav, sr = librosa.load(raw_path, sr=self.target_sample) - c, f0, uv = self.get_unit_f0(wav, tran, cluster_infer_ratio, speaker, f0_filter,f0_predictor,cr_threshold=cr_threshold) - if "half" in self.net_g_path and torch.cuda.is_available(): - c = c.half() - with torch.no_grad(): - start = time.time() - if not self.only_diffusion: - audio,f0 = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale) - audio = audio[0,0].data.float() - if self.shallow_diffusion: - audio_mel = self.vocoder.extract(audio[None,:],self.target_sample) - else: - audio = torch.FloatTensor(wav).to(self.dev) - audio_mel = None - if self.only_diffusion or self.shallow_diffusion: - vol = 
self.volume_extractor.extract(audio[None,:])[None,:,None].to(self.dev) - f0 = f0[:,:,None] - c = c.transpose(-1,-2) - audio_mel = self.diffusion_model( - c, - f0, - vol, - spk_id = sid, - spk_mix_dict = None, - gt_spec=audio_mel, - infer=True, - infer_speedup=self.diffusion_args.infer.speedup, - method=self.diffusion_args.infer.method, - k_step=k_step) - audio = self.vocoder.infer(audio_mel, f0).squeeze() - if self.nsf_hifigan_enhance: - audio, _ = self.enhancer.enhance( - audio[None,:], - self.target_sample, - f0[:,:,None], - self.hps_ms.data.hop_length, - adaptive_key = enhancer_adaptive_key) - use_time = time.time() - start - print("vits use time:{}".format(use_time)) - return audio, audio.shape[-1] - - def clear_empty(self): - # clean up vram - torch.cuda.empty_cache() - - def unload_model(self): - # unload model - self.net_g_ms = self.net_g_ms.to("cpu") - del self.net_g_ms - if hasattr(self,"enhancer"): - self.enhancer.enhancer = self.enhancer.enhancer.to("cpu") - del self.enhancer.enhancer - del self.enhancer - gc.collect() - - def slice_inference(self, - raw_audio_path, - spk, - tran, - slice_db, - cluster_infer_ratio, - auto_predict_f0, - noice_scale, - pad_seconds=0.5, - clip_seconds=0, - lg_num=0, - lgr_num =0.75, - f0_predictor='pm', - enhancer_adaptive_key = 0, - cr_threshold = 0.05, - k_step = 100 - ): - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - per_size = int(clip_seconds*audio_sr) - lg_size = int(lg_num*audio_sr) - lg_size_r = int(lg_size*lgr_num) - lg_size_c_l = (lg_size-lg_size_r)//2 - lg_size_c_r = lg_size-lg_size_r-lg_size_c_l - lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0 - - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - # padd - length = int(np.ceil(len(data) / audio_sr * self.target_sample)) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - audio.extend(list(pad_array(_audio, length))) - continue - if per_size != 0: - datas = split_list_by_n(data, per_size,lg_size) - else: - datas = [data] - for k,dat in enumerate(datas): - per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) if clip_seconds!=0 else length - if clip_seconds!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])]) - raw_path = io.BytesIO() - soundfile.write(raw_path, dat, audio_sr, format="wav") - raw_path.seek(0) - out_audio, out_sr = self.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - f0_predictor = f0_predictor, - enhancer_adaptive_key = enhancer_adaptive_key, - cr_threshold = cr_threshold, - k_step = k_step - ) - _audio = out_audio.cpu().numpy() - pad_len = int(self.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - _audio = pad_array(_audio, per_length) - if lg_size!=0 and k!=0: - lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr_num != 1 else audio[-lg_size:] - lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr_num != 1 else _audio[0:lg_size] - lg_pre = lg1*(1-lg)+lg2*lg - audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr_num != 1 else audio[0:-lg_size] - audio.extend(lg_pre) - _audio = _audio[lg_size_c_l+lg_size_r:] if lgr_num != 1 else _audio[lg_size:] - audio.extend(list(_audio)) - return 
np.array(audio) - -class RealTimeVC: - def __init__(self): - self.last_chunk = None - self.last_o = None - self.chunk_len = 16000 # chunk length - self.pre_len = 3840 # cross fade length, multiples of 640 - - # Input and output are 1-dimensional numpy waveform arrays - - def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path, - cluster_infer_ratio=0, - auto_predict_f0=False, - noice_scale=0.4, - f0_filter=False): - - import maad - audio, sr = torchaudio.load(input_wav_path) - audio = audio.cpu().numpy()[0] - temp_wav = io.BytesIO() - if self.last_chunk is None: - input_wav_path.seek(0) - - audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - f0_filter=f0_filter) - - audio = audio.cpu().numpy() - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return audio[-self.chunk_len:] - else: - audio = np.concatenate([self.last_chunk, audio]) - soundfile.write(temp_wav, audio, sr, format="wav") - temp_wav.seek(0) - - audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - f0_filter=f0_filter) - - audio = audio.cpu().numpy() - ret = maad.util.crossfade(self.last_o, audio, self.pre_len) - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return ret[self.chunk_len:2 * self.chunk_len] - \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/PlayCanvasLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/PlayCanvasLoader.js deleted file mode 100644 index 00f8ced7dc1284caa3ac252e0b754b172242b6cb..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/PlayCanvasLoader.js +++ /dev/null @@ -1,203 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - * @author Mugen87 / https://github.com/Mugen87 - */ - -THREE.PlayCanvasLoader = function ( manager ) { - - this.manager = ( manager !== undefined ) ? 
manager : THREE.DefaultLoadingManager; - -}; - -THREE.PlayCanvasLoader.prototype = { - - constructor: THREE.PlayCanvasLoader, - - load: function ( url, onLoad, onProgress, onError ) { - - var scope = this; - - var loader = new THREE.FileLoader( scope.manager ); - loader.setPath( scope.path ); - loader.load( url, function ( text ) { - - onLoad( scope.parse( JSON.parse( text ) ) ); - - }, onProgress, onError ); - - }, - - setPath: function ( value ) { - - this.path = value; - return this; - - }, - - parse: function ( json ) { - - function parseVertices( data ) { - - var attributes = {}; - - // create a buffer attribute for each array that contains vertex information - - for ( var name in data ) { - - var array = data[ name ]; - - var type = array.type; - var size = array.components; - - var attribute; - - switch ( type ) { - - case 'float32': - attribute = new THREE.Float32BufferAttribute( array.data, size ); - break; - - case 'uint8': - attribute = new THREE.Uint8BufferAttribute( array.data, size ); - break; - - case 'uint16': - attribute = new THREE.Uint16BufferAttribute( array.data, size ); - break; - - default: - console.log( 'THREE.PlayCanvasLoader: Array type "%s" not yet supported.', type ); - - } - - attributes[ name ] = attribute; - - } - - data._attributes = attributes; - - } - - function parseMeshes( data ) { - - // create buffer geometry - - var geometry = new THREE.BufferGeometry(); - - geometry.setIndex( data.indices ); - - var attributes = model.vertices[ data.vertices ]._attributes; - - for ( var name in attributes ) { - - var attribute = attributes[ name ]; - - if ( name === 'texCoord0' ) name = 'uv'; - - geometry.addAttribute( name, attribute ); - - } - - data._geometry = geometry; - - } - - function parseMeshInstances( data ) { - - var node = model.nodes[ data.node ]; - var mesh = model.meshes[ data.mesh ]; - - if ( node._geometries === undefined ) { - - node._geometries = []; - - } - - node._geometries.push( mesh._geometry ); - - } - - function parseNodes( data ) { - - var object = new THREE.Group(); - - var geometries = data._geometries; - - if ( geometries !== undefined ) { - - var material = new THREE.MeshPhongMaterial(); - - for ( var i = 0, l = geometries.length; i < l; i ++ ) { - - var geometry = geometries[ i ]; - - object.add( new THREE.Mesh( geometry, material ) ); - - } - - } - - for ( var i = 0, l = data.rotation.length; i < l; i ++ ) { - - data.rotation[ i ] *= Math.PI / 180; - - } - - // - - object.name = data.name; - - object.position.fromArray( data.position ); - object.quaternion.setFromEuler( new THREE.Euler().fromArray( data.rotation ) ); - object.scale.fromArray( data.scale ); - - data._object = object; - - } - - // - - var model = json.model; - - for ( var i = 0, l = model.vertices.length; i < l; i ++ ) { - - parseVertices( model.vertices[ i ] ); - - } - - for ( var i = 0, l = model.meshes.length; i < l; i ++ ) { - - parseMeshes( model.meshes[ i ] ); - - } - - for ( var i = 0, l = model.meshInstances.length; i < l; i ++ ) { - - parseMeshInstances( model.meshInstances[ i ] ); - - } - - for ( var i = 0, l = model.nodes.length; i < l; i ++ ) { - - parseNodes( model.nodes[ i ] ); - - } - - // setup scene hierarchy - - for ( var i = 0, l = model.parents.length; i < l; i ++ ) { - - var parent = model.parents[ i ]; - - if ( parent === - 1 ) continue; - - model.nodes[ parent ]._object.add( model.nodes[ i ]._object ); - - - } - - return model.nodes[ 0 ]._object; - - } - -}; diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195551.py 
b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195551.py deleted file mode 100644 index 09dff2dbf40f5889c786c1e27d513f55b2aca90a..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195551.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - #return Image.fromarray(restored_faces[0][:,:,::-1]) - return Image.fromarray(restored_img[:, :, ::-1]) - -title = "让美好回忆更清晰" - - -description = "上传老照片,点击Submit,稍等片刻,右侧Output将照片另存为即可。" -article = "

        visitor badge
        " - -article = "

        | | Github Repo

        visitor badge
        " - -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True,share=True) - - diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/setup.py b/spaces/beihai/GFPGAN-V1.3-whole-image/setup.py deleted file mode 100644 index 474e9188aa2dc5c19614921760ce4ad99bd19c13..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/setup.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python - -from setuptools import find_packages, setup - -import os -import subprocess -import time - -version_file = 'gfpgan/version.py' - - -def readme(): - with open('README.md', encoding='utf-8') as f: - content = f.read() - return content - - -def get_git_hash(): - - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - except OSError: - sha = 'unknown' - - return sha - - -def get_hash(): - if os.path.exists('.git'): - sha = get_git_hash()[:7] - else: - sha = 'unknown' - - return sha - - -def write_version_py(): - content = """# GENERATED VERSION FILE -# TIME: {} -__version__ = '{}' -__gitsha__ = '{}' -version_info = ({}) -""" - sha = get_hash() - with open('VERSION', 'r') as f: - SHORT_VERSION = f.read().strip() - VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) - - version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) - with open(version_file, 'w') as f: - f.write(version_file_str) - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -def get_requirements(filename='requirements.txt'): - here = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(here, filename), 'r') as f: - requires = [line.replace('\n', '') for line in f.readlines()] - return requires - - -if __name__ == '__main__': - write_version_py() - setup( - name='gfpgan', - version=get_version(), - description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration', - long_description=readme(), - long_description_content_type='text/markdown', - author='Xintao Wang', - author_email='xintao.wang@outlook.com', - keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan', - url='https://github.com/TencentARC/GFPGAN', - include_package_data=True, - packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), - classifiers=[ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - ], - license='Apache License Version 2.0', - setup_requires=['cython', 'numpy'], - install_requires=get_requirements(), - zip_safe=False) diff --git 
a/spaces/bertin-project/bertin/README.md b/spaces/bertin-project/bertin/README.md deleted file mode 100644 index bd3e4cc5b10e65379dbbc69896aa365049ddc6b5..0000000000000000000000000000000000000000 --- a/spaces/bertin-project/bertin/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: BERTIN -emoji: 🔥 -colorFrom: yellow -colorTo: red -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/bguberfain/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py b/spaces/bguberfain/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py deleted file mode 100644 index ec042b8ce48d193b40fd1e6311b2cc4b0c4e4086..0000000000000000000000000000000000000000 --- a/spaces/bguberfain/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import argparse -import pickle -import torch - -""" -Usage: - -cd DETIC_ROOT/models/ -wget https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/resnet50_miil_21k.pth -python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path resnet50_miil_21k.pth - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth -python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path swin_base_patch4_window7_224_22k.pth - -""" - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--path', default='') - args = parser.parse_args() - - print('Loading', args.path) - model = torch.load(args.path, map_location="cpu") - # import pdb; pdb.set_trace() - if 'model' in model: - model = model['model'] - if 'state_dict' in model: - model = model['state_dict'] - ret = { - "model": model, - "__author__": "third_party", - "matching_heuristics": True - } - out_path = args.path.replace('.pth', '.pkl') - print('Saving to', out_path) - pickle.dump(ret, open(out_path, "wb")) diff --git a/spaces/bino-ocle/audio-intelligence-dash/app/css_components/file.css b/spaces/bino-ocle/audio-intelligence-dash/app/css_components/file.css deleted file mode 100644 index 93058e03a2f4dd7cdad73bb9051474b4be985405..0000000000000000000000000000000000000000 --- a/spaces/bino-ocle/audio-intelligence-dash/app/css_components/file.css +++ /dev/null @@ -1,81 +0,0 @@ -body { - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Raleway, Helvetica, - Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; -} - -.logo { - width: 180px; -} - -.title { - font-weight: 600; - text-align: left; - color: black; - font-size: 18px; -} - -.alert, -#component-2, -#component-3 { - padding: 24px; - color: black; - background-color: #f4f8fb; - border: 1px solid #d6dce7; - border-radius: 8px; - box-shadow: 0px 6px 15px rgb(0 0 0 / 2%), 0px 2px 5px rgb(0 0 0 / 4%); -} - -ol { - list-style: disc; -} - 
-.alert__info { - background-color: #f4f8fb; - color: #323552; -} - -.alert__warning { - background-color: #fffae5; - color: #917115; - border: 1px solid #e4cf2b; -} - -#pw { - -webkit-text-security: disc; -} - -/* unvisited link */ -a:link { - color: #52DFDF; -} - -/* visited link */ -a:visited { - color: #52DFDF; -} - -/* mouse over link */ -a:hover { - color: #52DFDF; -} - -/* selected link */ -a:active { - color: #52DFDF; -} - -li { - margin-left: 1em; -} - -.apikey { -} - -.entity-list { - color: #52DFDF; - font-size: 16px -} - -.entity-elt { - color: black -} \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Big Fish Game Universal Crack How to Solve Common Problems and Errors.md b/spaces/bioriAsaeru/text-to-voice/Big Fish Game Universal Crack How to Solve Common Problems and Errors.md deleted file mode 100644 index 27fb31da7b2fc5ab6dae666ee2d5bd4a9e143b5e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Big Fish Game Universal Crack How to Solve Common Problems and Errors.md +++ /dev/null @@ -1,6 +0,0 @@ -
        -

The boy was brave and very beautiful. His tribespeople called him the Tenas Tyee and they loved him. Of all his wealth of fish and furs, of game and hykwa he gave to the boys who had none; he hunted food for the old people; he tanned skins and furs for those whose feet were feeble, whose eyes were fading, whose blood ran thin with age.

        -

        big fish game universal crack


        DOWNLOAD ★★★ https://urloso.com/2uyQE9



        -

        NBCUNIVERSAL TELEVISION AND STREAMING BOLSTERS ITS INVESTMENT IN CABLE PORTFOLIO WITH 33 NEW UNSCRIPTED SERIES, OVER 30 ADDICTIVE RETURNING HITS, MUST-WATCH LIVE EVENTS & A CONTINUED FOCUS ON SCRIPTEDNEW BRAVO SHOWS INCLUDE: "Life is a Ballroom (WT)," "Love Without Borders (WT)," "Love Match Atlanta (WT)," and Franchise Spinoffs "Below Deck Adventure," "Summer House Winter Charm" and "Kandi OLG Project (WT)" NEW E! SHOWS INCLUDE: "Clash of the Cover Bands (WT)" from Executive Producer Jimmy Fallon, "Jason Biggs Cash at Your Door" with Actor Jason Biggs, "We Got Love Iman & Teyana," "Showbiz Moms & Dads," and "Relatively Famous: Ranch Rules (WT)," Plus Laverne Cox Joins as New Signature E! Talent, and the "People's Choice Awards" Sets Date for Dec. 7NEW OXYGEN SHOWS AND SPECIALS INCLUDE: "911 Crisis Center," "Alaska Man Murders (WT)," Four All-New "Snapped Notorious" Specials, "Family Massacre," "New York Homicide," "The Real Murders of Atlanta," "Relationship Killer with Faith Jenkins (WT)," "The Toolbox Killer" and MoreNEW SYFY ORIGINALS INCLUDE: "Chucky" from Executive Producer Don Mancini (also airing on USA), "Day of the Dead," "Astrid & Lilly Save the World," "Bring It On: Halloween (WT)" and "Slumber Party Massacre" NEW USA NETWORK ORIGINALS INCLUDE: "Nash Bridges" Movie with Original Cast Members Don Johnson and Cheech Marin, "Chucky" from Executive Producer Don Mancini (also airing on SYFY), "America's Big Deal" with Inventor and Entrepreneur Joy Mangano and "Mud, Sweat & Beards (WT)" NEW YORK - May 13, 2021 - NBCUniversal Television and Streaming is solidifying its commitment to its cable portfolio following a year where each of its cable brands - Bravo, E!, Oxygen, SYFY, Universal Kids and USA Network - remained leaders in their respective areas of focus, including Bravo and USA once again securing positions among the top five ad-supported cable entertainment networks in the key 18-49 demo. NBCUniversal unveils a wide-ranging slate for its cable portfolio, which will collectively add 33 new unscripted shows or specials, bring back over 30 fan-favorite hits with more pick-ups to come, launch five scripted series and air three original movies in addition to multiple live events. "A key driver of success at NBCUniversal's cable networks is our ability to tap into the scope and scale of our collective portfolio while leveraging each brand's distinct audience to build unrivaled fan destinations across our networks and on Peacock," said Frances Berwick, Chairman, Entertainment Networks, NBCUniversal Television and Streaming. "Our cable networks represent a vital part of our business and we will continue to invest in our brands and IP in innovative ways, whether that be through mixing live sports and big event series on USA, introducing a new host to E!'s flagship Red Carpet, or bringing our addictive Bravo shows to life with fan experiences, our networks remain the home of culture-defining television." "With more than 40 new series and specials across the cable networks, our content team has been firing on all cylinders to ensure our brands are at the center of destination viewing," continued Susan Rovner, Chairman, Entertainment Content, NBCUniversal Television and Streaming. "Our high-profile competition shows, spinoffs of fan favorite franchises and much-anticipated scripted originals super-serve existing fans while also enticing new ones." NBCUniversal's cable portfolio is one of the strongest in the marketplace. 
Not only are Bravo and USA maintaining their positions as top-five ad supported cable entertainment networks in the key 18-49 demo, but both cablers are pacing for their best years ever in both on-demand views and livestreams. Bravo has the most series in the top 20 reality programs on cable across linear, social and VOD year-to-date, and USA's "WWE Monday Night Raw" ranks as the #1 cable entertainment program in Live viewership. Additionally, within their respective audience groups, NBCU's other cable networks - E!, Oxygen, SYFY and Universal Kids - dominated in 2020 and are on track to deliver in 2021. E! ended the year as the #1 most social cable reality network with a record-breaking 706M social engagements across platforms. Oxygen had its fourth consecutive year of true crime growth and viewers spent more time watching than ever before, ranking as the #1 cable network in audience engagement among all key demos and outpacing its top competitor by double digits. With over 350 hours of original programming for 2021, Oxygen is poised to continue this trend. SYFY is setting new records in on-demand views and livestreams, posting its best 1Q ever and pacing for its best year ever in both metrics. The network is also home to the most-watched new cable drama in the last year with breakout hit "Resident Alien." Universal Kids was the fastest growing kids' network of 2020 with double-digit year-over-year growth in their key K2-11 demo and among total views. On digital, the Universal Kids' YouTube page secured 170M organic views in 2020, up from 16M two years ago, and last quarter the page hit a major milestone as it surpassed 1M subscribers. Below please find full slate breakdowns for each network. For photos and additional show information, go to NETWORK SCHEDULE BREAKDOWNS: BRAVO As a top five cable entertainment brand with the #1 most loyal fanbase, Bravo is a premiere lifestyle and entertainment destination with sought-after programming that drives cultural conversation. Bravo will fortify its unscripted slate with new "can't miss" reality series, spinoffs of fan-favorite hits and the return of the network's top franchises. All-new spinoffs include "Below Deck Adventure," which follows yachties on an adventure-filled experience with charter guests; "Summer House Winter Charm" in which fan-favorites from the beloved "Summer House" and "Southern Charm" franchises will leave their warmer climates and come together for a winter adventure in Stowe, VT;and more. The network will also bring back top franchises "Million Dollar Listing Los Angeles," "Vanderpump Rules," all-new seasons of multiple "Real Housewives" series, among many others. New programming coming to Bravo includes: "Below Deck Adventure" - Premieres Early 2022 This new series takes wealthy thrill-seekers on the mega-yacht trip of a lifetime to experience thrilling YOLO adventures and daredevil activities all against some of the world's most beautiful backdrops. Season 1 will be set in the glacial fjords of Norway where charter guests will dog sled, heli ski and cold-water plunge their way through the day, dine on freshly caught seafood straight from the Scandinavian waters in the evening, and still get all steamed up in the hot tub by night. The exacting expectations of high-end luxury coupled with the physical demands of cold-weather adventures will push the crew to new heights and unparalleled pressures that makes this new series one-of-a-kind. 
Produced by Shed Media with Lisa Shannon and Dan Peirson serving as executive producers. Mark Cronin is also set to executive produce, along with Grace Lee serving as showrunner and executive producer. "Kandi OLG Project (WT)" - Premieres Late 2021In this new occu-series, we'll watch as Kandi, Todd and the Old Lady Gang (comprised of Mama Joyce, and Aunts Nora & Bertha) continue to build their restaurant empire while keeping their over-the-top and opinionated staff in line and making their vision come to fruition. The new series follows the staff in and out of the restaurant, witnessing how workplace slights bleed into their social lives. Everyone at OLG has a passion - be it for dancing, comedy or just to follow in Kandi's mogul footsteps, and these dreams can get in the way of running food and ensuring customers don't leave those dreaded one-star Yelp reviews. And the fact that some are single and constantly flirting with the cute clientele - and each other - doesn't exactly help things either. It's a monumental task to face, but if anyone can do it, it's Kandi! Produced by Truly Original with Steven Weinstock, Glenda Hersh, Kandi Burruss and Todd Tucker serving as executive producers. "Life is a Ballroom (WT)"- Premieres Late 2021Professional Amateur dancing is the fastest growing division of ballroom dance in the world. In this sport, known as Pro-Am, amateur dancers pay professional dance partners to compete with them in competitions around the country each week. From the producers of "90 Day Fiancé," this access driven docuseries will follow and intercut the stories of the most fascinating, talented and obsessed ballroom amateur dancers in the country. Produced by Sharp Entertainment with Matt Sharp and Dan Adler serving as executive producers. "Love Match Atlanta (WT)" - Premieres Early 2022 This all-new series explores Atlanta's most highly regarded Black matchmaking companies as they utilize distinct methodology, charm and skill to find a match for some of the city's most eligible and demanding singles. Celebrating the business of Black love, the series follows these matchmakers and their teams as they work to outmaneuver their competitors, matching high profile singles who are on the hunt for the ultimate relationship and willing to pay top dollar for it. Produced by Matador Content, a Boat Rocker Company, with Todd Lubin and Jay Peterson serving as executive producers. "Love Without Borders (WT)" - Premieres Mid 2022Singles dream of escaping their everyday lives to find their soulmate by risking everything for a chance at love with a stranger, in a strange land. Participants will leave their jobs, homes, possessions and families behind, but they don't know where in the world they're going or who they'll meet, all in the name of love! Produced by Kinetic Content with Chris Coelen, Eric Detwiler, Karrie Wolfe and Katie Griffin serving as executive producers. "Summer House Winter Charm" - Premieres this Fall When the temperature drops, the drama heats up! This all-new series follows some of Bravo's favorite "Summer House" and "Southern Charm" cast members and their friends during a two-week vacation at a ski house in Vermont. Produced by Truly Original with Steven Weinstock, Glenda Hersh, Lauren Eskelin, Jamie Jakimo, Maggie Langtry and Trish Gold serving as executive producers. 
Shows premiering on Bravo this summer include "Below Deck Mediterranean," "Family Karma," "The Real Housewives of Beverly Hills," "The Real Housewives of Potomac," "Shahs of Sunset" and the all-new "Top Chef Amateurs" and "Million Dollar Listing: Ryan's Renovation." E! As the #1 most social cable reality network, E! will continue to be a leader in pop culture programming with the return of its highest-rated new series, "The Bradshaw Bunch," which featuresfour-time NFL Super Bowl champion quarterback Terry Bradshaw and his family; the return of the only awards show for the people, by the people, the "People's Choice Awards" on Dec. 7;and more.The network also picks up five new unscripted shows, including a music showdown competition series "Clash of the Cover Bands (WT)," which hails from executive producer Jimmy Fallon; "Jason Biggs Cash at Your Door" with actor and comedian Jason Biggs; and more.E! continues to cement its position as a leader in the pop culture zeitgeist with the recent announcement of actress and LGBTQ+ advocate Laverne Cox joining the network as the new host of E!'s signature red carpet franchise, "Live from E!," beginning in 2022. Additionally, Cox will host a series of celebrity-based interview specials spotlighting trendsetters, tastemakers and those making an impact in the Hollywood community throughout next year. New programming coming to E! includes: "Clash of the Cover Bands (WT)" - Premieres this Fall Jimmy Fallon will executive produce "Clash of the Cover Bands (WT)," a new music competition series on E!. In each episode, two bands of similar musical genre (e.g. Pop Divas, Boy Bands, Heavy Metal etc.) go head-to-head to see which band delivers the most impressive cover performance for a chance to win a cash prize and bragging rights. "Clash of the Cover Bands (WT)" is produced by Universal Television Alternative Studio, a division of Universal Studio Group, and Electric Hotdog with Jimmy Fallon and Jim Juvonen serving as executive producers. "Jason Biggs Cash at Your Door" - Premieres this Fall From the producers of "Cash Cab" comes this new game show, hosted by Jason Biggs, that comes to you, in your very own home, when you least expect it. Because who doesn't want to win cash without leaving the house? Produced by Lion Television with Jason Biggs, Allison Corn, Tony Tackaberry, Stan Hsue and Adam Dolgins serving as executive producers, with the format by Ami Amir. The format was created by Matar and is distributed by All3Media. "Relatively Famous: Ranch Rules (WT)" - Premieres Early 2022Eight celebrity offspring come together to live and work as ranch hands for four weeks in Steamboat Springs, Colorado, hoping to prove to themselves, their parents, and the world that they are more than their last names. Each episode throws the cast into fish-out-of-water situations and hilarious misadventures as they work to restore and reopen Saddleback Ranch to the public after a year of shutdown and loss due to the pandemic. Produced by Fremantle with Kevin Lee, Justin Berfield, Jason Felts, Joe Simpson and Angela Rae Berg serving as executive producers. "Showbiz Moms & Dads" - Premieres Early 2022From the Emmy-winning producers of "RuPaul's Drag Race" and "Million Dollar Listing," Bravo's classic series "Showbiz Moms & Dads" is coming to E! with all-new bigger-than-life parents of budding stars. This time around, these talented kids are not just dancers, actors, models and musicians... but are social media influencers with new content platforms with millions of followers. 
And stage parents are more competitive than ever before! Produced by World of Wonder with Fenton Bailey, Randy Barbato and Tom Campbell serving as executive producers. "We Got Love Iman & Teyana" - Premieres this FallThis refreshingly authentic and completely unfiltered new docu-series follows Teyana Taylor and Iman Shumpert as they take the world by storm, all while juggling music, fashion, business and family. With their tight entourage of family and friends, Teyana and Iman are ready to continue building their empire, despite the whirlwind chaos and drama that goes along with it. Produced by STX Unscripted with Jason Goldberg, Teyana Taylor, Iman Shumpert, Shanta Conic, Michelle Kongkasuwan and Ailee O'Neil serving as executive producers. Shows premiering on E! this summer include "Botched" and the all-new "Reunion Road Trip." The "E! News" brand is the leading multi-platform publisher delivering breaking entertainment news and pop culture coverage 24/7 across linear, digital and social media. The brand's programming slate includes "Daily Pop" and "Nightly Pop" on linear; one of the top entertainment news websites with EOnline.com and a growing presence on YouTube; "E! News' The Rundown" on Snapchat; and an impressive social media presence across Facebook, Twitter and Instagram, with the latter boasting over 16 million Instagram follows on the @enews handle. OXYGEN Last year in a highly competitive market, Oxygen maintained four consecutive years of true crime growth and was the fastest growing top 30 cable entertainment network among its key F25-54 demo. With multiple returning shows, including the 30th season of the flagship series "Snapped;" the bone-chilling spinoff "Snapped: Behind Bars;" Dick Wolf's "Cold Justice" and more; plus 17 all-new series or specials, including "911 Crisis Center," which gives viewers a behind-the-scenes look at the fast-paced, high-stakes world of a dynamic 911 call center outside of Cleveland and four "Snapped Notorious" specials, the brand will continue to be a leading multiplatform destination for the genre. New programming coming to Oxygen includes: "911 Crisis Center" - Premieres this Fall This one-of-a-kind documentary series brings viewers behind-the-scenes of the fast-paced, high-stakes world of a dynamic 911 call center outside of Cleveland. This is an up-close and personal look at an amazing team of 911 dispatchers as they take on a never-ending bombardment of panic-stricken callers, and save lives. These dedicated professionals really are the unsung heroes of law enforcement. Produced by Shed Media with Dan Peirson, Lisa Shannon and Adam Kassen serving as executive producers. "Alaska Man Murders (WT)" - Premieres this Fall Investigating homicide cases is hard. Investigating homicide cases in Alaska is even harder. "Alaska Man Murders (WT)" explores the darkness that lurks within America's Last Frontier. Isolation, extreme weather, challenging terrain and other unique factors make for cases that can be incredibly difficult to crack. These are the stories of investigators who succeeded despite the odds. Produced by RIVR Media with Dee Haslam, Lori Styer and Myles Reiff serving as executive producers. "Family Massacre" - Premieres Late 2021"Family Massacre" is a gripping and powerful exploration of some of the most ruthless murders ever committed. This series follows the true and gruesome tales of the unthinkable: multiple members of the same family slain in cold blood. 
In each episode, we hear from friends and surviving relatives, those people closest to the family that was massacred, while also detailing the work of the dedicated investigators and prosecutors tasked with finding their killer and bringing them to justice. Through first-hand accounts, archival footage and cinematic recreations, we see the twists and turns of the investigation unfold and delve deep into who could have committed such a shocking crime and just how they were caught and made to answer for it. Produced by Renegade 83 with David Garfinkle, Jay Renfroe and Chris Rowe serving as Executive Producers. "Final Moments" - Premieres Early 2022This all-new series delves into heart-wrenching crimes, revealing the emotional truth of the victims leading up to their death. Each episode tracks a new investigation and features real footage, pictures and social media posts that shed light on the life of the victim and the crime. What were they doing? What might they have been thinking and feeling? What was their Final Moment? Produced by Dick Wolf's Wolf Entertainment and ITV America's Good Caper Content with Dick Wolf, Tom Thayer, Kathryn Vaughan, Jordana Hochman and Tim McConville serving as executive producers. "The Girl Who Lived (WT)" - Two-Hour Special Premieres this Fall From Executive Producer Elizabeth Smart, "The Girl Who Lived (WT)" tells the story of Kara Robinson: abducted in broad daylight from a friend's front yard, the 15-year-old is held captive and sexually assaulted for 18 harrowing hours. When she bravely engineers her own escape and leads authorities to her assailant's apartment, they uncover a series of crimes far darker and more deadly than anyone ever imagined. Produced by Marwar Junction Productions and Entertainment One with Elizabeth Smart, Kara Robinson Chamberlain, Allison Berkley, Joseph Freed, Tara Long, Geno McDermott and Carolyn Blackstone-Day serving as executive producers. "New York Homicide" - Premieres Early 2022This all-new series dives deep into some of the worst murder cases in the city's recent history. Each hour-long, self-contained episode lets viewers bear witness to the tragedy, the trauma, and the triumph of New Yorkers in the face of Gotham City's worst crimes. With exclusive access to former and current detectives, investigators guide viewers through their most complex cases, while the victim's loved ones recount the ongoing emotional impact. Archival material and dramatic re-creations immerse the audience in the twists and turns of these unforgettable capers. NEW YORK HOMICIDE. Real cases. Real victims. Real heroes. Real New Yorkers. Produced by ITV America's Good Caper Content with Kathryn Vaughan, Jordana Hochman, Brain DeCubellis and Diane and Michele Warshay serving as executive producers. "The Real Murders of Atlanta" - Premieres Early 2022"The Real Murders of Atlanta" portrays the shocking, sinful and salacious cases of homicide that highlight the boundaries between gentrified Southern dynasties, hip hop hustlers and the flashy nouveau riche of this metropolitan mecca of music, entertainment and tech. It's the dark side of the New South, where deadly battles for status and affluence emerge between those who are willing to kill for the good life and those willing to kill to keep it. Produced by 44 Blue Productions with Stephanie Drachkovitch, David Hale, Dan Snook and Robert Wise serving as Executive Producers. 
"Relationship Killer with Faith Jenkins (WT)" - Premieres Early 2022This true Crime series delves into jaw-droppingly evil stories of love that sours, and break-ups that turn downright murderous. Hosted by Divorce Court's presiding judge, Faith Jenkins, these twisted tales of relationships gone bad show what happens when breaking up means only one thing: someone has to die. Produced by Texas Crew Productions and Faith in Justice Productions with David Karabinas, Mary Bissell and Brad Bernstein serving as executive producers for Texas Crew, and Faith Jenkins serving as executive producer for Faith in Justice. "Snapped Notorious: The Cleveland Strangler" - Two-Hour Special Premieres this Fall For over two years, the Cleveland Strangler murdered eleven women and lived with their bodies decomposing inside his house. Five women managed to escape from his attacks and share details about the terror they experienced inside his house of horrors. Produced by Catalina Content with Jeff Collins, Deborah Allen and Russell Heldt serving as executive producers. "Snapped Notorious: Happy Face Killer" - Two-Hour Special Premieres this Fall The man known as the Happy Face Killer viciously strangled 8 women and dumped their bodies along the road. Crime novelist, M William Phelps, shares rare on-camera interviews and never-heard-before chilling recordings with the psychopathic killer. Produced by Catalina Content with Jeff Collins, Deborah Allen, Russell Heldt and M. William Phelps serving as executive producers. "Snapped Notorious: River Valley Killer" - Two-Hour Special Premieres this Fall From 1993 to 2000, the quiet community of Fort Smith, Arkansas was terrorized by a twisted serial killer, a deranged necrophiliac who targeted elderly and vulnerable women. He became known as The River Valley Killer. Produced by Catalina Content with Jeff Collins, Deborah Allen and Russell Heldt serving as executive producers. "The Toolbox Killer" - Two-Hour Special Premieres this Fall In his own words, America's most sadistic serial killer describes his 1979 killing spree in this 2-hour documentary. Known as "The Toolbox Killer," Lawrence Bittaker was silent about his crimes for 40 years, until he met investigator Laura Brand. Over the course of five years, Brand recorded her many conversations with Bittaker as he spoke from death row about his methods and motives, providing unique insights into the mind of a criminal sadist. Produced by Mike Mathis Productions with Mike Mathis, Matthew Testa and Laura Brand serving as executive producers. "Twisted Killers" - Premieres Early 2022The most baffling cases. The most bizarre killers. What drives acts of evil? "Twisted Killers" tells the shocking stories of some of America's darkest, most unusual murderers. Along the way, a trio of criminal experts, including former NYC DA Beth Karas, retired LAPD Homicide Detective Tracey Benjamin and Forensic Psychologist Kate Termini, provide insight and expertise on how these twisted killers were brought to justice. Produced by ITN Productions with Bruce Kennedy and Ian Russell serving as executive producers. "Untitled Carolyn Warmus Project (WT)" - Special Event Series Premieres Early 2022This true crime limited series exposes new theories about the "Fatal Attraction Killer," a case named for its similarity to the blockbuster film centered around a woman who becomes obsessed with her married lover. After spending 27 years in prison for the crime, Carolyn Warmus is speaking for the first time since her release. 
The three-hour series aims to untangle the twisted web of sex, lies and deceit that defined the infamous case. Produced by Entertainment One (eOne) and Bee Holder Productions with Tara Long, Geno McDermott, Ben Megargel and Lorri Leighton serving as executive producers for Entertainment One (eOne), and Steve Lee Jones serving as executive producer for Bee Holder Productions. Shows premiering on Oxygen this summer include "Killer Couples," the all-new series "Charmed to Death" and the all-new two-hour specials "Snapped Notorious: The Girl in the Box" and "Murdered and Missing in Montana." SYFY As a fan-focused network redefining genre programming, SYFY continues to rank among the top 10 highest-reaching cable entertainment networks across all key demos and is home to the Alan Tudyk-led dramedy "Resident Alien," which was the most-watched new cable drama in the last year. The serieswill return for its sophomore run in 2022, joining the previously announced cross-network drama "Chucky,"which will air on USA and SYFY and features a vintage Chucky doll wreaking havoc on an idyllic American town. These titles join the all-new scripted pickups "Day of the Dead" and "Astrid & Lilly Save the World." The sci-fi brand will also put a horror spin on the popular "Bring It On" film franchise with an all-new original movie, "Bring It On: Halloween (WT)," in partnership with Universal 1440 Entertainment, set for 2022, in addition to the slasher film reboot "Slumber Party Massacre," which will air as an original movie this Fall. New programming coming to SYFY includes: "Astrid & Lilly Save the World" - Premieres in 2022When outcast high school BFFs Astrid & Lilly accidentally crack open a portal to a terrifying monster dimension, they have to figure out how to save the world, if they can survive high school. Produced by Blue Ice Pictures with Lance Samuels, Daniel Iron and Armand Leo serving as executive producers. Noelle Stehman and Betsy Van Stone wrote the pilot and will also serve as executive producers. "Bring It On: Halloween (WT)" - All-New Original Movie Premieres in 2022 Held down by restrictive rules, an embattled cheerleading squad seeks the freedom of a creepy, closed school gym to practice for regionals, but when members of the squad start to disappear, the cheerleaders must unmask their assailant to save themselves. Produced by Universal 1440 Entertainment, "Bring It On: Halloween (WT)" will be released on non-theatrical platforms in 2022. Universal 1440 Entertainment is a production arm of Universal Filmed Entertainment Group (UFEG). "Chucky" - Premieres on SYFY and USA this Fall After a vintage Chucky doll turns up at a suburban yard sale, an idyllic American town is thrown into chaos as a series of horrifying murders begin to expose the town's hypocrisies and secrets. Meanwhile, the arrival of enemies - and allies - from Chucky's past threatens to expose the truth behind the killings, as well as the demon doll's untold origins, as a seemingly ordinary child who somehow became this notorious monster. Produced by UCP, the series will be executive produced by creator Don Mancini, David Kirschner, and Nick Antosca via his banner Eat the Cat, through his overall deal with the studio. Alex Hedlund and Harley Peyton will also serve as executive producers. Mancini, who penned the film franchise, will also write the adaptation, serve as showrunner and direct the first episode. 
"Day of the Dead" - Premieres this Fall "Day of the Dead" is the intense story of six strangers trying to survive the first 24 hours of an undead invasion. This ode to George A. Romero's famous flesh-eaters reminds us that sometimes all it takes to bring people together is a horde of hungry zombies trying to rip them apart. Produced by Cartel and HiTide Studios with Stan Spry, Jeff Holland and Drew Brown serving as executive producers for Cartel, and James Dudelson, Robert Dudelson, Jordan Kizwani and Matt Drake serving as executive producers for HiTide Studios. Jed Elinoff and Scott Thomas will serve as co-showrunners and executive producers and Steven Kostanski will also serve as executive producer. "Slumber Party Massacre" - All-New Original Movie Premieres this Fall A new contemporary twist-filled reimagining of the 1982 slasher cult classic just in time for Halloween. A slumber party turns into a bloodbath, as a psychotic serial killer wielding a power drill disrupts the fun. "Slumber Party Massacre" is produced in partnership with Shout! Studios with Brent Haynes, Bob Emmer, Garson Foos and Jordan Fields serving as executive producers. Danishka Esterhazy serves as director and worked off the screenplay by Suzanne Keilly. The film is also produced by Blue Ice Pictures. Premiering on SYFY this summer is "SurrealEstate." USA NETWORK Building off its more than 25-year streak as a top five cable network, USA is furthering its commitment to scripted programming this fall with the pickup of the highly coveted revival movie "Nash Bridges," which hails from Village Roadshow and features original cast members Don Johnson and Cheech Marin.This title joins the critically acclaimed drama "The Sinner" which recently announced Frances Fisher ("Watchmen"), Michael Mosley ("Ozark") and Alice Kremelberg ("The Trial of the Chicago 7") will join Bill Pullman for its fourth installment, and cross-network drama "Chucky" (with SYFY)from executive producer Don Mancini. The cabler is bolstering its unscripted lineup with the return of tentpole series "Chrisley Knows Best," "Temptation Island" and more, plus the pickup of two all-new unscripted series, "America's Big Deal" and "Mud, Sweat & Beards" (WT)." The network will also continue its over three-decade partnership with the WWE by airing "WWE Monday Night Raw," the #1 cable entertainment program in Live viewership,and the recently renewed "WWE NXT," every Monday and Tuesday, respectively, for all 52 weeks of this year. Since its shift to Tuesdays, "NXT" is posting more than 30% growth and contributing to the network's investment in live event programming. USA will also gain the rights to highly sought-after sports coverage beginning in 2022. New programming coming to USA includes: "America's Big Deal" - Premieres this Fall This groundbreaking competition series invites inventors from across the nation to sell their products LIVE on-air and compete for the chance to strike a life-changing deal with a retail giant. The mastermind behind the series is America's most celebrated entrepreneur, Joy Mangano, who is making it her personal mission to lift up America's greatest inventors and small businesses to give them the same opportunity that launched her business dynasty... the chance to make the biggest deal of their life. Tapping into One Platform Commerce, contestants will sell their wares in real time through NBCUniversal Checkout, with live sales numbers determining who stays and who goes. 
Produced by DIGA Studios with Tony DiSanto, Nick Rigg, Tommy Coriale and Alison Holloway serving as executive producers. Joy Mangano also serves as an executive producer. "Chucky" - Premieres on USA and SYFY this Fall After a vintage Chucky doll turns up at a suburban yard sale, an idyllic American town is thrown into chaos as a series of horrifying murders begin to expose the town's hypocrisies and secrets. Meanwhile, the arrival of enemies - and allies - from Chucky's past threatens to expose the truth behind the killings, as well as the demon doll's untold origins, as a seemingly ordinary child who somehow became this notorious monster. Produced by UCP, the series will be executive produced by creator Don Mancini, David Kirschner, and Nick Antosca via his banner Eat the Cat, through his overall deal with the studio. Alex Hedlund and Harley Peyton will also serve as executive producers. Mancini, who penned the film franchise, will also write the adaptation, serve as showrunner and direct the first episode. "Mud, Sweat & Beards" (WT) - Premieres in 2022On each episode of this all-new series, Donny Dust and Ray Livingston tackle the earth's most remote locations, where they'll build a new primitive paradise using their unrivaled wit and wilderness ingenuity. These best friends and current world-class survivalists will work hand-in-hand as they do everything from building elaborate shelters to tracking down natural food sources, all while combating extreme weather, hunger, predators and wicked BO. Produced by Leftfield Pictures with Shawn Witt, Gretchen Palek, Ryan Pender, Zach Green and Andrew Schechter serving as executive producers. "Nash Bridges" - Special Revival Movie Premieres this Fall"Nash Bridges" returns as a 2-hour movie for USA Network with stars Don Johnson (Nash Bridges) and Cheech Marin (Joe Rodriguez) reprising their roles. The two-hour movie brings the duo back together as elite investigators for the San Francisco Police Department Special Investigations Unit. The film will be produced by Village Roadshow Television.

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/FNet Utilities Fat32 Format Tool V1.84.21l HOT.md b/spaces/bioriAsaeru/text-to-voice/FNet Utilities Fat32 Format Tool V1.84.21l HOT.md deleted file mode 100644 index 67e7a760b5c6e1b937307a9d24565a42ea9184e1..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/FNet Utilities Fat32 Format Tool V1.84.21l HOT.md +++ /dev/null @@ -1,6 +0,0 @@ -

        FNet Utilities Fat32 Format Tool V1.84.21l


        DOWNLOADhttps://urloso.com/2uyPrh



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/bioriAsaeru/text-to-voice/Hotel Chevalier Br Rip 1080p Movie Torrents.md b/spaces/bioriAsaeru/text-to-voice/Hotel Chevalier Br Rip 1080p Movie Torrents.md deleted file mode 100644 index 7466b863ceda3b9a83a061e0823b913f012a13de..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Hotel Chevalier Br Rip 1080p Movie Torrents.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Hotel Chevalier Br Rip 1080p Movie Torrents


        Download Ziphttps://urloso.com/2uyOxP



        -
        - 3cee63e6c2
        -
        -
        -

        diff --git a/spaces/bioriAsaeru/text-to-voice/How to Choose the Optimal Siemens Motox Geared Motor for Your Needs.md b/spaces/bioriAsaeru/text-to-voice/How to Choose the Optimal Siemens Motox Geared Motor for Your Needs.md deleted file mode 100644 index aa3e40cea2e0138106456a0cba32c8cbc172afca..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/How to Choose the Optimal Siemens Motox Geared Motor for Your Needs.md +++ /dev/null @@ -1,23 +0,0 @@ -
        -

In addition to the worm geared motors, we also offer project planning of the entire drive (required torque, power, speed, etc.). Thanks to our long-standing contacts with all relevant geared motor manufacturers, we can offer attractive prices. Let us send you a non-binding offer now! Of course we also sell drives such as standard motors and special motors. Contact ATG Engineering! Image source: © Siemens AG 2017, all rights reserved. SIMOGEAR worm geared motors: The SIMOGEAR worm geared motors from Siemens are characterized by a wide transmission ratio range in only 1-2 gear stages. Available rated gear torque: 33 to 1450 Nm. Max. motor power: 0.55 to 7.5 kW
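Since the paragraph above is about project planning of the entire drive (required torque, power, speed), the snippet below is a minimal, hedged sketch of how such a sizing estimate can be set up. It is an illustrative calculation only, not a Siemens tool: the function name, the 0.75 kW / 1400 rpm motor, the gear ratio, the efficiency and the service factor are all assumed example values.

```python
# Illustrative sketch only: a rough geared-motor sizing estimate.
# All numeric values below (motor rating, ratio, efficiency, service factor)
# are assumptions for demonstration, not Siemens data.

def size_geared_motor(motor_kw, motor_rpm, ratio, efficiency=0.85, service_factor=1.5):
    """Estimate output speed/torque and the rated gear torque the unit needs."""
    motor_torque_nm = 9550.0 * motor_kw / motor_rpm   # T [Nm] = 9550 * P [kW] / n [rpm]
    output_rpm = motor_rpm / ratio                     # output shaft speed
    output_torque_nm = motor_torque_nm * ratio * efficiency
    required_rated_torque = output_torque_nm * service_factor
    return output_rpm, output_torque_nm, required_rated_torque

if __name__ == "__main__":
    out_rpm, out_nm, req_nm = size_geared_motor(0.75, 1400, ratio=20)
    print(f"Output speed: {out_rpm:.0f} rpm")
    print(f"Output torque: {out_nm:.0f} Nm")
    print(f"Rated gear torque needed (incl. service factor): {req_nm:.0f} Nm")
```

Running it prints the estimated output speed, output torque and the rated gear torque a unit would need to carry under the assumed service factor.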

        -

        Siemens Motox Geared Motor


        Download Zip ……… https://urloso.com/2uyRE4



        -

        Our extensive range of Siemens geared motors ensures that you will find the optimal product for your needs. With the new SIMOGEAR you will benefit especially from its ability to deliver the highest level of flexibility due to our wide range of gear units, total adaptability and compact design. We also supply servo-geared motors for Motion Control applications.

        -

The new SIMOGEAR Siemens geared motors deliver performance from 0.09 kW up to 55 kW. They can achieve a gear unit torque of up to 8,000 Nm with helical, parallel shaft, helical bevel and worm gear units; additional types and sizes will follow. Because it conforms to the current standard mounting dimensions, SIMOGEAR is compatible with geared motors from many other suppliers.

        -

With MOTOX, Siemens provides the complete range of geared motors. Our portfolio comprises all commonly used types of gear units and is suitable for many drive applications. Together, the MOTOX family offers a broad choice of geared motor options with an answer to your specific requirements.

        -

        The SIMOTICS S-1FG1 servo geared motors are compact geared motors. Compared to standard Siemens geared motors with induction machines, they have smaller dimensions, weigh less and have a higher dynamic response.

        -

        -

        The range of types covers helical, parallel shaft, bevel and helical worm geared motors in the usual frame sizes and speed/torque classes. The SIMOTICS S-1FG1 servo geared motors use the same operating heads as the SIMOGEAR geared motors.

        -

        Flexible servo drive system for demanding applications. Siemens has expanded its drive portfolio for servo applications to include the SIMOTICS S-1FG1 servo geared motor that is optimally harmonized with the SINAMICS S120 inverter system.

        -

        Siemens is integrating its geared motors and motion control divisions more closely in the UK, and has promoted Julie Ferguson to be the new manager of its geared motors business with the aim of doubling the business within three years.

        -

The Siemens Drive Technologies Division is supplementing its Motox geared motors with a new range of worm gear units that are particularly suitable for conveyor systems. The single-stage worm geared motor of the S range is available in three frame sizes (S08, S18 and S28), in a torque range from 18 to 80 Nm and a power range from 0.12 to 0.75 kW (4-pole). Installation can take the shaft-mounted, foot-mounted or flange-mounted form. The input shaft is available either solid or hollow. The use of high-grade materials allows high rated gear torques of up to 80 Nm and high transmission ratios of up to 100 in a single stage. The worm shafts are case-hardened and ground, which makes running quieter. High radial load capacity is achieved with high-grade roller bearings and greater distances between the bearings. The tooth root strength of the Cavex concave-profile teeth allows high load peaks. The gear units are maintenance-free thanks to lifetime lubrication with synthetic oil as well as high-grade bearings and seals.

        -

Helical geared motors are the conventional solution for your drive application. Helical gear units are coaxial units in which the Siemens gear unit output shaft is in line with the Siemens motor shaft. A solid shaft is always used as the output shaft.

        -

Parallel shaft Siemens gear motors are the modern version of coaxial geared motors. As a result of their compact and short design, they take up less space than helical geared motors. Parallel shaft geared motors can have either a solid shaft or, alternatively, a hollow shaft as a so-called plug-on gear unit.

        -

Helical worm geared motors are the favorably priced solution for drives that require an angular geared motor. Their efficiency is significantly better than that of pure worm geared motors because they are implemented as helical-worm gear units.

        -

Helical bevel Siemens gear motors are angular geared motors in which the Siemens gear unit output shaft is rotated through 90° relative to the motor shaft. Helical bevel gear units can have either a solid or a hollow shaft.

        -

The SIMOGEAR geared motor delivers performance from 0.09 kW up to 55 kW. It can achieve a gear unit torque of up to 19,500 Nm with the helical, parallel shaft, bevel, helical worm, and worm geared motors. Because its connection dimensions conform to current market standards, SIMOGEAR is compatible with geared motors from many other suppliers.

        -

        The extensive range of geared motors ensures that you will find the optimal product for your needs. With the new SIMOGEAR, you will benefit especially from its ability to deliver the highest level of flexibility due to the wide range of gear units, total adaptability, and compact design.

        -

        The Simogear family includes geared motors in various designs such as helical, parallel shaft, bevel helical and worm gear units with outputs ranging from 0.09 kW to 200 kW and with output torques up to 50,000 Nm. By offering additional frame sizes, Siemens can now supply a complete portfolio of geared motor designs. With their high gear ratios in the two- and three-stage ranges, finer torque grading, high power density and outstanding efficiency, the new models are especially well-equipped to meet the requirements of modern conveyor systems. By virtue of its connection dimensions in conformity with market standards, the new motor range is completely compatible with existing installations.

        -

The extensive range of servo geared motors includes a series designed especially for motion control applications. Among their distinguishing features are excellent balance quality and a compact design. Siemens' proven 1FK7 and 1FT7 synchronous servomotors are also available as factory-fitted motors with planetary gear units. The 1FK7 motors are additionally available with offset shaft and angle gear units in various specifications. For main motors they optionally offer switchable gear units.

        -

Siemens Mechanical Drives in South Africa can point to many examples of where it has supplied its units and couplings. On the geared motors side it can easily supply 1,000 or more units a month. This does not include the comprehensive service department, which refurbishes both motors and gear units. The Division employs 108 staff, 95 of whom work in the assembly, manufacturing and services departments.

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/README.md b/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/README.md deleted file mode 100644 index 1ca9c94d042ef838143a45490fe6b4556c19f3c9..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Read the docs: - -The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). -Documents in this directory are not meant to be read on github. diff --git a/spaces/camenduru-com/terminal/README.md b/spaces/camenduru-com/terminal/README.md deleted file mode 100644 index 1f846beaf2540732e3dd1123ad615e04dd853590..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/terminal/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Terminal -emoji: 💻 -colorFrom: pink -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/utils/collect_env.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/utils/collect_env.py deleted file mode 100644 index 807b6c7e6245d0a21221b1b8d29b841ec8251761..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/utils/collect_env.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import importlib -import numpy as np -import os -import re -import subprocess -import sys -from collections import defaultdict -import PIL -import torch -import torchvision -from tabulate import tabulate - -__all__ = ["collect_env_info"] - - -def collect_torch_env(): - try: - import torch.__config__ - - return torch.__config__.show() - except ImportError: - # compatible with older versions of pytorch - from torch.utils.collect_env import get_pretty_env_info - - return get_pretty_env_info() - - -def get_env_module(): - var_name = "DETECTRON2_ENV_MODULE" - return var_name, os.environ.get(var_name, "") - - -def detect_compute_compatibility(CUDA_HOME, so_file): - try: - cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") - if os.path.isfile(cuobjdump): - output = subprocess.check_output( - "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True - ) - output = output.decode("utf-8").strip().split("\n") - arch = [] - for line in output: - line = re.findall(r"\.sm_([0-9]*)\.", line)[0] - arch.append(".".join(line)) - arch = sorted(set(arch)) - return ", ".join(arch) - else: - return so_file + "; cannot find cuobjdump" - except Exception: - # unhandled failure - return so_file - - -def collect_env_info(): - has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM - torch_version = torch.__version__ - - # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional - from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME - - has_rocm = False - if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): - has_rocm = True - has_cuda = has_gpu and (not has_rocm) - - data = [] - data.append(("sys.platform", sys.platform)) # check-template.yml depends on it - data.append(("Python", sys.version.replace("\n", ""))) - data.append(("numpy", np.__version__)) - - try: - import detectron2 # noqa - - data.append( - ("detectron2", detectron2.__version__ + " @" + 
os.path.dirname(detectron2.__file__)) - ) - except ImportError: - data.append(("detectron2", "failed to import")) - except AttributeError: - data.append(("detectron2", "imported a wrong installation")) - - try: - import detectron2._C as _C - except ImportError as e: - data.append(("detectron2._C", f"not built correctly: {e}")) - - # print system compilers when extension fails to build - if sys.platform != "win32": # don't know what to do for windows - try: - # this is how torch/utils/cpp_extensions.py choose compiler - cxx = os.environ.get("CXX", "c++") - cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) - cxx = cxx.decode("utf-8").strip().split("\n")[0] - except subprocess.SubprocessError: - cxx = "Not found" - data.append(("Compiler ($CXX)", cxx)) - - if has_cuda and CUDA_HOME is not None: - try: - nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") - nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) - nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] - except subprocess.SubprocessError: - nvcc = "Not found" - data.append(("CUDA compiler", nvcc)) - if has_cuda and sys.platform != "win32": - try: - so_file = importlib.util.find_spec("detectron2._C").origin - except (ImportError, AttributeError): - pass - else: - data.append( - ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) - ) - else: - # print compilers that are used to build extension - data.append(("Compiler", _C.get_compiler_version())) - data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip - if has_cuda and getattr(_C, "has_cuda", lambda: True)(): - data.append( - ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) - ) - - data.append(get_env_module()) - data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) - data.append(("PyTorch debug build", torch.version.debug)) - - if not has_gpu: - has_gpu_text = "No: torch.cuda.is_available() == False" - else: - has_gpu_text = "Yes" - data.append(("GPU available", has_gpu_text)) - if has_gpu: - devices = defaultdict(list) - for k in range(torch.cuda.device_count()): - cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) - name = torch.cuda.get_device_name(k) + f" (arch={cap})" - devices[name].append(str(k)) - for name, devids in devices.items(): - data.append(("GPU " + ",".join(devids), name)) - - if has_rocm: - msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" - data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) - else: - try: - from torch.utils.collect_env import get_nvidia_driver_version, run as _run - - data.append(("Driver version", get_nvidia_driver_version(_run))) - except Exception: - pass - msg = " - invalid!" 
if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" - data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) - - cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) - if cuda_arch_list: - data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) - data.append(("Pillow", PIL.__version__)) - - try: - data.append( - ( - "torchvision", - str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), - ) - ) - if has_cuda: - try: - torchvision_C = importlib.util.find_spec("torchvision._C").origin - msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) - data.append(("torchvision arch flags", msg)) - except (ImportError, AttributeError): - data.append(("torchvision._C", "Not found")) - except AttributeError: - data.append(("torchvision", "unknown")) - - try: - import fvcore - - data.append(("fvcore", fvcore.__version__)) - except (ImportError, AttributeError): - pass - - try: - import iopath - - data.append(("iopath", iopath.__version__)) - except (ImportError, AttributeError): - pass - - try: - import cv2 - - data.append(("cv2", cv2.__version__)) - except (ImportError, AttributeError): - data.append(("cv2", "Not found")) - env_str = tabulate(data) + "\n" - env_str += collect_torch_env() - return env_str - - -def test_nccl_ops(): - num_gpu = torch.cuda.device_count() - if os.access("/tmp", os.W_OK): - import torch.multiprocessing as mp - - dist_url = "file:///tmp/nccl_tmp_file" - print("Testing NCCL connectivity ... this should not hang.") - mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False) - print("NCCL succeeded.") - - -def _test_nccl_worker(rank, num_gpu, dist_url): - import torch.distributed as dist - - dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu) - dist.barrier(device_ids=[rank]) - - -if __name__ == "__main__": - try: - from detectron2.utils.collect_env import collect_env_info as f - - print(f()) - except ImportError: - print(collect_env_info()) - - if torch.cuda.is_available(): - num_gpu = torch.cuda.device_count() - for k in range(num_gpu): - device = f"cuda:{k}" - try: - x = torch.tensor([1, 2.0], dtype=torch.float32) - x = x.to(device) - except Exception as e: - print( - f"Unable to copy tensor to device={device}: {e}. " - "Your CUDA environment is broken." - ) - if num_gpu > 1: - test_nccl_ops() diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/train_net.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/train_net.py deleted file mode 100644 index 8a6f29715da49f524604acc7bd38bda1bab99fd5..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/train_net.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -""" -A main training script. - -This scripts reads a given config file and runs the training or evaluation. -It is an entry point that is made to train standard models in detectron2. - -In order to let one script support training of many models, -this script contains logic that are specific to these built-in models and therefore -may not be suitable for your own project. -For example, your research project perhaps only needs a single "evaluator". - -Therefore, we recommend you to use detectron2 as an library and take -this file as an example of how to use the library. -You may want to write your own script with your datasets and other customizations. 
-""" - -import logging -import os -from collections import OrderedDict - -import detectron2.utils.comm as comm -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import get_cfg -from detectron2.data import MetadataCatalog -from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch -from detectron2.evaluation import ( - CityscapesInstanceEvaluator, - CityscapesSemSegEvaluator, - COCOEvaluator, - COCOPanopticEvaluator, - DatasetEvaluators, - LVISEvaluator, - PascalVOCDetectionEvaluator, - SemSegEvaluator, - verify_results, -) -from detectron2.modeling import GeneralizedRCNNWithTTA - - -def build_evaluator(cfg, dataset_name, output_folder=None): - """ - Create evaluator(s) for a given dataset. - This uses the special metadata "evaluator_type" associated with each builtin dataset. - For your own dataset, you can simply create an evaluator manually in your - script and do not have to worry about the hacky if-else logic here. - """ - if output_folder is None: - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") - evaluator_list = [] - evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type - if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: - evaluator_list.append( - SemSegEvaluator( - dataset_name, - distributed=True, - output_dir=output_folder, - ) - ) - if evaluator_type in ["coco", "coco_panoptic_seg"]: - evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) - if evaluator_type == "coco_panoptic_seg": - evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) - if evaluator_type == "cityscapes_instance": - return CityscapesInstanceEvaluator(dataset_name) - if evaluator_type == "cityscapes_sem_seg": - return CityscapesSemSegEvaluator(dataset_name) - elif evaluator_type == "pascal_voc": - return PascalVOCDetectionEvaluator(dataset_name) - elif evaluator_type == "lvis": - return LVISEvaluator(dataset_name, output_dir=output_folder) - if len(evaluator_list) == 0: - raise NotImplementedError( - "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type) - ) - elif len(evaluator_list) == 1: - return evaluator_list[0] - return DatasetEvaluators(evaluator_list) - - -class Trainer(DefaultTrainer): - """ - We use the "DefaultTrainer" which contains pre-defined default logic for - standard training workflow. They may not work for you, especially if you - are working on a new research project. In that case you can write your - own training loop. You can use "tools/plain_train_net.py" as an example. - """ - - @classmethod - def build_evaluator(cls, cfg, dataset_name, output_folder=None): - return build_evaluator(cfg, dataset_name, output_folder) - - @classmethod - def test_with_TTA(cls, cfg, model): - logger = logging.getLogger("detectron2.trainer") - # In the end of training, run an evaluation with TTA - # Only support some R-CNN models. - logger.info("Running inference with test-time augmentation ...") - model = GeneralizedRCNNWithTTA(cfg, model) - evaluators = [ - cls.build_evaluator( - cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") - ) - for name in cfg.DATASETS.TEST - ] - res = cls.test(cfg, model, evaluators) - res = OrderedDict({k + "_TTA": v for k, v in res.items()}) - return res - - -def setup(args): - """ - Create configs and perform basic setups. 
- """ - cfg = get_cfg() - cfg.merge_from_file(args.config_file) - cfg.merge_from_list(args.opts) - cfg.freeze() - default_setup(cfg, args) - return cfg - - -def main(args): - cfg = setup(args) - - if args.eval_only: - model = Trainer.build_model(cfg) - DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( - cfg.MODEL.WEIGHTS, resume=args.resume - ) - res = Trainer.test(cfg, model) - if cfg.TEST.AUG.ENABLED: - res.update(Trainer.test_with_TTA(cfg, model)) - if comm.is_main_process(): - verify_results(cfg, res) - return res - - """ - If you'd like to do anything fancier than the standard training logic, - consider writing your own training loop (see plain_train_net.py) or - subclassing the trainer. - """ - trainer = Trainer(cfg) - trainer.resume_or_load(resume=args.resume) - if cfg.TEST.AUG.ENABLED: - trainer.register_hooks( - [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] - ) - return trainer.train() - - -if __name__ == "__main__": - args = default_argument_parser().parse_args() - print("Command Line Args:", args) - launch( - main, - args.num_gpus, - num_machines=args.num_machines, - machine_rank=args.machine_rank, - dist_url=args.dist_url, - args=(args,), - ) diff --git a/spaces/changkeyculing/chatgpt-detector-single/app.py b/spaces/changkeyculing/chatgpt-detector-single/app.py deleted file mode 100644 index e46b34655f2d46f78c6c602c47fd748a2c0e5a1a..0000000000000000000000000000000000000000 --- a/spaces/changkeyculing/chatgpt-detector-single/app.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import gradio as gr -from transformers import pipeline - -auth_token = "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" -pipeline_en = pipeline(task="text-classification", model="changkeyculing/en-gpt-detect",use_auth_token=auth_token) -pipeline_zh = pipeline(task="text-classification", model="changkeyculing/zh-gpt-detect",use_auth_token=auth_token) - - - -def predict_en(text): - res = pipeline_en(text)[0] - return res['label'],res['score'] - -def predict_zh(text): - res = pipeline_zh(text)[0] - return res['label'],res['score'] - - - - -with gr.Blocks() as demo: - gr.Markdown(""" - ## ChatGPT Detector 🔬 (Sinlge-text version) - Visit our project on Github: [chatgpt-comparison-detection project](https://github.com/Hello-SimpleAI/chatgpt-comparison-detection)
        - 欢迎在 Github 上关注我们的 [ChatGPT 对比与检测项目](https://github.com/Hello-SimpleAI/chatgpt-comparison-detection) - - We provide three kinds of detectors, all in Bilingual / 我们提供了三个版本的检测器,且都支持中英文: - - [**QA version / 问答版**](https://huggingface.co/spaces/Hello-SimpleAI/chatgpt-detector-qa)
        - detect whether an **answer** is generated by ChatGPT for certain **question**, using PLM-based classifiers / 判断某个**问题的回答**是否由ChatGPT生成,使用基于PTM的分类器来开发; - - [Sinlge-text version / 独立文本版 (👈 Current / 当前使用)](https://huggingface.co/spaces/Hello-SimpleAI/chatgpt-detector-single)
        - detect whether a piece of text is ChatGPT generated, using PLM-based classifiers / 判断**单条文本**是否由ChatGPT生成,使用基于PTM的分类器来开发; - - [Linguistic version / 语言学版](https://huggingface.co/spaces/Hello-SimpleAI/chatgpt-detector-ling)
        - detect whether a piece of text is ChatGPT generated, using linguistic features / 判断**单条文本**是否由ChatGPT生成,使用基于语言学特征的模型来开发; - - - """) - with gr.Tab("English"): - gr.Markdown(""" - Note: Providing more text to the `Text` box can make the prediction more accurate! - """) - t1 = gr.Textbox(lines=5, label='Text',value="There are a few things that can help protect your credit card information from being misused when you give it to a restaurant or any other business:\n\nEncryption: Many businesses use encryption to protect your credit card information when it is being transmitted or stored. This means that the information is transformed into a code that is difficult for anyone to read without the right key.") - button1 = gr.Button("🤖 Predict!") - label1 = gr.Textbox(lines=1, label='Predicted Label 🎃') - score1 = gr.Textbox(lines=1, label='Prob') - with gr.Tab("中文版"): - gr.Markdown(""" - 注意: 在`文本`栏中输入更多的文本,可以让预测更准确哦! - """) - t2 = gr.Textbox(lines=5, label='文本',value="对于OpenAI大力出奇迹的工作,自然每个人都有自己的看点。我自己最欣赏的地方是ChatGPT如何解决 “AI校正(Alignment)“这个问题。这个问题也是我们课题组这两年在探索的学术问题之一。") - button2 = gr.Button("🤖 预测!") - label2 = gr.Textbox(lines=1, label='预测结果 🎃') - score2 = gr.Textbox(lines=1, label='模型概率') - - button1.click(predict_en, inputs=[t1], outputs=[label1,score1], api_name='predict_en') - button2.click(predict_zh, inputs=[t2], outputs=[label2,score2], api_name='predict_zh') - - - -demo.launch() \ No newline at end of file diff --git a/spaces/chansung/LLaMA-7B/gen.py b/spaces/chansung/LLaMA-7B/gen.py deleted file mode 100644 index 534f57f7ce5ed64b5155cb29b164fdbc3d1b7beb..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLaMA-7B/gen.py +++ /dev/null @@ -1,97 +0,0 @@ -from typing import Tuple - -import os -import time -import json -from pathlib import Path - -import torch -from fairscale.nn.model_parallel.initialize import initialize_model_parallel -from llama.generation import LLaMA -from llama.model import ModelArgs, Transformer -from llama.tokenizer import Tokenizer - -from google.cloud import storage - -bucket_name = os.environ.get("GCS_BUCKET") - -llama_weight_path = "weights/llama" -tokenizer_weight_path = "weights/tokenizer" - -def setup_model_parallel() -> Tuple[int, int]: - local_rank = int(os.environ.get("LOCAL_RANK", -1)) - world_size = int(os.environ.get("WORLD_SIZE", -1)) - - torch.distributed.init_process_group("nccl") - initialize_model_parallel(world_size) - torch.cuda.set_device(local_rank) - - # seed must be the same in all processes - torch.manual_seed(1) - return local_rank, world_size - -def download_pretrained_models( - ckpt_path: str, - tokenizer_path: str -): - os.makedirs(llama_weight_path) - os.makedirs(tokenizer_weight_path) - - storage_client = storage.Client.create_anonymous_client() - bucket = storage_client.bucket(bucket_name) - - blobs = bucket.list_blobs(prefix=f"{ckpt_path}/") - for blob in blobs: - filename = blob.name.split("/")[1] - blob.download_to_filename(f"{llama_weight_path}/{filename}") - - blobs = bucket.list_blobs(prefix=f"{tokenizer_path}/") - for blob in blobs: - filename = blob.name.split("/")[1] - blob.download_to_filename(f"{tokenizer_weight_path}/{filename}") - -def get_pretrained_models( - ckpt_path: str, - tokenizer_path: str, - local_rank: int, - world_size: int) -> LLaMA: - - download_pretrained_models(ckpt_path, tokenizer_path) - - start_time = time.time() - checkpoints = sorted(Path(llama_weight_path).glob("*.pth")) - - llama_ckpt_path = checkpoints[local_rank] - print("Loading") - checkpoint = torch.load(llama_ckpt_path, 
map_location="cpu") - with open(Path(llama_weight_path) / "params.json", "r") as f: - params = json.loads(f.read()) - - model_args: ModelArgs = ModelArgs(max_seq_len=512, max_batch_size=1, **params) - tokenizer = Tokenizer(model_path=f"{tokenizer_weight_path}/tokenizer.model") - model_args.vocab_size = tokenizer.n_words - torch.set_default_tensor_type(torch.cuda.HalfTensor) - model = Transformer(model_args).cuda().half() - torch.set_default_tensor_type(torch.FloatTensor) - model.load_state_dict(checkpoint, strict=False) - - generator = LLaMA(model, tokenizer) - print(f"Loaded in {time.time() - start_time:.2f} seconds") - return generator - -def get_output( - generator: LLaMA, - prompt: str, - max_gen_len: int = 256, - temperature: float = 0.8, - top_p: float = 0.95): - - prompts = [prompt] - results = generator.generate( - prompts, - max_gen_len=max_gen_len, - temperature=temperature, - top_p=top_p - ) - - return results \ No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/exps/default/yolox_tiny.py b/spaces/chendl/compositional_test/multimodal/YOLOX/exps/default/yolox_tiny.py deleted file mode 100644 index 5220de2f2e6760d5c9a966d5dd397aad721fc60a..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/exps/default/yolox_tiny.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. - -import os - -from yolox.exp import Exp as MyExp - - -class Exp(MyExp): - def __init__(self): - super(Exp, self).__init__() - self.depth = 0.33 - self.width = 0.375 - self.input_size = (416, 416) - self.mosaic_scale = (0.5, 1.5) - self.random_size = (10, 20) - self.test_size = (416, 416) - self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] - self.enable_mixup = False diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/distributed_ray_retriever.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/distributed_ray_retriever.py deleted file mode 100644 index dd5baaf726116f8569228af74c221c67b477d1cb..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/distributed_ray_retriever.py +++ /dev/null @@ -1,152 +0,0 @@ -import logging -import random - -import ray - -from transformers import RagConfig, RagRetriever, RagTokenizer -from transformers.models.rag.retrieval_rag import CustomHFIndex - - -logger = logging.getLogger(__name__) - - -class RayRetriever: - def __init__(self): - self.initialized = False - - def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index): - if not self.initialized: - self.retriever = RagRetriever( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - self.initialized = True - - def init_retrieval(self): - self.retriever.index.init_index() - - def retrieve(self, question_hidden_states, n_docs): - doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs) - return doc_ids, retrieved_doc_embeds - - -class RagRayDistributedRetriever(RagRetriever): - """ - A distributed retriever built on top of the ``Ray`` API, a library - for building distributed applications (https://docs.ray.io/en/master/). - package. 
During training, all training workers initialize their own - instance of a `RagRayDistributedRetriever`, and each instance of - this distributed retriever shares a common set of Retrieval Ray - Actors (https://docs.ray.io/en/master/walkthrough.html#remote - -classes-actors) that load the index on separate processes. Ray - handles the communication between the `RagRayDistributedRetriever` - instances and the remote Ray actors. If training is done in a - non-distributed setup, the index will simply be loaded in the same - process as the training worker and Ray will not be used. - - Args: - config (:class:`~transformers.RagConfig`): - The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. - question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer that was used to tokenize the question. - It is used to decode the question and then use the generator_tokenizer. - generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer used for the generator part of the RagModel. - retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors. - These actor classes run on remote processes and are responsible for performing the index lookup. - index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): - If specified, use this index instead of the one built using the configuration - """ - - def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None): - if index is not None and index.is_initialized() and len(retrieval_workers) > 0: - raise ValueError( - "When using Ray for distributed fine-tuning, " - "you'll need to provide the paths instead, " - "as the dataset and the index are loaded " - "separately. More info in examples/rag/use_own_knowledge_dataset.py " - ) - super().__init__( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - self.retrieval_workers = retrieval_workers - if len(self.retrieval_workers) > 0: - ray.get( - [ - worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index) - for worker in self.retrieval_workers - ] - ) - - def init_retrieval(self): - """ - Retriever initialization function, needs to be called from the - training process. This function triggers retrieval initialization - for all retrieval actors if using distributed setting, or loads - index into current process if training is not distributed. - """ - logger.info("initializing retrieval") - - if len(self.retrieval_workers) > 0: - ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) - else: - # Non-distributed training. Load index into this same process. - self.index.init_index() - - def retrieve(self, question_hidden_states, n_docs): - """ - Retrieves documents for specified ``question_hidden_states``. If - running training with multiple workers, a random retrieval actor is - selected to perform the index lookup and return the result. - - Args: - question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): - A batch of query vectors to retrieve with. - n_docs (:obj:`int`): - The number of docs retrieved per query. - - Output: - retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)` - The retrieval embeddings of the retrieved docs per query. 
- doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`) - The ids of the documents in the index - doc_dicts (:obj:`List[dict]`): - The retrieved_doc_embeds examples per query. - """ - if len(self.retrieval_workers) > 0: - # Select a random retrieval actor. - random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)] - doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs)) - else: - doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) - return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) - - @classmethod - def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): - return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs) - - @classmethod - def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs): - config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) - rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) - question_encoder_tokenizer = rag_tokenizer.question_encoder - generator_tokenizer = rag_tokenizer.generator - if indexed_dataset is not None: - config.index_name = "custom" - index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) - else: - index = cls._build_index(config) - return cls( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - retrieval_workers=actor_handles, - index=index, - ) diff --git a/spaces/chompionsawelo/whisper_transcribe/ui/lang_setting.py b/spaces/chompionsawelo/whisper_transcribe/ui/lang_setting.py deleted file mode 100644 index 25e3005a26c2d93bf56c7755f97bdf923836ec58..0000000000000000000000000000000000000000 --- a/spaces/chompionsawelo/whisper_transcribe/ui/lang_setting.py +++ /dev/null @@ -1,85 +0,0 @@ -from ui.ui_component import * - - -def change_lang(input): - # Change language function - global current_ui_lang - current_ui_lang = get_ui_dict(input) - print(f"Change language to {available_ui_lang[input]}") - return [ - # Top - top_markdown.update( - current_ui_lang["top_markdown"]), - input_url.update( - label=current_ui_lang["input_url_label"], info=current_ui_lang["input_url_info"]), - url_download_button.update( - current_ui_lang["download_button_value"]), - input_video.update( - label=current_ui_lang["input_video_label"]), - start_time.update( - label=current_ui_lang["start_time_label"]), - end_time.update( - label=current_ui_lang["end_time_label"]), - lang_radio.update( - choices=current_ui_lang["lang_radio_choices"], value=None, label=current_ui_lang["lang_radio_label"], info=current_ui_lang["lang_radio_info"],), - model_dropdown.update( - choices=current_ui_lang["model_dropdown_choices"], value=None, label=current_ui_lang["model_dropdown_label"], info=current_ui_lang["model_dropdown_info"]), - start_button.update( - current_ui_lang["start_button_value"]), - - # Middle - middle_markdown.update( - current_ui_lang["middle_markdown"]), - adjust_speaker.update( - label=current_ui_lang["adjust_speaker_value"]), - prev_button.update( - current_ui_lang["prev_button_value"]), - next_button.update( - current_ui_lang["next_button_value"]), - adjust_button.update( - current_ui_lang["adjust_button_value"]), - - # Bottom - bottom_markdown.update( - current_ui_lang["bottom_markdown"]), - output_video.update( - label=current_ui_lang["output_video_label"]), - 
download_video_subtitle_button.update( - current_ui_lang["download_video_button_value"]), - output_transcribe.update( - label=current_ui_lang["output_transcribe_label"]), - - # Summary - summary_markdown.update( - current_ui_lang["summary_markdown"]), - summary_button.update( - current_ui_lang["summary_button_value"]), - output_summary.update( - label=current_ui_lang["output_summary_label"]), - ] - - -# comp_to_update and change_lang return must always be equal -comp_to_update = [ - top_markdown, - input_url, - url_download_button, - input_video, - start_time, - end_time, - lang_radio, - model_dropdown, - start_button, - middle_markdown, - adjust_speaker, - prev_button, - next_button, - adjust_button, - bottom_markdown, - output_video, - download_video_subtitle_button, - output_transcribe, - summary_markdown, - summary_button, - output_summary, -] diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/_magics.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/_magics.py deleted file mode 100644 index 7fe6131182952ff30bf63543de528657f7ba77a2..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/_magics.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Magic functions for rendering vega-lite specifications -""" -__all__ = ["vegalite"] - -import json -import warnings - -import IPython -from IPython.core import magic_arguments -import pandas as pd -from toolz import curried - -from altair.vegalite import v5 as vegalite_v5 - -try: - import yaml - - YAML_AVAILABLE = True -except ImportError: - YAML_AVAILABLE = False - - -RENDERERS = { - "vega-lite": { - "5": vegalite_v5.VegaLite, - }, -} - - -TRANSFORMERS = { - "vega-lite": { - "5": vegalite_v5.data_transformers, - }, -} - - -def _prepare_data(data, data_transformers): - """Convert input data to data for use within schema""" - if data is None or isinstance(data, dict): - return data - elif isinstance(data, pd.DataFrame): - return curried.pipe(data, data_transformers.get()) - elif isinstance(data, str): - return {"url": data} - else: - warnings.warn("data of type {} not recognized".format(type(data)), stacklevel=1) - return data - - -def _get_variable(name): - """Get a variable from the notebook namespace.""" - ip = IPython.get_ipython() - if ip is None: - raise ValueError( - "Magic command must be run within an IPython " - "environemnt, in which get_ipython() is defined." - ) - if name not in ip.user_ns: - raise NameError( - "argument '{}' does not match the " - "name of any defined variable".format(name) - ) - return ip.user_ns[name] - - -@magic_arguments.magic_arguments() -@magic_arguments.argument( - "data", - nargs="?", - help="local variablename of a pandas DataFrame to be used as the dataset", -) -@magic_arguments.argument("-v", "--version", dest="version", default="v5") -@magic_arguments.argument("-j", "--json", dest="json", action="store_true") -def vegalite(line, cell): - """Cell magic for displaying vega-lite visualizations in CoLab. - - %%vegalite [dataframe] [--json] [--version='v5'] - - Visualize the contents of the cell using Vega-Lite, optionally - specifying a pandas DataFrame object to be used as the dataset. - - if --json is passed, then input is parsed as json rather than yaml. 
- """ - args = magic_arguments.parse_argstring(vegalite, line) - existing_versions = {"v5": "5"} - version = existing_versions[args.version] - assert version in RENDERERS["vega-lite"] - VegaLite = RENDERERS["vega-lite"][version] - data_transformers = TRANSFORMERS["vega-lite"][version] - - if args.json: - spec = json.loads(cell) - elif not YAML_AVAILABLE: - try: - spec = json.loads(cell) - except json.JSONDecodeError as err: - raise ValueError( - "%%vegalite: spec is not valid JSON. " - "Install pyyaml to parse spec as yaml" - ) from err - else: - spec = yaml.load(cell, Loader=yaml.SafeLoader) - - if args.data is not None: - data = _get_variable(args.data) - spec["data"] = _prepare_data(data, data_transformers) - - return VegaLite(spec) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/image/png.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/image/png.py deleted file mode 100644 index 4e899fa5c448bfbe38d27a20b50315287901de97..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/image/png.py +++ /dev/null @@ -1,303 +0,0 @@ -# encoding: utf-8 - -from __future__ import absolute_import, division, print_function - -from .constants import MIME_TYPE, PNG_CHUNK_TYPE -from .exceptions import InvalidImageStreamError -from .helpers import BIG_ENDIAN, StreamReader -from .image import BaseImageHeader - - -class Png(BaseImageHeader): - """ - Image header parser for PNG images - """ - @property - def content_type(self): - """ - MIME content type for this image, unconditionally `image/png` for - PNG images. - """ - return MIME_TYPE.PNG - - @property - def default_ext(self): - """ - Default filename extension, always 'png' for PNG images. - """ - return 'png' - - @classmethod - def from_stream(cls, stream): - """ - Return a |Png| instance having header properties parsed from image in - *stream*. - """ - parser = _PngParser.parse(stream) - - px_width = parser.px_width - px_height = parser.px_height - horz_dpi = parser.horz_dpi - vert_dpi = parser.vert_dpi - - return cls(px_width, px_height, horz_dpi, vert_dpi) - - -class _PngParser(object): - """ - Parses a PNG image stream to extract the image properties found in its - chunks. - """ - def __init__(self, chunks): - super(_PngParser, self).__init__() - self._chunks = chunks - - @classmethod - def parse(cls, stream): - """ - Return a |_PngParser| instance containing the header properties - parsed from the PNG image in *stream*. - """ - chunks = _Chunks.from_stream(stream) - return cls(chunks) - - @property - def px_width(self): - """ - The number of pixels in each row of the image. - """ - IHDR = self._chunks.IHDR - return IHDR.px_width - - @property - def px_height(self): - """ - The number of stacked rows of pixels in the image. - """ - IHDR = self._chunks.IHDR - return IHDR.px_height - - @property - def horz_dpi(self): - """ - Integer dots per inch for the width of this image. Defaults to 72 - when not present in the file, as is often the case. - """ - pHYs = self._chunks.pHYs - if pHYs is None: - return 72 - return self._dpi(pHYs.units_specifier, pHYs.horz_px_per_unit) - - @property - def vert_dpi(self): - """ - Integer dots per inch for the height of this image. Defaults to 72 - when not present in the file, as is often the case. 
- """ - pHYs = self._chunks.pHYs - if pHYs is None: - return 72 - return self._dpi(pHYs.units_specifier, pHYs.vert_px_per_unit) - - @staticmethod - def _dpi(units_specifier, px_per_unit): - """ - Return dots per inch value calculated from *units_specifier* and - *px_per_unit*. - """ - if units_specifier == 1 and px_per_unit: - return int(round(px_per_unit * 0.0254)) - return 72 - - -class _Chunks(object): - """ - Collection of the chunks parsed from a PNG image stream - """ - def __init__(self, chunk_iterable): - super(_Chunks, self).__init__() - self._chunks = list(chunk_iterable) - - @classmethod - def from_stream(cls, stream): - """ - Return a |_Chunks| instance containing the PNG chunks in *stream*. - """ - chunk_parser = _ChunkParser.from_stream(stream) - chunks = [chunk for chunk in chunk_parser.iter_chunks()] - return cls(chunks) - - @property - def IHDR(self): - """ - IHDR chunk in PNG image - """ - match = lambda chunk: chunk.type_name == PNG_CHUNK_TYPE.IHDR # noqa - IHDR = self._find_first(match) - if IHDR is None: - raise InvalidImageStreamError('no IHDR chunk in PNG image') - return IHDR - - @property - def pHYs(self): - """ - pHYs chunk in PNG image, or |None| if not present - """ - match = lambda chunk: chunk.type_name == PNG_CHUNK_TYPE.pHYs # noqa - return self._find_first(match) - - def _find_first(self, match): - """ - Return first chunk in stream order returning True for function - *match*. - """ - for chunk in self._chunks: - if match(chunk): - return chunk - return None - - -class _ChunkParser(object): - """ - Extracts chunks from a PNG image stream - """ - def __init__(self, stream_rdr): - super(_ChunkParser, self).__init__() - self._stream_rdr = stream_rdr - - @classmethod - def from_stream(cls, stream): - """ - Return a |_ChunkParser| instance that can extract the chunks from the - PNG image in *stream*. - """ - stream_rdr = StreamReader(stream, BIG_ENDIAN) - return cls(stream_rdr) - - def iter_chunks(self): - """ - Generate a |_Chunk| subclass instance for each chunk in this parser's - PNG stream, in the order encountered in the stream. - """ - for chunk_type, offset in self._iter_chunk_offsets(): - chunk = _ChunkFactory(chunk_type, self._stream_rdr, offset) - yield chunk - - def _iter_chunk_offsets(self): - """ - Generate a (chunk_type, chunk_offset) 2-tuple for each of the chunks - in the PNG image stream. Iteration stops after the IEND chunk is - returned. - """ - chunk_offset = 8 - while True: - chunk_data_len = self._stream_rdr.read_long(chunk_offset) - chunk_type = self._stream_rdr.read_str(4, chunk_offset, 4) - data_offset = chunk_offset + 8 - yield chunk_type, data_offset - if chunk_type == 'IEND': - break - # incr offset for chunk len long, chunk type, chunk data, and CRC - chunk_offset += (4 + 4 + chunk_data_len + 4) - - -def _ChunkFactory(chunk_type, stream_rdr, offset): - """ - Return a |_Chunk| subclass instance appropriate to *chunk_type* parsed - from *stream_rdr* at *offset*. - """ - chunk_cls_map = { - PNG_CHUNK_TYPE.IHDR: _IHDRChunk, - PNG_CHUNK_TYPE.pHYs: _pHYsChunk, - } - chunk_cls = chunk_cls_map.get(chunk_type, _Chunk) - return chunk_cls.from_offset(chunk_type, stream_rdr, offset) - - -class _Chunk(object): - """ - Base class for specific chunk types. Also serves as the default chunk - type. - """ - def __init__(self, chunk_type): - super(_Chunk, self).__init__() - self._chunk_type = chunk_type - - @classmethod - def from_offset(cls, chunk_type, stream_rdr, offset): - """ - Return a default _Chunk instance that only knows its chunk type. 
- """ - return cls(chunk_type) - - @property - def type_name(self): - """ - The chunk type name, e.g. 'IHDR', 'pHYs', etc. - """ - return self._chunk_type - - -class _IHDRChunk(_Chunk): - """ - IHDR chunk, contains the image dimensions - """ - def __init__(self, chunk_type, px_width, px_height): - super(_IHDRChunk, self).__init__(chunk_type) - self._px_width = px_width - self._px_height = px_height - - @classmethod - def from_offset(cls, chunk_type, stream_rdr, offset): - """ - Return an _IHDRChunk instance containing the image dimensions - extracted from the IHDR chunk in *stream* at *offset*. - """ - px_width = stream_rdr.read_long(offset) - px_height = stream_rdr.read_long(offset, 4) - return cls(chunk_type, px_width, px_height) - - @property - def px_width(self): - return self._px_width - - @property - def px_height(self): - return self._px_height - - -class _pHYsChunk(_Chunk): - """ - pYHs chunk, contains the image dpi information - """ - def __init__(self, chunk_type, horz_px_per_unit, vert_px_per_unit, - units_specifier): - super(_pHYsChunk, self).__init__(chunk_type) - self._horz_px_per_unit = horz_px_per_unit - self._vert_px_per_unit = vert_px_per_unit - self._units_specifier = units_specifier - - @classmethod - def from_offset(cls, chunk_type, stream_rdr, offset): - """ - Return a _pHYsChunk instance containing the image resolution - extracted from the pHYs chunk in *stream* at *offset*. - """ - horz_px_per_unit = stream_rdr.read_long(offset) - vert_px_per_unit = stream_rdr.read_long(offset, 4) - units_specifier = stream_rdr.read_byte(offset, 8) - return cls( - chunk_type, horz_px_per_unit, vert_px_per_unit, units_specifier - ) - - @property - def horz_px_per_unit(self): - return self._horz_px_per_unit - - @property - def vert_px_per_unit(self): - return self._vert_px_per_unit - - @property - def units_specifier(self): - return self._units_specifier diff --git a/spaces/cihyFjudo/fairness-paper-search/Kahin Hai Mera Pyar 720p Full Mo .md b/spaces/cihyFjudo/fairness-paper-search/Kahin Hai Mera Pyar 720p Full Mo .md deleted file mode 100644 index 752804a3b8cf994b16c5ee0a4d2eeb8d81a2d71e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Kahin Hai Mera Pyar 720p Full Mo .md +++ /dev/null @@ -1,6 +0,0 @@ -

        Kahin Hai Mera Pyar 720p Full Mo


Download File: https://tinurli.com/2uwiS6



        -
        - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/cihyFjudo/fairness-paper-search/Skandal Seks Di Pejabat Risda (video Part 02) EXCLUSIVE.md b/spaces/cihyFjudo/fairness-paper-search/Skandal Seks Di Pejabat Risda (video Part 02) EXCLUSIVE.md deleted file mode 100644 index be9a010193fe38e667bdd97c0f5c51b76f251764..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Skandal Seks Di Pejabat Risda (video Part 02) EXCLUSIVE.md +++ /dev/null @@ -1,6 +0,0 @@ -

        skandal seks di pejabat risda (video part 02)


        Download Zip ★★★★★ https://tinurli.com/2uwiPI



        -
        - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeg2000htdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeg2000htdec.c deleted file mode 100644 index 51cd96e0f1c0a41d7cb02bb3def0ce3878a433aa..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeg2000htdec.c +++ /dev/null @@ -1,1451 +0,0 @@ -/* - * Copyright (c) 2022 Caleb Etemesi - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* - * Copyright 2019 - 2021, Osamu Watanabe - * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. Neither the name of the copyright holder nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include "libavutil/attributes.h" -#include "libavutil/common.h" -#include "libavutil/avassert.h" -#include "jpeg2000htdec.h" -#include "jpeg2000.h" -#include "jpeg2000dec.h" - -#define J2K_Q1 0 -#define J2K_Q2 1 - -#define HT_SHIFT_SIGMA 0 -#define HT_SHIFT_SCAN 4 -#define HT_SHIFT_REF 3 -#define HT_SHIFT_REF_IND 2 - -/* See Rec. 
ITU-T T.800, Table 2 */ -const static uint8_t mel_e[13] = { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5 }; - -static const uint16_t dec_cxt_vlc_table1[1024]; -static const uint16_t dec_cxt_vlc_table0[1024]; - -typedef struct StateVars { - int32_t pos; - uint32_t bits; - uint32_t tmp; - uint32_t last; - uint8_t bits_left; - uint64_t bit_buf; -} StateVars; - -typedef struct MelDecoderState { - uint8_t k; - uint8_t run; - uint8_t one; -} MelDecoderState; - -/** - * Given a precomputed c, checks whether n % d == 0. c is precomputed from d - * using precompute_c(). - */ -av_always_inline -static uint32_t is_divisible(uint32_t n, uint64_t c) -{ - return n * c <= c - 1; -} - -/** - * Precompute the number c used by is_divisible(). - */ -av_always_inline -static uint64_t precompute_c(uint32_t d) -{ - return 1 + (0xffffffffffffffffull / d); -} - -static void jpeg2000_init_zero(StateVars *s) -{ - s->bits_left = 0; - s->bit_buf = 0; - s->tmp = 0; - s->bits = 0; - s->pos = 0; - s->last = 0; -} - -static void jpeg2000_init_mel(StateVars *s, uint32_t Pcup) -{ - jpeg2000_init_zero(s); - s->pos = Pcup; -} - -static void jpeg2000_init_mag_ref(StateVars *s, uint32_t Lref) -{ - s->pos = Lref - 2; - s->bits = 0; - s->last = 0xFF; - s->tmp = 0; - s->bits_left = 0; - s->bit_buf = 0; -} - -static void jpeg2000_init_mel_decoder(MelDecoderState *mel_state) -{ - mel_state->k = 0; - mel_state->run = 0; - mel_state->one = 0; -} - -/** - * Refill the buffer backwards in little endian while skipping over stuffing - * bits. Stuffing bits are those that appear in the position of any byte whose - * LSBs are all 1's if the last consumed byte was larger than 0x8F. - */ -static int jpeg2000_bitbuf_refill_backwards(StateVars *buffer, const uint8_t *array) -{ - uint64_t tmp = 0; - int32_t position = buffer->pos; - uint32_t new_bits = 32; - - if (buffer->bits_left >= 32) - return 0; // enough data, no need to pull in more bits - - /** - * We are reading bytes from end to start and need to handle being close to - * the end. Subtracting by 4 means we will read some of the bytes of the MEL - * byte stream since the MEL byte stream ends at the start of the VLC byte - * stream. This is okay as they are masked away since we check for cases - * where that occurs (when the position is less than 4). - */ - position -= 4; - - tmp = AV_RB32(&array[position + 1]); - - if (buffer->pos < 4){ - /* mask un-needed bits if we are close to input end */ - uint64_t mask = (1ull << (buffer->pos + 1) * 8) - 1; - tmp &= mask; - } - - /** - * Unstuff bits. Load a temporary byte, which precedes the position we - * currently at, to ensure that we can also un-stuff if the stuffed bit is - * the bottom most bits. - */ - tmp <<= 8; - tmp |= array[buffer->pos + 1]; - - if ((tmp & 0x7FFF000000) > 0x7F8F000000) { - tmp &= 0x7FFFFFFFFF; - new_bits--; - } - if ((tmp & 0x007FFF0000) > 0x007F8F0000) { - tmp = (tmp & 0x007FFFFFFF) + ((tmp & 0xFF00000000) >> 1); - new_bits--; - } - if ((tmp & 0x00007FFF00) > 0x00007F8F00) { - tmp = (tmp & 0x00007FFFFF) + ((tmp & 0xFFFF000000) >> 1); - new_bits--; - } - if ((tmp & 0x0000007FFF) > 0x0000007F8F) { - tmp = (tmp & 0x0000007FFF) + ((tmp & 0xFFFFFF0000) >> 1); - new_bits--; - } - - tmp >>= 8; // Remove temporary byte loaded - - /* Add bits to the MSB of the bit buffer */ - buffer->bit_buf |= tmp << buffer->bits_left; - buffer->bits_left += new_bits; - buffer->pos = FFMAX(0, position); - return 0; -} - -/** - * Refill the bit-buffer reading new bits going forward - * in the stream while skipping over stuffed bits. 
- */ -static void jpeg2000_bitbuf_refill_forward(StateVars *buffer, const uint8_t *array, - uint32_t length) -{ - while (buffer->bits_left < 32) { - buffer->tmp = 0xFF; - buffer->bits = (buffer->last == 0xFF) ? 7 : 8; - if (buffer->pos <= length) { - buffer->tmp = array[buffer->pos]; - buffer->pos += 1; - buffer->last = buffer->tmp; - } - buffer->bit_buf |= ((uint64_t) buffer->tmp) << buffer->bits_left; - buffer->bits_left += buffer->bits; - } -} - -/** - * Drops bits from lower bits in the bit buffer. buf contains the bit buffers. - * nbits is the number of bits to remove. - */ -av_always_inline -static void jpeg2000_bitbuf_drop_bits_lsb(StateVars *buf, uint8_t nbits) -{ - av_assert2(buf->bits_left >= nbits); // cannot read more bits than available - buf->bit_buf >>= nbits; - buf->bits_left -= nbits; -} - -/** - * Get bits from the bit buffer reading them from the least significant bits - * moving to the most significant bits. In case there are fewer bits, refill - * from buf moving backwards. - */ -av_always_inline -static uint64_t jpeg2000_bitbuf_get_bits_lsb(StateVars *bit_stream, uint8_t nbits, - const uint8_t *buf) -{ - uint64_t bits; - uint64_t mask = (1ull << nbits) - 1; - if (bit_stream->bits_left < nbits) - jpeg2000_bitbuf_refill_backwards(bit_stream, buf); - bits = bit_stream->bit_buf & mask; - jpeg2000_bitbuf_drop_bits_lsb(bit_stream, nbits); - return bits; -} - -/** - * Get bits from the bit buffer reading them from the least significant bits - * moving to the most significant bits. In case there are fewer bits, refill from - * buf moving forward. - */ -av_always_inline -static uint64_t jpeg2000_bitbuf_get_bits_lsb_forward(StateVars *bit_stream, - uint8_t nbits, const uint8_t *buf, - uint32_t length) -{ - uint64_t bits; - uint64_t mask = (1ull << nbits) - 1; - - if (bit_stream->bits_left <= nbits) - jpeg2000_bitbuf_refill_forward(bit_stream, buf, length); - bits = bit_stream->bit_buf & mask; - jpeg2000_bitbuf_drop_bits_lsb(bit_stream, nbits); - return bits; -} - -/** - * Look ahead bit buffer without discarding bits. - */ -av_always_inline -static uint64_t jpeg2000_bitbuf_peek_bits_lsb(StateVars *stream, uint8_t nbits) -{ - uint64_t mask = (1ull << nbits) - 1; - return stream->bit_buf & mask; -} - -static void jpeg2000_init_vlc(StateVars *s, uint32_t Lcup, uint32_t Pcup, - const uint8_t *Dcup) -{ - s->bits_left = 0; - s->bit_buf = 0; - s->pos = Lcup - 2 - Pcup; - s->last = Dcup[Lcup - 2]; - s->tmp = (s->last) >> 4; - s->bits = ((s->tmp & 7) < 7) ? 4 : 3; - - jpeg2000_bitbuf_refill_backwards(s, Dcup + Pcup); - jpeg2000_bitbuf_drop_bits_lsb(s, 4); -} - -/** - * Decode prefix codes for VLC segment. See Rec. ITU-T T.814, 7.3.5. - */ -av_always_inline -static int jpeg2000_decode_ctx_vlc(const Jpeg2000DecoderContext *s, - StateVars *vlc_stream, const uint16_t *table, - const uint8_t *Dcup, uint8_t *sig_pat, - uint8_t *res_off, uint8_t *emb_pat_k, - uint8_t *emb_pat_1, uint8_t pos, - uint32_t Pcup, uint16_t context) -{ - uint32_t value; - uint8_t len; - uint64_t index; - uint64_t code_word; - - jpeg2000_bitbuf_refill_backwards(vlc_stream, Dcup + Pcup); - - code_word = vlc_stream->bit_buf & 0x7f; - index = code_word + (context << 7); - - av_assert0(index < 1024); // The CxtVLC table has 1024 entries. 
- - value = table[index]; - - len = (value & 0x000F) >> 1; - - res_off[pos] = (uint8_t) (value & 1); - sig_pat[pos] = (uint8_t) ((value & 0x00F0) >> 4); - emb_pat_k[pos] = (uint8_t) ((value & 0x0F00) >> 8); - emb_pat_1[pos] = (uint8_t) ((value & 0xF000) >> 12); - - jpeg2000_bitbuf_drop_bits_lsb(vlc_stream, len); - return 0; -} - -/** - * Decode variable length u-vlc prefix. See decodeUPrefix procedure at Rec. - * ITU-T T.814, 7.3.6. - */ -av_always_inline -static uint8_t vlc_decode_u_prefix(StateVars *vlc_stream, const uint8_t *refill_array) -{ - static const uint8_t return_value[8] = { 5, 1, 2, 1, 3, 1, 2, 1 }; - static const uint8_t drop_bits[8] = { 3, 1, 2, 1, 3, 1, 2, 1 }; - - uint8_t bits; - - if (vlc_stream->bits_left < 3) - jpeg2000_bitbuf_refill_backwards(vlc_stream, refill_array); - - bits = jpeg2000_bitbuf_peek_bits_lsb(vlc_stream, 3); - - jpeg2000_bitbuf_drop_bits_lsb(vlc_stream, drop_bits[bits]); - return return_value[bits]; -} - -/** - * Decode variable length u-vlc suffix. See decodeUSuffix procedure at Rec. - * ITU-T T.814, 7.3.6. - */ -av_always_inline -static uint8_t vlc_decode_u_suffix(StateVars *vlc_stream, uint8_t suffix, - const uint8_t *refill_array) -{ - static const int mask[] = { 1, 31 }; - static const int drop_bits[] = { 1, 5 }; - - uint8_t bits; - int cond = suffix != 3; - if (suffix < 3) - return 0; - - if (vlc_stream->bits_left < 5) - jpeg2000_bitbuf_refill_backwards(vlc_stream, refill_array); - - bits = jpeg2000_bitbuf_peek_bits_lsb(vlc_stream, 5); - - jpeg2000_bitbuf_drop_bits_lsb(vlc_stream, drop_bits[cond]); - return bits & mask[cond]; -} - -/** - * Decode u-vlc extension values. See decodeUExtension procedure at Rec. ITU-T - * T.814, 7.3.6. - */ -av_always_inline -static uint8_t vlc_decode_u_extension(StateVars *vlc_stream, uint8_t suffix, - const uint8_t *refill_array) -{ - return jpeg2000_bitbuf_get_bits_lsb(vlc_stream, 4 * (suffix >= 28), refill_array); -} - -/** - * Magnitude and Sign decode procedures. See decodeMagSgnValue procedure at Rec. - * ITU-T T.814, 7.3.8. - */ -av_always_inline -static int32_t jpeg2000_decode_mag_sgn(StateVars *mag_sgn_stream, int32_t m_n, - int32_t i_n, const uint8_t *buf, uint32_t length) -{ - int32_t val = 0; - if (m_n > 0) { - val = jpeg2000_bitbuf_get_bits_lsb_forward(mag_sgn_stream,m_n,buf,length); - val += (i_n << m_n); - } - return val; -} - -av_always_inline -static void recover_mag_sgn(StateVars *mag_sgn, uint8_t pos, uint16_t q, int32_t m_n[2], - int32_t known_1[2], const uint8_t emb_pat_1[2], - int32_t v[2][4], int32_t m[2][4], uint8_t *E, - uint32_t *mu_n, const uint8_t *Dcup, uint32_t Pcup, - uint32_t pLSB) -{ - for (int i = 0; i < 4; i++) { - int32_t n = 4 * q + i; - m_n[pos] = m[pos][i]; - known_1[pos] = (emb_pat_1[pos] >> i) & 1; - v[pos][i] = jpeg2000_decode_mag_sgn(mag_sgn, m_n[pos], known_1[pos], Dcup, Pcup); - - if (m_n[pos] != 0) { - E[n] = 32 - ff_clz(v[pos][i] | 1); - mu_n[n] = (v[pos][i] >> 1) + 1; - mu_n[n] <<= pLSB; - mu_n[n] |= ((uint32_t) (v[pos][i] & 1)) << 31; // sign bit. - } - } -} - -static int jpeg2000_import_bit(StateVars *stream, const uint8_t *array, uint32_t length) -{ - int cond = stream->pos <= length; - int pos = FFMIN(stream->pos, length); - if (stream->bits == 0) { - stream->bits = (stream->tmp == 0xFF) ? 7 : 8; - stream->pos += cond; - stream->tmp = cond ? 
array[pos] : 0xFF; - } - stream->bits -= 1; - return (stream->tmp >> stream->bits) & 1; -} - -static int jpeg2000_peek_bit(StateVars *stream, const uint8_t *array, uint32_t length) -{ - if (stream->bits == 0) { - int cond = stream->pos <= length; - int pos = FFMIN(stream->pos, length); - stream->bits = (stream->tmp == 0xFF) ? 7 : 8; - stream->pos += cond; - stream->tmp = cond ? array[pos] : 0xFF; - } - return (stream->tmp >> stream->bits) & 1; -} - -static int jpeg2000_decode_mel_sym(MelDecoderState *mel_state, - StateVars *mel_stream, - const uint8_t *Dcup, - uint32_t Lcup) -{ - - if (mel_state->run == 0 && mel_state->one == 0) { - uint8_t eval; - uint8_t bit; - - eval = mel_e[mel_state->k]; - bit = jpeg2000_import_bit(mel_stream, Dcup, Lcup); - if (bit == 1) { - mel_state->run = 1 << eval; - mel_state->k = FFMIN(12, mel_state->k + 1); - } else { - mel_state->run = 0; - while (eval > 0) { - bit = jpeg2000_import_bit(mel_stream, Dcup, Lcup); - mel_state->run = (2 * (mel_state->run)) + bit; - eval -= 1; - } - mel_state->k = FFMAX(0, mel_state->k - 1); - mel_state->one = 1; - } - } - if (mel_state->run > 0) { - mel_state->run -= 1; - return 0; - } else { - mel_state->one = 0; - return 1; - } -} - -/** - * Magref decoding procedures. - */ -av_always_inline -static int jpeg2000_import_magref_bit(StateVars *stream, const uint8_t *array, - uint32_t length) -{ - return jpeg2000_bitbuf_get_bits_lsb(stream, 1, array); -} - -/** - * Signal EMB decode. - */ -static int jpeg2000_decode_sig_emb(const Jpeg2000DecoderContext *s, MelDecoderState *mel_state, - StateVars *mel_stream, StateVars *vlc_stream, - const uint16_t *vlc_table, const uint8_t *Dcup, - uint8_t *sig_pat, uint8_t *res_off, uint8_t *emb_pat_k, - uint8_t *emb_pat_1, uint8_t pos, uint16_t context, - uint32_t Lcup, uint32_t Pcup) -{ - if (context == 0) { - uint8_t sym; - sym = jpeg2000_decode_mel_sym(mel_state, mel_stream, Dcup, Lcup); - if (sym == 0) { - sig_pat[pos] = 0; - res_off[pos] = 0; - emb_pat_k[pos] = 0; - emb_pat_1[pos] = 0; - return 0; - } - } - return jpeg2000_decode_ctx_vlc(s, vlc_stream, vlc_table, Dcup, sig_pat, - res_off, emb_pat_k, emb_pat_1, pos, Pcup, - context); -} - -av_always_inline -static int jpeg2000_get_state(int x1, int x2, int width, int shift_by, - const uint8_t *block_states) -{ - return (block_states[(x1 + 1) * (width + 2) + (x2 + 1)] >> shift_by) & 1; -} - -av_always_inline -static void jpeg2000_modify_state(int x1, int x2, int width, - int value, uint8_t *block_states) -{ - block_states[(x1 + 1) * (width + 2) + (x2 + 1)] |= value; -} - -av_always_inline -static int jpeg2000_decode_ht_cleanup_segment(const Jpeg2000DecoderContext *s, - Jpeg2000Cblk *cblk, Jpeg2000T1Context *t1, - MelDecoderState *mel_state, - StateVars *mel_stream, StateVars *vlc_stream, - StateVars *mag_sgn_stream, const uint8_t *Dcup, - uint32_t Lcup, uint32_t Pcup, uint8_t pLSB, - int width, int height, int32_t *sample_buf, - uint8_t *block_states) -{ - uint16_t q = 0; // Represents current quad position - uint16_t q1, q2; - uint16_t context1, context2; - uint16_t context = 0; - - uint8_t sig_pat[2] = { 0 }; // significance pattern - uint8_t res_off[2] = { 0 }; // residual offset - uint8_t emb_pat_k[2] = { 0 }; // exponent Max Bound pattern K - uint8_t emb_pat_1[2] = { 0 }; // exponent Max Bound pattern 1 - uint8_t gamma[2] = { 0 }; - - uint8_t E_n[2] = { 0 }; - uint8_t E_ne[2] = { 0 }; - uint8_t E_nw[2] = { 0 }; - uint8_t E_nf[2] = { 0 }; - - uint8_t max_e[2] = { 0 }; - uint8_t u_pfx[2] = { 0 }; - uint8_t u_sfx[2] = { 0 }; - uint8_t 
u_ext[2] = { 0 }; - - int32_t u[2] = { 0 }; - int32_t U[2] = { 0 }; // exponent bound - int32_t m_n[2] = { 0 }; - int32_t known_1[2] = { 0 }; - - int32_t m[2][4] = { 0 }; - int32_t v[2][4] = { 0 }; - - uint8_t kappa[2] = { 1, 1 }; - - int ret = 0; - - int sp; - - uint64_t c; - - uint8_t *sigma; - uint32_t *mu; - - const uint8_t *vlc_buf = Dcup + Pcup; - - /* convert to raster-scan */ - const uint16_t is_border_x = width % 2; - const uint16_t is_border_y = height % 2; - - const uint16_t quad_width = ff_jpeg2000_ceildivpow2(width, 1); - const uint16_t quad_height = ff_jpeg2000_ceildivpow2(height, 1); - - size_t buf_size = 4 * quad_width * quad_height; - - uint8_t *sigma_n = av_calloc(buf_size, sizeof(uint8_t)); - uint8_t *E = av_calloc(buf_size, sizeof(uint8_t)); - uint32_t *mu_n = av_calloc(buf_size, sizeof(uint32_t)); - - if (!sigma_n || !E || !mu_n) { - ret = AVERROR(ENOMEM); - goto free; - } - - sigma = sigma_n; - mu = mu_n; - - while (q < quad_width - 1) { - q1 = q; - q2 = q1 + 1; - - if ((ret = jpeg2000_decode_sig_emb(s, mel_state, mel_stream, vlc_stream, - dec_cxt_vlc_table0, Dcup, sig_pat, res_off, - emb_pat_k, emb_pat_1, J2K_Q1, context, Lcup, - Pcup)) < 0) - goto free; - - for (int i = 0; i < 4; i++) - sigma_n[4 * q1 + i] = (sig_pat[J2K_Q1] >> i) & 1; - - /* calculate context */ - context = sigma_n[4 * q1]; // f - context |= sigma_n[4 * q1 + 1]; // sf - context += sigma_n[4 * q1 + 2] << 1; // w << 1 - context += sigma_n[4 * q1 + 3] << 2; - - if ((ret = jpeg2000_decode_sig_emb(s, mel_state, mel_stream, vlc_stream, - dec_cxt_vlc_table0, Dcup, sig_pat, res_off, - emb_pat_k, emb_pat_1, J2K_Q2, context, Lcup, - Pcup)) < 0) - goto free; - - for (int i = 0; i < 4; i++) - sigma_n[4 * q2 + i] = (sig_pat[J2K_Q2] >> i) & 1; - - /* calculate context for the next quad */ - context = sigma_n[4 * q2]; // f - context |= sigma_n[4 * q2 + 1]; // sf - context += sigma_n[4 * q2 + 2] << 1; // w << 1 - context += sigma_n[4 * q2 + 3] << 2; // sw << 2 - - u[0] = 0; - u[1] = 0; - - jpeg2000_bitbuf_refill_backwards(vlc_stream, vlc_buf); - - if (res_off[J2K_Q1] == 1 && res_off[J2K_Q2] == 1) { - - if (jpeg2000_decode_mel_sym(mel_state, mel_stream, Dcup, Lcup) == 1) { - - u_pfx[J2K_Q1] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_pfx[J2K_Q2] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - - u_sfx[J2K_Q1] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q1], vlc_buf); - u_sfx[J2K_Q2] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q2], vlc_buf); - - u_ext[J2K_Q1] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q1], vlc_buf); - u_ext[J2K_Q2] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q2], vlc_buf); - - u[J2K_Q1] = 2 + u_pfx[J2K_Q1] + u_sfx[J2K_Q1] + (u_ext[J2K_Q1] * 4); - u[J2K_Q2] = 2 + u_pfx[J2K_Q2] + u_sfx[J2K_Q2] + (u_ext[J2K_Q2] * 4); - - } else { - u_pfx[J2K_Q1] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - - if (u_pfx[J2K_Q1] > 2) { - u[J2K_Q2] = jpeg2000_bitbuf_get_bits_lsb(vlc_stream, 1, vlc_buf) + 1; - u_sfx[J2K_Q1] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q1], vlc_buf); - u_ext[J2K_Q1] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q1], vlc_buf); - } else { - u_pfx[J2K_Q2] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_sfx[J2K_Q1] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q1], vlc_buf); - u_sfx[J2K_Q2] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q2], vlc_buf); - u_ext[J2K_Q1] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q1], vlc_buf); - u_ext[J2K_Q2] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q2], vlc_buf); - u[J2K_Q2] = u_pfx[J2K_Q2] + u_sfx[J2K_Q2] + (u_ext[J2K_Q2] * 4); - } - /* See Rec. 
ITU-T T.814, 7.3.6(3) */ - u[J2K_Q1] = u_pfx[J2K_Q1] + u_sfx[J2K_Q1] + (u_ext[J2K_Q1] * 4); - } - - } else if (res_off[J2K_Q1] == 1 || res_off[J2K_Q2] == 1) { - uint8_t pos = res_off[J2K_Q1] == 1 ? 0 : 1; - u_pfx[pos] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_sfx[pos] = vlc_decode_u_suffix(vlc_stream, u_pfx[pos], vlc_buf); - u_ext[pos] = vlc_decode_u_extension(vlc_stream, u_sfx[pos], vlc_buf); - u[pos] = u_pfx[pos] + u_sfx[pos] + (u_ext[pos] * 4); - } - U[J2K_Q1] = kappa[J2K_Q1] + u[J2K_Q1]; - U[J2K_Q2] = kappa[J2K_Q2] + u[J2K_Q2]; - - for (int i = 0; i < 4; i++) { - m[J2K_Q1][i] = sigma_n[4 * q1 + i] * U[J2K_Q1] - ((emb_pat_k[J2K_Q1] >> i) & 1); - m[J2K_Q2][i] = sigma_n[4 * q2 + i] * U[J2K_Q2] - ((emb_pat_k[J2K_Q2] >> i) & 1); - } - - recover_mag_sgn(mag_sgn_stream, J2K_Q1, q1, m_n, known_1, emb_pat_1, v, m, - E, mu_n, Dcup, Pcup, pLSB); - - recover_mag_sgn(mag_sgn_stream, J2K_Q2, q2, m_n, known_1, emb_pat_1, v, m, - E, mu_n, Dcup, Pcup, pLSB); - - q += 2; // Move to the next quad pair - } - - if (quad_width % 2 == 1) { - q1 = q; - - if ((ret = jpeg2000_decode_sig_emb(s, mel_state, mel_stream, vlc_stream, - dec_cxt_vlc_table0, Dcup, sig_pat, res_off, - emb_pat_k, emb_pat_1, J2K_Q1, context, Lcup, - Pcup)) < 0) - goto free; - - for (int i = 0; i < 4; i++) - sigma_n[4 * q1 + i] = (sig_pat[J2K_Q1] >> i) & 1; - - u[J2K_Q1] = 0; - - if (res_off[J2K_Q1] == 1) { - u_pfx[J2K_Q1] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_sfx[J2K_Q1] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q1], vlc_buf); - u_ext[J2K_Q1] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q1], vlc_buf); - u[J2K_Q1] = u_pfx[J2K_Q1] + u_sfx[J2K_Q1] + (u_ext[J2K_Q1] * 4); - } - - U[J2K_Q1] = kappa[J2K_Q1] + u[J2K_Q1]; - - for (int i = 0; i < 4; i++) - m[J2K_Q1][i] = sigma_n[4 * q1 + i] * U[J2K_Q1] - ((emb_pat_k[J2K_Q1] >> i) & 1); - - recover_mag_sgn(mag_sgn_stream, J2K_Q1, q1, m_n, known_1, emb_pat_1, v, m, - E, mu_n, Dcup, Pcup, pLSB); - - q++; // move to next quad pair - } - - /** - * Initial line pair end. As an optimization, we can replace modulo - * operations with checking if a number is divisible , since that's the only - * thing we need. This is paired with is_divisible. Credits to Daniel Lemire - * blog post [1]. - * - * [1] - * https://lemire.me/blog/2019/02/08/faster-remainders-when-the-divisor-is-a-constant-beating-compilers-and-libdivide/ - * - * It's UB on zero, but the spec doesn't allow a quad being zero, so we - * error out early in case that's the case. 
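- *
- * For example, with quad_width = 3, precompute_c() yields
- * c = 0x5555555555555556, and n * c (wrapping modulo 2^64) is <= c - 1
- * exactly when n is a multiple of 3, which is what is_divisible() checks.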
- */ - c = precompute_c(quad_width); - - for (int row = 1; row < quad_height; row++) { - while ((q - (row * quad_width)) < quad_width - 1 && q < (quad_height * quad_width)) { - q1 = q; - q2 = q + 1; - context1 = sigma_n[4 * (q1 - quad_width) + 1]; - context1 += sigma_n[4 * (q1 - quad_width) + 3] << 2; // ne - - if (!is_divisible(q1, c)) { - context1 |= sigma_n[4 * (q1 - quad_width) - 1]; // nw - context1 += (sigma_n[4 * q1 - 1] | sigma_n[4 * q1 - 2]) << 1; // sw | q - } - if (!is_divisible(q1 + 1, c)) - context1 |= sigma_n[4 * (q1 - quad_width) + 5] << 2; - - if ((ret = jpeg2000_decode_sig_emb(s, mel_state, mel_stream, vlc_stream, - dec_cxt_vlc_table1, Dcup, sig_pat, res_off, - emb_pat_k, emb_pat_1, J2K_Q1, context1, Lcup, - Pcup)) - < 0) - goto free; - - for (int i = 0; i < 4; i++) - sigma_n[4 * q1 + i] = (sig_pat[J2K_Q1] >> i) & 1; - - context2 = sigma_n[4 * (q2 - quad_width) + 1]; - context2 += sigma_n[4 * (q2 - quad_width) + 3] << 2; - - if (!is_divisible(q2, c)) { - context2 |= sigma_n[4 * (q2 - quad_width) - 1]; - context2 += (sigma_n[4 * q2 - 1] | sigma_n[4 * q2 - 2]) << 1; - } - if (!is_divisible(q2 + 1, c)) - context2 |= sigma_n[4 * (q2 - quad_width) + 5] << 2; - - if ((ret = jpeg2000_decode_sig_emb(s, mel_state, mel_stream, vlc_stream, - dec_cxt_vlc_table1, Dcup, sig_pat, res_off, - emb_pat_k, emb_pat_1, J2K_Q2, context2, Lcup, - Pcup)) - < 0) - goto free; - - for (int i = 0; i < 4; i++) - sigma_n[4 * q2 + i] = (sig_pat[J2K_Q2] >> i) & 1; - - u[J2K_Q1] = 0; - u[J2K_Q2] = 0; - - jpeg2000_bitbuf_refill_backwards(vlc_stream, vlc_buf); - - if (res_off[J2K_Q1] == 1 && res_off[J2K_Q2] == 1) { - u_pfx[J2K_Q1] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_pfx[J2K_Q2] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - - u_sfx[J2K_Q1] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q1], vlc_buf); - u_sfx[J2K_Q2] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q2], vlc_buf); - - u_ext[J2K_Q1] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q1], vlc_buf); - u_ext[J2K_Q2] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q2], vlc_buf); - - u[J2K_Q1] = u_pfx[J2K_Q1] + u_sfx[J2K_Q1] + (u_ext[J2K_Q1] << 2); - u[J2K_Q2] = u_pfx[J2K_Q2] + u_sfx[J2K_Q2] + (u_ext[J2K_Q2] << 2); - - } else if (res_off[J2K_Q1] == 1 || res_off[J2K_Q2] == 1) { - uint8_t pos = res_off[J2K_Q1] == 1 ? 
0 : 1; - - u_pfx[pos] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_sfx[pos] = vlc_decode_u_suffix(vlc_stream, u_pfx[pos], vlc_buf); - u_ext[pos] = vlc_decode_u_extension(vlc_stream, u_sfx[pos], vlc_buf); - - u[pos] = u_pfx[pos] + u_sfx[pos] + (u_ext[pos] << 2); - } - sp = sig_pat[J2K_Q1]; - - gamma[J2K_Q1] = 1; - - if (sp == 0 || sp == 1 || sp == 2 || sp == 4 || sp == 8) - gamma[J2K_Q1] = 0; - - sp = sig_pat[J2K_Q2]; - - gamma[J2K_Q2] = 1; - - if (sp == 0 || sp == 1 || sp == 2 || sp == 4 || sp == 8) - gamma[J2K_Q2] = 0; - - E_n[J2K_Q1] = E[4 * (q1 - quad_width) + 1]; - E_n[J2K_Q2] = E[4 * (q2 - quad_width) + 1]; - - E_ne[J2K_Q1] = E[4 * (q1 - quad_width) + 3]; - E_ne[J2K_Q2] = E[4 * (q2 - quad_width) + 3]; - - E_nw[J2K_Q1] = (!is_divisible(q1, c)) * E[FFMAX((4 * (q1 - quad_width) - 1), 0)]; - E_nw[J2K_Q2] = (!is_divisible(q2, c)) * E[FFMAX((4 * (q2 - quad_width) - 1), 0)]; - - E_nf[J2K_Q1] = (!is_divisible(q1 + 1, c)) * E[4 * (q1 - quad_width) + 5]; - E_nf[J2K_Q2] = (!is_divisible(q2 + 1, c)) * E[4 * (q2 - quad_width) + 5]; - - max_e[J2K_Q1] = FFMAX(E_nw[J2K_Q1], FFMAX3(E_n[J2K_Q1], E_ne[J2K_Q1], E_nf[J2K_Q1])); - max_e[J2K_Q2] = FFMAX(E_nw[J2K_Q2], FFMAX3(E_n[J2K_Q2], E_ne[J2K_Q2], E_nf[J2K_Q2])); - - kappa[J2K_Q1] = FFMAX(1, gamma[J2K_Q1] * (max_e[J2K_Q1] - 1)); - kappa[J2K_Q2] = FFMAX(1, gamma[J2K_Q2] * (max_e[J2K_Q2] - 1)); - - U[J2K_Q1] = kappa[J2K_Q1] + u[J2K_Q1]; - U[J2K_Q2] = kappa[J2K_Q2] + u[J2K_Q2]; - - for (int i = 0; i < 4; i++) { - m[J2K_Q1][i] = sigma_n[4 * q1 + i] * U[J2K_Q1] - ((emb_pat_k[J2K_Q1] >> i) & 1); - m[J2K_Q2][i] = sigma_n[4 * q2 + i] * U[J2K_Q2] - ((emb_pat_k[J2K_Q2] >> i) & 1); - } - recover_mag_sgn(mag_sgn_stream, J2K_Q1, q1, m_n, known_1, emb_pat_1, v, m, - E, mu_n, Dcup, Pcup, pLSB); - - recover_mag_sgn(mag_sgn_stream, J2K_Q2, q2, m_n, known_1, emb_pat_1, v, m, - E, mu_n, Dcup, Pcup, pLSB); - - q += 2; // Move to the next quad pair - } - - if (quad_width % 2 == 1) { - q1 = q; - - /* calculate context for current quad */ - context1 = sigma_n[4 * (q1 - quad_width) + 1]; - context1 += (sigma_n[4 * (q1 - quad_width) + 3] << 2); - - if (!is_divisible(q1, c)) { - context1 |= sigma_n[4 * (q1 - quad_width) - 1]; - context1 += (sigma_n[4 * q1 - 1] | sigma_n[4 * q1 - 2]) << 1; - } - if (!is_divisible(q1 + 1, c)) - context1 |= sigma_n[4 * (q1 - quad_width) + 5] << 2; - - if ((ret = jpeg2000_decode_sig_emb(s, mel_state, mel_stream, vlc_stream, - dec_cxt_vlc_table1, Dcup, sig_pat, res_off, - emb_pat_k, emb_pat_1, J2K_Q1, context1, Lcup, - Pcup)) < 0) - goto free; - - for (int i = 0; i < 4; i++) - sigma_n[4 * q1 + i] = (sig_pat[J2K_Q1] >> i) & 1; - - u[J2K_Q1] = 0; - - /* Recover mag_sgn value */ - if (res_off[J2K_Q1] == 1) { - u_pfx[J2K_Q1] = vlc_decode_u_prefix(vlc_stream, vlc_buf); - u_sfx[J2K_Q1] = vlc_decode_u_suffix(vlc_stream, u_pfx[J2K_Q1], vlc_buf); - u_ext[J2K_Q1] = vlc_decode_u_extension(vlc_stream, u_sfx[J2K_Q1], vlc_buf); - - u[J2K_Q1] = u_pfx[J2K_Q1] + u_sfx[J2K_Q1] + (u_ext[J2K_Q1] << 2); - } - - sp = sig_pat[J2K_Q1]; - - gamma[J2K_Q1] = 1; - - if (sp == 0 || sp == 1 || sp == 2 || sp == 4 || sp == 8) - gamma[J2K_Q1] = 0; - - E_n[J2K_Q1] = E[4 * (q1 - quad_width) + 1]; - - E_ne[J2K_Q1] = E[4 * (q1 - quad_width) + 3]; - - E_nw[J2K_Q1] = (!is_divisible(q1, c)) * E[FFMAX((4 * (q1 - quad_width) - 1), 0)]; - - E_nf[J2K_Q1] = (!is_divisible(q1 + 1, c)) * E[4 * (q1 - quad_width) + 5]; - - max_e[J2K_Q1] = FFMAX(E_nw[J2K_Q1], FFMAX3(E_n[J2K_Q1], E_ne[J2K_Q1], E_nf[J2K_Q1])); - - kappa[J2K_Q1] = FFMAX(1, gamma[J2K_Q1] * (max_e[J2K_Q1] - 1)); - - U[J2K_Q1] = 
kappa[J2K_Q1] + u[J2K_Q1]; - - for (int i = 0; i < 4; i++) - m[J2K_Q1][i] = sigma_n[4 * q1 + i] * U[J2K_Q1] - ((emb_pat_k[J2K_Q1] >> i) & 1); - - recover_mag_sgn(mag_sgn_stream, J2K_Q1, q1, m_n, known_1, emb_pat_1, v, m, - E, mu_n, Dcup, Pcup, pLSB); - q += 1; - } - } - - // convert to raster-scan - for (int y = 0; y < quad_height; y++) { - for (int x = 0; x < quad_width; x++) { - int j1, j2; - int x1, x2 , x3; - - j1 = 2 * y; - j2 = 2 * x; - - sample_buf[j2 + (j1 * width)] = (int32_t)*mu; - jpeg2000_modify_state(j1, j2, width, *sigma, block_states); - sigma += 1; - mu += 1; - - x1 = y != quad_height - 1 || is_border_y == 0; - sample_buf[j2 + ((j1 + 1) * width)] = ((int32_t)*mu) * x1; - jpeg2000_modify_state(j1 + 1, j2, width, (*sigma) * x1, block_states); - sigma += 1; - mu += 1; - - x2 = x != quad_width - 1 || is_border_x == 0; - sample_buf[(j2 + 1) + (j1 * width)] = ((int32_t)*mu) * x2; - jpeg2000_modify_state(j1, j2 + 1, width, (*sigma) * x2, block_states); - sigma += 1; - mu += 1; - - x3 = x1 | x2; - sample_buf[(j2 + 1) + (j1 + 1) * width] = ((int32_t)*mu) * x3; - jpeg2000_modify_state(j1 + 1, j2 + 1, width, (*sigma) * x3, block_states); - sigma += 1; - mu += 1; - } - } - ret = 1; -free: - av_freep(&sigma_n); - av_freep(&E); - av_freep(&mu_n); - return ret; -} - -static void jpeg2000_calc_mbr(uint8_t *mbr, const uint16_t i, const uint16_t j, - const uint32_t mbr_info, uint8_t causal_cond, - uint8_t *block_states, int width) -{ - int local_mbr = 0; - - local_mbr |= jpeg2000_get_state(i - 1, j - 1, width, HT_SHIFT_SIGMA, block_states); - local_mbr |= jpeg2000_get_state(i - 1, j + 0, width, HT_SHIFT_SIGMA, block_states); - local_mbr |= jpeg2000_get_state(i - 1, j + 1, width, HT_SHIFT_SIGMA, block_states); - - local_mbr |= jpeg2000_get_state(i + 0, j - 1, width, HT_SHIFT_SIGMA, block_states); - local_mbr |= jpeg2000_get_state(i + 0, j + 1, width, HT_SHIFT_SIGMA, block_states); - - local_mbr |= jpeg2000_get_state(i + 1, j - 1, width, HT_SHIFT_SIGMA, block_states) * causal_cond; - local_mbr |= jpeg2000_get_state(i + 1, j + 0, width, HT_SHIFT_SIGMA, block_states) * causal_cond; - local_mbr |= jpeg2000_get_state(i + 1, j + 1, width, HT_SHIFT_SIGMA, block_states) * causal_cond; - - local_mbr |= jpeg2000_get_state(i - 1, j - 1, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i - 1, j - 1, width, HT_SHIFT_SCAN, block_states); - local_mbr |= jpeg2000_get_state(i - 1, j + 0, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i - 1, j - 1, width, HT_SHIFT_SCAN, block_states); - local_mbr |= jpeg2000_get_state(i - 1, j + 1, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i - 1, j + 1, width, HT_SHIFT_SCAN, block_states); - - local_mbr |= jpeg2000_get_state(i + 0, j - 1, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i + 0, j - 1, width, HT_SHIFT_SCAN, block_states); - local_mbr |= jpeg2000_get_state(i + 0, j + 1, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i + 0, j + 1, width, HT_SHIFT_SCAN, block_states); - - local_mbr |= jpeg2000_get_state(i + 1, j - 1, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i + 1, j - 1, width, HT_SHIFT_SCAN, block_states) * causal_cond; - local_mbr |= jpeg2000_get_state(i + 1, j + 0, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i + 1, j + 0, width, HT_SHIFT_SCAN, block_states) * causal_cond; - local_mbr |= jpeg2000_get_state(i + 1, j + 1, width, HT_SHIFT_REF, block_states) * - jpeg2000_get_state(i + 1, j + 1, width, HT_SHIFT_SCAN, block_states) * causal_cond; - - *mbr |= local_mbr; -} - -static 
void jpeg2000_process_stripes_block(StateVars *sig_prop, int i_s, int j_s, - int width, int height, int stride, int pLSB, - int32_t *sample_buf, uint8_t *block_states, - uint8_t *magref_segment, uint32_t magref_length) -{ - for (int j = j_s; j < j_s + width; j++) { - uint32_t mbr_info = 0; - for (int i = i_s; i < i_s + height; i++) { - int modify_state, cond; - uint8_t bit; - uint8_t causal_cond = i != (i_s + height - 1); - int32_t *sp = &sample_buf[j + (i * (stride - 2))]; - uint8_t mbr = 0; - - if (jpeg2000_get_state(i, j, stride - 2, HT_SHIFT_SIGMA, block_states) == 0) - jpeg2000_calc_mbr(&mbr, i, j, mbr_info & 0x1EF, causal_cond, block_states, stride - 2); - mbr_info >>= 3; - cond = mbr != 0; - bit = jpeg2000_peek_bit(sig_prop, magref_segment, magref_length); - *sp |= (bit * cond) << pLSB; - sig_prop->bits -= cond; - modify_state = (((1 << HT_SHIFT_REF_IND) | (1 << HT_SHIFT_REF)) * cond) | 1 << HT_SHIFT_SCAN; - jpeg2000_modify_state(i, j, stride - 2, modify_state, block_states); - } - } -} - -/** - * See procedure decodeSigPropMag at Rec. ITU-T T.814, 7.4. -*/ -av_noinline -static void jpeg2000_decode_sigprop_segment(Jpeg2000Cblk *cblk, uint16_t width, - uint16_t height, uint8_t *magref_segment, - uint32_t magref_length, uint8_t pLSB, - int32_t *sample_buf, uint8_t *block_states) -{ - StateVars sp_dec; - - const uint16_t num_v_stripe = height / 4; - const uint16_t num_h_stripe = width / 4; - int b_width = 4; - int b_height = 4; - int stride = width + 2; - - int last_width; - uint16_t i = 0, j = 0; - - jpeg2000_init_zero(&sp_dec); - - for (int n1 = 0; n1 < num_v_stripe; n1++) { - j = 0; - for (int n2 = 0; n2 < num_h_stripe; n2++) { - jpeg2000_process_stripes_block(&sp_dec, i, j, b_width, b_height, stride, - pLSB, sample_buf, block_states, magref_segment, - magref_length); - j += 4; - } - last_width = width % 4; - if (last_width) - jpeg2000_process_stripes_block(&sp_dec, i, j, last_width, b_height, stride, - pLSB, sample_buf, block_states, magref_segment, - magref_length); - i += 4; - } - - /* Decode remaining height stripes */ - b_height = height % 4; - j = 0; - for (int n2 = 0; n2 < num_h_stripe; n2++) { - jpeg2000_process_stripes_block(&sp_dec, i, j, b_width, b_height, stride, - pLSB, sample_buf, block_states, magref_segment, - magref_length); - j += 4; - } - last_width = width % 4; - if (last_width) - jpeg2000_process_stripes_block(&sp_dec, i, j, last_width, b_height, stride, - pLSB, sample_buf, block_states, magref_segment, - magref_length); -} - -/** - * See procedure decodeSigPropMag at Rec. ITU-T T.814, 7.5. -*/ -static int -jpeg2000_decode_magref_segment(Jpeg2000Cblk *cblk, uint16_t width, uint16_t block_height, uint8_t *magref_segment, - uint32_t magref_length, uint8_t pLSB, int32_t *sample_buf, uint8_t *block_states) -{ - - StateVars mag_ref = { 0 }; - const uint16_t num_v_stripe = block_height / 4; - uint16_t height = 4; - uint16_t i_start = 0; - int32_t *sp; - - jpeg2000_init_mag_ref(&mag_ref, magref_length); - - for (int n1 = 0; n1 < num_v_stripe; n1++) { - for (int j = 0; j < width; j++) { - for (int i = i_start; i < i_start + height; i++) { - /** - * We move column wise, going from one quad to another. See - * Rec. ITU-T T.814, Figure 7. 
- */ - sp = &sample_buf[j + i * width]; - if (jpeg2000_get_state(i, j, width, HT_SHIFT_SIGMA, block_states) != 0) { - jpeg2000_modify_state(i, j, width, 1 << HT_SHIFT_REF_IND, block_states); - *sp |= jpeg2000_import_magref_bit(&mag_ref, magref_segment, magref_length) << pLSB; - } - } - } - i_start += 4; - } - height = block_height % 4; - for (int j = 0; j < width; j++) { - for (int i = i_start; i < i_start + height; i++) { - sp = &sample_buf[j + i * width]; - if (jpeg2000_get_state(i, j, width, HT_SHIFT_SIGMA, block_states) != 0) { - jpeg2000_modify_state(i, j, width, 1 << HT_SHIFT_REF_IND, block_states); - *sp |= jpeg2000_import_magref_bit(&mag_ref, magref_segment, magref_length) << pLSB; - } - } - } - return 1; -} - - -int -ff_jpeg2000_decode_htj2k(const Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *codsty, Jpeg2000T1Context *t1, Jpeg2000Cblk *cblk, - int width, int height, int magp, uint8_t roi_shift) -{ - uint8_t p0 = 0; // Number of placeholder passes - uint32_t Lcup; // Length of HT cleanup segment - uint32_t Lref; // Length of Refinement segment - uint32_t Scup; // HT cleanup segment suffix length - uint32_t Pcup; // HT cleanup segment prefix length - - uint8_t S_blk; // Number of skipped magnitude bitplanes - uint8_t pLSB; - - uint8_t *Dcup; // Byte of an HT cleanup segment - uint8_t *Dref; // Byte of an HT refinement segment - - int z_blk; // Number of ht coding pass - - uint8_t empty_passes; - - StateVars mag_sgn; // Magnitude and Sign - StateVars mel; // Adaptive run-length coding - StateVars vlc; // Variable Length coding - StateVars sig_prop; // Significance propagation - - MelDecoderState mel_state; - - int ret; - - /* Temporary buffers */ - int32_t *sample_buf; - uint8_t *block_states; - - int32_t n, val; // Post-processing - - int32_t M_b = magp; - - /* codeblock size as constrained by Rec. ITU-T T.800, Table A.18 */ - av_assert0(width <= 1024U && height <= 1024U); - av_assert0(width * height <= 4096); - av_assert0(width * height > 0); - - memset(t1->data, 0, t1->stride * height * sizeof(*t1->data)); - memset(t1->flags, 0, t1->stride * (height + 2) * sizeof(*t1->flags)); - - if (cblk->npasses == 0) - return 0; - - if (cblk->npasses > 3) - p0 = 0; - else if (cblk->length == 0) - p0 = 1; - - empty_passes = p0 * 3; - z_blk = cblk->npasses - empty_passes; - - if (z_blk <= 0) - return 0; // No passes within this set, continue - - Lcup = cblk->pass_lengths[0]; - Lref = cblk->pass_lengths[1]; - - if (Lcup < 2) { - av_log(s->avctx, AV_LOG_ERROR, - "Cleanup pass length must be at least 2 bytes in length\n"); - return AVERROR_INVALIDDATA; - } - Dcup = cblk->data; - Dref = cblk->data + Lcup; // Dref comes after the refinement segment - S_blk = p0 + cblk->zbp; - pLSB = 30 - S_blk; - - Scup = (Dcup[Lcup - 1] << 4) + (Dcup[Lcup - 2] & 0x0F); - - if (Scup < 2 || Scup > Lcup || Scup > 4079) { - av_log(s->avctx, AV_LOG_ERROR, "Cleanup pass suffix length is invalid %d\n", - Scup); - ret = AVERROR_INVALIDDATA; - goto free; - } - Pcup = Lcup - Scup; - - /* modDcup shall be done before the creation of vlc instance. 
*/ - Dcup[Lcup - 1] = 0xFF; - Dcup[Lcup - 2] |= 0x0F; - - /* Magnitude and refinement */ - jpeg2000_init_zero(&mag_sgn); - jpeg2000_bitbuf_refill_forward(&mag_sgn, Dcup, Pcup); - - /* Significance propagation */ - jpeg2000_init_zero(&sig_prop); - - /* Adaptive run length */ - jpeg2000_init_mel(&mel, Pcup); - - /* Variable Length coding */ - jpeg2000_init_vlc(&vlc, Lcup, Pcup, Dcup); - - jpeg2000_init_mel_decoder(&mel_state); - - sample_buf = av_calloc((width + 4) * (height + 4), sizeof(int32_t)); - block_states = av_calloc((width + 4) * (height + 4), sizeof(uint8_t)); - - if (!sample_buf || !block_states) { - ret = AVERROR(ENOMEM); - goto free; - } - if ((ret = jpeg2000_decode_ht_cleanup_segment(s, cblk, t1, &mel_state, &mel, &vlc, - &mag_sgn, Dcup, Lcup, Pcup, pLSB, width, - height, sample_buf, block_states)) < 0) - goto free; - - if (cblk->npasses > 1) - jpeg2000_decode_sigprop_segment(cblk, width, height, Dref, Lref, - pLSB - 1, sample_buf, block_states); - - if (cblk->npasses > 2) - if ((ret = jpeg2000_decode_magref_segment(cblk, width, height, Dref, Lref, - pLSB - 1, sample_buf, block_states)) < 0) - goto free; - - pLSB = 31 - M_b; - - /* Reconstruct the sample values */ - for (int y = 0; y < height; y++) { - for (int x = 0; x < width; x++) { - n = x + (y * t1->stride); - val = sample_buf[x + (y * width)]; - /* Convert sign-magnitude to two's complement. */ - val = val >> 31 ? 0x80000000 - val : val; - val >>= (pLSB - 1); - t1->data[n] = val; - } - } -free: - av_freep(&sample_buf); - av_freep(&block_states); - return ret; -} - -/** - * CtxVLC tables (see Rec. ITU-T T.800, Annex C) as found at - * https://github.com/osamu620/OpenHTJ2K (author: Osamu Watanabe) - */ -static const uint16_t dec_cxt_vlc_table1[1024] = { - 0x0016, 0x006A, 0x0046, 0x00DD, 0x0086, 0x888B, 0x0026, 0x444D, 0x0016, 0x00AA, 0x0046, 0x88AD, 0x0086, - 0x003A, 0x0026, 0x00DE, 0x0016, 0x00CA, 0x0046, 0x009D, 0x0086, 0x005A, 0x0026, 0x222D, 0x0016, 0x009A, - 0x0046, 0x007D, 0x0086, 0x01FD, 0x0026, 0x007E, 0x0016, 0x006A, 0x0046, 0x88CD, 0x0086, 0x888B, 0x0026, - 0x111D, 0x0016, 0x00AA, 0x0046, 0x005D, 0x0086, 0x003A, 0x0026, 0x00EE, 0x0016, 0x00CA, 0x0046, 0x00BD, - 0x0086, 0x005A, 0x0026, 0x11FF, 0x0016, 0x009A, 0x0046, 0x003D, 0x0086, 0x04ED, 0x0026, 0x2AAF, 0x0016, - 0x006A, 0x0046, 0x00DD, 0x0086, 0x888B, 0x0026, 0x444D, 0x0016, 0x00AA, 0x0046, 0x88AD, 0x0086, 0x003A, - 0x0026, 0x44EF, 0x0016, 0x00CA, 0x0046, 0x009D, 0x0086, 0x005A, 0x0026, 0x222D, 0x0016, 0x009A, 0x0046, - 0x007D, 0x0086, 0x01FD, 0x0026, 0x00BE, 0x0016, 0x006A, 0x0046, 0x88CD, 0x0086, 0x888B, 0x0026, 0x111D, - 0x0016, 0x00AA, 0x0046, 0x005D, 0x0086, 0x003A, 0x0026, 0x4CCF, 0x0016, 0x00CA, 0x0046, 0x00BD, 0x0086, - 0x005A, 0x0026, 0x00FE, 0x0016, 0x009A, 0x0046, 0x003D, 0x0086, 0x04ED, 0x0026, 0x006F, 0x0002, 0x0088, - 0x0002, 0x005C, 0x0002, 0x0018, 0x0002, 0x00DE, 0x0002, 0x0028, 0x0002, 0x009C, 0x0002, 0x004A, 0x0002, - 0x007E, 0x0002, 0x0088, 0x0002, 0x00CC, 0x0002, 0x0018, 0x0002, 0x888F, 0x0002, 0x0028, 0x0002, 0x00FE, - 0x0002, 0x003A, 0x0002, 0x222F, 0x0002, 0x0088, 0x0002, 0x04FD, 0x0002, 0x0018, 0x0002, 0x00BE, 0x0002, - 0x0028, 0x0002, 0x00BF, 0x0002, 0x004A, 0x0002, 0x006E, 0x0002, 0x0088, 0x0002, 0x00AC, 0x0002, 0x0018, - 0x0002, 0x444F, 0x0002, 0x0028, 0x0002, 0x00EE, 0x0002, 0x003A, 0x0002, 0x113F, 0x0002, 0x0088, 0x0002, - 0x005C, 0x0002, 0x0018, 0x0002, 0x00CF, 0x0002, 0x0028, 0x0002, 0x009C, 0x0002, 0x004A, 0x0002, 0x006F, - 0x0002, 0x0088, 0x0002, 0x00CC, 0x0002, 0x0018, 0x0002, 0x009F, 0x0002, 0x0028, 0x0002, 0x00EF, 0x0002, - 
0x003A, 0x0002, 0x233F, 0x0002, 0x0088, 0x0002, 0x04FD, 0x0002, 0x0018, 0x0002, 0x00AF, 0x0002, 0x0028, - 0x0002, 0x44FF, 0x0002, 0x004A, 0x0002, 0x005F, 0x0002, 0x0088, 0x0002, 0x00AC, 0x0002, 0x0018, 0x0002, - 0x007F, 0x0002, 0x0028, 0x0002, 0x00DF, 0x0002, 0x003A, 0x0002, 0x111F, 0x0002, 0x0028, 0x0002, 0x005C, - 0x0002, 0x008A, 0x0002, 0x00BF, 0x0002, 0x0018, 0x0002, 0x00FE, 0x0002, 0x00CC, 0x0002, 0x007E, 0x0002, - 0x0028, 0x0002, 0x8FFF, 0x0002, 0x004A, 0x0002, 0x007F, 0x0002, 0x0018, 0x0002, 0x00DF, 0x0002, 0x00AC, - 0x0002, 0x133F, 0x0002, 0x0028, 0x0002, 0x222D, 0x0002, 0x008A, 0x0002, 0x00BE, 0x0002, 0x0018, 0x0002, - 0x44EF, 0x0002, 0x2AAD, 0x0002, 0x006E, 0x0002, 0x0028, 0x0002, 0x15FF, 0x0002, 0x004A, 0x0002, 0x009E, - 0x0002, 0x0018, 0x0002, 0x00CF, 0x0002, 0x003C, 0x0002, 0x223F, 0x0002, 0x0028, 0x0002, 0x005C, 0x0002, - 0x008A, 0x0002, 0x2BBF, 0x0002, 0x0018, 0x0002, 0x04EF, 0x0002, 0x00CC, 0x0002, 0x006F, 0x0002, 0x0028, - 0x0002, 0x27FF, 0x0002, 0x004A, 0x0002, 0x009F, 0x0002, 0x0018, 0x0002, 0x00DE, 0x0002, 0x00AC, 0x0002, - 0x444F, 0x0002, 0x0028, 0x0002, 0x222D, 0x0002, 0x008A, 0x0002, 0x8AAF, 0x0002, 0x0018, 0x0002, 0x00EE, - 0x0002, 0x2AAD, 0x0002, 0x005F, 0x0002, 0x0028, 0x0002, 0x44FF, 0x0002, 0x004A, 0x0002, 0x888F, 0x0002, - 0x0018, 0x0002, 0xAAAF, 0x0002, 0x003C, 0x0002, 0x111F, 0x0004, 0x8FFD, 0x0028, 0x005C, 0x0004, 0x00BC, - 0x008A, 0x66FF, 0x0004, 0x00CD, 0x0018, 0x111D, 0x0004, 0x009C, 0x003A, 0x8AAF, 0x0004, 0x00FC, 0x0028, - 0x133D, 0x0004, 0x00AC, 0x004A, 0x3BBF, 0x0004, 0x2BBD, 0x0018, 0x5FFF, 0x0004, 0x006C, 0x157D, 0x455F, - 0x0004, 0x2FFD, 0x0028, 0x222D, 0x0004, 0x22AD, 0x008A, 0x44EF, 0x0004, 0x00CC, 0x0018, 0x4FFF, 0x0004, - 0x007C, 0x003A, 0x447F, 0x0004, 0x04DD, 0x0028, 0x233D, 0x0004, 0x009D, 0x004A, 0x00DE, 0x0004, 0x88BD, - 0x0018, 0xAFFF, 0x0004, 0x115D, 0x1FFD, 0x444F, 0x0004, 0x8FFD, 0x0028, 0x005C, 0x0004, 0x00BC, 0x008A, - 0x8CEF, 0x0004, 0x00CD, 0x0018, 0x111D, 0x0004, 0x009C, 0x003A, 0x888F, 0x0004, 0x00FC, 0x0028, 0x133D, - 0x0004, 0x00AC, 0x004A, 0x44DF, 0x0004, 0x2BBD, 0x0018, 0x8AFF, 0x0004, 0x006C, 0x157D, 0x006F, 0x0004, - 0x2FFD, 0x0028, 0x222D, 0x0004, 0x22AD, 0x008A, 0x00EE, 0x0004, 0x00CC, 0x0018, 0x2EEF, 0x0004, 0x007C, - 0x003A, 0x277F, 0x0004, 0x04DD, 0x0028, 0x233D, 0x0004, 0x009D, 0x004A, 0x1BBF, 0x0004, 0x88BD, 0x0018, - 0x37FF, 0x0004, 0x115D, 0x1FFD, 0x333F, 0x0002, 0x0088, 0x0002, 0x02ED, 0x0002, 0x00CA, 0x0002, 0x4CCF, - 0x0002, 0x0048, 0x0002, 0x23FF, 0x0002, 0x001A, 0x0002, 0x888F, 0x0002, 0x0088, 0x0002, 0x006C, 0x0002, - 0x002A, 0x0002, 0x00AF, 0x0002, 0x0048, 0x0002, 0x22EF, 0x0002, 0x00AC, 0x0002, 0x005F, 0x0002, 0x0088, - 0x0002, 0x444D, 0x0002, 0x00CA, 0x0002, 0xCCCF, 0x0002, 0x0048, 0x0002, 0x00FE, 0x0002, 0x001A, 0x0002, - 0x006F, 0x0002, 0x0088, 0x0002, 0x005C, 0x0002, 0x002A, 0x0002, 0x009F, 0x0002, 0x0048, 0x0002, 0x00DF, - 0x0002, 0x03FD, 0x0002, 0x222F, 0x0002, 0x0088, 0x0002, 0x02ED, 0x0002, 0x00CA, 0x0002, 0x8CCF, 0x0002, - 0x0048, 0x0002, 0x11FF, 0x0002, 0x001A, 0x0002, 0x007E, 0x0002, 0x0088, 0x0002, 0x006C, 0x0002, 0x002A, - 0x0002, 0x007F, 0x0002, 0x0048, 0x0002, 0x00EE, 0x0002, 0x00AC, 0x0002, 0x003E, 0x0002, 0x0088, 0x0002, - 0x444D, 0x0002, 0x00CA, 0x0002, 0x00BE, 0x0002, 0x0048, 0x0002, 0x00BF, 0x0002, 0x001A, 0x0002, 0x003F, - 0x0002, 0x0088, 0x0002, 0x005C, 0x0002, 0x002A, 0x0002, 0x009E, 0x0002, 0x0048, 0x0002, 0x00DE, 0x0002, - 0x03FD, 0x0002, 0x111F, 0x0004, 0x8AED, 0x0048, 0x888D, 0x0004, 0x00DC, 0x00CA, 0x3FFF, 0x0004, 0xCFFD, - 0x002A, 0x003D, 0x0004, 0x00BC, 0x005A, 0x8DDF, 0x0004, 
0x8FFD, 0x0048, 0x006C, 0x0004, 0x027D, 0x008A, - 0x99FF, 0x0004, 0x00EC, 0x00FA, 0x003C, 0x0004, 0x00AC, 0x001A, 0x009F, 0x0004, 0x2FFD, 0x0048, 0x007C, - 0x0004, 0x44CD, 0x00CA, 0x67FF, 0x0004, 0x1FFD, 0x002A, 0x444D, 0x0004, 0x00AD, 0x005A, 0x8CCF, 0x0004, - 0x4FFD, 0x0048, 0x445D, 0x0004, 0x01BD, 0x008A, 0x4EEF, 0x0004, 0x45DD, 0x00FA, 0x111D, 0x0004, 0x009C, - 0x001A, 0x222F, 0x0004, 0x8AED, 0x0048, 0x888D, 0x0004, 0x00DC, 0x00CA, 0xAFFF, 0x0004, 0xCFFD, 0x002A, - 0x003D, 0x0004, 0x00BC, 0x005A, 0x11BF, 0x0004, 0x8FFD, 0x0048, 0x006C, 0x0004, 0x027D, 0x008A, 0x22EF, - 0x0004, 0x00EC, 0x00FA, 0x003C, 0x0004, 0x00AC, 0x001A, 0x227F, 0x0004, 0x2FFD, 0x0048, 0x007C, 0x0004, - 0x44CD, 0x00CA, 0x5DFF, 0x0004, 0x1FFD, 0x002A, 0x444D, 0x0004, 0x00AD, 0x005A, 0x006F, 0x0004, 0x4FFD, - 0x0048, 0x445D, 0x0004, 0x01BD, 0x008A, 0x11DF, 0x0004, 0x45DD, 0x00FA, 0x111D, 0x0004, 0x009C, 0x001A, - 0x155F, 0x0006, 0x00FC, 0x0018, 0x111D, 0x0048, 0x888D, 0x00AA, 0x4DDF, 0x0006, 0x2AAD, 0x005A, 0x67FF, - 0x0028, 0x223D, 0x00BC, 0xAAAF, 0x0006, 0x00EC, 0x0018, 0x5FFF, 0x0048, 0x006C, 0x008A, 0xCCCF, 0x0006, - 0x009D, 0x00CA, 0x44EF, 0x0028, 0x003C, 0x8FFD, 0x137F, 0x0006, 0x8EED, 0x0018, 0x1FFF, 0x0048, 0x007C, - 0x00AA, 0x4CCF, 0x0006, 0x227D, 0x005A, 0x1DDF, 0x0028, 0x444D, 0x4FFD, 0x155F, 0x0006, 0x00DC, 0x0018, - 0x2EEF, 0x0048, 0x445D, 0x008A, 0x22BF, 0x0006, 0x009C, 0x00CA, 0x8CDF, 0x0028, 0x222D, 0x2FFD, 0x226F, - 0x0006, 0x00FC, 0x0018, 0x111D, 0x0048, 0x888D, 0x00AA, 0x1BBF, 0x0006, 0x2AAD, 0x005A, 0x33FF, 0x0028, - 0x223D, 0x00BC, 0x8AAF, 0x0006, 0x00EC, 0x0018, 0x9BFF, 0x0048, 0x006C, 0x008A, 0x8ABF, 0x0006, 0x009D, - 0x00CA, 0x4EEF, 0x0028, 0x003C, 0x8FFD, 0x466F, 0x0006, 0x8EED, 0x0018, 0xCFFF, 0x0048, 0x007C, 0x00AA, - 0x8CCF, 0x0006, 0x227D, 0x005A, 0xAEEF, 0x0028, 0x444D, 0x4FFD, 0x477F, 0x0006, 0x00DC, 0x0018, 0xAFFF, - 0x0048, 0x445D, 0x008A, 0x2BBF, 0x0006, 0x009C, 0x00CA, 0x44DF, 0x0028, 0x222D, 0x2FFD, 0x133F, 0x00F6, - 0xAFFD, 0x1FFB, 0x003C, 0x0008, 0x23BD, 0x007A, 0x11DF, 0x00F6, 0x45DD, 0x2FFB, 0x4EEF, 0x00DA, 0x177D, - 0xCFFD, 0x377F, 0x00F6, 0x3FFD, 0x8FFB, 0x111D, 0x0008, 0x009C, 0x005A, 0x1BBF, 0x00F6, 0x00CD, 0x00BA, - 0x8DDF, 0x4FFB, 0x006C, 0x9BFD, 0x455F, 0x00F6, 0x67FD, 0x1FFB, 0x002C, 0x0008, 0x00AC, 0x007A, 0x009F, - 0x00F6, 0x00AD, 0x2FFB, 0x7FFF, 0x00DA, 0x004C, 0x5FFD, 0x477F, 0x00F6, 0x00EC, 0x8FFB, 0x001C, 0x0008, - 0x008C, 0x005A, 0x888F, 0x00F6, 0x00CC, 0x00BA, 0x2EEF, 0x4FFB, 0x115D, 0x8AED, 0x113F, 0x00F6, 0xAFFD, - 0x1FFB, 0x003C, 0x0008, 0x23BD, 0x007A, 0x1DDF, 0x00F6, 0x45DD, 0x2FFB, 0xBFFF, 0x00DA, 0x177D, 0xCFFD, - 0x447F, 0x00F6, 0x3FFD, 0x8FFB, 0x111D, 0x0008, 0x009C, 0x005A, 0x277F, 0x00F6, 0x00CD, 0x00BA, 0x22EF, - 0x4FFB, 0x006C, 0x9BFD, 0x444F, 0x00F6, 0x67FD, 0x1FFB, 0x002C, 0x0008, 0x00AC, 0x007A, 0x11BF, 0x00F6, - 0x00AD, 0x2FFB, 0xFFFF, 0x00DA, 0x004C, 0x5FFD, 0x233F, 0x00F6, 0x00EC, 0x8FFB, 0x001C, 0x0008, 0x008C, - 0x005A, 0x006F, 0x00F6, 0x00CC, 0x00BA, 0x8BBF, 0x4FFB, 0x115D, 0x8AED, 0x222F}; - -static const uint16_t dec_cxt_vlc_table0[1024] = { - 0x0026, 0x00AA, 0x0046, 0x006C, 0x0086, 0x8AED, 0x0018, 0x8DDF, 0x0026, 0x01BD, 0x0046, 0x5FFF, 0x0086, - 0x027D, 0x005A, 0x155F, 0x0026, 0x003A, 0x0046, 0x444D, 0x0086, 0x4CCD, 0x0018, 0xCCCF, 0x0026, 0x2EFD, - 0x0046, 0x99FF, 0x0086, 0x009C, 0x00CA, 0x133F, 0x0026, 0x00AA, 0x0046, 0x445D, 0x0086, 0x8CCD, 0x0018, - 0x11DF, 0x0026, 0x4FFD, 0x0046, 0xCFFF, 0x0086, 0x009D, 0x005A, 0x007E, 0x0026, 0x003A, 0x0046, 0x1FFF, - 0x0086, 0x88AD, 0x0018, 0x00BE, 0x0026, 0x8FFD, 0x0046, 0x4EEF, 0x0086, 0x888D, 
0x00CA, 0x111F, 0x0026, - 0x00AA, 0x0046, 0x006C, 0x0086, 0x8AED, 0x0018, 0x45DF, 0x0026, 0x01BD, 0x0046, 0x22EF, 0x0086, 0x027D, - 0x005A, 0x227F, 0x0026, 0x003A, 0x0046, 0x444D, 0x0086, 0x4CCD, 0x0018, 0x11BF, 0x0026, 0x2EFD, 0x0046, - 0x00FE, 0x0086, 0x009C, 0x00CA, 0x223F, 0x0026, 0x00AA, 0x0046, 0x445D, 0x0086, 0x8CCD, 0x0018, 0x00DE, - 0x0026, 0x4FFD, 0x0046, 0xABFF, 0x0086, 0x009D, 0x005A, 0x006F, 0x0026, 0x003A, 0x0046, 0x6EFF, 0x0086, - 0x88AD, 0x0018, 0x2AAF, 0x0026, 0x8FFD, 0x0046, 0x00EE, 0x0086, 0x888D, 0x00CA, 0x222F, 0x0004, 0x00CA, - 0x0088, 0x027D, 0x0004, 0x4CCD, 0x0028, 0x00FE, 0x0004, 0x2AFD, 0x0048, 0x005C, 0x0004, 0x009D, 0x0018, - 0x00DE, 0x0004, 0x01BD, 0x0088, 0x006C, 0x0004, 0x88AD, 0x0028, 0x11DF, 0x0004, 0x8AED, 0x0048, 0x003C, - 0x0004, 0x888D, 0x0018, 0x111F, 0x0004, 0x00CA, 0x0088, 0x006D, 0x0004, 0x88CD, 0x0028, 0x88FF, 0x0004, - 0x8BFD, 0x0048, 0x444D, 0x0004, 0x009C, 0x0018, 0x00BE, 0x0004, 0x4EFD, 0x0088, 0x445D, 0x0004, 0x00AC, - 0x0028, 0x00EE, 0x0004, 0x45DD, 0x0048, 0x222D, 0x0004, 0x003D, 0x0018, 0x007E, 0x0004, 0x00CA, 0x0088, - 0x027D, 0x0004, 0x4CCD, 0x0028, 0x1FFF, 0x0004, 0x2AFD, 0x0048, 0x005C, 0x0004, 0x009D, 0x0018, 0x11BF, - 0x0004, 0x01BD, 0x0088, 0x006C, 0x0004, 0x88AD, 0x0028, 0x22EF, 0x0004, 0x8AED, 0x0048, 0x003C, 0x0004, - 0x888D, 0x0018, 0x227F, 0x0004, 0x00CA, 0x0088, 0x006D, 0x0004, 0x88CD, 0x0028, 0x4EEF, 0x0004, 0x8BFD, - 0x0048, 0x444D, 0x0004, 0x009C, 0x0018, 0x2AAF, 0x0004, 0x4EFD, 0x0088, 0x445D, 0x0004, 0x00AC, 0x0028, - 0x8DDF, 0x0004, 0x45DD, 0x0048, 0x222D, 0x0004, 0x003D, 0x0018, 0x155F, 0x0004, 0x005A, 0x0088, 0x006C, - 0x0004, 0x88DD, 0x0028, 0x23FF, 0x0004, 0x11FD, 0x0048, 0x444D, 0x0004, 0x00AD, 0x0018, 0x00BE, 0x0004, - 0x137D, 0x0088, 0x155D, 0x0004, 0x00CC, 0x0028, 0x00DE, 0x0004, 0x02ED, 0x0048, 0x111D, 0x0004, 0x009D, - 0x0018, 0x007E, 0x0004, 0x005A, 0x0088, 0x455D, 0x0004, 0x44CD, 0x0028, 0x00EE, 0x0004, 0x1FFD, 0x0048, - 0x003C, 0x0004, 0x00AC, 0x0018, 0x555F, 0x0004, 0x47FD, 0x0088, 0x113D, 0x0004, 0x02BD, 0x0028, 0x477F, - 0x0004, 0x4CDD, 0x0048, 0x8FFF, 0x0004, 0x009C, 0x0018, 0x222F, 0x0004, 0x005A, 0x0088, 0x006C, 0x0004, - 0x88DD, 0x0028, 0x00FE, 0x0004, 0x11FD, 0x0048, 0x444D, 0x0004, 0x00AD, 0x0018, 0x888F, 0x0004, 0x137D, - 0x0088, 0x155D, 0x0004, 0x00CC, 0x0028, 0x8CCF, 0x0004, 0x02ED, 0x0048, 0x111D, 0x0004, 0x009D, 0x0018, - 0x006F, 0x0004, 0x005A, 0x0088, 0x455D, 0x0004, 0x44CD, 0x0028, 0x1DDF, 0x0004, 0x1FFD, 0x0048, 0x003C, - 0x0004, 0x00AC, 0x0018, 0x227F, 0x0004, 0x47FD, 0x0088, 0x113D, 0x0004, 0x02BD, 0x0028, 0x22BF, 0x0004, - 0x4CDD, 0x0048, 0x22EF, 0x0004, 0x009C, 0x0018, 0x233F, 0x0006, 0x4DDD, 0x4FFB, 0xCFFF, 0x0018, 0x113D, - 0x005A, 0x888F, 0x0006, 0x23BD, 0x008A, 0x00EE, 0x002A, 0x155D, 0xAAFD, 0x277F, 0x0006, 0x44CD, 0x8FFB, - 0x44EF, 0x0018, 0x467D, 0x004A, 0x2AAF, 0x0006, 0x00AC, 0x555B, 0x99DF, 0x1FFB, 0x003C, 0x5FFD, 0x266F, - 0x0006, 0x1DDD, 0x4FFB, 0x6EFF, 0x0018, 0x177D, 0x005A, 0x1BBF, 0x0006, 0x88AD, 0x008A, 0x5DDF, 0x002A, - 0x444D, 0x2FFD, 0x667F, 0x0006, 0x00CC, 0x8FFB, 0x2EEF, 0x0018, 0x455D, 0x004A, 0x119F, 0x0006, 0x009C, - 0x555B, 0x8CCF, 0x1FFB, 0x111D, 0x8CED, 0x006E, 0x0006, 0x4DDD, 0x4FFB, 0x3FFF, 0x0018, 0x113D, 0x005A, - 0x11BF, 0x0006, 0x23BD, 0x008A, 0x8DDF, 0x002A, 0x155D, 0xAAFD, 0x222F, 0x0006, 0x44CD, 0x8FFB, 0x00FE, - 0x0018, 0x467D, 0x004A, 0x899F, 0x0006, 0x00AC, 0x555B, 0x00DE, 0x1FFB, 0x003C, 0x5FFD, 0x446F, 0x0006, - 0x1DDD, 0x4FFB, 0x9BFF, 0x0018, 0x177D, 0x005A, 0x00BE, 0x0006, 0x88AD, 0x008A, 0xCDDF, 0x002A, 0x444D, - 0x2FFD, 0x007E, 0x0006, 
0x00CC, 0x8FFB, 0x4EEF, 0x0018, 0x455D, 0x004A, 0x377F, 0x0006, 0x009C, 0x555B, - 0x8BBF, 0x1FFB, 0x111D, 0x8CED, 0x233F, 0x0004, 0x00AA, 0x0088, 0x047D, 0x0004, 0x01DD, 0x0028, 0x11DF, - 0x0004, 0x27FD, 0x0048, 0x005C, 0x0004, 0x8AAD, 0x0018, 0x2BBF, 0x0004, 0x009C, 0x0088, 0x006C, 0x0004, - 0x00CC, 0x0028, 0x00EE, 0x0004, 0x8CED, 0x0048, 0x222D, 0x0004, 0x888D, 0x0018, 0x007E, 0x0004, 0x00AA, - 0x0088, 0x006D, 0x0004, 0x88CD, 0x0028, 0x00FE, 0x0004, 0x19FD, 0x0048, 0x003C, 0x0004, 0x2AAD, 0x0018, - 0xAAAF, 0x0004, 0x8BFD, 0x0088, 0x005D, 0x0004, 0x00BD, 0x0028, 0x4CCF, 0x0004, 0x44ED, 0x0048, 0x4FFF, - 0x0004, 0x223D, 0x0018, 0x111F, 0x0004, 0x00AA, 0x0088, 0x047D, 0x0004, 0x01DD, 0x0028, 0x99FF, 0x0004, - 0x27FD, 0x0048, 0x005C, 0x0004, 0x8AAD, 0x0018, 0x00BE, 0x0004, 0x009C, 0x0088, 0x006C, 0x0004, 0x00CC, - 0x0028, 0x00DE, 0x0004, 0x8CED, 0x0048, 0x222D, 0x0004, 0x888D, 0x0018, 0x444F, 0x0004, 0x00AA, 0x0088, - 0x006D, 0x0004, 0x88CD, 0x0028, 0x2EEF, 0x0004, 0x19FD, 0x0048, 0x003C, 0x0004, 0x2AAD, 0x0018, 0x447F, - 0x0004, 0x8BFD, 0x0088, 0x005D, 0x0004, 0x00BD, 0x0028, 0x009F, 0x0004, 0x44ED, 0x0048, 0x67FF, 0x0004, - 0x223D, 0x0018, 0x133F, 0x0006, 0x00CC, 0x008A, 0x9DFF, 0x2FFB, 0x467D, 0x1FFD, 0x99BF, 0x0006, 0x2AAD, - 0x002A, 0x66EF, 0x4FFB, 0x005C, 0x2EED, 0x377F, 0x0006, 0x89BD, 0x004A, 0x00FE, 0x8FFB, 0x006C, 0x67FD, - 0x889F, 0x0006, 0x888D, 0x001A, 0x5DDF, 0x00AA, 0x222D, 0x89DD, 0x444F, 0x0006, 0x2BBD, 0x008A, 0xCFFF, - 0x2FFB, 0x226D, 0x009C, 0x00BE, 0x0006, 0xAAAD, 0x002A, 0x1DDF, 0x4FFB, 0x003C, 0x4DDD, 0x466F, 0x0006, - 0x8AAD, 0x004A, 0xAEEF, 0x8FFB, 0x445D, 0x8EED, 0x177F, 0x0006, 0x233D, 0x001A, 0x4CCF, 0x00AA, 0xAFFF, - 0x88CD, 0x133F, 0x0006, 0x00CC, 0x008A, 0x77FF, 0x2FFB, 0x467D, 0x1FFD, 0x3BBF, 0x0006, 0x2AAD, 0x002A, - 0x00EE, 0x4FFB, 0x005C, 0x2EED, 0x007E, 0x0006, 0x89BD, 0x004A, 0x4EEF, 0x8FFB, 0x006C, 0x67FD, 0x667F, - 0x0006, 0x888D, 0x001A, 0x00DE, 0x00AA, 0x222D, 0x89DD, 0x333F, 0x0006, 0x2BBD, 0x008A, 0x57FF, 0x2FFB, - 0x226D, 0x009C, 0x199F, 0x0006, 0xAAAD, 0x002A, 0x99DF, 0x4FFB, 0x003C, 0x4DDD, 0x155F, 0x0006, 0x8AAD, - 0x004A, 0xCEEF, 0x8FFB, 0x445D, 0x8EED, 0x277F, 0x0006, 0x233D, 0x001A, 0x1BBF, 0x00AA, 0x3FFF, 0x88CD, - 0x111F, 0x0006, 0x45DD, 0x2FFB, 0x111D, 0x0018, 0x467D, 0x8FFD, 0xCCCF, 0x0006, 0x19BD, 0x004A, 0x22EF, - 0x002A, 0x222D, 0x3FFD, 0x888F, 0x0006, 0x00CC, 0x008A, 0x00FE, 0x0018, 0x115D, 0xCFFD, 0x8AAF, 0x0006, - 0x00AC, 0x003A, 0x8CDF, 0x1FFB, 0x133D, 0x66FD, 0x466F, 0x0006, 0x8CCD, 0x2FFB, 0x5FFF, 0x0018, 0x006C, - 0x4FFD, 0xABBF, 0x0006, 0x22AD, 0x004A, 0x00EE, 0x002A, 0x233D, 0xAEFD, 0x377F, 0x0006, 0x2BBD, 0x008A, - 0x55DF, 0x0018, 0x005C, 0x177D, 0x119F, 0x0006, 0x009C, 0x003A, 0x4CCF, 0x1FFB, 0x333D, 0x8EED, 0x444F, - 0x0006, 0x45DD, 0x2FFB, 0x111D, 0x0018, 0x467D, 0x8FFD, 0x99BF, 0x0006, 0x19BD, 0x004A, 0x2EEF, 0x002A, - 0x222D, 0x3FFD, 0x667F, 0x0006, 0x00CC, 0x008A, 0x4EEF, 0x0018, 0x115D, 0xCFFD, 0x899F, 0x0006, 0x00AC, - 0x003A, 0x00DE, 0x1FFB, 0x133D, 0x66FD, 0x226F, 0x0006, 0x8CCD, 0x2FFB, 0x9BFF, 0x0018, 0x006C, 0x4FFD, - 0x00BE, 0x0006, 0x22AD, 0x004A, 0x1DDF, 0x002A, 0x233D, 0xAEFD, 0x007E, 0x0006, 0x2BBD, 0x008A, 0xCEEF, - 0x0018, 0x005C, 0x177D, 0x277F, 0x0006, 0x009C, 0x003A, 0x8BBF, 0x1FFB, 0x333D, 0x8EED, 0x455F, 0x1FF9, - 0x1DDD, 0xAFFB, 0x00DE, 0x8FF9, 0x001C, 0xFFFB, 0x477F, 0x4FF9, 0x177D, 0x3FFB, 0x3BBF, 0x2FF9, 0xAEEF, - 0x8EED, 0x444F, 0x1FF9, 0x22AD, 0x000A, 0x8BBF, 0x8FF9, 0x00FE, 0xCFFD, 0x007E, 0x4FF9, 0x115D, 0x5FFB, - 0x577F, 0x2FF9, 0x8DDF, 0x2EED, 0x333F, 0x1FF9, 0x2BBD, 0xAFFB, 0x88CF, 0x8FF9, 
0xBFFF, 0xFFFB, 0x377F, - 0x4FF9, 0x006D, 0x3FFB, 0x00BE, 0x2FF9, 0x66EF, 0x9FFD, 0x133F, 0x1FF9, 0x009D, 0x000A, 0xABBF, 0x8FF9, - 0xDFFF, 0x6FFD, 0x006E, 0x4FF9, 0x002C, 0x5FFB, 0x888F, 0x2FF9, 0xCDDF, 0x4DDD, 0x222F, 0x1FF9, 0x1DDD, - 0xAFFB, 0x4CCF, 0x8FF9, 0x001C, 0xFFFB, 0x277F, 0x4FF9, 0x177D, 0x3FFB, 0x99BF, 0x2FF9, 0xCEEF, 0x8EED, - 0x004E, 0x1FF9, 0x22AD, 0x000A, 0x00AE, 0x8FF9, 0x7FFF, 0xCFFD, 0x005E, 0x4FF9, 0x115D, 0x5FFB, 0x009E, - 0x2FF9, 0x5DDF, 0x2EED, 0x003E, 0x1FF9, 0x2BBD, 0xAFFB, 0x00CE, 0x8FF9, 0xEFFF, 0xFFFB, 0x667F, 0x4FF9, - 0x006D, 0x3FFB, 0x8AAF, 0x2FF9, 0x00EE, 0x9FFD, 0x233F, 0x1FF9, 0x009D, 0x000A, 0x1BBF, 0x8FF9, 0x4EEF, - 0x6FFD, 0x455F, 0x4FF9, 0x002C, 0x5FFB, 0x008E, 0x2FF9, 0x99DF, 0x4DDD, 0x111F}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Yeager Hunter Legend - A Stunning Monster Hunting Game with Next-Gen Graphics.md b/spaces/congsaPfin/Manga-OCR/logs/Download Yeager Hunter Legend - A Stunning Monster Hunting Game with Next-Gen Graphics.md deleted file mode 100644 index 9132472d604db18937c34b0656a54cff24cf590f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Yeager Hunter Legend - A Stunning Monster Hunting Game with Next-Gen Graphics.md +++ /dev/null @@ -1,208 +0,0 @@ -
        -

        Download Game Yeager Hunter Legend: A Guide for Beginners

        -

        If you are looking for a thrilling action role-playing game that lets you hunt monsters, explore an alien world, and customize your character, then you should check out Yeager Hunter Legend. This game is developed by IGG.COM, the same company behind popular titles like Lords Mobile and Castle Clash. In this game, you play as Yeager, an elite hunter from the Vyderan clan who is sent to retrieve a stolen relic from the mysterious planet of Ekors. Along the way, you will encounter ferocious beasts, ancient civilizations, and dark secrets.

        -

        Yeager Hunter Legend is available for Android, iOS, and PC. You can download it for free from the Google Play Store or the App Store, or play it on PC through the BlueStacks emulator. Before you do, though, you might want to read this guide first. In this article, we will show you how to download and install the game on your device, how to choose your weapon and customize your character, how to hunt monsters and explore the world of Ekors, how to play with other players and join alliances, and some tips and tricks to get stronger and have more fun. Let's get started!

        -




        -

        How to Download and Install Yeager Hunter Legend on Your Device

        -

        Depending on what device you are using, there are different steps you need to follow to download and install Yeager Hunter Legend. Here are the instructions for each platform:

        -

        Android

        -

        If you have an Android device, you can download Yeager Hunter Legend from the Google Play Store. Here are the steps:

        -
          -
        1. Open the Google Play Store app on your device.
        2. Search for "Yeager Hunter Legend" in the search bar.
        3. Tap on the game icon from the results.
        4. Tap on "Install" to start downloading the game.
        5. Wait for the download to finish. The game will automatically install on your device.
        6. Tap on "Open" or find the game icon on your home screen or app drawer.
        7. Enjoy playing Yeager Hunter Legend!
        -

        Before you download the game, make sure you have enough storage space on your device. The game requires about 1.5 GB of free space to run smoothly. You also need to have a stable internet connection to play the game online. The game is compatible with Android devices running version 4.4 or higher. The recommended system requirements are 3 GB of RAM and a quad-core processor.

        -

        iOS

        -

        If you have an iOS device, you can download Yeager Hunter Legend from the App Store. Here are the steps:

        -
          -
        1. Open the App Store app on your device.
        2. Search for "Yeager Hunter Legend" in the search bar.
        3. Tap on the game icon from the results.
        4. Tap on "Get" to start downloading the game.
        5. Enter your Apple ID password or use Touch ID or Face ID to confirm the download.
        6. Wait for the download to finish. The game will automatically install on your device.
        7. Tap on "Open" or find the game icon on your home screen or app library.
        8. Enjoy playing Yeager Hunter Legend!
        -

        Before you download the game, make sure you have enough storage space on your device. The game requires about 1.6 GB of free space to run smoothly. You also need to have a stable internet connection to play the game online. The game is compatible with iOS devices running version 10.0 or higher. The recommended system requirements are iPhone 7 or newer, iPad Air 2 or newer, or iPod touch 7th generation or newer.

        -

        PC

        -

        If you have a PC, you can play Yeager Hunter Legend through the BlueStacks emulator. BlueStacks is a program that lets you run Android games on your PC. Here are the steps:

        -
          -
        1. Download and install BlueStacks from its official website: https://www.bluestacks.com/
        2. Open BlueStacks and sign in with your Google account.
        3. Search for "Yeager Hunter Legend" in the search bar.
        4. Click on the game icon from the results.
        5. Click on "Install" to start downloading the game.
        6. Wait for the download to finish. The game will automatically install on your PC.
        7. Click on "Open" or find the game icon on your BlueStacks home screen.
        8. Enjoy playing Yeager Hunter Legend!
        -

        Before you download the game, make sure you have enough storage space on your PC. The game requires about 2 GB of free space to run smoothly. You also need to have a stable internet connection to play the game online. The game is compatible with Windows 7 or higher and Mac OS X 10.11 or higher. The recommended system requirements are 4 GB of RAM and an Intel or AMD processor.

        -


        -

        How to Choose Your Weapon and Customize Your Character

        -

        After you download and install Yeager Hunter Legend, you can start creating your own hunter character and choose your weapon class. There are five weapon classes in the game, each with its own advantages and disadvantages. You can also customize your character's appearance, name, and gender. Here are some details about each weapon class and how to customize your character:

        -

        Weapon Classes

        -

        The five weapon classes in Yeager Hunter Legend are Hunting Sword, Force Hammer, Fury Blades, Flux Blaster, and Eidolon Spear. Each weapon class has its own moveset, abilities, and sigils that affect your combat style and performance. Here is a brief overview of each weapon class:

        | Weapon Class | Description |
        | --- | --- |
        | Hunting Sword | A balanced weapon that deals quick slashes and powerful thrusts. It has high mobility and can use sigils to enhance its attacks or heal itself. |
        | Force Hammer | A heavy weapon that deals massive damage and stuns enemies with its swings and slams. It has low mobility but can use sigils to increase its defense or unleash shockwaves. |
        | Fury Blades | A dual-wielding weapon that deals rapid strikes and combos with its blades. It has medium mobility and can use sigils to boost its speed or unleash elemental attacks. |
        | Flux Blaster | A ranged weapon that deals precise shots and explosive blasts with its gun. It has medium mobility and can use sigils to switch between different ammo types or activate special modes. |
        | Eidolon Spear | A versatile weapon that deals swift stabs and wide sweeps with its spear. It has high mobility and can use sigils to summon an eidolon that fights alongside you or transforms your weapon. |
        -

        You can switch between different weapon classes at any time in the game. You can also learn different weapon schools that give you access to different moves and abilities for each weapon class. You can unlock new weapon schools by completing quests, hunting monsters, or buying them with in-game currency.

        -

        Character Creation

        -

        After you choose your weapon class, you can customize your character's appearance, name, and gender. You can choose from various options for your character's face, hair, eyes, skin, and outfit. You can also enter a name for your character and select a male or female voice. You can change your character's appearance and name at any time in the game by visiting the barber shop or the name changer in the main hub.

        -

        Equipment

        -

        As you progress in the game, you will be able to forge, upgrade, and equip different gear for your character. Gear includes weapons, armor, accessories, and sigils. Gear can improve your character's stats, such as attack, defense, speed, and health. Gear can also have special effects, such as elemental damage, critical chance, or resistance.

        -

        You can forge new gear by using Kallar-infused beast parts that you obtain from hunting monsters. You can upgrade your gear by using more beast parts or other materials. You can equip up to four pieces of gear at a time: one weapon, one armor, one accessory, and one sigil. You can also have multiple sets of gear that you can switch between depending on the situation.

        -

        Ancient Seals

        -

        Ancient seals are artifacts that grant you hunting skills and bonuses. Hunting skills are special abilities that you can use in combat, such as healing, buffing, debuffing, or attacking. Bonuses are passive effects that enhance your stats or performance, such as increased damage, reduced cooldowns, or extra rewards.

        -

        You can equip up to three ancient seals at a time: one primary seal, one secondary seal, and one tertiary seal. The primary seal determines your hunting skill and its level. The secondary seal determines the bonus effect and its level. The tertiary seal determines the sigil effect and its level.

        -

        You can obtain ancient seals by completing quests, hunting monsters, or buying them with in-game currency. You can also fuse two ancient seals of the same type to create a new one with a higher level or a different effect.

        -

        How to Hunt Monsters and Explore the World of Ekors

        -

        One of the main features of Yeager Hunter Legend is hunting monsters and exploring the world of Ekors. Ekors is a planet full of diverse environments, such as forests, canyons, deserts, volcanoes, and ruins. Each environment has its own monsters, secrets, and challenges.

        -

        To hunt monsters and explore Ekors, you need to accept quests from the quest board or NPCs in the main hub. Quests will assign you a specific monster to hunt or a specific area to explore. You can also choose to hunt or explore freely without any quest objectives.

        -

        Once you accept a quest or choose a destination, you will be transported to the hunting grounds or the exploration zone. There you will encounter various monsters that you can fight or avoid. You will also find various items that you can collect or interact with.

        -

        Combat System

        -

        The combat system in Yeager Hunter Legend is fast-paced and dynamic. You can use various moves and abilities to attack enemies or defend yourself. You can also use combos to chain your attacks and deal more damage. You can also use sigils to activate special effects or modes that enhance your combat abilities.

        -

        The combat system is different for each weapon class and weapon school. You need to learn the moves and abilities of your weapon and how to use them effectively. You also need to pay attention to your stamina, health, and sigil gauges. Stamina is used for performing moves and abilities. Health is your life force that decreases when you take damage. Sigil is your energy that allows you to use sigils.

        -

        You can replenish your stamina, health, and sigil by using items, skills, or sigils. You can also replenish them by resting at a campsite or returning to the main hub. You can carry up to 10 items at a time, such as potions, bombs, traps, or whistles. You can use items by tapping on their icons on the screen or by assigning them to quick slots.

        -

        To fight enemies, you need to target them by tapping on them or by using the auto-target feature. You can switch targets by swiping left or right on the screen or by using the target switch button. You can also lock on a target by tapping on the lock button. You can move around by using the virtual joystick on the left side of the screen. You can attack by tapping on the attack button on the right side of the screen. You can perform different attacks by tapping, holding, or swiping the attack button. You can also use special moves or abilities by tapping on their icons on the right side of the screen.

        -

        To defend yourself, you need to dodge, block, or parry enemy attacks. You can dodge by tapping on the dodge button on the right side of the screen. You can block by holding the block button on the right side of the screen. You can parry by timing your block right before an enemy attack hits you. Dodging, blocking, and parrying consume stamina and have different effects depending on your weapon class and weapon school.

        -

        To use sigils, you need to tap on the sigil button on the right side of the screen. Sigils are special effects or modes that enhance your combat abilities for a limited time. Sigils consume sigil energy and have different effects depending on your weapon class, weapon school, and ancient seal.

        -

        Monster Types

        -

        The world of Ekors is inhabited by various types of monsters that you can hunt or encounter in your quests or exploration. Monsters have different abilities, behaviors, and weaknesses that you need to learn and exploit. Monsters also drop different parts that you can use to forge or upgrade your gear.

        -

        There are four main types of monsters in Yeager Hunter Legend: beasts, reptiles, insects, and ancients. Each type has its own subtypes that have specific characteristics and traits. Here is a brief overview of each type and some examples of each subtype:

| Type | Subtype | Example |
| --- | --- | --- |
| Beasts | Furry mammals that are agile and ferocious. | Wolvar: A wolf-like beast that hunts in packs and uses its claws and fangs to attack. |
| Beasts | Feathered birds that are swift and cunning. | Roc: A giant eagle-like beast that flies in the sky and uses its talons and beak to attack. |
| Beasts | Horned ungulates that are sturdy and powerful. | Bullhorn: A bull-like beast that charges at its enemies and uses its horns and hooves to attack. |
| Reptiles | Scaled lizards that are stealthy and venomous. | Viper: A snake-like reptile that slithers on the ground and uses its fangs and tail to attack. |
| Reptiles | Armored turtles that are defensive and explosive. | Blastoise: A turtle-like reptile that hides in its shell and uses its cannons and mines to attack. |
| Reptiles | Spiked crocodiles that are aggressive and durable. | Crocus: A crocodile-like reptile that lurks in the water and uses its jaws and spikes to attack. |
| Insects | Winged bugs that are nimble and annoying. | Beezle: A bee-like insect that buzzes in the air and uses its stinger and swarm to attack. |
| Insects | Segmented worms that are flexible and corrosive. | Acidworm: A worm-like insect that burrows in the ground and uses its acid and tentacles to attack. |
| Insects | Crustacean crabs that are hardy and clawed. | Crabster: A crab-like insect that scuttles on the land and uses its pincers and shells to attack. |
| Ancients | Dragon-like creatures that are majestic and elemental. | Drake: A dragon-like ancient that breathes fire and uses its wings and claws to attack. |
| Ancients | Giant-like creatures that are colossal and destructive. | Titan: A giant-like ancient that causes earthquakes and uses its fists and rocks to attack. |
| Ancients | Mech-like creatures that are advanced and technological. | Cyber: A mech-like ancient that shoots lasers and uses its gears and missiles to attack. |
        -

        You can learn more about each monster type and subtype by checking the monster encyclopedia in the game. The monster encyclopedia will show you the monster's name, appearance, description, stats, abilities, weaknesses, drops, and locations. You can also see your hunting record for each monster, such as how many times you have hunted it, how long it took you to hunt it, and what rewards you got from it.

        -

        Exploration

        -

        Besides hunting monsters, you can also explore the world of Ekors and discover its secrets and wonders. Ekors is a vast and diverse planet with different environments, such as forests, canyons, deserts, volcanoes, and ruins. Each environment has its own features, such as plants, animals, weather, terrain, and structures.

        -

        You can explore Ekors by using the map feature in the game. The map will show you the different regions of Ekors and the different areas within each region. You can also see your current location, your quest objectives, your allies' locations, and your enemies' locations. You can also mark points of interest on the map, such as campsites, resources, items, or secrets.

        -

        You can navigate through Ekors by using the virtual joystick on the left side of the screen. You can also use the sprint button on the right side of the screen to run faster. You can also use the jump button on the right side of the screen to jump over obstacles or gaps. You can also use the glide button on the right side of the screen to glide in the air with your jetpack. You can also use the interact button on the right side of the screen to interact with objects or NPCs in the environment.

        -

        As you explore Ekors, you will find various items that you can collect or interact with. These items include resources, such as plants, minerals, or beast parts; consumables, such as potions, bombs, or traps; equipment, such as weapons, armor, or accessories; ancient seals, such as artifacts that grant hunting skills or bonuses; secrets, such as hidden items, lore, or quests; and NPCs, such as allies or enemies that you can talk to or fight with.

        -

        How to Play with Other Players and Join Alliances

        -

        Another feature of Yeager Hunter Legend is playing with other players and joining alliances. You can team up with other hunters from around the world and take on bigger challenges together. You can also join or create an alliance and participate in events with your alliance members. Here are some details about how to play with other players and join alliances:

        -

        Co-op Mode

        -

        Co-op mode is a mode where you can team up with other players and hunt monsters or explore Ekors together. You can play co-op mode by using the co-op feature in the game. The co-op feature will allow you to join or create a co-op room where you can invite or find other players to play with. You can also use the quick match feature to join a random co-op room with other players who have similar levels or preferences as you.

        -

        You can play co-op mode with up to four players at a time. You can communicate with your co-op partners by using the chat feature or the voice chat feature in the game. You can also use emotes or stickers to express yourself or convey messages. You can also see your co-op partners' names , health, and sigil gauges on the screen. You can also see their weapon class, weapon school, and ancient seal icons on the screen.

        -

        When you play co-op mode, you will share the same quest objectives, rewards, and items with your co-op partners. You will also share the same monster health, stamina, and sigil gauges with your co-op partners. You can help your co-op partners by healing them, buffing them, or reviving them when they are down. You can also cooperate with your co-op partners by using combos, skills, or sigils that complement each other.

        -

        Alliance System

        -

        Alliance system is a system where you can join or create an alliance and participate in events with your alliance members. You can access the alliance system by using the alliance feature in the game. The alliance feature will allow you to join or create an alliance where you can invite or find other players to join. You can also use the alliance chat feature to communicate with your alliance members.

        -

        You can join or create an alliance with up to 50 players at a time. You can see your alliance name, logo, level, rank, and members on the screen. You can also see your alliance contribution, reputation, and rewards on the screen. You can contribute to your alliance by completing quests, hunting monsters, or donating resources or items. You can earn reputation and rewards by participating in alliance events, such as raids, wars, or tournaments.

        -

        When you join or create an alliance, you will be able to access exclusive features and benefits that are only available for alliance members. These include:

        -
          -
• Alliance camp: A special campsite where you can rest, heal, and interact with your alliance members.
• Alliance shop: A special shop where you can buy rare items or services with alliance currency.
• Alliance vault: A special storage where you can deposit or withdraw resources or items with your alliance members.
• Alliance quests: Special quests that are only available for alliance members and offer higher rewards and challenges.
• Alliance skills: Special skills that are only available for alliance members and offer passive bonuses or active effects for your character.
        -

        Tips and Tricks to Get Stronger and Have More Fun

        -

        Now that you know how to download and install Yeager Hunter Legend, how to choose your weapon and customize your character, how to hunt monsters and explore Ekors, and how to play with other players and join alliances, you might want to know some tips and tricks to get stronger and have more fun in the game. Here are some of them:

        -

        Main Quests and Side Quests

        -

        Main quests are quests that advance the main story of the game. They will introduce you to new characters, locations, and events in the game. They will also reward you with experience points, Kallarite, crystals, gear, ancient seals, and other items. You can access main quests by using the quest feature in the game. The quest feature will show you the current main quest that you need to complete and its objectives. You can also see the previous main quests that you have completed and their summaries.

        -

        Side quests are quests that are not related to the main story of the game. They will give you more information about the world of Ekors and its inhabitants. They will also reward you with experience points, Kallarite, crystals, gear, ancient seals , and other items. You can access side quests by talking to NPCs in the main hub or in the hunting grounds or exploration zones. The NPCs will have a yellow exclamation mark above their heads if they have a side quest for you. You can also see the available side quests and their objectives by using the quest feature in the game.

        -

        We recommend that you complete both main quests and side quests as much as possible. They will help you level up your character, improve your gear, unlock new features and benefits, and learn more about the game. They will also make your gameplay more fun and varied.

        -

        Commissions and Bounties

        -

        Commissions are daily tasks that you can complete to earn extra resources and items. They are similar to side quests, but they are more simple and repetitive. They will ask you to do things like hunting a certain number of monsters, collecting a certain amount of resources, or forging a certain piece of gear. You can access commissions by using the commission feature in the game. The commission feature will show you the available commissions and their objectives and rewards. You can also see the progress and status of your commissions.

        -

        Bounties are weekly challenges that you can complete to earn extra crystals and ancient seals. They are similar to commissions, but they are more difficult and rewarding. They will ask you to do things like hunting a specific monster, completing a specific quest, or achieving a specific goal. You can access bounties by using the bounty feature in the game. The bounty feature will show you the available bounties and their objectives and rewards. You can also see the progress and status of your bounties.

        -

        We recommend that you complete both commissions and bounties as much as possible. They will help you earn more resources and items that you can use to upgrade your character and gear. They will also make your gameplay more challenging and satisfying.

        -

        In-game Currency

        -

        In-game currency is the money that you can use to buy items or services in the game. There are two types of in-game currency: Kallarite and crystals. Kallarite is the common currency that you can earn by completing quests, hunting monsters, or selling items. Crystals are the premium currency that you can buy with real money or earn by completing bounties or achievements.

        -

        You can use Kallarite to buy items or services from various shops in the game, such as the weapon shop, the armor shop, the accessory shop, the sigil shop, the item shop, the forge shop, or the barber shop. You can also use Kallarite to upgrade your gear or ancient seals at the forge shop or the seal shop.

        -

        You can use crystals to buy premium items or services from various shops in the game, such as the crystal shop, the ancient seal shop, or the alliance shop. You can also use crystals to buy extra slots for your gear, ancient seals, or inventory at the storage shop. You can also use crystals to speed up your forging or upgrading process at the forge shop or the seal shop.

        -

        We recommend that you spend your in-game currency wisely and save it for important purchases or upgrades. You should also try to earn more in-game currency by completing quests, hunting monsters, selling items, completing bounties, or achieving achievements.

        -

        In-app Purchases

        -

        In-app purchases are optional features that you can buy with real money to enhance your gameplay experience. There are two types of in-app purchases: premium items and subscriptions. Premium items are one-time purchases that give you access to exclusive items or benefits in the game. Subscriptions are recurring purchases that give you access to exclusive features or benefits in the game for a limited time.

        -

        Premium items include things like:

        -
          -
• Starter pack: A bundle of items that help you start your adventure in Yeager Hunter Legend.
• Booster pack: A bundle of items that boost your character's stats or performance for a limited time.
• Cosmetic pack: A bundle of items that change your character's appearance or style.
• Limited pack: A bundle of items that are only available for a limited time or quantity.
        -

        Subscriptions include things like:

        -
          -
• VIP membership: A monthly subscription that gives you access to exclusive features and benefits in Yeager Hunter Legend.
• Alliance membership: A monthly subscription that gives you access to exclusive features and benefits for your alliance in Yeager Hunter Legend.
• Season pass: A seasonal subscription that gives you access to exclusive quests, rewards, and events in Yeager Hunter Legend.
        -

        We recommend that you only buy in-app purchases if you really want to support the game developers or if you really want to enjoy the game more. You should also be aware of the terms and conditions of each in-app purchase and how to cancel them if you want to. You should also be responsible and not spend more than you can afford.

        -

        Conclusion

        -

        Yeager Hunter Legend is a game that offers you a lot of fun and excitement. You can hunt monsters, explore Ekors, customize your character, choose your weapon, play with other players, join alliances, and more. You can also learn some tips and tricks to get stronger and have more fun in the game. If you are looking for a thrilling action role-playing game that lets you hunt monsters, explore an alien world, and customize your character, then you should download Yeager Hunter Legend today and start your adventure!

        -

        Do you have any questions or comments about Yeager Hunter Legend? Do you have any suggestions or feedback for the game developers? Do you want to share your hunting stories or screenshots with other players? If so, feel free to leave a comment below or visit the official website, Facebook page, or Discord server of Yeager Hunter Legend. We would love to hear from you!

        -

        FAQs

        -

        Here are some frequently asked questions and answers about Yeager Hunter Legend:

        -
          -
1. What is the difference between Kallar and Ekors?
   Kallar is the name of the planet where the Vyderan clan lives. Ekors is the name of the planet where the stolen relic is located and where the game takes place.
2. What is the difference between hunting grounds and exploration zones?
   Hunting grounds are areas where you can hunt specific monsters for quests or rewards. Exploration zones are areas where you can explore freely and find secrets or items.
3. What is the difference between weapon classes and weapon schools?
   Weapon classes are the types of weapons that you can use in the game, such as Hunting Sword, Force Hammer, Fury Blades, Flux Blaster, or Eidolon Spear. Weapon schools are the subtypes of weapons that have different movesets and abilities for each weapon class, such as Fire Sword, Ice Hammer, Lightning Blades, Plasma Blaster, or Wind Spear.
4. What is the difference between ancient seals and sigils?
   Ancient seals are artifacts that grant you hunting skills and bonuses. Hunting skills are special abilities that you can use in combat, such as healing, buffing, debuffing, or attacking. Bonuses are passive effects that enhance your stats or performance, such as increased damage, reduced cooldowns, or extra rewards. Sigils are special effects or modes that enhance your combat abilities for a limited time. Sigils consume sigil energy and have different effects depending on your weapon class, weapon school, and ancient seal.
5. What is the difference between Kallarite and crystals?
   Kallarite is the common currency that you can earn by completing quests, hunting monsters, or selling items. Crystals are the premium currency that you can buy with real money or earn by completing bounties or achievements.

        -
        -
        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/FIFA Mobile APK Para Hilesi Futbol Yldzlarnz Toplayn ve Dnya Kupasna Hazrlann.md b/spaces/congsaPfin/Manga-OCR/logs/FIFA Mobile APK Para Hilesi Futbol Yldzlarnz Toplayn ve Dnya Kupasna Hazrlann.md deleted file mode 100644 index 9b76759a1426bab56be767d09da3140c323bb3c3..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/FIFA Mobile APK Para Hilesi Futbol Yldzlarnz Toplayn ve Dnya Kupasna Hazrlann.md +++ /dev/null @@ -1,112 +0,0 @@ -
        -

        FIFA Mobile APK Para Hilesi 2022 Apk Dayı: How to Get Unlimited Money in FIFA Mobile

        -

        If you are a fan of soccer games, you probably have heard of FIFA Mobile, the official mobile game of the FIFA World Cup 2022™. FIFA Mobile is a popular soccer game that lets you build your ultimate team of soccer stars, compete in various modes, and relive the world's greatest soccer tournament. But what if you want to get unlimited money in FIFA Mobile without spending real money? That's where FIFA Mobile APK Para Hilesi 2022 Apk Dayı comes in.

        -

        Introduction

        -

        FIFA Mobile is a free-to-play soccer game that is available for iOS and Android devices. It features over 15,000 authentic soccer players from over 600 teams, including world-class talent like Kylian Mbappé, Christian Pulisic, Vinicius Jr, and Son Heung-min. You can play through the entire tournament with any of the 32 qualified national teams, or rewrite history and take control of 15 non-qualified national teams. You can also compete against other players in pvp modes, such as Head-to-Head, VS Attack, and Manager Mode.

        -

        fifa mobile apk para hilesi 2022 apk dayı


        DOWNLOADhttps://urlca.com/2uOdKG



        -

        However, as with most free-to-play games, FIFA Mobile also has a currency system that limits your progress and enjoyment. You need coins and gems to buy player packs, upgrade your players, unlock new modes, and more. While you can earn some coins and gems by playing the game, they are not enough to get you the best players and teams. That's why some players resort to using FIFA Mobile APK Para Hilesi 2022 Apk Dayı, a modded version of FIFA Mobile that gives you unlimited money in the game.

        -

        FIFA Mobile APK Para Hilesi 2022 Apk Dayı is a modified version of FIFA Mobile that has been hacked by a third-party developer. It allows you to access a hidden feature in the game that lets you generate unlimited coins and gems. With this feature, you can buy any player pack you want, upgrade your players to their maximum potential, unlock all modes and features, and more. You can also enjoy the game with enhanced graphics and gameplay, thanks to the new engine that supports up to 60 fps.

        -

        By using FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you can have more fun and excitement in playing FIFA Mobile. You can build your dream team of soccer legends, such as Paolo Maldini, Ronaldinho, Zidane, Beckham, Ronaldo, and more. You can also dominate your opponents in pvp modes with your superior team and skills. You can relive the world's greatest soccer tournament with any team you want, even if they are not qualified for the World Cup. You can experience realistic soccer simulation with new stadiums, SFX, commentary, and more.

        -

        How to Download and Install FIFA Mobile APK Para Hilesi 2022 Apk Dayı

        -

        If you want to try out FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you need to download and install it on your device. However, before you do that, you need to take some precautions to avoid any problems or risks. Here are the steps to download and install FIFA Mobile APK Para Hilesi 2022 Apk Dayı:

        -
          -
1. First, you need to uninstall the original version of FIFA Mobile from your device. This is because the modded version will not work if you have the original version installed. To uninstall FIFA Mobile, go to your device settings, find the app, and tap on uninstall.
2. Second, you need to enable the installation of apps from unknown sources on your device. This is because the modded version is not available on the official app stores, and you need to download it from a third-party website. To enable the installation of apps from unknown sources, go to your device settings, find the security option, and toggle on the unknown sources option.
3. Third, you need to download the FIFA Mobile APK Para Hilesi 2022 Apk Dayı file from a reliable and trusted website. There are many websites that claim to offer the modded version of FIFA Mobile, but some of them may contain viruses or malware that can harm your device or steal your data. To avoid this, you need to do some research and find a reputable website that has positive reviews and feedback from other users. One such website is [Apk Dayı], which is a Turkish website that offers various modded games and apps for free.
4. Fourth, you need to locate the downloaded file on your device and tap on it to start the installation process. You may see a warning message that says the file may harm your device, but you can ignore it and proceed with the installation. The installation process may take a few minutes, depending on your device and internet speed.
5. Fifth, you need to launch the modded version of FIFA Mobile and enjoy the game with unlimited money. You may need to grant some permissions to the app, such as access to your storage, camera, microphone, etc. You may also need to sign in with your Google Play Games account or create a new one if you don't have one.
        -

        These are the steps to download and install FIFA Mobile APK Para Hilesi 2022 Apk Dayı on your device. However, you should be aware that using the modded version of FIFA Mobile may have some drawbacks and risks. For example:

        -
          -
• You may not be able to access some features or modes that require an internet connection or a server verification, such as live events, leaderboards, tournaments, etc.
• You may face some compatibility or performance issues with your device or game version, such as crashes, glitches, errors, etc.
• You may violate the terms and conditions of FIFA Mobile and EA Sports, which may result in a ban or suspension of your account or device.
• You may expose your device or data to potential threats or attacks from hackers or malware that may be hidden in the modded file or website.
        -

        Therefore, you should use FIFA Mobile APK Para Hilesi 2022 Apk Dayı at your own risk and discretion. We are not responsible for any consequences or damages that may arise from using the modded version of FIFA Mobile.

        -

        How to Use FIFA Mobile APK Para Hilesi 2022 Apk Dayı

        -

        Once you have downloaded and installed FIFA Mobile APK Para Hilesi 2022 Apk Dayı on your device, you can start using it and enjoy the game with unlimited money. Here are some tips on how to use FIFA Mobile APK Para Hilesi 2022 Apk Dayı:

        -

        How to access the unlimited money feature in the game

        -

        The main feature of FIFA Mobile APK Para Hilesi 2022 Apk Dayı is that it gives you unlimited coins and gems in the game. To access this feature, you need to go to the store section in the game menu. There, you will see that all the player packs and bundles are free and have no limit. You can buy as many packs as you want without spending any real money. You can also see that your coin and gem balance is always maxed out at 99999999. You can use these coins and gems to buy anything else in the game, such as upgrades, modes, items, etc.

        -

        fifa mobile mod apk sınırsız para hilesi 2022
        -fifa mobile 2022 apk dayı indir ücretsiz
        -fifa mobile apk para hilesi nasıl yapılır 2022
        -fifa mobile 18.1.03 mod apk unlocked all menu
        -fifa mobile world cup 2022 apk download
        -fifa mobile apk para hilesi 2022 güncel
        -fifa mobile apk dayı hileli oyun indir
        -fifa mobile mod apk unlimited money 2022
        -fifa mobile apk para hilesi yapma programı
        -fifa mobile 2022 apk dayı kurulumu
        -fifa mobile apk para hilesi 2022 ios
        -fifa mobile apk dayı son sürüm indir
        -fifa mobile mod apk menu hileli 2022
        -fifa mobile apk para hilesi 2022 android
        -fifa mobile apk dayı yorumları ve puanları
        -fifa mobile mod apk free download 2022
        -fifa mobile apk para hilesi 2022 online
        -fifa mobile apk dayı güvenilir mi
        -fifa mobile mod apk latest version 2022
        -fifa mobile apk para hilesi 2022 video
        -fifa mobile apk dayı sorunları ve çözümleri
        -fifa mobile mod apk no root 2022
        -fifa mobile apk para hilesi 2022 türkçe
        -fifa mobile apk dayı destek ve iletişim
        -fifa mobile mod apk hack 2022

        -

        How to use the money to build your ultimate team and compete in various modes

        -

        With unlimited money in FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you can build your ultimate team of soccer stars without any restrictions. You can buy any player pack you want and get the best players in the game, such as Lionel Messi, Cristiano Ronaldo, Neymar, Mbappé, and more. You can also upgrade your players to their maximum ratings and skills, and customize their appearance, kits, badges, etc. You can also create your own formations and tactics, and optimize your team chemistry and performance. With your ultimate team, you can compete in various modes in FIFA Mobile APK Para Hilesi 2022 Apk Dayı. You can play through the entire World Cup 2022 tournament with any team you want, even if they are not qualified. You can also play against other players in pvp modes, such as Head-to-Head, VS Attack, and Manager Mode. You can also participate in live events, tournaments, seasons, and more. You can win rewards, trophies, and glory with your team.

        How to enjoy the game with enhanced graphics and gameplay

        -

        Another feature of FIFA Mobile APK Para Hilesi 2022 Apk Dayı is that it improves the graphics and gameplay of FIFA Mobile. Thanks to the new engine that supports up to 60 fps, you can enjoy smooth and realistic soccer simulation on your device. You can also experience new stadiums, SFX, commentary, animations, and more. You can also adjust the graphics settings according to your device and preference. With enhanced graphics and gameplay, you can enjoy FIFA Mobile APK Para Hilesi 2022 Apk Dayı more than ever. You can feel the thrill and excitement of playing soccer on your device. You can also immerse yourself in the atmosphere and emotion of the World Cup 2022 tournament. You can also appreciate the details and quality of the game.

        -

        Tips and Tricks for FIFA Mobile APK Para Hilesi 2022 Apk Dayı

        -

        While FIFA Mobile APK Para Hilesi 2022 Apk Dayı gives you unlimited money and other advantages in FIFA Mobile, you still need some tips and tricks to make the most out of it. Here are some tips and tricks for FIFA Mobile APK Para Hilesi 2022 Apk Dayı:

        -

        How to optimize your team chemistry and performance

        -

        Team chemistry is an important factor in FIFA Mobile that affects your team's performance on the pitch. Team chemistry is determined by various factors, such as player nationality, league, club, position, formation, etc. The higher your team chemistry, the better your team will play together. To optimize your team chemistry in FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you need to consider these factors when building your team. You need to choose players that have a high compatibility with each other based on their attributes. You also need to choose a formation that suits your play style and strategy. You also need to adjust your tactics and instructions according to your opponent and situation.

        -

        How to score goals and win matches with ease

        -

        Scoring goals is the main objective of soccer games, and FIFA Mobile is no exception. To score goals and win matches in FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you need to master some skills and techniques. Here are some of them:

        -
          -
• Use the sprint button wisely. The sprint button allows you to run faster with the ball, but it also reduces your control and accuracy. Use it only when you have enough space or when you need to outrun your defender.
• Use the skill moves effectively. The skill moves allow you to perform various tricks and feints with the ball, such as roulette, rainbow flick, heel-to-heel flick, etc. Use them to confuse or beat your defender or goalkeeper.
• Use the shoot button correctly. The shoot button allows you to take a shot at the goal, but it also depends on various factors, such as power, angle, distance, etc. Use it only when you have a clear chance or when you are close to the goal.
• Use the pass button smartly. The pass button allows you to pass the ball to your teammate or cross it into the box. Use it to create chances or opportunities for yourself or your teammates.
• Use the switch button properly. The switch button allows you to change the player you are controlling on the pitch. Use it to select the best player for each situation or scenario.
        -

        These are some of the skills and techniques that can help you score goals and win matches in FIFA Mobile APK Para Hilesi 2022 Apk Dayı.

        -

        How to avoid bans and other issues while using the modded version

        -

        As mentioned earlier, using FIFA Mobile APK Para Hilesi 2022 Apk Dayı may have some drawbacks and risks. One of them is that you may get banned or suspended by FIFA Mobile or EA Sports for violating their terms and conditions. To avoid this, you need to follow some precautions and tips while using the modded version. Here are some of them:

        -
          -
• Do not use the modded version on your main account or device. Use a secondary account or device that you don't care about losing or getting banned.
• Do not use the modded version on online modes or features that require an internet connection or a server verification, such as live events, leaderboards, tournaments, etc. Use the modded version only on offline modes or features that do not require an internet connection or a server verification, such as World Cup, VS Attack, Manager Mode, etc.
• Do not use the modded version excessively. Use it moderately and reasonably. Do not buy too many player packs, upgrade too many players, unlock too many modes, etc. Do not win too many matches, score too many goals, dominate too many opponents, etc.
• Do not brag or boast about using the modded version to other players or on social media. Keep it a secret and do not share it with anyone. Do not provoke or challenge other players who are using the original version of FIFA Mobile.
        -

        These are some of the precautions and tips that can help you avoid bans and other issues while using FIFA Mobile APK Para Hilesi 2022 Apk Dayı.

        -

        Conclusion

        -

        FIFA Mobile APK Para Hilesi 2022 Apk Dayı is a modded version of FIFA Mobile that gives you unlimited money in the game. It allows you to buy any player pack you want, upgrade your players to their maximum potential, unlock all modes and features, and more. It also improves the graphics and gameplay of FIFA Mobile with a new engine that supports up to 60 fps. It lets you enjoy FIFA Mobile more than ever with your ultimate team of soccer stars. However, FIFA Mobile APK Para Hilesi 2022 Apk Dayı also has some drawbacks and risks. It may not work on some features or modes that require an internet connection or a server verification. It may cause some compatibility or performance issues with your device or game version. It may violate the terms and conditions of FIFA Mobile and EA Sports, which may result in a ban or suspension of your account or device. It may expose your device or data to potential threats or attacks from hackers or malware. Therefore, you should use FIFA Mobile APK Para Hilesi 2022 Apk Dayı at your own risk and discretion. We are not responsible for any consequences or damages that may arise from using the modded version of FIFA Mobile. If you want to try it out, you can follow the steps and tips we have provided in this article. But if you want to play FIFA Mobile safely and legally, you can download the original version of FIFA Mobile from the official app stores.

        -

        We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and have a great day!

        -

        FAQs

        -

        Here are some frequently asked questions about FIFA Mobile APK Para Hilesi 2022 Apk Dayı:

        -

        Q: Is FIFA Mobile APK Para Hilesi 2022 Apk Dayı safe to use?

        -

        A: FIFA Mobile APK Para Hilesi 2022 Apk Dayı is not safe to use, as it is a modded version of FIFA Mobile that has been hacked by a third-party developer. It may contain viruses or malware that can harm your device or steal your data. It may also violate the terms and conditions of FIFA Mobile and EA Sports, which may result in a ban or suspension of your account or device.

        -

        Q: Is FIFA Mobile APK Para Hilesi 2022 Apk Dayı legal to use?

        -

        A: FIFA Mobile APK Para Hilesi 2022 Apk Dayı is not legal to use, as it is a modded version of FIFA Mobile that has been hacked by a third-party developer. It infringes the intellectual property rights of FIFA Mobile and EA Sports, which may result in legal action against you.

        -

        Q: How can I download FIFA Mobile APK Para Hilesi 2022 Apk Dayı?

        -

        A: You can download FIFA Mobile APK Para Hilesi 2022 Apk Dayı from a reliable and trusted website that offers various modded games and apps for free. One such website is [Apk Dayı], which is a Turkish website that has positive reviews and feedback from other users.

        -

Q: How can I install FIFA Mobile APK Para Hilesi 2022 Apk Dayı?

        -

        A: You can install FIFA Mobile APK Para Hilesi 2022 Apk Dayı by following these steps:

        -
          -
1. Uninstall the original version of FIFA Mobile from your device.
2. Enable the installation of apps from unknown sources on your device.
3. Download the FIFA Mobile APK Para Hilesi 2022 Apk Dayı file from a reliable and trusted website.
4. Locate the downloaded file on your device and tap on it to start the installation process.
5. Launch the modded version of FIFA Mobile and enjoy the game with unlimited money.
        -

        Q: How can I use FIFA Mobile APK Para Hilesi 2022 Apk Dayı?

        -

        A: You can use FIFA Mobile APK Para Hilesi 2022 Apk Dayı by following these tips:

        -
          -
• Access the unlimited money feature in the game by going to the store section in the game menu.
• Use the money to buy any player pack you want, upgrade your players to their maximum potential, unlock all modes and features, and more.
• Build your ultimate team of soccer stars and compete in various modes in the game.
• Enjoy the game with enhanced graphics and gameplay with a new engine that supports up to 60 fps.
• Avoid bans and other issues while using the modded version by following some precautions and tips.

        -
        -
        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Grow Your Business with APK Shopee Merchant Features Benefits and Tips.md b/spaces/congsaPfin/Manga-OCR/logs/Grow Your Business with APK Shopee Merchant Features Benefits and Tips.md deleted file mode 100644 index 8b891aec63d6036a4b31e7c753381982c1fecbd5..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Grow Your Business with APK Shopee Merchant Features Benefits and Tips.md +++ /dev/null @@ -1,161 +0,0 @@ - -

        Download APK Shopee Merchant: A Guide for Android Users

        -

        If you are an online seller who wants to grow your business with Shopee, you might be interested in downloading APK Shopee Merchant. This is a practical and reliable application that helps you manage your business more easily with Shopee, no. 1 online shopping platform in Indonesia, anytime and anywhere.

        -

        But what is Shopee Merchant, and what is an APK file? And why would you want to download it instead of getting it from Google Play? In this article, we will answer these questions and show you how to download and use APK Shopee Merchant on your Android device.

        -

        download apk shopee merchant


        DOWNLOAD ———>>> https://urlca.com/2uO9RI



        -

        What is Shopee Merchant?

        -

        Shopee Merchant is an app that allows you to join ShopeePay and ShopeeFood easily in one app. ShopeePay is a digital payment service that lets you accept payments from customers using QR codes or phone numbers. ShopeeFood is a food delivery service that lets you sell your food products to hungry customers in your area.

        -

        As a merchant, you will get the following benefits from using Shopee Merchant:

        -
          -
• Self-registration: You can sign up as a seller on Shopee without any hassle or fees.
• Supporting features: You can access various features that help you manage your inventory, orders, payments, promotions, and customer service.
• Integrated wallet: You can receive and withdraw your earnings directly from your ShopeePay wallet.
• Self-promo creation: You can create and customize your own promotional materials, such as banners, flyers, and stickers, to attract more customers.
• Analytics and insights: You can monitor your business performance and get useful tips and suggestions to improve your sales.
        -

        With Shopee Merchant, you can enjoy the convenience and security of selling online with Shopee, the leading e-commerce platform in Southeast Asia and Taiwan.

        -

        What is an APK file?

        -

        An APK file is a file format that stands for Android Package Kit. It is used to distribute and install applications on Android devices. An APK file contains all the components of an app, such as the code, resources, assets, certificates, and manifest.
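Since an APK is really just a ZIP archive laid out in a specific way, you can see these components for yourself from a computer. Below is a minimal Python sketch (not part of Shopee Merchant or its official tooling) that lists the contents of an APK; the file name is only a hypothetical placeholder for whatever file you downloaded:

```python
import zipfile

APK_PATH = "shopee-merchant.apk"  # hypothetical file name, adjust to your download

# An APK is a ZIP archive, so the standard zipfile module can read it.
with zipfile.ZipFile(APK_PATH) as apk:
    for name in apk.namelist():
        # Typical entries: AndroidManifest.xml, classes.dex (code),
        # res/ and assets/ (resources), META-INF/ (certificates and signatures).
        print(name)
```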

        -

        How to download apk shopee partner app for android
        -Shopee partner apk latest version free download
        -Benefits of using shopee partner app for shopeepay and shopeefood merchant
        -Shopee partner app review and rating by users
        -Tips and tricks to manage your business with shopee partner app
        -Shopee partner app download size and compatibility
        -How to join shopeepay and shopeefood easily with shopee partner app
        -How to track your wallet balance and transaction history with shopee partner app
        -How to organize your menu and create promotion with shopee partner app
        -How to update your information and menu with shopee partner app
        -Shopee partner app vs other apps for online shopping platform merchants
        -How to contact shopee customer service through shopee partner app
        -How to register and verify your account with shopee partner app
        -How to use shopee partner app offline mode
        -How to sync your data across devices with shopee partner app
        -How to backup and restore your data with shopee partner app
        -How to enable notifications and alerts with shopee partner app
        -How to customize your settings and preferences with shopee partner app
        -How to troubleshoot common issues with shopee partner app
        -How to uninstall and reinstall shopee partner app
        -How to get the best deals and discounts with shopee partner app
        -How to increase your sales and revenue with shopee partner app
        -How to attract more customers and reviews with shopee partner app
        -How to improve your ranking and visibility with shopee partner app
        -How to integrate your social media accounts with shopee partner app
        -How to access analytics and reports with shopee partner app
        -How to use QR code scanner and generator with shopee partner app
        -How to accept multiple payment methods with shopee partner app
        -How to manage your inventory and orders with shopee partner app
        -How to handle refunds and cancellations with shopee partner app
        -How to join the shopee community and network with other merchants with shopee partner app
        -How to participate in contests and events with shopee partner app
        -How to earn rewards and points with shopee partner app
        -How to redeem vouchers and coupons with shopee partner app
        -How to share feedback and suggestions with shopee partner app
        -Shopee partner apk modded version download link
        -Shopee partner apk cracked version download link
        -Shopee partner apk premium version download link
        -Shopee partner apk pro version download link
        -Shopee partner apk hacked version download link
        -Shopee partner apk old version download link
        -Shopee partner apk beta version download link
        -Shopee partner apk original version download link
        -Shopee partner apk mirror version download link
        -Shopee partner apk alternative version download link

        -

        An APK file can be opened on Android devices by using a file manager app or a web browser. However, before installing an APK file, you need to enable the option to allow installation of apps from unknown sources in your device settings. This is because APK files are not verified by Google Play, which is the official app store for Android devices.

        -

        Why download APK Shopee Merchant?

        -

        Access the latest version of the app

        -

        One of the reasons why you might want to download APK Shopee Merchant is to access the latest version of the app. Sometimes, the app updates are not available on Google Play due to various reasons, such as compatibility issues, regional restrictions, or technical errors. By downloading the APK file from a reliable source, you can get the most updated version of Shopee Merchant, which may have new features, bug fixes, or performance improvements.

        -

        Install the app on unsupported devices

        -

        Another reason why you might want to download APK Shopee Merchant is to install the app on devices that are not supported by Google Play. Some devices may not be compatible with Google Play due to their hardware specifications, software versions, or manufacturer policies. Some devices may also have limited storage space that prevents them from downloading large apps from Google Play. By downloading the APK file from a website, you can install Shopee Merchant on any device that runs on Android OS, as long as it meets the minimum requirements of the app.

        -

        Avoid regional restrictions

        -

        A third reason why you might want to download APK Shopee Merchant is to avoid regional restrictions. Some apps may not be available or accessible in certain regions due to legal regulations, licensing agreements, or censorship policies. For example, Shopee Merchant may not be available in some countries where Shopee does not operate or where online selling is prohibited or regulated. By downloading the APK file from a website, you can bypass these restrictions and use Shopee Merchant wherever you are.

        -

        How to download APK Shopee Merchant?

        -

        Find a reliable source

        -

        The first step to download APK Shopee Merchant is to find a reliable source that offers the APK file for download. There are many websites that provide APK files for various apps, but not all of them are trustworthy or safe. Some websites may contain malware, viruses, or fake files that can harm your device or steal your data.

        -

        To find a reliable source, you should look for the following criteria:

        -
          -
• The website has a good reputation and positive reviews from other users.
• The website has a secure connection (HTTPS) and a valid certificate.
• The website provides clear and accurate information about the APK file, such as the name, size, version, developer, and permissions.
• The website does not require you to register, pay, or complete surveys to download the APK file.
• The website does not have excessive ads or pop-ups that interfere with your browsing experience.
        -

        One example of a reliable source that offers APK Shopee Merchant for download is [APKPure], which is one of the most popular and trusted websites for downloading APK files.
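If the website also publishes a checksum for the file (not all of them do), you can add one more sanity check that the article itself does not describe: hash the downloaded file on a computer and compare the result with the published value. Here is a small Python sketch, using a hypothetical file name:

```python
import hashlib

APK_PATH = "shopee-merchant.apk"  # hypothetical file name, adjust to your download

# Hash the file in chunks so large APKs do not need to fit in memory.
sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print("SHA-256:", sha256.hexdigest())
# Compare the printed value with the checksum published by the download site, if one is given.
```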

        -

        Enable unknown sources

        -

        The second step to download APK Shopee Merchant is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play. To do this, follow these steps:

        -
          -
1. Go to your device settings and tap on Security or Privacy.
2. Find the option that says Unknown sources or Install unknown apps and toggle it on.
3. A warning message will appear asking you to confirm your action. Tap on OK or Allow.
        -

        Note that this option may vary depending on your device model and Android version.

        Download and install the file

        -

        The third step to download APK Shopee Merchant is to download and install the file on your device. To do this, follow these steps:

        -
          -
1. Go to the website that offers the APK file for download and tap on the download button or link.
2. A pop-up window will appear asking you to confirm your download. Tap on OK or Download.
3. Wait for the download to complete. You can check the progress on your notification bar or your download folder.
4. Once the download is finished, tap on the APK file to open it. You may need to use a file manager app to locate it on your device.
5. A prompt will appear asking you to install the app. Tap on Install or Next.
6. Wait for the installation to complete. You can check the progress on your screen or your notification bar.
7. Once the installation is finished, tap on Open or Done.
        -

        Congratulations! You have successfully downloaded and installed APK Shopee Merchant on your device. You can now start using the app to manage your business with Shopee.
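As a side note for readers who also have a computer available: instead of tapping the file on the phone, you can sideload an APK over USB with adb from the Android platform-tools. This is only an optional alternative, not something Shopee Merchant requires; the sketch below assumes adb is installed and on your PATH, USB debugging is enabled on the phone, and the file name is a hypothetical placeholder:

```python
import subprocess

APK_PATH = "shopee-merchant.apk"  # hypothetical file name, adjust to your download

# Show connected devices; the target phone must appear as "device", not "unauthorized".
subprocess.run(["adb", "devices"], check=True)

# Install the APK; -r keeps existing app data if a version is already installed.
subprocess.run(["adb", "install", "-r", APK_PATH], check=True)
```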

        -

        How to use APK Shopee Merchant?

        -

        Register as a merchant

        -

        The first step to use APK Shopee Merchant is to register as a merchant on Shopee. To do this, follow these steps:

        -
          -
1. Open the app and tap on Sign Up or Register.
2. Select your country and enter your phone number. Tap on Next or Send OTP.
3. Enter the one-time password (OTP) that you received via SMS. Tap on Next or Verify.
4. Create a password and a username for your account. Tap on Next or Register.
5. Fill in your personal information, such as your name, email address, and date of birth. Tap on Next or Continue.
6. Select the type of business you want to run, such as food, beverage, or others. Tap on Next or Continue.
7. Fill in your business information, such as your business name, address, category, and description. Tap on Next or Continue.
8. Upload your identity document, such as your ID card, passport, or driver's license. Tap on Next or Continue.
9. Upload your business document, such as your business license, tax number, or bank statement. Tap on Next or Continue.
10. Review and confirm your information and documents. Tap on Submit or Finish.
        -

        Your registration is now complete. You will receive a confirmation message from Shopee within 24 hours. Once your account is verified, you can start selling on ShopeePay and ShopeeFood.

        -

        Manage your business

        -

        The second step to use APK Shopee Merchant is to manage your business using the app. To do this, you can access various features and functions that help you with the following tasks:

| Task | Feature | Description |
| --- | --- | --- |
| Create and edit your menu | Menu | You can add, edit, delete, or arrange your products in different categories and subcategories. You can also set the prices, discounts, stock availability, and delivery options for each product. |
| Track your orders and payments | Orders | You can view, accept, reject, or cancel your orders from customers. You can also update the status of your orders, such as preparing, ready, or delivered. You can also view the payment details and history of each order. |
| Promote your products | Promotions | You can create and manage various types of promotions for your products, such as vouchers, flash sales, free shipping, or bundle deals. You can also set the duration, budget, and target audience for each promotion. |
| Communicate with customers | Chat | You can chat with your customers directly from the app. You can send and receive text messages, images, videos, voice notes, or stickers. You can also use quick replies or templates to answer common questions or requests. |
        -

        With these features, you can manage your business more efficiently and effectively with Shopee Merchant.

        -

        Grow your sales

        -

        The third step to use APK Shopee Merchant is to grow your sales using the app. To do this, you can access various features and benefits that help you with the following goals:

| Goal | Feature | Benefit |
| --- | --- | --- |
| Increase your visibility | Self-promo creation | You can create and customize your own promotional materials, such as banners, flyers, and stickers, to attract more customers. You can also print or share them on social media platforms. |
| Improve your reputation | Ratings and reviews | You can collect and display ratings and reviews from your customers on your menu page. You can also respond to them and thank them for their feedback. This can help you build trust and loyalty among your customers. |
| Expand your market | Regional expansion | You can expand your market to other regions where Shopee operates, such as Malaysia, Singapore, Thailand, Vietnam, Philippines, or Taiwan. You can also adjust your menu and prices according to the local preferences and demand. |
| Optimize your performance | Analytics and insights | You can monitor your business performance and get useful tips and suggestions to improve your sales. You can also access various reports and statistics, such as sales volume, revenue, customer behavior, and market trends. |
        -

        With these features and benefits, you can grow your sales and customer satisfaction with Shopee Merchant.

        -

        Conclusion

        -

        In conclusion, downloading APK Shopee Merchant is a smart and convenient way to manage your business with Shopee on your Android device. You can access the latest version of the app, install it on unsupported devices, and avoid regional restrictions. You can also register as a merchant, manage your business, and grow your sales using various features and benefits that Shopee Merchant offers. If you are an online seller who wants to join ShopeePay and ShopeeFood easily in one app, you should download APK Shopee Merchant today and start selling more with Shopee.

        -

        FAQs

        -

        Here are some frequently asked questions that you might have about downloading APK Shopee Merchant:

        -
          -
        1. Is it safe to download APK files from unknown sources?
        2. -

          It depends on the source that you download the APK file from. Some sources may be reliable and safe, while others may be malicious or fraudulent. To ensure your safety, you should only download APK files from reputable and trusted websites, such as [APKPure]. You should also scan the APK file with an antivirus app before installing it on your device.

          -
        3. How can I update my APK Shopee Merchant app?
        4. -

          You can update your APK Shopee Merchant app by downloading the latest version of the APK file from the same source that you downloaded it from. You can also check for updates within the app by tapping on the menu icon and selecting Settings > About > Check for updates.

          -
        5. What if I encounter problems or errors while using the app?
        6. -

          If you encounter any problems or errors while using the app, you can try the following solutions:

          -
            -
          • Clear the cache and data of the app by going to your device settings > Apps > Shopee Merchant > Storage > Clear cache / Clear data.
          • -
          • Uninstall and reinstall the app by deleting the APK file from your device and downloading it again from the website.
          • -
          • Contact Shopee for support or feedback by tapping on the menu icon and selecting Help Center > Contact Us.
          • -
          -
        7. Can I use APK Shopee Merchant on other operating systems besides Android?
        8. -

          No, you cannot use APK Shopee Merchant on other operating systems besides Android. APK files are only compatible with Android devices. If you want to use Shopee Merchant on other devices, such as iOS or Windows, you will need to download the app from their respective app stores or use the web version of Shopee Merchant.

          -
        9. How can I contact Shopee for support or feedback?
        10. -

          You can contact Shopee for support or feedback by tapping on the menu icon and selecting Help Center > Contact Us. You can also email them at [merchant.support@shopee.com] or call them at [1500 407]. They are available 24/7 to assist you with any issues or inquiries that you may have.

          -

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Kiss of War Mod Apk A Unique Strategy Game with Historical Maps and Weapons.md b/spaces/congsaPfin/Manga-OCR/logs/Kiss of War Mod Apk A Unique Strategy Game with Historical Maps and Weapons.md deleted file mode 100644 index 4a51f1d9ad54dda1bade088bf392bf41e1aaf235..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Kiss of War Mod Apk A Unique Strategy Game with Historical Maps and Weapons.md +++ /dev/null @@ -1,105 +0,0 @@ - -

        Kiss of War Hacked APK: What Is It and How to Get It?

        -

        Kiss of War is a war strategy game set in the late modern period. It tells a story about a group of charming women with different pasts fighting against the invaders with allies. You will play as a commander in the game, train powerful troops, recruit beautiful agents as officers, rally other commanders in the world, and conquer this land.

        -

        The game features vivid war scenes based on actual geography of Europe, real-time multiplayer combat, multiple countries to select, and a wide selection of weapons and vehicles. The game has received positive reviews from millions of players who enjoy its immersive gameplay and stunning graphics.

        -

        kiss of war hacked apk


        Download File >>> https://urlca.com/2uO6GV



        -

        However, some players may find it hard to progress in the game due to its challenging levels and limited resources. That's why some players resort to using a hacked APK to get an edge over their opponents. A hacked APK is a modified version of the original game that gives you access to unlimited resources, unlocked officers, enhanced combat, and other features that are not available in the official version.

        -

        Using a hacked APK can be tempting, but it also comes with some risks. You may encounter malware, viruses, or spyware that can harm your device or steal your personal information. You may also face legal issues or bans from the game developers if they detect your cheating activities. Therefore, you need to be careful and responsible when using a hacked APK.

        -

        Features of Kiss of War Hacked APK

        -

        Unlimited Resources

        -

        One of the main features of Kiss of War hacked APK is that it gives you unlimited resources such as money, gold, food, steel, oil, energy, and more. These resources are essential for building and upgrading your base, training and producing your troops, researching new technologies, and recruiting new officers.

        -

        With unlimited resources, you don't have to worry about running out of them or spending real money to buy them. You can easily max out your buildings, troops, officers, and researches without any limitations. You can also use them to buy items from the shop or participate in events.

        -

        Unlocked Officers

        -

        Another feature of Kiss of War hacked APK is that it unlocks all the officers in the game. Officers are characters that lead your army and boost your power. They have different skills, talents, traits, and stories that make them unique and charming.

        -

        The game has three types of officers: legendary, epic, and elite. Legendary officers are the most powerful and rare ones that can only be obtained from elite recruitment events or spending a lot of gold. Epic officers are the second most powerful and rare ones that can be obtained from normal recruitment events or spending some gold. Elite officers are the least powerful and common ones that can be obtained from free recruitment events or spending a little gold.

        -

        With unlocked officers, you don't have to wait for the events or spend any gold to get them. You can have all the officers in the game at your disposal and use them to form your dream team. You can also level up, upgrade, and customize your officers without any restrictions.

        -

        Enhanced Combat

        -

        The third feature of Kiss of War hacked APK is that it enhances your combat performance and experience in the game. The game offers various modes of combat such as campaign, arena, alliance war, world war, and more. You will face different enemies and challenges in each mode and need to use your skills and strategies to win.

        -

        kiss of war mod apk latest version 2022
        -kiss of war hack mod apk unlimited money and gold
        -kiss of war mod apk download for android
        -kiss of war cheat mod apk free download
        -kiss of war modded apk with female army
        -kiss of war hacked version apk online
        -kiss of war mod apk no root required
        -kiss of war hack apk full unlocked
        -kiss of war mod apk offline mode
        -kiss of war cheat apk with unlimited resources
        -kiss of war modded version apk 2023 update
        -kiss of war hack apk for ios devices
        -kiss of war mod apk with fascinated battles
        -kiss of war cheat mod apk with sophisticated weapons
        -kiss of war modded apk with realistic graphics
        -kiss of war hacked apk with strategic depth
        -kiss of war mod apk with 18th century Europe map
        -kiss of war hack mod apk with powerful commanders
        -kiss of war mod apk free shopping and upgrades
        -kiss of war cheat apk with easy controls
        -kiss of war modded version apk with multiplayer mode
        -kiss of war hacked version apk with unlimited gems and coins
        -kiss of war mod apk with original soundtracks and effects
        -kiss of war hack apk with anti-ban feature
        -kiss of war mod apk with daily rewards and missions
        -kiss of war cheat mod apk with fast loading and installation
        -kiss of war modded apk with no ads and pop-ups
        -kiss of war hacked apk with auto-update feature
        -kiss of war mod apk with customisable settings and options
        -kiss of war hack mod apk with smooth gameplay and performance
        -kiss of war mod apk with different languages support
        -kiss of war cheat apk with social media integration
        -kiss of war modded version apk with leaderboards and achievements
        -kiss of war hacked version apk with unlimited energy and stamina
        -kiss of war mod apk with various game modes and levels
        -kiss of war hack apk with VIP features and benefits
        -kiss of war mod apk with friendly user interface and design
        -kiss of war cheat mod apk with high compatibility and security
        -kiss of war modded apk with regular bug fixes and improvements
        -kiss of war hacked apk with awesome reviews and ratings

        -

        With enhanced combat, you can get better control over your troops and strategies in the game. You can adjust the speed, formation, direction, and target of your troops according to the situation. You can also use special skills and items to boost your power and damage. You can win battles against real players and invaders with ease and earn rewards and glory.

        -

        How to Download and Install Kiss of War Hacked APK

        -

        Requirements

        -

        Before you download and install Kiss of War hacked APK, you need to make sure that your device meets the minimum system requirements for running the hacked APK. These are:

        -
          -
        • Android 4.4 or higher
        • -
        • At least 2 GB of RAM
        • -
        • At least 1 GB of free storage space
        • -
        • A stable internet connection
        • -
        -

        You also need to enable some permissions and settings on your device to allow the installation of the hacked APK. These are:

        -
          -
        • Allow installation from unknown sources
        • -
        • Disable antivirus or firewall software
        • -
        • Backup your original game data
        • -
        -

        Steps

        -

        After you have checked the requirements and enabled the permissions and settings, you can follow these steps to download and install Kiss of War hacked APK on your device:

        -
          -
        1. Find a reliable source for downloading the hacked APK. You can search online for websites or forums that offer the latest version of Kiss of War hacked APK. Make sure that the source is trustworthy and has positive feedback from other users.
        2. -
        3. Verify and install the hacked APK on your device. After you have downloaded the hacked APK file, you need to scan it with a security tool to make sure that it is safe and virus-free. Then, you need to tap on the file and follow the instructions to install it on your device.
        4. -
        5. Launch and enjoy the hacked APK on your device. After you have installed the hacked APK on your device, you need to launch it from your app drawer or home screen. You will see a new icon with a different name than the original game. Tap on it and enjoy the features of Kiss of War hacked APK.
        6. -
        -

        Conclusion

        -

        Kiss of War is a war strategy game that lets you play as a commander in a world full of charming women fighting against invaders. The game offers various features such as vivid war scenes, real-time multiplayer combat, multiple countries to select, and a wide selection of weapons and vehicles.

        -

        If you want to get an edge over your opponents in the game, you can use a hacked APK to get unlimited resources, unlocked officers, enhanced combat, and other features that are not available in the official version. However, you need to be careful and responsible when using a hacked APK as it may come with some risks such as malware, viruses, spyware, legal issues, or bans.

        -

        If you are interested in trying out Kiss of War hacked APK, you can follow our guide on how to download and install it on your device. Make sure that you meet the requirements and enable the permissions and settings before installing it. Also, make sure that you find a reliable source for downloading it and verify it before installing it.

        -

        We hope that this article has helped you understand what Kiss of War hacked APK is and how to get it. If you have any questions or feedback, please feel free to leave a comment below.

        -

        FAQs

        -
          -
        • Q: Is Kiss of War hacked APK safe to use?
        • -
        • A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that gives you access to features that are not available in the official version. Therefore, it may not be safe to use as it may contain malware, viruses, spyware, or other harmful programs that can damage your device or steal your personal information. Therefore, you need to be careful and responsible when using a hacked APK and only download it from a reliable source and scan it with a security tool before installing it.
        • -
        • Q: Is Kiss of War hacked APK legal to use?
        • -
        • A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that violates the terms of service and the intellectual property rights of the game. Therefore, it may not be legal to use as it may infringe the rights of the game developers or publishers and expose you to legal issues or bans. Therefore, you need to be careful and responsible when using a hacked APK and only use it for personal and educational purposes and not for commercial or malicious purposes.
        • -
        • Q: Is Kiss of War hacked APK compatible with all devices?
        • -
        • A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that may not be compatible with all devices or versions of the game. Therefore, it may not work properly or cause errors or crashes on some devices or versions of the game. Therefore, you need to be careful and responsible when using a hacked APK and only use it on devices that meet the minimum system requirements and have the latest version of the game installed.
        • -
        • Q: How to update Kiss of War hacked APK?
        • -
        • A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that may not receive regular updates or support from the game developers or publishers. Therefore, it may not have the latest features or fixes that are available in the official version of the game. Therefore, you need to be careful and responsible when using a hacked APK and only download it from a reliable source that provides the latest version of the hacked APK.
        • -
        • Q: How to uninstall Kiss of War hacked APK?
        • -
        • A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that may not be easy to uninstall from your device. Therefore, you need to be careful and responsible when using a hacked APK and only install it on devices that you can easily restore or reset if needed. To uninstall Kiss of War hacked APK from your device, you can follow these steps:
        • -
            -
          1. Go to your device settings and find the app manager or applications option.
          2. -
          3. Find and select Kiss of War hacked APK from the list of apps installed on your device.
          4. -
          5. Tap on the uninstall button and confirm your action.
          6. -
          7. Wait for the app to be removed from your device.
          8. -

          197e85843d
          -
          -
          \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Los Angeles Crimes 1.6 APK Unlimited Ammo Realistic Graphics and More.md b/spaces/congsaPfin/Manga-OCR/logs/Los Angeles Crimes 1.6 APK Unlimited Ammo Realistic Graphics and More.md deleted file mode 100644 index b44779d1beae9dc7f4c6a41969dea0555bd25bb6..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Los Angeles Crimes 1.6 APK Unlimited Ammo Realistic Graphics and More.md +++ /dev/null @@ -1,96 +0,0 @@ -
          -

          Los Angeles Crimes 1.6 APK: A Sandbox Game for Android

          -

          If you are looking for a fun and realistic sandbox game for your Android device, you might want to check out Los Angeles Crimes 1.6 APK. This is a fan-made game inspired by the GTA series, where you can roam freely in a huge city, engage in various activities, and interact with other players online. In this article, we will tell you everything you need to know about Los Angeles Crimes, including its features, how to download and install it, why you should play it, and some tips and tricks to help you enjoy it more.

          -

          los angeles crimes 1.6 apk


          Download File https://urlca.com/2uOdBg



          -

          What is Los Angeles Crimes?

          -

          Los Angeles Crimes is an action game developed by Mohammad Alizadeh, an independent developer from Iran. It is not affiliated with Rockstar Games or GTA in any way, but it does share some similarities with them. Los Angeles Crimes is a sandbox game, which means that you can do whatever you want in the game world, without any limitations or objectives. You can drive cars, bikes, boats, helicopters, and planes, shoot guns, fight with fists or melee weapons, play soccer or car race, join team deathmatch or zombie survival modes, or just explore the city and its surroundings.

          -

          Features of Los Angeles Crimes

          -

          Los Angeles Crimes has many features that make it an enjoyable and immersive game for Android users. Some of these features are:

          -
            -
          • Free roam mode: You can wander around the city and its outskirts, which are based on real locations in Los Angeles. You can find various landmarks, such as the Hollywood sign, the Santa Monica pier, the Griffith Observatory, and more. You can also enter some buildings, such as shops, hotels, restaurants, and apartments.
          • -
          • Multiplayer mode: You can create or join online games with up to 10 players per server. You can chat with other players using text or voice messages. You can also choose from different game modes, such as team deathmatch, zombie survival, car race, soccer, or free roam.
          • -
          • Active ragdoll and realistic physics: The game uses a physics engine that simulates the movements and interactions of objects and characters in a realistic way. You can see the effects of gravity, collisions, explosions, bullet impacts, and more. The game also uses an active ragdoll system that makes the characters react to forces and injuries in a natural way.
          • -
          • LAN support: You can play with your friends on a local network without using the internet. You just need to connect your devices to the same Wi-Fi network and create or join a LAN game.
          • -
          • PS4 controller support: You can use a PS4 controller to play the game via Bluetooth. You can customize the controller settings in the options menu.
          • -
          -

          How to download and install Los Angeles Crimes 1.6 APK

          -

          To download and install Los Angeles Crimes 1.6 APK on your Android device, you need to follow these steps:

          -
            -
          1. Go to APKCombo, FileHippo, or any other trusted website that offers the latest version of Los Angeles Crimes APK.
          2. -
          3. Download the APK file (416 MB) to your device.
          4. -
          5. Enable the installation of apps from unknown sources in your device settings.
          6. -
          7. Locate the downloaded APK file in your file manager and tap on it to install it.
          8. -
          9. Wait for the installation process to finish and launch the game from your app drawer.
          10. -
          -

          Why play Los Angeles Crimes?

          -

          Los Angeles Crimes is a game that offers you a lot of freedom and fun. You can play it for various reasons, such as:

          -

          los angeles crimes mod apk unlimited ammo
          -los angeles crimes online multiplayer apk
          -los angeles crimes android game download
          -los angeles crimes gta 5 mod apk
          -los angeles crimes apk obb latest version
          -los angeles crimes ragdoll physics apk
          -los angeles crimes open world game apk
          -los angeles crimes offline mode apk
          -los angeles crimes apk pure download
          -los angeles crimes cheats and hacks apk
          -los angeles crimes free roam apk
          -los angeles crimes zombie mode apk
          -los angeles crimes realistic graphics apk
          -los angeles crimes beta 1.6 apk
          -los angeles crimes full version apk
          -los angeles crimes voice chat apk
          -los angeles crimes mod menu apk
          -los angeles crimes car mods apk
          -los angeles crimes fps mode apk
          -los angeles crimes new update 1.6 apk
          -los angeles crimes best settings apk
          -los angeles crimes map editor apk
          -los angeles crimes how to install apk
          -los angeles crimes system requirements apk
          -los angeles crimes gameplay review apk
          -los angeles crimes tips and tricks apk
          -los angeles crimes skins and outfits apk
          -los angeles crimes weapons and vehicles apk
          -los angeles crimes missions and quests apk
          -los angeles crimes bugs and glitches apk
          -los angeles crimes fan made videos apk
          -los angeles crimes discord server apk
          -los angeles crimes developer contact apk
          -los angeles crimes rating and feedback apk
          -los angeles crimes alternatives and similar games apk
          -los angeles crimes download for pc windows 10 apk
          -los angeles crimes emulator for mac os apk
          -los angeles crimes compatible devices list apk
          -los angeles crimes file size and storage space apk
          -los angeles crimes safe and secure download link apk

          -

          Explore a vast open world

          -

          One of the main attractions of Los Angeles Crimes is its open world, which is based on real locations in Los Angeles. You can explore the city and its outskirts, which are full of details and surprises. You can find different types of vehicles, such as cars, bikes, boats, helicopters, and planes, and drive them wherever you want. You can also discover hidden places, such as underground tunnels, rooftops, or secret bases. You can also interact with various objects and NPCs, such as vending machines, ATMs, pedestrians, animals, and more.

          -

          Create and join multiplayer games

          -

          Another reason to play Los Angeles Crimes is its multiplayer mode, which allows you to play with other players online. You can create or join online games with up to 10 players per server. You can chat with other players using text or voice messages. You can also choose from different game modes, such as team deathmatch, zombie survival, car race, soccer, or free roam. You can cooperate or compete with other players in these modes, and have fun together.

          -

          Customize your character and vehicles

          -

          A third reason to play Los Angeles Crimes is its customization options. You can customize your character's appearance, clothes, accessories, and weapons. You can also customize your vehicles' color, wheels, engine, suspension, and more. You can save your customizations and use them in any game mode. You can also share your customizations with other players online.

          -

          Tips and tricks for Los Angeles Crimes

          -

          If you want to enjoy Los Angeles Crimes more, you might want to follow these tips and tricks:

          -

          Use the map and radar to navigate

          -

          The game has a map and a radar that show you the layout of the city and the locations of important things. You can use the map to see the whole city and zoom in or out. You can also set waypoints on the map to guide you to your destination. You can use the radar to see the nearby vehicles, players, weapons, crates, police, gangs, and more. You can also see the health and armor bars of yourself and other players on the radar.

          -

          Collect weapons and ammo from crates

          -

          The game has various weapons that you can use to fight or defend yourself. You can find weapons and ammo in crates that are scattered around the city. You can also buy weapons from shops or get them from other players. You can carry up to four weapons at a time: one melee weapon, one handgun, one shotgun or rifle, and one heavy weapon or explosive. You can switch between your weapons using the weapon wheel.

          -

          Avoid the police and gangs

          -

          The game has police and gangs that will chase you if you commit crimes or enter their territory. The police will try to arrest you or shoot you if you resist. The gangs will try to kill you or rob you if you cross them. You can see the police's wanted level and the gangs' hostility level on the radar. You can lower these levels by hiding or escaping from them. You can also fight back if you have enough weapons and ammo.

          -

          Conclusion

          -

          Los Angeles Crimes is a sandbox game for Android that lets you do whatever you want in a huge city inspired by GTA. You can explore the city and its surroundings, create or join multiplayer games online or offline, customize your character and vehicles, and have fun with realistic physics and ragdoll effects. If you are looking for a fun and realistic sandbox game for your Android device, you might want to download Los Angeles Crimes 1.6 APK from APKCombo, FileHippo, or any other trusted website that offers it.

          -

          Summary of the main points

          -
            -
          • Los Angeles Crimes is a fan-made sandbox game inspired by GTA for Android devices.
          • -
          • You can do whatever you want in the game world without any limitations or objectives.
          • -
          • You can explore a vast open world based on real locations in Los Angeles.
          • -
          • You can create or join multiplayer games with up to 10 players per server online or offline.
          • -
          • You can customize your character's appearance, clothes, accessories, and weapons.
          • -
          • You can customize your vehicles' color, wheels, engine, suspension, and more.
          • -
          • You can enjoy realistic physics and ragdoll effects in the game.
          • -
          • You can download Los Angeles Crimes 1.6 APK from APKCombo, FileHippo, or any other trusted website that offers it.
          • You can contact the developer, Mohammad Alizadeh, by email at [mailto:alizadeh.mohammad@gmail.com](alizadeh.mohammad@gmail.com). You can also follow him on his Instagram account: @mohammad_alizadeh_. You can also visit his website: lac-game.com, where you can find more information about the game, such as the changelog, the FAQ, the forum, and more.

            -

            How can I support the development of Los Angeles Crimes?

            -

            You can support the development of Los Angeles Crimes by donating to the developer via PayPal or Patreon. You can find the links to these platforms on his website: lac-game.com. You can also support him by rating and reviewing the game on the website that you downloaded it from, and by sharing it with your friends and family.

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Raees Full Movie Download in HD 720p Shah Rukh Khans Best Performance as a Crime Lord.md b/spaces/congsaPfin/Manga-OCR/logs/Raees Full Movie Download in HD 720p Shah Rukh Khans Best Performance as a Crime Lord.md deleted file mode 100644 index b45b9d4bf30273f8d3509d0e6515804c7b47f03e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Raees Full Movie Download in HD 720p Shah Rukh Khans Best Performance as a Crime Lord.md +++ /dev/null @@ -1,87 +0,0 @@ - -

            Raees Full Movie HD Download 2017 YouTube: How to Watch the Bollywood Blockbuster Online

            -

            Raees is a 2017 Bollywood crime drama film starring Shah Rukh Khan, Mahira Khan and Nawazuddin Siddiqui. The film is directed by Rahul Dholakia and produced by Red Chillies Entertainment and Excel Entertainment. The film is based on the life of Abdul Latif, a notorious bootlegger and gangster who operated in Gujarat in the 1980s and 1990s.

            -

            The film was released on 25 January 2017 and received positive reviews from critics and audiences alike. The film was praised for its performances, direction, music, action and cinematography. The film also faced some controversies due to its portrayal of a Muslim protagonist and its release date clashing with another film, Kaabil.

            -

            raees full movie hd download 2017 youtube


            Download File ❤❤❤ https://urlca.com/2uOcZ9



            -

            The film was a commercial success and became one of the highest-grossing Bollywood films of 2017. The film also won several awards and nominations, including five Filmfare Awards.

            -

            How to Watch Raees Full Movie HD Online

            -

            If you are looking for ways to watch Raees full movie HD online, you have several options to choose from. Here are some of the best ways to watch the film legally and safely:

            -

            Netflix

            -

            Netflix is one of the most popular streaming platforms in the world and it has Raees available for its subscribers. You can watch Raees full movie HD on Netflix with a monthly subscription fee of $8.99 for the basic plan, $13.99 for the standard plan or $17.99 for the premium plan. You can also get a free trial for 30 days if you are a new user. Netflix also allows you to download the film for offline viewing on your devices.

            -

            YouTube

            -

            YouTube is another popular platform where you can watch Raees full movie HD online. You can rent or buy the film on YouTube for $3.99 or $12.99 respectively. You can also watch the film for free with ads if you have a YouTube Premium subscription, which costs $11.99 per month or $6.99 per month for students. YouTube also allows you to download the film for offline viewing on your devices.

            -

            Amazon Prime Video

            -

            Amazon Prime Video is another option where you can watch Raees full movie HD online. You can rent or buy the film on Amazon Prime Video for $3.99 or $12.99 respectively. You can also watch the film for free if you have an Amazon Prime membership, which costs $12.99 per month or $119 per year. Amazon Prime Video also allows you to download the film for offline viewing on your devices.

            -

            Raees Full Movie HD Download 2017 YouTube: Pros and Cons

            -

            Watching Raees full movie HD online has its pros and cons. Here are some of them:

            -

            raees 2017 full movie watch online hd free
            -raees shahrukh khan full movie download hd 720p
            -raees full movie hd 1080p download filmywap
            -raees full movie online with english subtitles
            -raees full movie hd download utorrent
            -raees full movie netflix watch online free
            -raees full movie hindi 2017 youtube hd
            -raees full movie download mp4 hd 480p
            -raees full movie hd download pagalworld
            -raees full movie online dailymotion hd
            -raees full movie download filmyzilla hd 720p
            -raees full movie watch online hotstar hd
            -raees full movie download in hindi hd 1080p
            -raees full movie online streaming hd free
            -raees full movie download worldfree4u hd
            -raees full movie youtube video hd 2017
            -raees full movie download moviespyhd net
            -raees full movie online amazon prime video hd
            -raees full movie download khatrimaza hd 720p
            -raees full movie watch online zee5 hd free
            -raees full movie download bolly4u hd 480p
            -raees full movie online mx player hd free
            -raees full movie download skymovies hd 1080p
            -raees full movie watch online sonyliv hd free
            -raees full movie download coolmoviez hd 720p
            -raees full movie online eros now hd free
            -raees full movie download movierulz hd 480p
            -raees full movie watch online voot hd free
            -raees full movie download tamilrockers hd 1080p
            -raees full movie online jiocinema hd free
            -raees full movie download rdxhd hd 720p
            -raees full movie watch online airtel xstream hd free
            -raees full movie download okjatt hd 480p
            -raees full movie watch online viu hd free
            -raees full movie download filmyhit hd 1080p
            -raees full movie online hungama play hd free
            -raees full movie download mp4moviez hd 720p
            -raees full movie watch online altbalaji hd free
            -raees full movie download extramovies hd 480p
            -raees full movie watch online youtube premium hd free

            -

            Pros

            -
              -
            • You can watch the film anytime and anywhere with an internet connection.
            • -
            • You can enjoy the film in high quality and with subtitles.
            • -
            • You can save money and time by not going to the theater.
            • -
            • You can avoid piracy and support the filmmakers.
            • -
            -

            Cons

            -
              -
            • You may experience buffering or lagging issues depending on your internet speed and device.
            • -
            • You may miss out on the theater experience and the social aspect of watching a film with others.
            • -
            • You may face geo-restrictions or unavailability of the film on some platforms in some regions.
            • -
            • You may need to pay extra fees or subscriptions to access some platforms.
            • -
            -

            Conclusion

            -

            Raees is a Bollywood crime drama film that tells the story of a bootlegger who rises to become a powerful figure in Gujarat. The film stars Shah Rukh Khan, Mahira Khan and Nawazuddin Siddiqui in lead roles and is directed by Rahul Dholakia. The film was released in 2017 and became a hit among critics and audiences alike.

            -

            If you want to watch Raees full movie HD online, you have several options to choose from, such as Netflix, YouTube and Amazon Prime Video. Each option has its pros and cons, so you should weigh them carefully before deciding which one suits you best. You should also avoid illegal downloads and torrents, as they are harmful to your device and the film industry. You should always watch the film legally and safely on the platforms that have the rights to stream it.

            -

            FAQs

            -

            Here are some frequently asked questions about Raees full movie HD download 2017 YouTube:

            -

            Q: Is Raees based on a true story?

            -

            A: Raees is loosely based on the life of Abdul Latif, a notorious bootlegger and gangster who operated in Gujarat in the 1980s and 1990s. However, the filmmakers have denied any direct connection and have stated that the film is a fictional story.

            -

            Q: Who is the female lead in Raees?

            -

            A: The female lead in Raees is Mahira Khan, a Pakistani actress who made her Bollywood debut with this film. She plays the role of Aasiya, the wife of Raees.

            -

            Q: What is the meaning of Raees?

            -

            A: Raees is an Arabic word that means "rich" or "wealthy". It is also a common name for boys in Muslim communities. In the film, Raees is the name of the protagonist, who is a wealthy and influential bootlegger.

            -

            Q: What is the box office collection of Raees?

            -

            A: Raees was a commercial success and grossed over ₹308 crore worldwide. It became the highest-grossing Bollywood film of 2017 until it was surpassed by Baahubali 2: The Conclusion.

            -

            Q: Where can I find the songs of Raees?

            -

            A: The songs of Raees are composed by Ram Sampath, JAM8 and Kalyanji-Anandji. The lyrics are written by Javed Akhtar, Amitabh Bhattacharya, Mayur Puri and others. The songs are available on various music platforms such as Spotify, Gaana, JioSaavn, Wynk and YouTube Music.

            197e85843d
            -
            -
            \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Secrets Behind Love Catcher in Bali How to Download and Enjoy the Show.md b/spaces/congsaPfin/Manga-OCR/logs/The Secrets Behind Love Catcher in Bali How to Download and Enjoy the Show.md deleted file mode 100644 index b3c013b4ff25fefb227f595ac00139f3ca34ed4c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The Secrets Behind Love Catcher in Bali How to Download and Enjoy the Show.md +++ /dev/null @@ -1,103 +0,0 @@ -
            -

            Download Love Catcher in Bali

            -

            If you are looking for a dating show that will keep you on the edge of your seat, then you should check out Love Catcher in Bali. This is the fourth season of the popular Korean reality show Love Catcher, where participants have to choose between love or money. In this article, we will tell you what Love Catcher in Bali is all about, why you should watch it, and how to download it.

            -

            download love catcher in bali


            Download ✔✔✔ https://urlca.com/2uO9Bf



            -

            What is Love Catcher in Bali?

            -

            Love Catcher in Bali is a dating show that takes place in the beautiful island of Bali, Indonesia. The show features eight regular members who live together for two weeks and try to find their true love. However, there is a twist. Among the eight members, there are four love catchers and four money catchers. The love catchers are looking for genuine romance, while the money catchers are looking for a cash prize of 50 million won (about $42,000). The catch is that no one knows who is who.

            -

            The show follows the members as they go on dates, play games, and face various missions and tests. At the end of each episode, they have to vote for their partner. If both partners are love catchers, they can leave together as a couple. If one partner is a love catcher and the other is a money catcher, the money catcher can take all the money and leave alone. If both partners are money catchers, they get nothing and have to stay until the end.

            -

        The show is hosted by five celebrities who provide commentary and analysis on the members' actions and interactions. They are comedienne Jang Do Yeon, actor Joo Woo Jae, TV personality Jun Hyun Moo, singer Gabee, and idol Kim Yo Han. They also interact with the members through video calls and surprise visits.

            -

            How to download love catcher in bali episodes
            -Watch love catcher in bali online free
            -Love catcher in bali eng sub download
            -Love catcher in bali season 4 recap
            -Love catcher in bali cast and crew
            -Love catcher in bali ratings and reviews
            -Love catcher in bali full episodes download
            -Love catcher in bali episode 1 eng sub
            -Love catcher in bali finale spoilers
            -Love catcher in bali behind the scenes
            -Download love catcher in bali with subtitles
            -Love catcher in bali best moments and clips
            -Love catcher in bali theme song download
            -Love catcher in bali couples update
            -Love catcher in bali filming location
            -Download love catcher in bali on TVING
            -Love catcher in bali episode guide and summary
            -Love catcher in bali netflix release date
            -Love catcher in bali trailer and teaser
            -Love catcher in bali reunion show download
            -Download love catcher in bali season 1 2 3
            -Love catcher in bali korean dating show
            -Love catcher in bali money or love game
            -Love catcher in bali kim yo han and lee yu jeong
            -Download love catcher in bali hd quality
            -Love catcher in bali fan theories and predictions
            -Love catcher in bali ost download mp3
            -Love catcher in bali merchandise and products
            -Download love catcher in bali on android or ios
            -Love catcher in bali memes and jokes

            -

            Why you should watch Love Catcher in Bali?

            -

            There are many reasons why you should watch Love Catcher in Bali, but here are some of the most compelling ones:

            -

            It's a thrilling psychological love game

            -

            Love Catcher in Bali is not your typical dating show. It's a game of deception, manipulation, and strategy. The members have to hide their true identities and intentions, while trying to figure out who is who. They have to balance their feelings and their goals, while facing the risk of betrayal and disappointment. The show keeps you guessing who is a love catcher and who is a money catcher, and who will end up with whom. You will find yourself rooting for some couples, while suspecting others.

            -

            It's set in a beautiful island

            -

            Love Catcher in Bali is also a visual treat. The show takes advantage of the stunning scenery and attractions of Bali, one of the most popular tourist destinations in the world. The members get to enjoy the sun, the sea, the sand, and the culture of the island. They visit various places, such as temples, beaches, waterfalls, markets, and restaurants. They also experience various activities, such as surfing, yoga, cooking, and dancing. The show makes you want to pack your bags and go to Bali yourself.

            -

            It's full of romance and drama

            -

            Of course, Love Catcher in Bali is also a show about love. The show features eight attractive and charming members who have different personalities and backgrounds. They form various couples, some of which are sweet and adorable, while others are fiery and intense. They have chemistry and sparks, but they also have conflicts and misunderstandings. They make you laugh, cry, swoon, and scream. The show will make you feel all kinds of emotions.

            -

            How to download Love Catcher in Bali?

            -

            If you are interested in watching Love Catcher in Bali, you might be wondering how to download it. Well, there are several options that you can choose from, depending on your preference and convenience. Here are some of them:

            -

            Option 1: TVING

            -

            The easiest way to watch and download Love Catcher in Bali is to use TVING, a Korean streaming service that offers various content, including dramas, movies, variety shows, sports, and more. TVING is the official platform that broadcasts Love Catcher in Bali, so you can watch it with high quality and subtitles. You can also download the episodes to watch offline later.

            -

            To use TVING, you need to create an account and subscribe to a plan. There are different plans that you can choose from, depending on your budget and needs. The cheapest plan costs 7,900 won (about $6.60) per month. You can pay with your credit card or PayPal.

            -

            To watch and download Love Catcher in Bali on TVING, follow these steps:

            -
              -
            1. Go to the TVING website or download the TVING app on your device.
            2. -
            3. Login with your account.
            4. -
            5. Search for Love Catcher in Bali or go to this link: [text].
            6. -
            7. Select the episode that you want to watch or download.
            8. -
            9. If you want to watch it online, click on the play button.
            10. -
            11. If you want to download it offline, click on the download button.
            12. -
            13. Enjoy watching Love Catcher in Bali.
            14. -
            -

            Option 2: Dailymotion

            -

            An alternative way to watch and download Love Catcher in Bali is to use Dailymotion, a video-sharing platform that hosts various content from around the world. Dailymotion allows users to upload and share videos for free. You can find Love Catcher in Bali episodes on Dailymotion uploaded by other users who have recorded them from TV or other sources.

            -

            To use Dailymotion, you don't need to create an account or pay anything. However, you might encounter some drawbacks, such as low quality, ads, incomplete episodes, or broken links. You also need to be careful about spoilers or fake videos.

            -

        To watch and download Love Catcher in Bali on Dailymotion, follow these steps:

              -
            1. Go to the Dailymotion website or download the Dailymotion app on your device.
            2. -
            3. Search for Love Catcher in Bali or go to this link: [text].
            4. -
            5. Select the episode that you want to watch or download.
            6. -
            7. If you want to watch it online, click on the play button.
            8. -
            9. If you want to download it offline, click on the three dots icon and select "Download".
            10. -
            11. Enjoy watching Love Catcher in Bali.
            12. -
            -

            Option 3: MyDramaList

            -

            A third way to watch and download Love Catcher in Bali is to use MyDramaList, a website for Asian drama fans. MyDramaList provides information, reviews, ratings, recommendations, and discussions about various Asian dramas, movies, and shows. You can also find links to watch and download them from different sources.

            -

            To use MyDramaList, you need to create an account and join the community. You can also create your own watchlist, rate and review the shows that you have watched, and interact with other fans. You can access MyDramaList for free, but you can also support the site by becoming a premium member or donating.

            -

            To watch and download Love Catcher in Bali on MyDramaList, follow these steps:

            -
              -
            1. Go to the MyDramaList website or download the MyDramaList app on your device.
            2. -
            3. Login with your account.
            4. -
            5. Search for Love Catcher in Bali or go to this link: [text].
            6. -
            7. Select the episode that you want to watch or download.
            8. -
            9. Scroll down to the "Watch Online" section and choose a source that suits you. Some of the sources are TVING, Dailymotion, YouTube, Viki, etc.
            10. -
            11. If you want to watch it online, click on the link and follow the instructions on the source website or app.
            12. -
            13. If you want to download it offline, look for a download option on the source website or app. If there is no download option, you can use a third-party tool or extension to download the video.
            14. -
            15. Enjoy watching Love Catcher in Bali.
            16. -
            -

            Conclusion

            -

            Love Catcher in Bali is a dating show that will keep you entertained and intrigued. It's a psychological love game that tests the members' honesty and sincerity. It's set in a beautiful island that showcases the beauty and culture of Bali. It's full of romance and drama that will make you feel all kinds of emotions. If you are looking for a show that will make your heart race and your mind wonder, then you should watch Love Catcher in Bali.

            -

            You can watch and download Love Catcher in Bali from various options, such as TVING, Dailymotion, or MyDramaList. Choose the option that works best for you and enjoy watching this thrilling show. You won't regret it!

            -

            FAQs

            -

            Q1: When did Love Catcher in Bali air?

            -

            A1: Love Catcher in Bali aired from November 18, 2022 to January 6, 2023.

            -

            Q2: How many episodes are there in Love Catcher in Bali?

            -

            A2: There are eight episodes in Love Catcher in Bali.

            -

            Q3: Who are the hosts of Love Catcher in Bali?

            -

            A3: The hosts of Love Catcher in Bali are Jang Do Yeon, Joo Woo Jae, Jun Hyun Moo, Gabee, and Kim Yo Han.

            -

            Q4: Who are the regular members of Love Catcher in Bali?

            -

            A4: The regular members of Love Catcher in Bali are Lee Yu Jeong, Choi Tae Eun, Hong Seung Yeon, Kim Su Ji, Lee Seung Hyun, Park Ji Won, Kim Min Jae, and Lee Ji Eun.

            -

            Q5: Who won Love Catcher in Bali?

            -

            A5: Spoiler alert! The winners of Love Catcher in Bali were Choi Tae Eun and Kim Su Ji.

            197e85843d
            -
            -
            \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Avast antivirus license key 2038 The ultimate guide to download and install.md b/spaces/contluForse/HuggingGPT/assets/Avast antivirus license key 2038 The ultimate guide to download and install.md deleted file mode 100644 index 9a2681c52567510700addca54748df697d831257..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Avast antivirus license key 2038 The ultimate guide to download and install.md +++ /dev/null @@ -1,6 +0,0 @@ -
            -

            Avast Antivirus Crack software is well known worldwide for its robust and powerful antivirus and other anti-malware programs. The antivirus license key combines a variety of distinct cyber-security technologies to offer real-time protection against new and existing risks.

            -

            Avast antivirus license key 2038


            Download ---> https://ssurll.com/2uzvK1



            -

            Avast Antivirus license key Software is famed worldwide for its efficient and robust antivirus along with other anti-malware applications. Avast Premier 2021 License File Serial Number is a mix of different cyber security technologies to provide real-time protection against existing and new The user interface of the Avast Antivirus Crack is lightweight and has all the necessary options required by the consumer to safeguard the workstations. The interface has four tabs like Scan Computer Summary, Realtime Shields, as well as Care.

            aaccfb2cb3
            -
            -
            \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Cv Pcc A Irmandade Do Crime Download Pdf.md b/spaces/contluForse/HuggingGPT/assets/Cv Pcc A Irmandade Do Crime Download Pdf.md deleted file mode 100644 index 317a8a0e7c398b6ec44dd7763becf1eec28f2dea..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Cv Pcc A Irmandade Do Crime Download Pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

            cv pcc a irmandade do crime download pdf


            Download Filehttps://ssurll.com/2uzyXI



            - - 3cee63e6c2
            -
            -
            -

            diff --git a/spaces/contluForse/HuggingGPT/assets/Deewaar Hindi Full Movie HD Download for Free The Film that Changed the Face of Indian Cinema.md b/spaces/contluForse/HuggingGPT/assets/Deewaar Hindi Full Movie HD Download for Free The Film that Changed the Face of Indian Cinema.md deleted file mode 100644 index deabb992b6999e7a8ad7480b17a2df4a9fda2265..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Deewaar Hindi Full Movie HD Download for Free The Film that Changed the Face of Indian Cinema.md +++ /dev/null @@ -1,5 +0,0 @@ - -

            Download unlimited Kishore Kumar and Shashi Kapoor movies and videos here. Kishore Kumar and Shashi Kapoor videos in HD, 3gp, and mp4 320p, and more, can be downloaded easily, including tamilrockers, movierulz, tamilgun, filmywap, and pagalworld videos and movies.

            -

            Deewaar hindi full movie free download hd


            DOWNLOAD ★★★ https://ssurll.com/2uzwzi



            aaccfb2cb3
            -
            -
            \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Download Alien Covenant (English) - Where to Find the Best Deals and Offers for the Movie.md b/spaces/contluForse/HuggingGPT/assets/Download Alien Covenant (English) - Where to Find the Best Deals and Offers for the Movie.md deleted file mode 100644 index ed81d74694af1ea33bbf95784a63f3ecfd1e75e9..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download Alien Covenant (English) - Where to Find the Best Deals and Offers for the Movie.md +++ /dev/null @@ -1,6 +0,0 @@ -

            DOwnload Alien: Covenant (English)


            DOWNLOADhttps://ssurll.com/2uzxaX



            - - aaccfb2cb3
            -
            -
            -

            diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/ops/assign_score_withk.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/ops/assign_score_withk.py deleted file mode 100644 index 4906adaa2cffd1b46912fbe7d4f87ef2f9fa0012..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/ops/assign_score_withk.py +++ /dev/null @@ -1,123 +0,0 @@ -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['assign_score_withk_forward', 'assign_score_withk_backward']) - - -class AssignScoreWithK(Function): - r"""Perform weighted sum to generate output features according to scores. - Modified from `PAConv `_. - - This is a memory-efficient CUDA implementation of assign_scores operation, - which first transform all point features with weight bank, then assemble - neighbor features with ``knn_idx`` and perform weighted sum of ``scores``. - - See the `paper `_ appendix Sec. D for - more detailed descriptions. - - Note: - This implementation assumes using ``neighbor`` kernel input, which is - (point_features - center_features, point_features). - See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/ - pointnet2/paconv.py#L128 for more details. - """ - - @staticmethod - def forward(ctx, - scores, - point_features, - center_features, - knn_idx, - aggregate='sum'): - """ - Args: - scores (torch.Tensor): (B, npoint, K, M), predicted scores to - aggregate weight matrices in the weight bank. - ``npoint`` is the number of sampled centers. - ``K`` is the number of queried neighbors. - ``M`` is the number of weight matrices in the weight bank. - point_features (torch.Tensor): (B, N, M, out_dim) - Pre-computed point features to be aggregated. - center_features (torch.Tensor): (B, N, M, out_dim) - Pre-computed center features to be aggregated. - knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN. - We assume the first idx in each row is the idx of the center. - aggregate (str, optional): Aggregation method. - Can be 'sum', 'avg' or 'max'. Defaults: 'sum'. - - Returns: - torch.Tensor: (B, out_dim, npoint, K), the aggregated features. 
- """ - agg = {'sum': 0, 'avg': 1, 'max': 2} - - B, N, M, out_dim = point_features.size() - _, npoint, K, _ = scores.size() - - output = point_features.new_zeros((B, out_dim, npoint, K)) - ext_module.assign_score_withk_forward( - point_features.contiguous(), - center_features.contiguous(), - scores.contiguous(), - knn_idx.contiguous(), - output, - B=B, - N0=N, - N1=npoint, - M=M, - K=K, - O=out_dim, - aggregate=agg[aggregate]) - - ctx.save_for_backward(output, point_features, center_features, scores, - knn_idx) - ctx.agg = agg[aggregate] - - return output - - @staticmethod - def backward(ctx, grad_out): - """ - Args: - grad_out (torch.Tensor): (B, out_dim, npoint, K) - - Returns: - grad_scores (torch.Tensor): (B, npoint, K, M) - grad_point_features (torch.Tensor): (B, N, M, out_dim) - grad_center_features (torch.Tensor): (B, N, M, out_dim) - """ - _, point_features, center_features, scores, knn_idx = ctx.saved_tensors - - agg = ctx.agg - - B, N, M, out_dim = point_features.size() - _, npoint, K, _ = scores.size() - - grad_point_features = point_features.new_zeros(point_features.shape) - grad_center_features = center_features.new_zeros(center_features.shape) - grad_scores = scores.new_zeros(scores.shape) - - ext_module.assign_score_withk_backward( - grad_out.contiguous(), - point_features.contiguous(), - center_features.contiguous(), - scores.contiguous(), - knn_idx.contiguous(), - grad_point_features, - grad_center_features, - grad_scores, - B=B, - N0=N, - N1=npoint, - M=M, - K=K, - O=out_dim, - aggregate=agg) - - return grad_scores, grad_point_features, \ - grad_center_features, None, None - - -assign_score_withk = AssignScoreWithK.apply diff --git a/spaces/csanjay/DR_Predictor/README.md b/spaces/csanjay/DR_Predictor/README.md deleted file mode 100644 index 594245be9b83f37237cfdaf4121ea0c4060d28e3..0000000000000000000000000000000000000000 --- a/spaces/csanjay/DR_Predictor/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DR Predictor -emoji: 📊 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cscan/CodeFormer/CodeFormer/basicsr/metrics/__init__.py b/spaces/cscan/CodeFormer/CodeFormer/basicsr/metrics/__init__.py deleted file mode 100644 index 19d55cc8321f124c918d78465b053aef67f13a33..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/basicsr/metrics/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from copy import deepcopy - -from basicsr.utils.registry import METRIC_REGISTRY -from .psnr_ssim import calculate_psnr, calculate_ssim - -__all__ = ['calculate_psnr', 'calculate_ssim'] - - -def calculate_metric(data, opt): - """Calculate metric from data and options. - - Args: - opt (dict): Configuration. It must constain: - type (str): Model type. 
- """ - opt = deepcopy(opt) - metric_type = opt.pop('type') - metric = METRIC_REGISTRY.get(metric_type)(**data, **opt) - return metric diff --git a/spaces/cvlab/zero123-live/taming-transformers/taming/data/helper_types.py b/spaces/cvlab/zero123-live/taming-transformers/taming/data/helper_types.py deleted file mode 100644 index fb51e301da08602cfead5961c4f7e1d89f6aba79..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/taming-transformers/taming/data/helper_types.py +++ /dev/null @@ -1,49 +0,0 @@ -from typing import Dict, Tuple, Optional, NamedTuple, Union -from PIL.Image import Image as pil_image -from torch import Tensor - -try: - from typing import Literal -except ImportError: - from typing_extensions import Literal - -Image = Union[Tensor, pil_image] -BoundingBox = Tuple[float, float, float, float] # x0, y0, w, h -CropMethodType = Literal['none', 'random', 'center', 'random-2d'] -SplitType = Literal['train', 'validation', 'test'] - - -class ImageDescription(NamedTuple): - id: int - file_name: str - original_size: Tuple[int, int] # w, h - url: Optional[str] = None - license: Optional[int] = None - coco_url: Optional[str] = None - date_captured: Optional[str] = None - flickr_url: Optional[str] = None - flickr_id: Optional[str] = None - coco_id: Optional[str] = None - - -class Category(NamedTuple): - id: str - super_category: Optional[str] - name: str - - -class Annotation(NamedTuple): - area: float - image_id: str - bbox: BoundingBox - category_no: int - category_id: str - id: Optional[int] = None - source: Optional[str] = None - confidence: Optional[float] = None - is_group_of: Optional[bool] = None - is_truncated: Optional[bool] = None - is_occluded: Optional[bool] = None - is_depiction: Optional[bool] = None - is_inside: Optional[bool] = None - segmentation: Optional[Dict] = None diff --git a/spaces/dandan4272/hand_gesture_rec/train_on_DHG_stgcn.py b/spaces/dandan4272/hand_gesture_rec/train_on_DHG_stgcn.py deleted file mode 100644 index 6ce179de0b9cae2823846a155620a43efd082882..0000000000000000000000000000000000000000 --- a/spaces/dandan4272/hand_gesture_rec/train_on_DHG_stgcn.py +++ /dev/null @@ -1,244 +0,0 @@ -from torch.utils.tensorboard import SummaryWriter - -from model.stgcn import TwoStreamSpatialTemporalGraph -from util.DHG_parse_data import * -from Mydataset import * -import torch.optim as optim -import time -import argparse -import os -from model.network import * - -parser = argparse.ArgumentParser() - -parser.add_argument("-b", "--batch_size", type=int, default=32) # 16 -parser.add_argument("-lr", "--learning_rate", type=float, default=1e-3) -parser.add_argument('--cuda', default=True, help='enables cuda') -parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', - help='number of data loading workers (default: 8)') -parser.add_argument('--epochs', default=300, type=int, metavar='N', - help='number of total epochs to run') # 1000 - -parser.add_argument('--patiences', default=50, type=int, - help='number of epochs to tolerate no improvement of val_loss') # 1000 - - -parser.add_argument('--test_subject_id', type=int, default=3, - help='id of test subject, for cross-validation') - -parser.add_argument('--data_cfg', type=int, default=0, - help='0 for 14 class, 1 for 28') - - -parser.add_argument('--dp_rate', type=float, default=0.2, - help='dropout rate') # 1000 - - - - -def init_data_loader(test_subject_id, data_cfg): - - train_data, test_data = get_train_test_data(test_subject_id, data_cfg) - - - train_dataset = Hand_Dataset(train_data, 
use_data_aug = True, time_len = 8) - - test_dataset = Hand_Dataset(test_data, use_data_aug = False, time_len = 8) - - print("train data num: ",len(train_dataset)) - print("test data num: ",len(test_dataset)) - - print("batch size:", args.batch_size) - print("workers:", args.workers) - - train_loader = torch.utils.data.DataLoader( - train_dataset, - batch_size=args.batch_size, shuffle=True, - num_workers=args.workers, pin_memory=False) - - val_loader = torch.utils.data.DataLoader( - test_dataset, - batch_size=args.batch_size, shuffle=False, - num_workers=args.workers, pin_memory=False) - - return train_loader, val_loader - -def init_model(data_cfg): - if data_cfg == 0: - class_num = 14 - elif data_cfg == 1: - class_num = 28 - - # model = DG_STA(class_num, args.dp_rate) - # model = torch.nn.DataParallel(model).cuda() - graph_args = {'strategy': 'spatial'} - - # class_names = ['shake_hand', 'palm', 'fist', 'clock_wise', 'anti_clockwise', 'ok', 'thumb', 'v', 'heart','no_gesture'] - # num_class = len(class_names) - - model = TwoStreamSpatialTemporalGraph(graph_args, class_num) - - # model = DG_STA(class_num, args.dp_rate) - model = torch.nn.DataParallel(model).cuda() - return model - - -def model_foreward(sample_batched,model,criterion): - - data = sample_batched["skeleton"].float() - label = sample_batched["label"] - label = label.type(torch.LongTensor) - label = label.cuda() - label = torch.autograd.Variable(label, requires_grad=False) - - - score = model(data) - - loss = criterion(score,label) - - acc = get_acc(score, label) - - return score,loss, acc - - - -def get_acc(score, labels): - score = score.cpu().data.numpy() - labels = labels.cpu().data.numpy() - outputs = np.argmax(score, axis=1) - return np.sum(outputs==labels)/float(labels.size) - -torch.backends.cudnn.deterministic = True -torch.backends.cudnn.benchmark = False - -if __name__ == "__main__": - - writer = SummaryWriter(log_dir='log_ST-GCN') - - print("\nhyperparamter......") - args = parser.parse_args() - print(args) - - print("test_subject_id: ", args.test_subject_id) - - #folder for saving trained model... 
- # change this path to the fold where you want to save your pre-trained model - model_fold = "ST-GCN_ID-{}_dp-{}_lr-{}_dc-{}/".format(args.test_subject_id,args.dp_rate, args.learning_rate, args.data_cfg) - # try: - # os.mkdir(model_fold) - # except: - # pass - try: - os.makedirs(os.path.join('weights', model_fold)) - except: - pass - - - - train_loader, val_loader = init_data_loader(args.test_subject_id,args.data_cfg) - - - #.........inital model - print("\ninit model.............") - model = init_model(args.data_cfg) - model_solver = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.learning_rate) - - #........set loss - criterion = torch.nn.CrossEntropyLoss() - - - # - train_data_num = 2660 - test_data_num = 140 - iter_per_epoch = int(train_data_num / args.batch_size) - - #parameters recording training log - max_acc = 0 - no_improve_epoch = 0 - n_iter = 0 - - #***********training#*********** - for epoch in range(args.epochs): - print("\ntraining.............") - model.train() - start_time = time.time() - train_acc = 0 - train_loss = 0 - for i, sample_batched in enumerate(train_loader): - n_iter += 1 - #print("training i:",i) - if i + 1 > iter_per_epoch: - continue - score,loss, acc = model_foreward(sample_batched, model, criterion) - - model.zero_grad() - loss.backward() - #clip_grad_norm_(model.parameters(), 0.1) - model_solver.step() - - - train_acc += acc - train_loss += loss - - #print(i) - - - - train_acc /= float(i + 1) - train_loss /= float(i + 1) - writer.add_scalar('train_acc',train_acc,epoch) - writer.add_scalar('train_loss',train_loss,epoch) - print("*** DHS Epoch: [%2d] time: %4.4f, " - "cls_loss: %.4f train_ACC: %.6f ***" - % (epoch + 1, time.time() - start_time, - train_loss.data, train_acc)) - start_time = time.time() - - #adjust_learning_rate(model_solver, epoch + 1, args) - #print(print(model.module.encoder.gcn_network[0].edg_weight)) - - #***********evaluation*********** - with torch.no_grad(): - val_loss = 0 - acc_sum = 0 - model.eval() - for i, sample_batched in enumerate(val_loader): - #print("testing i:", i) - label = sample_batched["label"] - score, loss, acc = model_foreward(sample_batched, model, criterion) - val_loss += loss - - if i == 0: - score_list = score - label_list = label - else: - score_list = torch.cat((score_list, score), 0) - label_list = torch.cat((label_list, label), 0) - - - val_loss = val_loss / float(i + 1) - val_cc = get_acc(score_list,label_list) - - writer.add_scalar('val_cc', val_cc, epoch) - writer.add_scalar('val_loss', val_loss, epoch) - print("*** DHS Epoch: [%2d], " - "val_loss: %.6f," - "val_ACC: %.6f ***" - % (epoch + 1, val_loss, val_cc)) - - #save best model - if val_cc > max_acc: - max_acc = val_cc - no_improve_epoch = 0 - val_cc = round(val_cc, 10) - - torch.save(model.state_dict(), - '{}/epoch_{}_acc_{}.pth'.format(os.path.join('weights', model_fold), epoch + 1, val_cc)) - print("performance improve, saved the new model......best acc: {}".format(max_acc)) - else: - no_improve_epoch += 1 - print("no_improve_epoch: {} best acc {}".format(no_improve_epoch,max_acc)) - - if no_improve_epoch > args.patiences: - print("stop training....") - break \ No newline at end of file diff --git a/spaces/dataminers/dataminers/ef.py b/spaces/dataminers/dataminers/ef.py deleted file mode 100644 index d583d3769504df3397bf63dc279b77984907a7fd..0000000000000000000000000000000000000000 --- a/spaces/dataminers/dataminers/ef.py +++ /dev/null @@ -1,193 +0,0 @@ -import pandas as pd -import numpy as np -from datetime import datetime 
as dt -from pypfopt.efficient_frontier import EfficientFrontier -import streamlit as st -import plotly.graph_objects as go -import plotly.express as px -from PIL import Image - -### START AND RUN STREAMLIT -#https://docs.streamlit.io/library/get-started/installation - -def ef_viz(stock_df,choices): - #st.write("EF Visualization KOI EDITS") - # st.header('CAPM Model and the Efficient Frontier') - - symbols, weights, benchmark, investing_style, rf, A_coef,ticker = choices.values() - tickers = symbols - - #tickers.append('sp500') - #st.write(tickers) - #st.write(stock_df) - - # Yearly returns for individual companies - #https://stackoverflow.com/questions/69284773/unable-to-resample-the-pandas-with-date-column-typeerror-only-valid-with-dateti - stock_dff = stock_df.copy() - stock_dff['Date'] = pd.to_datetime(stock_dff['Date']) - - - # ind_er_df = stock_dff.set_index('Date') - #st.write(stock_dff.columns) - ind_er_df = stock_dff.resample('Y', on = 'Date').last().pct_change().mean() - ind_er = ind_er_df[tickers] - #st.write(ind_er) - ann_sd = stock_df[tickers].pct_change().apply(lambda x: np.log(1+x)).std().apply(lambda x: x*np.sqrt(250)) - assets = pd.concat([ind_er, ann_sd], axis=1) # Creating a table for visualising returns and volatility of assets - assets.columns = ['Returns', 'Volatility'] - assets - #st.write(assets) - ln_pct_change = stock_df[tickers].pct_change().apply(lambda x: np.log(1+x))[1:] - #Cov Matrix - cov_matrix =ln_pct_change.cov() - - ## CREATE PORFOLIOS WEIGHTS - p_ret = [] # Define an empty array for portfolio returns - p_vol = [] # Define an empty array for portfolio volatility - p_weights = [] # Define an empty array for asset weights - - num_assets = len(tickers) - num_portfolios = 1000 - - for portfolio in range(num_portfolios): - weights = np.random.random(num_assets) - weights = weights/np.sum(weights) - p_weights.append(weights) - returns = np.dot(weights, ind_er) # Returns are the product of individual expected returns of asset and its - # weights - p_ret.append(returns) - var = cov_matrix.mul(weights, axis=0).mul(weights, axis=1).sum().sum()# Portfolio Variance - sd = np.sqrt(var) # Daily standard deviation - ann_sd = sd*np.sqrt(250) # Annual standard deviation = volatility - p_vol.append(ann_sd) - - data = {'Returns':p_ret, 'Volatility':p_vol} - - for counter, symbol in enumerate(stock_df[tickers].columns.tolist()): - #print(counter, symbol) - data[symbol] = [w[counter] for w in p_weights] - - port_ef_df = pd.DataFrame(data) - port_ef_df['Vol'] = port_ef_df['Volatility'] - - ## NEEDS INPUT INSTEAD OF HARD CODE - #a = 5 #the coefficient of risk aversion is A. If an invest is less risk averse A is small. We assume 25 < A < 35. - #rf = 0.041 - - min_vol_port = port_ef_df.iloc[port_ef_df['Volatility'].idxmin()] - optimal_risky_port = port_ef_df.iloc[((port_ef_df['Returns']-rf)/port_ef_df['Volatility']).idxmax()] - - ### Make DF and data string for when hover over data points - def make_op_df(df, tickers): - new = {} - op_str = str() - new['Returns'] = df[0] - new['Volatility'] = df[1] - - for i in range(0,len(tickers)): - new[tickers[i]]= df[i+2] - op_str += str(tickers[i]) + ': ' + str(round(df[i+2],4)) + '
            ' - - return pd.DataFrame(new, index=[0]), op_str - - op_df, op_str = make_op_df(optimal_risky_port, tickers) - - def make_port_str(df, tickers): - port_str_lst = [] - for i in range(0,len(df)): - temp = str() - for u in range(0,len(tickers)): - temp += str(tickers[u])+ ': ' + str(round(df[tickers[u]][i],4)) + '
            ' - port_str_lst.append(temp) - - return port_str_lst - - port_str_lst = make_port_str(port_ef_df, tickers) - - ## CREATE CAPM LINE #https://www.youtube.com/watch?v=JWx2wcrSGkk - cal_x = [] - cal_y = [] - utl = [] - - - - for er in np.linspace(rf, max(data['Returns'])+rf,20): - sd = (er - rf)/ ((optimal_risky_port[0] - rf)/ optimal_risky_port[1]) - u = er - 0.5*A_coef*(sd**2) - cal_x.append(sd) - cal_y.append(er) - utl.append(u) - - data2 = {'Utility':utl, 'cal_x':cal_x, 'cal_y':cal_y} - - utl_df = pd.DataFrame(data2) - - ## Create Figure - fig3 = go.Figure() - - #https://plotly.com/python/colorscales/ - fig3.add_trace(go.Scatter(x=port_ef_df['Volatility'], y=port_ef_df['Returns'], hovertemplate='Volatility: %{x}
            Returns: %{y}
            %{text}',\ - text= port_str_lst, mode='markers', \ - marker=dict(color=port_ef_df['Volatility'], colorbar=dict(title="Volatility"), \ - size=port_ef_df['Returns']*50, cmax=max(port_ef_df['Volatility']),\ - cmin=min(port_ef_df['Volatility'])),name='Portfolio')) - #, mode='markers', size=port_ef_df['Returns'], \ - #size_max=30, color=port_ef_df['Vol'])) - fig3.add_trace(go.Scatter(x=utl_df['cal_x'], y=utl_df['cal_y'], mode='lines', line = dict(color='rgba(11,156,49,1)'),name='Ultility Function',\ - hovertemplate='Volatility: %{x}
            Returns: %{y}')) #)) - - fig3.add_trace(go.Scatter(x=op_df['Volatility'], y=op_df['Returns'], mode='markers', \ - marker=dict(color= 'rgba(11,156,49,1)', size=30),\ - hovertemplate='Volatility: %{x}
            Returns: %{y}
            %{text}',\ - text=[op_str])) - ### HOVER TEMPLATE # https://plotly.com/python/hover-text-and-formatting/ - - -# ### SAVE IN CASE CANNOT FIGURE OUT THE HOVER TEMPLATE -# fig2 = px.scatter(op_df, 'Volatility', 'Returns') -# fig2.update_traces(marker=dict(color= 'rgba(11,156,49,1)', size=35)) - -# fig1 = px.line(utl_df, x="cal_x", y="cal_y") -# #fig1.update_traces(line=dict(color = 'rgba(11,156,49,1)')) - -# fig = px.scatter(port_ef_df, 'Volatility', 'Returns', size='Returns', size_max=30, color='Vol') -# #https://stackoverflow.com/questions/59057881/python-plotly-how-to-customize-hover-template-on-with-what-information-to-show -# #https://stackoverflow.com/questions/65124833/plotly-how-to-combine-scatter-and-line-plots-using-plotly-express - -# #data3 = -# fig3.data = [fig2.data,fig1.data,fig.data] -# #fig3.update_traces(line=dict(color = 'rgba(11,156,49,1)')) -# #### - - fig3.update_layout(showlegend=False)#, legend_title_text = "Contestant") - fig3.update_xaxes(title_text="Volatility") - fig3.update_yaxes(title_text="Portfolio Return Rates") - - st.plotly_chart(fig3, use_container_width=True) - - #st.write(op_str) - op_df = op_df.style.set_properties(**{'color':'green'}) - st.subheader('Optimal Returns vs Volatility and Portfolio weights') - col1, col2, col3 = st.columns([1,6,1]) - with col1: - st.write("") - - with col2: - st.write(op_df) - - with col3: - st.write("") - - im = Image.open('EFvsMinvar.png') - st.subheader('Understand the Efficient Frontier') - col1, col2, col3 = st.columns([1,6,1]) - - with col1: - st.write("") - - with col2: - st.image(im, caption='Elements of the Efficient Frontier',use_column_width='auto') - - with col3: - st.write("") - \ No newline at end of file diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/responses.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/responses.py deleted file mode 100644 index c0a13b7555efc9d99c5c887fee1c94c88ba7e89c..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/responses.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Any - -from starlette.responses import FileResponse as FileResponse # noqa -from starlette.responses import HTMLResponse as HTMLResponse # noqa -from starlette.responses import JSONResponse as JSONResponse # noqa -from starlette.responses import PlainTextResponse as PlainTextResponse # noqa -from starlette.responses import RedirectResponse as RedirectResponse # noqa -from starlette.responses import Response as Response # noqa -from starlette.responses import StreamingResponse as StreamingResponse # noqa - -try: - import ujson -except ImportError: # pragma: nocover - ujson = None # type: ignore - - -try: - import orjson -except ImportError: # pragma: nocover - orjson = None # type: ignore - - -class UJSONResponse(JSONResponse): - def render(self, content: Any) -> bytes: - assert ujson is not None, "ujson must be installed to use UJSONResponse" - return ujson.dumps(content, ensure_ascii=False).encode("utf-8") - - -class ORJSONResponse(JSONResponse): - def render(self, content: Any) -> bytes: - assert orjson is not None, "orjson must be installed to use ORJSONResponse" - return orjson.dumps( - content, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY - ) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/cffLib/width.py 
b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/cffLib/width.py deleted file mode 100644 index c0a746b6922d4c66d0559078457c9546c77c65d3..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/cffLib/width.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- coding: utf-8 -*- - -"""T2CharString glyph width optimizer. - -CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX`` -value do not need to specify their width in their charstring, saving bytes. -This module determines the optimum ``defaultWidthX`` and ``nominalWidthX`` -values for a font, when provided with a list of glyph widths.""" - -from fontTools.ttLib import TTFont -from collections import defaultdict -from operator import add -from functools import reduce - - -class missingdict(dict): - def __init__(self, missing_func): - self.missing_func = missing_func - - def __missing__(self, v): - return self.missing_func(v) - - -def cumSum(f, op=add, start=0, decreasing=False): - - keys = sorted(f.keys()) - minx, maxx = keys[0], keys[-1] - - total = reduce(op, f.values(), start) - - if decreasing: - missing = lambda x: start if x > maxx else total - domain = range(maxx, minx - 1, -1) - else: - missing = lambda x: start if x < minx else total - domain = range(minx, maxx + 1) - - out = missingdict(missing) - - v = start - for x in domain: - v = op(v, f[x]) - out[x] = v - - return out - - -def byteCost(widths, default, nominal): - - if not hasattr(widths, "items"): - d = defaultdict(int) - for w in widths: - d[w] += 1 - widths = d - - cost = 0 - for w, freq in widths.items(): - if w == default: - continue - diff = abs(w - nominal) - if diff <= 107: - cost += freq - elif diff <= 1131: - cost += freq * 2 - else: - cost += freq * 5 - return cost - - -def optimizeWidthsBruteforce(widths): - """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts.""" - - d = defaultdict(int) - for w in widths: - d[w] += 1 - - # Maximum number of bytes using default can possibly save - maxDefaultAdvantage = 5 * max(d.values()) - - minw, maxw = min(widths), max(widths) - domain = list(range(minw, maxw + 1)) - - bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain) - - bestCost = len(widths) * 5 + 1 - for nominal in domain: - if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage: - continue - for default in domain: - cost = byteCost(widths, default, nominal) - if cost < bestCost: - bestCost = cost - bestDefault = default - bestNominal = nominal - - return bestDefault, bestNominal - - -def optimizeWidths(widths): - """Given a list of glyph widths, or dictionary mapping glyph width to number of - glyphs having that, returns a tuple of best CFF default and nominal glyph widths. - - This algorithm is linear in UPEM+numGlyphs.""" - - if not hasattr(widths, "items"): - d = defaultdict(int) - for w in widths: - d[w] += 1 - widths = d - - keys = sorted(widths.keys()) - minw, maxw = keys[0], keys[-1] - domain = list(range(minw, maxw + 1)) - - # Cumulative sum/max forward/backward. - cumFrqU = cumSum(widths, op=add) - cumMaxU = cumSum(widths, op=max) - cumFrqD = cumSum(widths, op=add, decreasing=True) - cumMaxD = cumSum(widths, op=max, decreasing=True) - - # Cost per nominal choice, without default consideration. 
- nomnCostU = missingdict( - lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3 - ) - nomnCostD = missingdict( - lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3 - ) - nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x]) - - # Cost-saving per nominal choice, by best default choice. - dfltCostU = missingdict( - lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5) - ) - dfltCostD = missingdict( - lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5) - ) - dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x])) - - # Combined cost per nominal choice. - bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x]) - - # Best nominal. - nominal = min(domain, key=lambda x: bestCost[x]) - - # Work back the best default. - bestC = bestCost[nominal] - dfltC = nomnCost[nominal] - bestCost[nominal] - ends = [] - if dfltC == dfltCostU[nominal]: - starts = [nominal, nominal - 108, nominal - 1132] - for start in starts: - while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]: - start -= 1 - ends.append(start) - else: - starts = [nominal, nominal + 108, nominal + 1132] - for start in starts: - while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]: - start += 1 - ends.append(start) - default = min(ends, key=lambda default: byteCost(widths, default, nominal)) - - return default, nominal - - -def main(args=None): - """Calculate optimum defaultWidthX/nominalWidthX values""" - - import argparse - - parser = argparse.ArgumentParser( - "fonttools cffLib.width", - description=main.__doc__, - ) - parser.add_argument( - "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files" - ) - parser.add_argument( - "-b", - "--brute-force", - dest="brute", - action="store_true", - help="Use brute-force approach (VERY slow)", - ) - - args = parser.parse_args(args) - - for fontfile in args.inputs: - font = TTFont(fontfile) - hmtx = font["hmtx"] - widths = [m[0] for m in hmtx.metrics.values()] - if args.brute: - default, nominal = optimizeWidthsBruteforce(widths) - else: - default, nominal = optimizeWidths(widths) - print( - "glyphs=%d default=%d nominal=%d byteCost=%d" - % (len(widths), default, nominal, byteCost(widths, default, nominal)) - ) - - -if __name__ == "__main__": - import sys - - if len(sys.argv) == 1: - import doctest - - sys.exit(doctest.testmod().failed) - main() diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/B_A_S_E_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/B_A_S_E_.py deleted file mode 100644 index f468a963a1e2a8d503b57f4d7aeff12b8770cc67..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/B_A_S_E_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_B_A_S_E_(BaseTTXConverter): - pass diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_T_A_T_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_T_A_T_.py deleted file mode 100644 index 1769de91b5f0416354e040b52e3615c6824fd2f9..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_T_A_T_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class 
table_S_T_A_T_(BaseTTXConverter): - pass diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/caching.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/caching.py deleted file mode 100644 index 511de1dee8f3416cf89475c9393275748df00022..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/caching.py +++ /dev/null @@ -1,804 +0,0 @@ -import collections -import functools -import logging -import math -import os -import threading -import warnings -from concurrent.futures import ThreadPoolExecutor - -logger = logging.getLogger("fsspec") - - -class BaseCache(object): - """Pass-though cache: doesn't keep anything, calls every time - - Acts as base class for other cachers - - Parameters - ---------- - blocksize: int - How far to read ahead in numbers of bytes - fetcher: func - Function of the form f(start, end) which gets bytes from remote as - specified - size: int - How big this file is - """ - - name = "none" - - def __init__(self, blocksize, fetcher, size): - self.blocksize = blocksize - self.fetcher = fetcher - self.size = size - - def _fetch(self, start, stop): - if start is None: - start = 0 - if stop is None: - stop = self.size - if start >= self.size or start >= stop: - return b"" - return self.fetcher(start, stop) - - -class MMapCache(BaseCache): - """memory-mapped sparse file cache - - Opens temporary file, which is filled blocks-wise when data is requested. - Ensure there is enough disc space in the temporary location. - - This cache method might only work on posix - """ - - name = "mmap" - - def __init__(self, blocksize, fetcher, size, location=None, blocks=None): - super().__init__(blocksize, fetcher, size) - self.blocks = set() if blocks is None else blocks - self.location = location - self.cache = self._makefile() - - def _makefile(self): - import mmap - import tempfile - - if self.size == 0: - return bytearray() - - # posix version - if self.location is None or not os.path.exists(self.location): - if self.location is None: - fd = tempfile.TemporaryFile() - self.blocks = set() - else: - fd = open(self.location, "wb+") - fd.seek(self.size - 1) - fd.write(b"1") - fd.flush() - else: - fd = open(self.location, "rb+") - - return mmap.mmap(fd.fileno(), self.size) - - def _fetch(self, start, end): - logger.debug(f"MMap cache fetching {start}-{end}") - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - start_block = start // self.blocksize - end_block = end // self.blocksize - need = [i for i in range(start_block, end_block + 1) if i not in self.blocks] - while need: - # TODO: not a for loop so we can consolidate blocks later to - # make fewer fetch calls; this could be parallel - i = need.pop(0) - sstart = i * self.blocksize - send = min(sstart + self.blocksize, self.size) - logger.debug(f"MMap get block #{i} ({sstart}-{send}") - self.cache[sstart:send] = self.fetcher(sstart, send) - self.blocks.add(i) - - return self.cache[start:end] - - def __getstate__(self): - state = self.__dict__.copy() - # Remove the unpicklable entries. 
- del state["cache"] - return state - - def __setstate__(self, state): - # Restore instance attributes - self.__dict__.update(state) - self.cache = self._makefile() - - -class ReadAheadCache(BaseCache): - """Cache which reads only when we get beyond a block of data - - This is a much simpler version of BytesCache, and does not attempt to - fill holes in the cache or keep fragments alive. It is best suited to - many small reads in a sequential order (e.g., reading lines from a file). - """ - - name = "readahead" - - def __init__(self, blocksize, fetcher, size): - super().__init__(blocksize, fetcher, size) - self.cache = b"" - self.start = 0 - self.end = 0 - - def _fetch(self, start, end): - if start is None: - start = 0 - if end is None or end > self.size: - end = self.size - if start >= self.size or start >= end: - return b"" - l = end - start - if start >= self.start and end <= self.end: - # cache hit - return self.cache[start - self.start : end - self.start] - elif self.start <= start < self.end: - # partial hit - part = self.cache[start - self.start :] - l -= len(part) - start = self.end - else: - # miss - part = b"" - end = min(self.size, end + self.blocksize) - self.cache = self.fetcher(start, end) # new block replaces old - self.start = start - self.end = self.start + len(self.cache) - return part + self.cache[:l] - - -class FirstChunkCache(BaseCache): - """Caches the first block of a file only - - This may be useful for file types where the metadata is stored in the header, - but is randomly accessed. - """ - - name = "first" - - def __init__(self, blocksize, fetcher, size): - super().__init__(blocksize, fetcher, size) - self.cache = None - - def _fetch(self, start, end): - start = start or 0 - end = end or self.size - if start < self.blocksize: - if self.cache is None: - if end > self.blocksize: - data = self.fetcher(0, end) - self.cache = data[: self.blocksize] - return data[start:] - self.cache = self.fetcher(0, self.blocksize) - part = self.cache[start:end] - if end > self.blocksize: - part += self.fetcher(self.blocksize, end) - return part - else: - return self.fetcher(start, end) - - -class BlockCache(BaseCache): - """ - Cache holding memory as a set of blocks. - - Requests are only ever made ``blocksize`` at a time, and are - stored in an LRU cache. The least recently accessed block is - discarded when more than ``maxblocks`` are stored. - - Parameters - ---------- - blocksize : int - The number of bytes to store in each block. - Requests are only ever made for ``blocksize``, so this - should balance the overhead of making a request against - the granularity of the blocks. - fetcher : Callable - size : int - The total size of the file being cached. - maxblocks : int - The maximum number of blocks to cache for. The maximum memory - use for this cache is then ``blocksize * maxblocks``. - """ - - name = "blockcache" - - def __init__(self, blocksize, fetcher, size, maxblocks=32): - super().__init__(blocksize, fetcher, size) - self.nblocks = math.ceil(size / blocksize) - self.maxblocks = maxblocks - self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block) - - def __repr__(self): - return "".format( - self.blocksize, self.size, self.nblocks - ) - - def cache_info(self): - """ - The statistics on the block cache. - - Returns - ------- - NamedTuple - Returned directly from the LRU Cache used internally. 
- """ - return self._fetch_block_cached.cache_info() - - def __getstate__(self): - state = self.__dict__ - del state["_fetch_block_cached"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - self._fetch_block_cached = functools.lru_cache(state["maxblocks"])( - self._fetch_block - ) - - def _fetch(self, start, end): - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - - # byte position -> block numbers - start_block_number = start // self.blocksize - end_block_number = end // self.blocksize - - # these are cached, so safe to do multiple calls for the same start and end. - for block_number in range(start_block_number, end_block_number + 1): - self._fetch_block_cached(block_number) - - return self._read_cache( - start, - end, - start_block_number=start_block_number, - end_block_number=end_block_number, - ) - - def _fetch_block(self, block_number): - """ - Fetch the block of data for `block_number`. - """ - if block_number > self.nblocks: - raise ValueError( - "'block_number={}' is greater than the number of blocks ({})".format( - block_number, self.nblocks - ) - ) - - start = block_number * self.blocksize - end = start + self.blocksize - logger.info("BlockCache fetching block %d", block_number) - block_contents = super()._fetch(start, end) - return block_contents - - def _read_cache(self, start, end, start_block_number, end_block_number): - """ - Read from our block cache. - - Parameters - ---------- - start, end : int - The start and end byte positions. - start_block_number, end_block_number : int - The start and end block numbers. - """ - start_pos = start % self.blocksize - end_pos = end % self.blocksize - - if start_block_number == end_block_number: - block = self._fetch_block_cached(start_block_number) - return block[start_pos:end_pos] - - else: - # read from the initial - out = [] - out.append(self._fetch_block_cached(start_block_number)[start_pos:]) - - # intermediate blocks - # Note: it'd be nice to combine these into one big request. However - # that doesn't play nicely with our LRU cache. - for block_number in range(start_block_number + 1, end_block_number): - out.append(self._fetch_block_cached(block_number)) - - # final block - out.append(self._fetch_block_cached(end_block_number)[:end_pos]) - - return b"".join(out) - - -class BytesCache(BaseCache): - """Cache which holds data in a in-memory bytes object - - Implements read-ahead by the block size, for semi-random reads progressing - through the file. - - Parameters - ---------- - trim: bool - As we read more data, whether to discard the start of the buffer when - we are more than a blocksize ahead of it. - """ - - name = "bytes" - - def __init__(self, blocksize, fetcher, size, trim=True): - super().__init__(blocksize, fetcher, size) - self.cache = b"" - self.start = None - self.end = None - self.trim = trim - - def _fetch(self, start, end): - # TODO: only set start/end after fetch, in case it fails? - # is this where retry logic might go? 
- if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - if ( - self.start is not None - and start >= self.start - and self.end is not None - and end < self.end - ): - # cache hit: we have all the required data - offset = start - self.start - return self.cache[offset : offset + end - start] - - if self.blocksize: - bend = min(self.size, end + self.blocksize) - else: - bend = end - - if bend == start or start > self.size: - return b"" - - if (self.start is None or start < self.start) and ( - self.end is None or end > self.end - ): - # First read, or extending both before and after - self.cache = self.fetcher(start, bend) - self.start = start - elif start < self.start: - if self.end - end > self.blocksize: - self.cache = self.fetcher(start, bend) - self.start = start - else: - new = self.fetcher(start, self.start) - self.start = start - self.cache = new + self.cache - elif bend > self.end: - if self.end > self.size: - pass - elif end - self.end > self.blocksize: - self.cache = self.fetcher(start, bend) - self.start = start - else: - new = self.fetcher(self.end, bend) - self.cache = self.cache + new - - self.end = self.start + len(self.cache) - offset = start - self.start - out = self.cache[offset : offset + end - start] - if self.trim: - num = (self.end - self.start) // (self.blocksize + 1) - if num > 1: - self.start += self.blocksize * num - self.cache = self.cache[self.blocksize * num :] - return out - - def __len__(self): - return len(self.cache) - - -class AllBytes(BaseCache): - """Cache entire contents of the file""" - - name = "all" - - def __init__(self, blocksize=None, fetcher=None, size=None, data=None): - super().__init__(blocksize, fetcher, size) - if data is None: - data = self.fetcher(0, self.size) - self.data = data - - def _fetch(self, start, end): - return self.data[start:end] - - -class KnownPartsOfAFile(BaseCache): - """ - Cache holding known file parts. - - Parameters - ---------- - blocksize: int - How far to read ahead in numbers of bytes - fetcher: func - Function of the form f(start, end) which gets bytes from remote as - specified - size: int - How big this file is - data: dict - A dictionary mapping explicit `(start, stop)` file-offset tuples - with known bytes. - strict: bool, default True - Whether to fetch reads that go beyond a known byte-range boundary. - If `False`, any read that ends outside a known part will be zero - padded. Note that zero padding will not be used for reads that - begin outside a known byte-range. 
- """ - - name = "parts" - - def __init__(self, blocksize, fetcher, size, data={}, strict=True, **_): - super(KnownPartsOfAFile, self).__init__(blocksize, fetcher, size) - self.strict = strict - - # simple consolidation of contiguous blocks - if data: - old_offsets = sorted(list(data.keys())) - offsets = [old_offsets[0]] - blocks = [data.pop(old_offsets[0])] - for start, stop in old_offsets[1:]: - start0, stop0 = offsets[-1] - if start == stop0: - offsets[-1] = (start0, stop) - blocks[-1] += data.pop((start, stop)) - else: - offsets.append((start, stop)) - blocks.append(data.pop((start, stop))) - - self.data = dict(zip(offsets, blocks)) - else: - self.data = data - - def _fetch(self, start, stop): - out = b"" - for (loc0, loc1), data in self.data.items(): - # If self.strict=False, use zero-padded data - # for reads beyond the end of a "known" buffer - if loc0 <= start < loc1: - off = start - loc0 - out = data[off : off + stop - start] - if not self.strict or loc0 <= stop <= loc1: - # The request is within a known range, or - # it begins within a known range, and we - # are allowed to pad reads beyond the - # buffer with zero - out += b"\x00" * (stop - start - len(out)) - return out - else: - # The request ends outside a known range, - # and we are being "strict" about reads - # beyond the buffer - start = loc1 - break - - # We only get here if there is a request outside the - # known parts of the file. In an ideal world, this - # should never happen - if self.fetcher is None: - # We cannot fetch the data, so raise an error - raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ") - # We can fetch the data, but should warn the user - # that this may be slow - warnings.warn( - f"Read is outside the known file parts: {(start, stop)}. " - f"IO/caching performance may be poor!" - ) - logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}") - return out + super()._fetch(start, stop) - - -class UpdatableLRU: - """ - Custom implementation of LRU cache that allows updating keys - - Used by BackgroudBlockCache - """ - - CacheInfo = collections.namedtuple( - "CacheInfo", ["hits", "misses", "maxsize", "currsize"] - ) - - def __init__(self, func, max_size=128): - self._cache = collections.OrderedDict() - self._func = func - self._max_size = max_size - self._hits = 0 - self._misses = 0 - self._lock = threading.Lock() - - def __call__(self, *args): - with self._lock: - if args in self._cache: - self._cache.move_to_end(args) - self._hits += 1 - return self._cache[args] - - result = self._func(*args) - - with self._lock: - self._cache[args] = result - self._misses += 1 - if len(self._cache) > self._max_size: - self._cache.popitem(last=False) - - return result - - def is_key_cached(self, *args): - with self._lock: - return args in self._cache - - def add_key(self, result, *args): - with self._lock: - self._cache[args] = result - if len(self._cache) > self._max_size: - self._cache.popitem(last=False) - - def cache_info(self): - with self._lock: - return self.CacheInfo( - maxsize=self._max_size, - currsize=len(self._cache), - hits=self._hits, - misses=self._misses, - ) - - -class BackgroundBlockCache(BaseCache): - """ - Cache holding memory as a set of blocks with pre-loading of - the next block in the background. - - Requests are only ever made ``blocksize`` at a time, and are - stored in an LRU cache. The least recently accessed block is - discarded when more than ``maxblocks`` are stored. 
If the - next block is not in cache, it is loaded in a separate thread - in non-blocking way. - - Parameters - ---------- - blocksize : int - The number of bytes to store in each block. - Requests are only ever made for ``blocksize``, so this - should balance the overhead of making a request against - the granularity of the blocks. - fetcher : Callable - size : int - The total size of the file being cached. - maxblocks : int - The maximum number of blocks to cache for. The maximum memory - use for this cache is then ``blocksize * maxblocks``. - """ - - name = "background" - - def __init__(self, blocksize, fetcher, size, maxblocks=32): - super().__init__(blocksize, fetcher, size) - self.nblocks = math.ceil(size / blocksize) - self.maxblocks = maxblocks - self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks) - - self._thread_executor = ThreadPoolExecutor(max_workers=1) - self._fetch_future_block_number = None - self._fetch_future = None - self._fetch_future_lock = threading.Lock() - - def __repr__(self): - return "".format( - self.blocksize, self.size, self.nblocks - ) - - def cache_info(self): - """ - The statistics on the block cache. - - Returns - ------- - NamedTuple - Returned directly from the LRU Cache used internally. - """ - return self._fetch_block_cached.cache_info() - - def __getstate__(self): - state = self.__dict__ - del state["_fetch_block_cached"] - del state["_thread_executor"] - del state["_fetch_future_block_number"] - del state["_fetch_future"] - del state["_fetch_future_lock"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"]) - self._thread_executor = ThreadPoolExecutor(max_workers=1) - self._fetch_future_block_number = None - self._fetch_future = None - self._fetch_future_lock = threading.Lock() - - def _fetch(self, start, end): - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - - # byte position -> block numbers - start_block_number = start // self.blocksize - end_block_number = end // self.blocksize - - fetch_future_block_number = None - fetch_future = None - with self._fetch_future_lock: - # Background thread is running. Check we we can or must join it. - if self._fetch_future is not None: - if self._fetch_future.done(): - logger.info("BlockCache joined background fetch without waiting.") - self._fetch_block_cached.add_key( - self._fetch_future.result(), self._fetch_future_block_number - ) - # Cleanup the fetch variables. Done with fetching the block. - self._fetch_future_block_number = None - self._fetch_future = None - else: - # Must join if we need the block for the current fetch - must_join = bool( - start_block_number - <= self._fetch_future_block_number - <= end_block_number - ) - if must_join: - # Copy to the local variables to release lock - # before waiting for result - fetch_future_block_number = self._fetch_future_block_number - fetch_future = self._fetch_future - - # Cleanup the fetch variables. Have a local copy. - self._fetch_future_block_number = None - self._fetch_future = None - - # Need to wait for the future for the current read - if fetch_future is not None: - logger.info("BlockCache waiting for background fetch.") - # Wait until result and put it in cache - self._fetch_block_cached.add_key( - fetch_future.result(), fetch_future_block_number - ) - - # these are cached, so safe to do multiple calls for the same start and end. 
- for block_number in range(start_block_number, end_block_number + 1): - self._fetch_block_cached(block_number) - - # fetch next block in the background if nothing is running in the background, - # the block is within file and it is not already cached - end_block_plus_1 = end_block_number + 1 - with self._fetch_future_lock: - if ( - self._fetch_future is None - and end_block_plus_1 <= self.nblocks - and not self._fetch_block_cached.is_key_cached(end_block_plus_1) - ): - self._fetch_future_block_number = end_block_plus_1 - self._fetch_future = self._thread_executor.submit( - self._fetch_block, end_block_plus_1, "async" - ) - - return self._read_cache( - start, - end, - start_block_number=start_block_number, - end_block_number=end_block_number, - ) - - def _fetch_block(self, block_number, log_info="sync"): - """ - Fetch the block of data for `block_number`. - """ - if block_number > self.nblocks: - raise ValueError( - "'block_number={}' is greater than the number of blocks ({})".format( - block_number, self.nblocks - ) - ) - - start = block_number * self.blocksize - end = start + self.blocksize - logger.info("BlockCache fetching block (%s) %d", log_info, block_number) - block_contents = super()._fetch(start, end) - return block_contents - - def _read_cache(self, start, end, start_block_number, end_block_number): - """ - Read from our block cache. - - Parameters - ---------- - start, end : int - The start and end byte positions. - start_block_number, end_block_number : int - The start and end block numbers. - """ - start_pos = start % self.blocksize - end_pos = end % self.blocksize - - if start_block_number == end_block_number: - block = self._fetch_block_cached(start_block_number) - return block[start_pos:end_pos] - - else: - # read from the initial - out = [] - out.append(self._fetch_block_cached(start_block_number)[start_pos:]) - - # intermediate blocks - # Note: it'd be nice to combine these into one big request. However - # that doesn't play nicely with our LRU cache. - for block_number in range(start_block_number + 1, end_block_number): - out.append(self._fetch_block_cached(block_number)) - - # final block - out.append(self._fetch_block_cached(end_block_number)[:end_pos]) - - return b"".join(out) - - -caches = { - # one custom case - None: BaseCache, -} - - -def register_cache(cls, clobber=False): - """'Register' cache implementation. - - Parameters - ---------- - clobber: bool, optional - If set to True (default is False) - allow to overwrite existing - entry. 
- - Raises - ------ - ValueError - """ - name = cls.name - if not clobber and name in caches: - raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}") - caches[name] = cls - - -for c in ( - BaseCache, - MMapCache, - BytesCache, - ReadAheadCache, - BlockCache, - FirstChunkCache, - AllBytes, - KnownPartsOfAFile, - BackgroundBlockCache, -): - register_cache(c) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-dcc65f03.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-dcc65f03.js deleted file mode 100644 index 4b427678283154a6fc15475a8a5be37678434fc3..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-dcc65f03.js +++ /dev/null @@ -1,6 +0,0 @@ -import{S as E,e as L,s as M,f as R,g as p,h as _,j as v,n as w,k as m,m as j,o as H,Y,r as C,u as b,v as B,w as d,t as h,x as T,P as A,I as V,Z as K,p as D,F as O,G as S,H as N,ai as W,y as X,z as x,C as ee,V as te,ae as le,Q as ne,R as ie}from"./index-9e76ffee.js";import{f as se,B as oe}from"./Button-30a08c0b.js";import{C as re,a as ce}from"./Copy-92242405.js";import{E as ae}from"./Empty-8e3485c0.js";import{B as fe}from"./BlockLabel-9545c6da.js";function ue(a){let e,t;return{c(){e=R("svg"),t=R("path"),p(t,"fill","currentColor"),p(t,"d","M5 3h2v2H5v5a2 2 0 0 1-2 2a2 2 0 0 1 2 2v5h2v2H5c-1.07-.27-2-.9-2-2v-4a2 2 0 0 0-2-2H0v-2h1a2 2 0 0 0 2-2V5a2 2 0 0 1 2-2m14 0a2 2 0 0 1 2 2v4a2 2 0 0 0 2 2h1v2h-1a2 2 0 0 0-2 2v4a2 2 0 0 1-2 2h-2v-2h2v-5a2 2 0 0 1 2-2a2 2 0 0 1-2-2V5h-2V3h2m-7 12a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1m-4 0a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1m8 0a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1Z"),p(e,"xmlns","http://www.w3.org/2000/svg"),p(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),p(e,"aria-hidden","true"),p(e,"role","img"),p(e,"class","iconify iconify--mdi"),p(e,"width","100%"),p(e,"height","100%"),p(e,"preserveAspectRatio","xMidYMid meet"),p(e,"viewBox","0 0 24 24")},m(l,i){_(l,e,i),v(e,t)},p:w,i:w,o:w,d(l){l&&m(e)}}}let U=class extends E{constructor(e){super(),L(this,e,null,ue,M,{})}};function Z(a,e,t){const l=a.slice();return l[5]=e[t],l[7]=t,l}function q(a,e,t){const l=a.slice();return l[5]=e[t],l[7]=t,l}function _e(a){let e,t;return{c(){e=j("div"),t=h(a[1]),p(e,"class","json-item svelte-1kspdo")},m(l,i){_(l,e,i),v(e,t)},p(l,i){i&2&&T(t,l[1])},i:w,o:w,d(l){l&&m(e)}}}function me(a){let e,t;return{c(){e=j("div"),t=h(a[1]),p(e,"class","json-item number svelte-1kspdo")},m(l,i){_(l,e,i),v(e,t)},p(l,i){i&2&&T(t,l[1])},i:w,o:w,d(l){l&&m(e)}}}function de(a){let e,t=a[1].toLocaleString()+"",l;return{c(){e=j("div"),l=h(t),p(e,"class","json-item bool svelte-1kspdo")},m(i,r){_(i,e,r),v(e,l)},p(i,r){r&2&&t!==(t=i[1].toLocaleString()+"")&&T(l,t)},i:w,o:w,d(i){i&&m(e)}}}function be(a){let e,t,l,i;return{c(){e=j("div"),t=h('"'),l=h(a[1]),i=h('"'),p(e,"class","json-item string svelte-1kspdo")},m(r,o){_(r,e,o),v(e,t),v(e,l),v(e,i)},p(r,o){o&2&&T(l,r[1])},i:w,o:w,d(r){r&&m(e)}}}function ke(a){let e;return{c(){e=j("div"),e.textContent="null",p(e,"class","json-item null svelte-1kspdo")},m(t,l){_(t,e,l)},p:w,i:w,o:w,d(t){t&&m(e)}}}function pe(a){let e,t,l,i;const r=[ge,ve],o=[];function f(n,s){return n[0]?0:1}return e=f(a),t=o[e]=r[e](a),{c(){t.c(),l=A()},m(n,s){o[e].m(n,s),_(n,l,s),i=!0},p(n,s){let 
c=e;e=f(n),e===c?o[e].p(n,s):(C(),b(o[c],1,1,()=>{o[c]=null}),B(),t=o[e],t?t.p(n,s):(t=o[e]=r[e](n),t.c()),d(t,1),t.m(l.parentNode,l))},i(n){i||(d(t),i=!0)},o(n){b(t),i=!1},d(n){n&&m(l),o[e].d(n)}}}function he(a){let e,t,l,i;const r=[je,we],o=[];function f(n,s){return n[0]?0:1}return e=f(a),t=o[e]=r[e](a),{c(){t.c(),l=A()},m(n,s){o[e].m(n,s),_(n,l,s),i=!0},p(n,s){let c=e;e=f(n),e===c?o[e].p(n,s):(C(),b(o[c],1,1,()=>{o[c]=null}),B(),t=o[e],t?t.p(n,s):(t=o[e]=r[e](n),t.c()),d(t,1),t.m(l.parentNode,l))},i(n){i||(d(t),i=!0)},o(n){b(t),i=!1},d(n){n&&m(l),o[e].d(n)}}}function ve(a){let e,t,l,i,r=V(Object.entries(a[1])),o=[];for(let n=0;nb(o[n],1,1,()=>{o[n]=null});return{c(){e=h(`{ - `),t=j("div");for(let n=0;nb(o[n],1,1,()=>{o[n]=null});return{c(){e=h(`[ - `),t=j("div");for(let n=0;n{n[y]=null}),B(),r=n[i],r?r.p(c,u):(r=n[i]=f[i](c),r.c()),d(r,1),r.m(l,null))},i(c){o||(d(r),o=!0)},o(c){b(r),o=!1},d(c){c&&(m(e),m(t),m(l)),n[i].d()}}}function Oe(a,e,t){let{value:l}=e,{depth:i}=e,{collapsed:r=i>4}=e;const o=()=>{t(0,r=!1)},f=()=>{t(0,r=!1)};return a.$$set=n=>{"value"in n&&t(1,l=n.value),"depth"in n&&t(2,i=n.depth),"collapsed"in n&&t(0,r=n.collapsed)},[r,l,i,o,f]}class I extends E{constructor(e){super(),L(this,e,Oe,ye,M,{value:1,depth:2,collapsed:0})}}function Se(a){let e,t;return e=new ae({props:{$$slots:{default:[Je]},$$scope:{ctx:a}}}),{c(){O(e.$$.fragment)},m(l,i){S(e,l,i),t=!0},p(l,i){const r={};i&32&&(r.$$scope={dirty:i,ctx:l}),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){N(e,l)}}}function Ne(a){let e,t,l,i,r,o,f,n,s;const c=[Be,Ce],u=[];function y(g,J){return g[1]?0:1}return t=y(a),l=u[t]=c[t](a),o=new I({props:{value:a[0],depth:0}}),{c(){e=j("button"),l.c(),i=H(),r=j("div"),O(o.$$.fragment),p(e,"class","svelte-1trjy9a"),p(r,"class","json-holder svelte-1trjy9a")},m(g,J){_(g,e,J),u[t].m(e,null),_(g,i,J),_(g,r,J),S(o,r,null),f=!0,n||(s=D(e,"click",a[2]),n=!0)},p(g,J){let k=t;t=y(g),t!==k&&(C(),b(u[k],1,1,()=>{u[k]=null}),B(),l=u[t],l||(l=u[t]=c[t](g),l.c()),d(l,1),l.m(e,null));const P={};J&1&&(P.value=g[0]),o.$set(P)},i(g){f||(d(l),d(o.$$.fragment,g),f=!0)},o(g){b(l),b(o.$$.fragment,g),f=!1},d(g){g&&(m(e),m(i),m(r)),u[t].d(),N(o),n=!1,s()}}}function Je(a){let e,t;return e=new U({}),{c(){O(e.$$.fragment)},m(l,i){S(e,l,i),t=!0},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){N(e,l)}}}function Ce(a){let e,t,l;return t=new re({}),{c(){e=j("span"),O(t.$$.fragment),p(e,"class","copy-text")},m(i,r){_(i,e,r),S(t,e,null),l=!0},i(i){l||(d(t.$$.fragment,i),l=!0)},o(i){b(t.$$.fragment,i),l=!1},d(i){i&&m(e),N(t)}}}function Be(a){let e,t,l,i;return t=new ce({}),{c(){e=j("span"),O(t.$$.fragment)},m(r,o){_(r,e,o),S(t,e,null),i=!0},i(r){i||(d(t.$$.fragment,r),r&&(l||X(()=>{l=x(e,se,{duration:300}),l.start()})),i=!0)},o(r){b(t.$$.fragment,r),i=!1},d(r){r&&m(e),N(t)}}}function He(a){let e,t,l,i,r;const o=[Ne,Se],f=[];function n(s,c){return c&1&&(e=null),e==null&&(e=!!(s[0]&&s[0]!=='""'&&!Te(s[0]))),e?0:1}return t=n(a,-1),l=f[t]=o[t](a),{c(){l.c(),i=A()},m(s,c){f[t].m(s,c),_(s,i,c),r=!0},p(s,[c]){let u=t;t=n(s,c),t===u?f[t].p(s,c):(C(),b(f[u],1,1,()=>{f[u]=null}),B(),l=f[t],l?l.p(s,c):(l=f[t]=o[t](s),l.c()),d(l,1),l.m(i.parentNode,i))},i(s){r||(d(l),r=!0)},o(s){b(l),r=!1},d(s){s&&m(i),f[t].d(s)}}}function Te(a){return a&&Object.keys(a).length===0&&Object.getPrototypeOf(a)===Object.prototype}function Ve(a,e,t){let{value:l={}}=e,i=!1,r;function o(){t(1,i=!0),r&&clearTimeout(r),r=setTimeout(()=>{t(1,i=!1)},1e3)}async function f(){"clipboard"in 
navigator&&(await navigator.clipboard.writeText(JSON.stringify(l,null,2)),o())}return W(()=>{r&&clearTimeout(r)}),a.$$set=n=>{"value"in n&&t(0,l=n.value)},[l,i,f]}class Ee extends E{constructor(e){super(),L(this,e,Ve,He,M,{value:0})}}function $(a){let e,t;return e=new fe({props:{Icon:U,show_label:a[6],label:a[5],float:!1,disable:a[7]===!1}}),{c(){O(e.$$.fragment)},m(l,i){S(e,l,i),t=!0},p(l,i){const r={};i&64&&(r.show_label=l[6]),i&32&&(r.label=l[5]),i&128&&(r.disable=l[7]===!1),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){N(e,l)}}}function Le(a){let e,t,l,i,r,o=a[5]&&$(a);const f=[a[4]];let n={};for(let s=0;s{o=null}),B());const u=c&16?ne(f,[ie(s[4])]):{};t.$set(u);const y={};c&8&&(y.value=s[3]),i.$set(y)},i(s){r||(d(o),d(t.$$.fragment,s),d(i.$$.fragment,s),r=!0)},o(s){b(o),b(t.$$.fragment,s),b(i.$$.fragment,s),r=!1},d(s){s&&(m(e),m(l)),o&&o.d(s),N(t,s),N(i,s)}}}function Me(a){let e,t;return e=new oe({props:{visible:a[2],test_id:"json",elem_id:a[0],elem_classes:a[1],container:a[7],scale:a[8],min_width:a[9],padding:!1,$$slots:{default:[Le]},$$scope:{ctx:a}}}),{c(){O(e.$$.fragment)},m(l,i){S(e,l,i),t=!0},p(l,[i]){const r={};i&4&&(r.visible=l[2]),i&1&&(r.elem_id=l[0]),i&2&&(r.elem_classes=l[1]),i&128&&(r.container=l[7]),i&256&&(r.scale=l[8]),i&512&&(r.min_width=l[9]),i&4344&&(r.$$scope={dirty:i,ctx:l}),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){N(e,l)}}}function Ae(a,e,t){let{elem_id:l=""}=e,{elem_classes:i=[]}=e,{visible:r=!0}=e,{value:o}=e,f,{loading_status:n}=e,{label:s}=e,{show_label:c}=e,{container:u=!0}=e,{scale:y=null}=e,{min_width:g=void 0}=e;const J=ee();return a.$$set=k=>{"elem_id"in k&&t(0,l=k.elem_id),"elem_classes"in k&&t(1,i=k.elem_classes),"visible"in k&&t(2,r=k.visible),"value"in k&&t(3,o=k.value),"loading_status"in k&&t(4,n=k.loading_status),"label"in k&&t(5,s=k.label),"show_label"in k&&t(6,c=k.show_label),"container"in k&&t(7,u=k.container),"scale"in k&&t(8,y=k.scale),"min_width"in k&&t(9,g=k.min_width)},a.$$.update=()=>{a.$$.dirty&1032&&o!==f&&(t(10,f=o),J("change"))},[l,i,r,o,n,s,c,u,y,g,f]}class De extends E{constructor(e){super(),L(this,e,Ae,Me,M,{elem_id:0,elem_classes:1,visible:2,value:3,loading_status:4,label:5,show_label:6,container:7,scale:8,min_width:9})}}const ze=De,Fe=["static"];export{ze as Component,Fe as modes}; -//# sourceMappingURL=index-dcc65f03.js.map diff --git a/spaces/deepseek-ai/deepseek-coder-7b-instruct/README.md b/spaces/deepseek-ai/deepseek-coder-7b-instruct/README.md deleted file mode 100644 index 7961b68cfc9844bbe3b9ee81a5dc223ef31e1a66..0000000000000000000000000000000000000000 --- a/spaces/deepseek-ai/deepseek-coder-7b-instruct/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Chat with DeepSeek Coder 7B -emoji: 🐬 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false -suggested_hardware: a10g-small ---- \ No newline at end of file diff --git a/spaces/devthedeveloper/Bark-with-Voice-Cloning/swap_voice.py b/spaces/devthedeveloper/Bark-with-Voice-Cloning/swap_voice.py deleted file mode 100644 index be1135be3648f9757046de1f9a4e240bd818be5a..0000000000000000000000000000000000000000 --- a/spaces/devthedeveloper/Bark-with-Voice-Cloning/swap_voice.py +++ /dev/null @@ -1,62 +0,0 @@ -from bark.generation import load_codec_model, generate_text_semantic, grab_best_device -from bark import SAMPLE_RATE -from encodec.utils import convert_audio -from bark.hubert.hubert_manager import HuBERTManager -from bark.hubert.pre_kmeans_hubert import 
CustomHubert -from bark.hubert.customtokenizer import CustomTokenizer -from bark.api import semantic_to_waveform -from scipy.io.wavfile import write as write_wav -from util.helper import create_filename -from util.settings import Settings - - -import torchaudio -import torch -import os -import gradio - -def swap_voice_from_audio(swap_audio_filename, selected_speaker, tokenizer_lang, seed, batchcount, progress=gradio.Progress(track_tqdm=True)): - use_gpu = not os.environ.get("BARK_FORCE_CPU", False) - progress(0, desc="Loading Codec") - - # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer - hubert_manager = HuBERTManager() - hubert_manager.make_sure_hubert_installed() - hubert_manager.make_sure_tokenizer_installed(tokenizer_lang=tokenizer_lang) - - # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer - # Load HuBERT for semantic tokens - - # Load the HuBERT model - device = grab_best_device(use_gpu) - hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device) - model = load_codec_model(use_gpu=use_gpu) - - # Load the CustomTokenizer model - tokenizer = CustomTokenizer.load_from_checkpoint(f'./models/hubert/{tokenizer_lang}_tokenizer.pth').to(device) # Automatically uses the right layers - - progress(0.25, desc="Converting WAV") - - # Load and pre-process the audio waveform - wav, sr = torchaudio.load(swap_audio_filename) - if wav.shape[0] == 2: # Stereo to mono if needed - wav = wav.mean(0, keepdim=True) - - wav = convert_audio(wav, sr, model.sample_rate, model.channels) - wav = wav.to(device) - semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate) - semantic_tokens = tokenizer.get_token(semantic_vectors) - - audio = semantic_to_waveform( - semantic_tokens, - history_prompt=selected_speaker, - temp=0.7, - silent=False, - output_full=False) - - settings = Settings('config.yaml') - - result = create_filename(settings.output_folder_path, None, "swapvoice",".wav") - write_wav(result, SAMPLE_RATE, audio) - return result - diff --git a/spaces/diacanFperku/AutoGPT/Classmate Marathi Movie Download Utorrent Kickass 45 [Extra Quality].md b/spaces/diacanFperku/AutoGPT/Classmate Marathi Movie Download Utorrent Kickass 45 [Extra Quality].md deleted file mode 100644 index 6788bbd8f49425cb440a56d19e82da4ff48492d1..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Classmate Marathi Movie Download Utorrent Kickass 45 [Extra Quality].md +++ /dev/null @@ -1,6 +0,0 @@ -

            classmate marathi movie download utorrent kickass 45


            Download Ziphttps://gohhs.com/2uFUSB



            -
            -787 Web TORRENT JOKER 2019 movie download full HD 720p Hindi Filmywap ... DOWNLOAD Chhapaak 2020 Hindi 720p PreDVD DHURALA 2020 marathi 720p PreDVD x264 AAC 1. ... A teen girl finds herself besieged by an evil supernatural force after she played Ouija with two classmates. ... Running time 1 45 00. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/diacanFperku/AutoGPT/Fiat EPER V84 05.2014 Multilanguage.md b/spaces/diacanFperku/AutoGPT/Fiat EPER V84 05.2014 Multilanguage.md deleted file mode 100644 index e48b3b11e65072cbee4dd6dfc875ef3ec2c677e1..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Fiat EPER V84 05.2014 Multilanguage.md +++ /dev/null @@ -1,9 +0,0 @@ - -

            The program Fiat ePER was developed at 19-year. Its programme is aimed to the catalogues of parts for the cars and the minivans. ePER is important catalogues of spare parts and is a part of the program Fiat ePER to Search Engine for spare parts. EPER [06.2013.06.03].

            -

            Fiat ePER v84 05.2014 Multilanguage


            Download File - https://gohhs.com/2uFUWd



            -

            ePER is the catalog of spare parts for the cars. The program has been designed for search engines and is also used as a graphical catalogue of spare parts.The software ePER was developed in the 80's. ePER is based on webservices, so that the list of models (and of details (v20w32nt.dll). Fazuka ePER is the program for the catalogue of spare parts for the cars Fiat ePER. Does not only have ePER an automatic search engine for spare parts, but also for engines.Fazuka is a program for the ePER. The catalog of ePER is based on G-in. Windows XP SP2 and newer.

            -

            Fiat ePER is a part of the program Fiat ePER that is the search engine for spare parts for the cars, and for the engines.. The catalog of spare parts for the cars, and for the engines., and is based on the webservice of the parts. The program is based on the file system for the webservices list of parts. In the file system, you can include the details of the lists of parts, and you can carry over the catalogue of parts.The operation of such a file system may be asked of the standard installer as an administrator. The program is a part of the program Fiat ePER to search the engines. In this operation search to carry over the catalog of parts.. The program is based on the file system for the webservices list of parts.

            -

            Fiat ePER is a part of the program Fiat ePER that is the search engine for spare parts for the cars, and for the engines.. The catalog of spare parts for the cars, and for the engines., and is based on the webservice of the parts. The operation of such a file system may be asked of the standard installer as an administrator. The program is a part of the program Fiat ePER to search the engines. In this operation search to carry over the catalog of parts..

            -

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Goldplay Gp-1005 Driver Indirl ((FULL)).md b/spaces/diacanFperku/AutoGPT/Goldplay Gp-1005 Driver Indirl ((FULL)).md deleted file mode 100644 index 0a7e5d9bb6c0a5fdea35a23fb60608af42f13a22..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Goldplay Gp-1005 Driver Indirl ((FULL)).md +++ /dev/null @@ -1,6 +0,0 @@ -

            Goldplay Gp-1005 Driver Indirl


            Download Filehttps://gohhs.com/2uFVH9



            - -pro cs6 serial keygen free download[/url] goldplay gp-1005 driver indir. 1911 dll settlers 7 crack insidious 2 download full movie dll tool license keygen free. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/diacanFperku/AutoGPT/Numerical Heat Transfer And Fluid Flow Suhas V.patankar Solution Pdf !!HOT!!.md b/spaces/diacanFperku/AutoGPT/Numerical Heat Transfer And Fluid Flow Suhas V.patankar Solution Pdf !!HOT!!.md deleted file mode 100644 index f870b2a13588489274ad73f91dfe0851fa418ad8..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Numerical Heat Transfer And Fluid Flow Suhas V.patankar Solution Pdf !!HOT!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Numerical Heat Transfer And Fluid Flow Suhas V.patankar Solution Pdf


            Download Ziphttps://gohhs.com/2uFVde



            - -Download solution matlab code this solution is available in pdf and may read any files with same name. 0 jessie 0 jessie-7. some of the questions, statements and formulas of the present assignment are as follows: 1. matlab_data_sets. This assignment consists of two parts: part A contains five short-quiz questions with multiple choice answers and part B contains several assignments to help you. The heat transfer problem is studied in chapter 3. This online book deals with the modeling of real world application. heat transfer solutions for small and large plants are discussed in this chapter the effect of temperature on the specific heat of a system is considered in this chapter the heat conduction of a material is studied in this chapter. find the errors in the following code. With more experience you will be able to obtain a clear picture of the functions of data types. C. It is an independent work. ) 4. the value of temperature at the outside boundary is 100 degrees. thermo module — heat transfer for building science materials and structures thermo. heat transfer coefficient hc is related to convection coefficient. This can be simply done using the analyze button, which is located on the toolbar (see Figure 3. [Insert Description Here] 2. 5. Fretting. with environmental systems [Insert Description Here] 1. 13). a branch in the middle of the copper tubing is heated to 900 degrees and is held constant at that temperature for 50 minutes. online book the heat transfer coefficient hc is related to convection coefficient. the first issue of the associated. 5e – Heat Transfer. ck co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie kennedy co charlie k 4fefd39f24
            -
            -
            -

            diff --git a/spaces/diacanFperku/AutoGPT/Queen Discography 19672009 Mp3 320 Kbps.md b/spaces/diacanFperku/AutoGPT/Queen Discography 19672009 Mp3 320 Kbps.md deleted file mode 100644 index 783d650627d7eb5b033911c4c7ba040b800636e6..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Queen Discography 19672009 Mp3 320 Kbps.md +++ /dev/null @@ -1,101 +0,0 @@ -
            -

            Queen Discography 1967-2009 Mp3 320 Kbps: How to Download and Enjoy the Music of the Rock Legends

            - -

            Queen is one of the most legendary and influential rock bands of all time. Their music has captivated millions of fans around the world for decades. Their discography includes 15 studio albums, 10 live albums, and countless singles and compilations. If you are a fan of Queen or want to discover their amazing music, you might want to download their discography in high-quality Mp3 format.

            -

            Queen Discography 19672009 Mp3 320 Kbps


            DOWNLOADhttps://gohhs.com/2uFTyl



            - -

            In this article, we will show you how to download Queen Discography 1967-2009 Mp3 320 Kbps, the most complete and comprehensive collection of their music in the best possible quality. We will also give you some information about their albums and songs, and some tips on how to enjoy their music.

            - -

            How to Download Queen Discography 1967-2009 Mp3 320 Kbps

            - -

            There are many websites and platforms where you can download Queen Discography 1967-2009 Mp3 320 Kbps, but not all of them are reliable or safe. Some of them might contain viruses, malware, or low-quality files. Some of them might require you to pay or register before accessing the download links. Some of them might have incomplete or inaccurate information about the albums and songs.

            - -

            To avoid these problems, we recommend you to use a trusted and reputable torrent website that offers Queen Discography 1967-2009 Mp3 320 Kbps as a magnet link or a torrent file. A torrent website is a website where users can share and download files using a peer-to-peer network. A magnet link is a URL that contains the information needed to download the files from other users. A torrent file is a small file that contains the same information as a magnet link.

            -

            - -

            Some of the benefits of using a torrent website are:

            -
              -
            • You can download large files faster and more efficiently.
            • -
            • You can pause and resume your downloads at any time.
            • -
            • You can check the quality and authenticity of the files before downloading them.
            • -
            • You can access a wide variety of content from different sources.
            • -
            - -

            Some of the drawbacks of using a torrent website are:

            -
              -
            • You might need to install a torrent client software on your device to download the files.
            • -
            • You might need to use a VPN service to protect your privacy and security online.
            • -
            • You might need to follow some rules and etiquette when using the torrent website and the peer-to-peer network.
            • -
            - -

            One of the best torrent websites that offers Queen Discography 1967-2009 Mp3 320 Kbps is SolidTorrents. SolidTorrents is a modern and user-friendly torrent search engine that indexes millions of torrents from various sources. It has a simple and intuitive interface that allows you to search for any content you want. It also has some useful features such as filters, categories, ratings, comments, and more.

            - -

            To download Queen Discography 1967-2009 Mp3 320 Kbps from SolidTorrents, follow these steps:

            -
              -
            1. Go to https://solidtorrents.to/ on your web browser.
            2. -
            3. Type "Queen Discography 1967-2009 Mp3 320 Kbps" in the search box and hit enter.
            4. -
            5. You will see a list of results that match your query. Look for the one that has the most seeders, leechers, file size, and downloads. Seeders are users who have the complete file and are sharing it with others. Leechers are users who are downloading the file but have not completed it yet. File size is the total amount of data that you need to download. Downloads are the number of times that the file has been downloaded by other users.
            6. -
            7. Click on the result that you want to download. You will see more details about the file, such as its name, description, files list, trackers list, and more info.
            8. -
            9. Click on either "Torrent Download" or "Magnet Download" to start downloading the file. If you click on "Torrent Download", you will need to save the torrent file on your device and open it with your torrent client software. If you click on "Magnet Download", you will need to copy the magnet link URL and paste it on your torrent client software.
            10. -
            11. Wait for your download to finish. Depending on your internet speed and the availability of seeders, this might take some time.
            12. -
            13. Enjoy listening to Queen Discography 1967-2009 Mp3 320 Kbps on your device!
            14. -
            - -

            How to Enjoy Queen Discography 1967-2009 Mp3 320 Kbps

            - -

            Now that you have downloaded Queen Discography 1967-2009 Mp3 320 Kbps, you might be wondering how to enjoy their music to the fullest. Here are some tips that might help you:

            -
              -
            • Use a good quality headphones or speakers to listen to their songs. You will appreciate their sound quality better if you use a device that can deliver clear and crisp audio.
            • -
            • Listen to their albums in chronological order. You will be able to appreciate their musical evolution and diversity better if you listen to their albums from their debut in 1973 to their last in 2008.
            • -
            • Read the lyrics and sing along with their songs. You will be able to understand their messages and emotions better if you read the words that they wrote and sang.
            • -
            • Watch their live performances and documentaries online. You will be able to see their charisma and energy on stage better if you watch them perform live in front of thousands of fans. You will also be able to learn more about their history and achievements if you watch documentaries that feature interviews, footage, and anecdotes about them.
            • -
            • Join online communities and forums dedicated to Queen fans. You will be able to interact with other fans who share your passion and enthusiasm for Queen. You will also be able to discover new facts, trivia, opinions, and recommendations about them.
            • -
            - -

            Conclusion

            - -

            Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided.

            - -

            We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

            -

            Some Highlights of Queen Discography 1967-2009 Mp3 320 Kbps

            - -

            Queen Discography 1967-2009 Mp3 320 Kbps contains all the albums and songs that Queen released during their career. Each album and song has its own story and significance, but here are some of the highlights that you should not miss:

            -
              -
            • Queen (1973): The debut album that introduced Queen to the world. It features some of their early classics, such as "Keep Yourself Alive", "Liar", and "Seven Seas of Rhye". It also showcases their diverse influences, from hard rock to progressive rock to glam rock.
            • -
            • Sheer Heart Attack (1974): The third album that marked a breakthrough for Queen. It features their first international hit, "Killer Queen", as well as other fan favorites, such as "Brighton Rock", "Now I'm Here", and "Stone Cold Crazy". It also demonstrates their versatility and experimentation, from ballads to metal to music hall.
            • -
            • A Night at the Opera (1975): The fourth album that is widely regarded as their masterpiece. It features their most famous song, "Bohemian Rhapsody", as well as other classics, such as "You're My Best Friend", "'39", and "Love of My Life". It also showcases their ambition and creativity, from opera to folk to rockabilly.
            • -
            • News of the World (1977): The sixth album that is one of their best-selling albums. It features two of their most iconic anthems, "We Will Rock You" and "We Are the Champions", as well as other gems, such as "Spread Your Wings", "It's Late", and "My Melancholy Blues". It also reflects their adaptation and simplification, from complex arrangements to catchy hooks.
            • -
            • The Game (1980): The eighth album that is their most successful album in the US. It features two of their biggest hits, "Another One Bites the Dust" and "Crazy Little Thing Called Love", as well as other tracks, such as "Play the Game", "Save Me", and "Dragon Attack". It also marks their first use of synthesizers, from rock to funk to disco.
            • -
            • The Works (1984): The eleventh album that is one of their most underrated albums. It features one of their most popular songs, "Radio Ga Ga", as well as other singles, such as "I Want to Break Free", "It's a Hard Life", and "Hammer to Fall". It also represents their comeback and resilience, from commercial failure to critical acclaim.
            • -
            • Innuendo (1991): The fourteenth album that is their final album with Freddie Mercury. It features one of their most poignant songs, "The Show Must Go On", as well as other masterpieces, such as "Innuendo", "I'm Going Slightly Mad", and "These Are the Days of Our Lives". It also displays their courage and dignity, from illness to legacy.
            • -
            - -

            Conclusion

            - -

            Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided. You will also be able to discover some of the highlights of their discography that you should not miss.

            - -

            We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

            -

            Some Tips on How to Organize and Manage Queen Discography 1967-2009 Mp3 320 Kbps

            - -

            Queen Discography 1967-2009 Mp3 320 Kbps is a large and comprehensive collection of music that might take up a lot of space and time on your device. You might want to organize and manage it in a way that makes it easier and more convenient for you to access and enjoy. Here are some tips that might help you:

            -
              -
            • Create a separate folder for Queen Discography 1967-2009 Mp3 320 Kbps on your device. You can name it whatever you want, but make sure it is easy to find and remember.
            • -
            • Within the folder, create subfolders for each album that Queen released. You can name them according to the album title and year of release, such as "1973 - Queen" or "1980 - The Game".
            • -
            • Within each subfolder, place the Mp3 files of the songs that belong to that album. You can name them according to the track number and title, such as "01 - Keep Yourself Alive.mp3" or "11 - Bohemian Rhapsody.mp3".
            • -
            • Add album art and metadata to each Mp3 file. Album art is the image that represents the album cover. Metadata is the information that describes the file, such as artist name, album name, song title, genre, etc. You can use a software or an online tool to add album art and metadata to your Mp3 files.
            • -
            • Create playlists for your favorite songs or albums. Playlists are collections of songs that you can play in a specific order or randomly. You can create playlists based on your mood, preference, occasion, theme, etc. You can use a software or an online tool to create playlists for your Mp3 files.
            • -
            • Backup your Queen Discography 1967-2009 Mp3 320 Kbps folder regularly. Backup is the process of copying your files to another location or device for safekeeping. You might want to backup your folder in case something happens to your original device or files, such as damage, loss, theft, corruption, etc. You can use a software or an online tool to backup your folder.
            • -
            - -

            Conclusion

            - -

            Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided. You will also be able to discover some of the highlights of their discography that you should not miss. You will also be able to organize and manage their discography in a way that makes it easier and more convenient for you to access and enjoy.

            - -

            We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

            -

            Conclusion

            - -

            Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided. You will also be able to discover some of the highlights of their discography that you should not miss. You will also be able to organize and manage their discography in a way that makes it easier and more convenient for you to access and enjoy.

            - -

            We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/training/training.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/training/training.py deleted file mode 100644 index c3264fc1796aafb654ff3cc0176904aa5f02d68b..0000000000000000000000000000000000000000 --- a/spaces/diagaiwei/ir_chinese_medqa/colbert/training/training.py +++ /dev/null @@ -1,158 +0,0 @@ -import time -import torch -import random -import torch.nn as nn -import numpy as np - -from transformers import AdamW, get_linear_schedule_with_warmup -from colbert.infra import ColBERTConfig -from colbert.training.rerank_batcher import RerankBatcher - -from colbert.utils.amp import MixedPrecisionManager -from colbert.training.lazy_batcher import LazyBatcher -from colbert.parameters import DEVICE - -from colbert.modeling.colbert import ColBERT -from colbert.modeling.reranker.electra import ElectraReranker - -from colbert.utils.utils import print_message -from colbert.training.utils import print_progress, manage_checkpoints - - - -def train(config: ColBERTConfig, triples, queries=None, collection=None): - config.checkpoint = config.checkpoint or 'bert-base-uncased' - - if config.rank < 1: - config.help() - - random.seed(12345) - np.random.seed(12345) - torch.manual_seed(12345) - torch.cuda.manual_seed_all(12345) - - assert config.bsize % config.nranks == 0, (config.bsize, config.nranks) - config.bsize = config.bsize // config.nranks - - print("Using config.bsize =", config.bsize, "(per process) and config.accumsteps =", config.accumsteps) - - if collection is not None: - if config.reranker: - reader = RerankBatcher(config, triples, queries, collection, (0 if config.rank == -1 else config.rank), config.nranks) - else: - reader = LazyBatcher(config, triples, queries, collection, (0 if config.rank == -1 else config.rank), config.nranks) - else: - raise NotImplementedError() - - if not config.reranker: - colbert = ColBERT(name=config.checkpoint, colbert_config=config) - else: - colbert = ElectraReranker.from_pretrained(config.checkpoint) - - colbert = colbert.to(DEVICE) - colbert.train() - - colbert = torch.nn.parallel.DistributedDataParallel(colbert, device_ids=[config.rank], - output_device=config.rank, - find_unused_parameters=True) - - optimizer = AdamW(filter(lambda p: p.requires_grad, colbert.parameters()), lr=config.lr, eps=1e-8) - optimizer.zero_grad() - - scheduler = None - if config.warmup is not None: - print(f"#> LR will use {config.warmup} warmup steps and linear decay over {config.maxsteps} steps.") - scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.warmup, - num_training_steps=config.maxsteps) - - warmup_bert = config.warmup_bert - if warmup_bert is not None: - set_bert_grad(colbert, False) - - amp = MixedPrecisionManager(config.amp) - labels = torch.zeros(config.bsize, dtype=torch.long, device=DEVICE) - - start_time = time.time() - train_loss = None - train_loss_mu = 0.999 - - start_batch_idx = 0 - - # if config.resume: - # assert config.checkpoint is not None - # start_batch_idx = checkpoint['batch'] - - # reader.skip_to_batch(start_batch_idx, checkpoint['arguments']['bsize']) - - for batch_idx, BatchSteps in zip(range(start_batch_idx, config.maxsteps), reader): - if (warmup_bert is not None) and warmup_bert <= batch_idx: - set_bert_grad(colbert, True) - warmup_bert = None - - this_batch_loss = 0.0 - - for batch in BatchSteps: - with amp.context(): - try: - queries, passages, target_scores = batch - encoding = [queries, passages] - except: - encoding, 
target_scores = batch - encoding = [encoding.to(DEVICE)] - - scores = colbert(*encoding) - - if config.use_ib_negatives: - scores, ib_loss = scores - - scores = scores.view(-1, config.nway) - - if len(target_scores) and not config.ignore_scores: - target_scores = torch.tensor(target_scores).view(-1, config.nway).to(DEVICE) - target_scores = target_scores * config.distillation_alpha - target_scores = torch.nn.functional.log_softmax(target_scores, dim=-1) - - log_scores = torch.nn.functional.log_softmax(scores, dim=-1) - loss = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)(log_scores, target_scores) - else: - loss = nn.CrossEntropyLoss()(scores, labels[:scores.size(0)]) - - if config.use_ib_negatives: - if config.rank < 1: - print('\t\t\t\t', loss.item(), ib_loss.item()) - - loss += ib_loss - - loss = loss / config.accumsteps - - if config.rank < 1: - print_progress(scores) - - amp.backward(loss) - - this_batch_loss += loss.item() - - train_loss = this_batch_loss if train_loss is None else train_loss - train_loss = train_loss_mu * train_loss + (1 - train_loss_mu) * this_batch_loss - - amp.step(colbert, optimizer, scheduler) - - if config.rank < 1: - print_message(batch_idx, train_loss) - manage_checkpoints(config, colbert, optimizer, batch_idx+1, savepath=None) - - if config.rank < 1: - print_message("#> Done with all triples!") - ckpt_path = manage_checkpoints(config, colbert, optimizer, batch_idx+1, savepath=None, consumed_all_triples=True) - - return ckpt_path # TODO: This should validate and return the best checkpoint, not just the last one. - - - -def set_bert_grad(colbert, value): - try: - for p in colbert.bert.parameters(): - assert p.requires_grad is (not value) - p.requires_grad = value - except AttributeError: - set_bert_grad(colbert.module, value) diff --git a/spaces/digitalxingtong/Taffy-Bert-VITS2/start.bat b/spaces/digitalxingtong/Taffy-Bert-VITS2/start.bat deleted file mode 100644 index 418d21233dbf720b0dd09821904d9d6a31b123a2..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Taffy-Bert-VITS2/start.bat +++ /dev/null @@ -1,2 +0,0 @@ -set PYTHON=venv\python.exe -start cmd /k "set PYTHON=%PYTHON%" \ No newline at end of file diff --git a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/ssd_head.py b/spaces/dineshreddy/WALT/mmdet/models/dense_heads/ssd_head.py deleted file mode 100644 index 145622b64e3f0b3f7f518fc61a2a01348ebfa4f3..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/ssd_head.py +++ /dev/null @@ -1,265 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import xavier_init -from mmcv.runner import force_fp32 - -from mmdet.core import (build_anchor_generator, build_assigner, - build_bbox_coder, build_sampler, multi_apply) -from ..builder import HEADS -from ..losses import smooth_l1_loss -from .anchor_head import AnchorHead - - -# TODO: add loss evaluator for SSD -@HEADS.register_module() -class SSDHead(AnchorHead): - """SSD head used in https://arxiv.org/abs/1512.02325. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. - reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. 
It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - """ # noqa: W605 - - def __init__(self, - num_classes=80, - in_channels=(512, 1024, 512, 256, 256, 256), - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - input_size=300, - strides=[8, 16, 32, 64, 100, 300], - ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), - basesize_ratio_range=(0.1, 0.9)), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=True, - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0], - ), - reg_decoded_bbox=False, - train_cfg=None, - test_cfg=None): - super(AnchorHead, self).__init__() - self.num_classes = num_classes - self.in_channels = in_channels - self.cls_out_channels = num_classes + 1 # add background class - self.anchor_generator = build_anchor_generator(anchor_generator) - num_anchors = self.anchor_generator.num_base_anchors - - reg_convs = [] - cls_convs = [] - for i in range(len(in_channels)): - reg_convs.append( - nn.Conv2d( - in_channels[i], - num_anchors[i] * 4, - kernel_size=3, - padding=1)) - cls_convs.append( - nn.Conv2d( - in_channels[i], - num_anchors[i] * (num_classes + 1), - kernel_size=3, - padding=1)) - self.reg_convs = nn.ModuleList(reg_convs) - self.cls_convs = nn.ModuleList(cls_convs) - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.reg_decoded_bbox = reg_decoded_bbox - self.use_sigmoid_cls = False - self.cls_focal_loss = False - self.train_cfg = train_cfg - self.test_cfg = test_cfg - # set sampling=False for archor_target - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # SSD sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.fp16_enabled = False - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform', bias=0) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * 4. - """ - cls_scores = [] - bbox_preds = [] - for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, - self.cls_convs): - cls_scores.append(cls_conv(feat)) - bbox_preds.append(reg_conv(feat)) - return cls_scores, bbox_preds - - def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, - bbox_targets, bbox_weights, num_total_samples): - """Compute loss of a single image. - - Args: - cls_score (Tensor): Box scores for eachimage - Has shape (num_total_anchors, num_classes). - bbox_pred (Tensor): Box energies / deltas for each image - level with shape (num_total_anchors, 4). - anchors (Tensor): Box reference for each scale level with shape - (num_total_anchors, 4). - labels (Tensor): Labels of each anchors with shape - (num_total_anchors,). - label_weights (Tensor): Label weights of each anchor with shape - (num_total_anchors,) - bbox_targets (Tensor): BBox regression targets of each anchor wight - shape (num_total_anchors, 4). 
- bbox_weights (Tensor): BBox regression loss weights of each anchor - with shape (num_total_anchors, 4). - num_total_samples (int): If sampling, num total samples equal to - the number of total anchors; Otherwise, it is the number of - positive anchors. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - - loss_cls_all = F.cross_entropy( - cls_score, labels, reduction='none') * label_weights - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - pos_inds = ((labels >= 0) & - (labels < self.num_classes)).nonzero().reshape(-1) - neg_inds = (labels == self.num_classes).nonzero().view(-1) - - num_pos_samples = pos_inds.size(0) - num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples - if num_neg_samples > neg_inds.size(0): - num_neg_samples = neg_inds.size(0) - topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) - loss_cls_pos = loss_cls_all[pos_inds].sum() - loss_cls_neg = topk_loss_cls_neg.sum() - loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples - - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) - - loss_bbox = smooth_l1_loss( - bbox_pred, - bbox_targets, - bbox_weights, - beta=self.train_cfg.smoothl1_beta, - avg_factor=num_total_samples) - return loss_cls[None], loss_bbox - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=1, - unmap_outputs=False) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - # check NaN and Inf - assert torch.isfinite(all_cls_scores).all().item(), \ - 'classification scores become infinite or NaN!' - assert torch.isfinite(all_bbox_preds).all().item(), \ - 'bbox predications become infinite or NaN!' - - losses_cls, losses_bbox = multi_apply( - self.loss_single, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) diff --git a/spaces/dmvaldman/ICLR2023/app.py b/spaces/dmvaldman/ICLR2023/app.py deleted file mode 100644 index 851d326f9ddfb280efde5d483489fa7452bf73d9..0000000000000000000000000000000000000000 --- a/spaces/dmvaldman/ICLR2023/app.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import gradio as gr - -from paper_list import PaperList - -DESCRIPTION = '# ICLR 2023 Paper Submissions' -NOTES = ''' -- [ICLR 2023](https://openreview.net/group?id=ICLR.cc/2023/Conference) -- [List of submitted papers](https://docs.google.com/spreadsheets/d/1dQMjjetud2edTEREdLiuD4giC244lxY67ZxaL7NiMUc/edit#gid=1277917086) -''' - - -def main(): - paper_list = PaperList() - - with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - - search_box = gr.Textbox( - label='Search Title', - placeholder= - 'You can search for titles with regular expressions. e.g. (? - - - - - -
            - - - - ); -} diff --git a/spaces/dorkai/ChatUIPro/app/layout.tsx b/spaces/dorkai/ChatUIPro/app/layout.tsx deleted file mode 100644 index 7092434d43ead807142aebdfb5c925ef37b07e00..0000000000000000000000000000000000000000 --- a/spaces/dorkai/ChatUIPro/app/layout.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import { getLocaleOnServer } from '@/i18n/server' - -import './styles/globals.css' -import './styles/markdown.scss' - -const LocaleLayout = ({ - children, -}: { - children: React.ReactNode -}) => { - const locale = getLocaleOnServer() - return ( - - -
            -
            - {children} -
            -
            - - - ) -} - -export default LocaleLayout diff --git a/spaces/dorkai/singpt/modules/RWKV.py b/spaces/dorkai/singpt/modules/RWKV.py deleted file mode 100644 index 5cf8937ad37944c0cebeeb8e0891bec1474724ea..0000000000000000000000000000000000000000 --- a/spaces/dorkai/singpt/modules/RWKV.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -from pathlib import Path - -import numpy as np -from tokenizers import Tokenizer - -import modules.shared as shared -from modules.callbacks import Iteratorize - -np.set_printoptions(precision=4, suppress=True, linewidth=200) - -os.environ['RWKV_JIT_ON'] = '1' -os.environ["RWKV_CUDA_ON"] = '1' if shared.args.rwkv_cuda_on else '0' # use CUDA kernel for seq mode (much faster) - -from rwkv.model import RWKV -from rwkv.utils import PIPELINE, PIPELINE_ARGS - - -class RWKVModel: - def __init__(self): - pass - - @classmethod - def from_pretrained(self, path, dtype="fp16", device="cuda"): - tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json") - - if shared.args.rwkv_strategy is None: - model = RWKV(model=str(path), strategy=f'{device} {dtype}') - else: - model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy) - pipeline = PIPELINE(model, str(tokenizer_path)) - - result = self() - result.pipeline = pipeline - return result - - def generate(self, context="", token_count=20, temperature=1, top_p=1, top_k=50, alpha_frequency=0.1, alpha_presence=0.1, token_ban=[0], token_stop=[], callback=None): - args = PIPELINE_ARGS( - temperature = temperature, - top_p = top_p, - top_k = top_k, - alpha_frequency = alpha_frequency, # Frequency Penalty (as in GPT-3) - alpha_presence = alpha_presence, # Presence Penalty (as in GPT-3) - token_ban = token_ban, # ban the generation of some tokens - token_stop = token_stop - ) - - return context+self.pipeline.generate(context, token_count=token_count, args=args, callback=callback) - - def generate_with_streaming(self, **kwargs): - with Iteratorize(self.generate, kwargs, callback=None) as generator: - reply = kwargs['context'] - for token in generator: - reply += token - yield reply - -class RWKVTokenizer: - def __init__(self): - pass - - @classmethod - def from_pretrained(self, path): - tokenizer_path = path / "20B_tokenizer.json" - tokenizer = Tokenizer.from_file(str(tokenizer_path)) - - result = self() - result.tokenizer = tokenizer - return result - - def encode(self, prompt): - return self.tokenizer.encode(prompt).ids - - def decode(self, ids): - return self.tokenizer.decode(ids) diff --git a/spaces/dpe1/can_this_pokemon_evolve/whatever stuff is neede/app.py b/spaces/dpe1/can_this_pokemon_evolve/whatever stuff is neede/app.py deleted file mode 100644 index 8a7e338bb1eaa08450f7f79a326a2f5c8bb27629..0000000000000000000000000000000000000000 --- a/spaces/dpe1/can_this_pokemon_evolve/whatever stuff is neede/app.py +++ /dev/null @@ -1,29 +0,0 @@ -import wahtever -import gradio as gr -from gradio.components import Textbox -def chungu(iasoipa): - if iasoipa!='': print(iasoipa) - return wahtever.isisisisis(iasoipa) -demo = gr.Interface( - chungu, - [ -Textbox(label="enter pokemons name (it uses fuzzy search)"), - ], - [ - Textbox(label="can it evolve????"), - ], - live=True, - title='Can this Pokemon evolve?', - description="This shows you if a Pokemon can evolve and how. 
Unlike using Wiki, this doesn't spoil what it evolves into and at what level.\nI also used real time fuzzy search so that you don't have to enter exact name of a Pokemon.", - article="""NOTE I used a hole Bunch of data from https://pokemondb.net/evolution/level. - -Most pokemon evolve upon reaching a certain level. A few Pokémon also need to meet a condition such as gender to evolve; these are also listed. - -To evolve a Pokémon via stone, simply select the item to use in your bag, then select the Pokémon to apply it to - they will evolve straight away. - -A Pokémon's Friendship (aka happiness) can be increased in many ways. The simplest is walking around with the Pokémon in your party, battling and growing them, but not letting them faint. Massages and haircuts increase Friendship by a larger amount. Giving them a Soothe Bell to hold increases the effect of the other methods. See more in the glossary or this PokéBase question. - -For special conditions, Once the condition is met, the Pokémon needs to level up once more in order to evolve.""" -) - -demo.launch() \ No newline at end of file diff --git a/spaces/duycse1603/math2tex/ScanSSD/layers/modules/__init__.py b/spaces/duycse1603/math2tex/ScanSSD/layers/modules/__init__.py deleted file mode 100644 index 529c3c7956e9c576bd41122cd85fbe0cfaa1478a..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/ScanSSD/layers/modules/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .l2norm import L2Norm -from .multibox_loss import MultiBoxLoss -from .focal_loss import FocalLoss - -__all__ = ['L2Norm', 'MultiBoxLoss', 'FocalLoss'] diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/mandarin.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/mandarin.py deleted file mode 100644 index 093d8826809aa2681f6088174427337a59e0c882..0000000000000000000000000000000000000000 --- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/mandarin.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', 
'→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), - ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = 
number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text \ No newline at end of file diff --git a/spaces/emc348/faces-through-time/torch_utils/ops/upfirdn2d.py b/spaces/emc348/faces-through-time/torch_utils/ops/upfirdn2d.py deleted file mode 100644 index ceeac2b9834e33b7c601c28bf27f32aa91c69256..0000000000000000000000000000000000000000 --- a/spaces/emc348/faces-through-time/torch_utils/ops/upfirdn2d.py +++ /dev/null @@ -1,384 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient resampling of 2D images.""" - -import os -import warnings -import numpy as np -import torch -import traceback - -from .. import custom_ops -from .. import misc -from . import conv2d_gradfix - -#---------------------------------------------------------------------------- - -_inited = False -_plugin = None - -def _init(): - global _inited, _plugin - if not _inited: - sources = ['upfirdn2d.cpp', 'upfirdn2d.cu'] - sources = [os.path.join(os.path.dirname(__file__), s) for s in sources] - try: - _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math']) - except: - warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. 
Details:\n\n' + traceback.format_exc()) - return _plugin is not None - -def _parse_scaling(scaling): - if isinstance(scaling, int): - scaling = [scaling, scaling] - assert isinstance(scaling, (list, tuple)) - assert all(isinstance(x, int) for x in scaling) - sx, sy = scaling - assert sx >= 1 and sy >= 1 - return sx, sy - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, int) for x in padding) - if len(padding) == 2: - padx, pady = padding - padding = [padx, padx, pady, pady] - padx0, padx1, pady0, pady1 = padding - return padx0, padx1, pady0, pady1 - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - fw = f.shape[-1] - fh = f.shape[0] - with misc.suppress_tracer_warnings(): - fw = int(fw) - fh = int(fh) - misc.assert_shape(f, [fh, fw][:f.ndim]) - assert fw >= 1 and fh >= 1 - return fw, fh - -#---------------------------------------------------------------------------- - -def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): - r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. - - Args: - f: Torch tensor, numpy array, or python list of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), - `[]` (impulse), or - `None` (identity). - device: Result device (default: cpu). - normalize: Normalize the filter so that it retains the magnitude - for constant input signal (DC)? (default: True). - flip_filter: Flip the filter? (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - separable: Return a separable filter? (default: select automatically). - - Returns: - Float32 tensor of the shape - `[filter_height, filter_width]` (non-separable) or - `[filter_taps]` (separable). - """ - # Validate. - if f is None: - f = 1 - f = torch.as_tensor(f, dtype=torch.float32) - assert f.ndim in [0, 1, 2] - assert f.numel() > 0 - if f.ndim == 0: - f = f[np.newaxis] - - # Separable? - if separable is None: - separable = (f.ndim == 1 and f.numel() >= 8) - if f.ndim == 1 and not separable: - f = f.ger(f) - assert f.ndim == (1 if separable else 2) - - # Apply normalize, flip, gain, and device. - if normalize: - f /= f.sum() - if flip_filter: - f = f.flip(list(range(f.ndim))) - f = f * (gain ** (f.ndim / 2)) - f = f.to(device=device) - return f - -#---------------------------------------------------------------------------- - -def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Pad, upsample, filter, and downsample a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 2. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 4. Downsample the image by keeping every Nth pixel (`down`). - - This sequence of operations bears close resemblance to scipy.signal.upfirdn(). - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. 
- - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) - return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): - """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. - """ - # Validate arguments. - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - assert f.dtype == torch.float32 and not f.requires_grad - batch_size, num_channels, in_height, in_width = x.shape - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Upsample by inserting zeros. - x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) - x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) - x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) - - # Pad or crop. - x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) - x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] - - # Setup filter. - f = f * (gain ** (f.ndim / 2)) - f = f.to(x.dtype) - if not flip_filter: - f = f.flip(list(range(f.ndim))) - - # Convolve with the filter. - f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) - if f.ndim == 4: - x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) - else: - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) - - # Downsample by throwing away pixels. - x = x[:, :, ::downy, ::downx] - return x - -#---------------------------------------------------------------------------- - -_upfirdn2d_cuda_cache = dict() - -def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): - """Fast CUDA implementation of `upfirdn2d()` using custom ops. - """ - # Parse arguments. - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Lookup from cache. 
- key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - if key in _upfirdn2d_cuda_cache: - return _upfirdn2d_cuda_cache[key] - - # Forward op. - class Upfirdn2dCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, f): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - y = x - if f.ndim == 2: - y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - else: - y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain)) - y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain)) - ctx.save_for_backward(f) - ctx.x_shape = x.shape - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - f, = ctx.saved_tensors - _, _, ih, iw = ctx.x_shape - _, _, oh, ow = dy.shape - fw, fh = _get_filter_size(f) - p = [ - fw - padx0 - 1, - iw * upx - ow * downx + padx0 - upx + 1, - fh - pady0 - 1, - ih * upy - oh * downy + pady0 - upy + 1, - ] - dx = None - df = None - - if ctx.needs_input_grad[0]: - dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) - - assert not ctx.needs_input_grad[1] - return dx, df - - # Add to cache. - _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda - return Upfirdn2dCuda - -#---------------------------------------------------------------------------- - -def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Filter a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape matches the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + fw // 2, - padx1 + (fw - 1) // 2, - pady0 + fh // 2, - pady1 + (fh - 1) // 2, - ] - return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Upsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a multiple of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. 
- f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - upx, upy = _parse_scaling(up) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw + upx - 1) // 2, - padx1 + (fw - upx) // 2, - pady0 + (fh + upy - 1) // 2, - pady1 + (fh - upy) // 2, - ] - return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) - -#---------------------------------------------------------------------------- - -def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Downsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a fraction of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the input. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
- """ - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw - downx + 1) // 2, - padx1 + (fw - downx) // 2, - pady0 + (fh - downy + 1) // 2, - pady1 + (fh - downy) // 2, - ] - return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- diff --git a/spaces/ennov8ion/dreamlike-models/index.html b/spaces/ennov8ion/dreamlike-models/index.html deleted file mode 100644 index 40b11abfac0f6f7c145d1d349a978f07587cf433..0000000000000000000000000000000000000000 --- a/spaces/ennov8ion/dreamlike-models/index.html +++ /dev/null @@ -1,305 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - {"name": "Deliberate", "url": "Masagin/Deliberate"}, - {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "Dreamlike Diffusion", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Dreamlike Photoreal", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "Dreamshaper", "url": "Lykon/DreamShaper"}, - {"name": "Lyriel 1.3", "url": "sakistriker/Lyriel_V1.3"}, - {"name": "Never Ending Dream 2", "url": "luongphamit/NeverEnding-Dream2"}, - {"name": "Protogen X 5.8", "url": "darkstorm2150/Protogen_x5.8_Official_Release"}, - {"name": "❤ ART MODELS ==========", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Alice in Diffusion Land", "url": "Guizmus/SDArt_AliceInDiffusionLand"}, - {"name": "Alt Clip", "url": "BAAI/AltCLIP"}, - {"name": "Anything Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Chaos and Order", "url": "Guizmus/SDArt_ChaosAndOrder768"}, - {"name": "Chilloutclara", "url": "Fred99774/chilloutvlara"}, - {"name": "Comic Diffusion", "url": "ogkalu/Comic-Diffusion"}, - {"name": "Cosmic Horros 768", "url": "Guizmus/SDArt_cosmichorrors768"}, - {"name": "Cosmic Horros", "url": "Guizmus/SDArt_cosmichorrors"}, - {"name": "DGSpitzer", "url": "DGSpitzer/DGSpitzer-Art-Diffusion"}, - {"name": "Dungeons and Diffusion", "url": "0xJustin/Dungeons-and-Diffusion"}, - {"name": "Elden Ring", "url": "nitrosocke/elden-ring-diffusion"}, - {"name": "Epic Diffusion 1.1", "url": "johnslegers/epic-diffusion-v1.1"}, - {"name": "Epic Diffusion", "url": "johnslegers/epic-diffusion"}, - {"name": "EpicMix Realism", "url": "Duskfallcrew/EpicMix_Realism"}, - {"name": "Fantasy Mix", "url": "theintuitiveye/FantasyMix"}, - {"name": "Girl New 1", "url": "Fred99774/girlnew1"}, - {"name": "Lit 6B", "url": "hakurei/lit-6B"}, - {"name": "Luna Diffusion", "url": "proximasanfinetuning/luna-diffusion"}, - {"name": "Midjourney 4.0", "url": "flax/midjourney-v4-diffusion"}, - {"name": "Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Mo-Di Diffusion", "url": "nitrosocke/mo-di-diffusion"}, - {"name": "Nitro Diffusion", "url": "nitrosocke/Nitro-Diffusion"}, - {"name": "Openjourney V2", "url": "prompthero/openjourney-v2"}, - {"name": "Openjourney", "url": "prompthero/openjourney"}, - {"name": "Seek Art Mega", "url": "coreco/seek.art_MEGA"}, - {"name": "Something", "url": "Guizmus/SDArt_something"}, - {"name": "Spider Verse diffusion", "url": "nitrosocke/spider-verse-diffusion"}, - {"name": "Vintedois 1.0", "url": "22h/vintedois-diffusion-v0-1"}, - {"name": "Vintedois 2.0", "url": "22h/vintedois-diffusion-v0-2"}, - {"name": "❤ ART STYLES ==========", "url": 
"joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Balloon Art", "url": "Fictiverse/Stable_Diffusion_BalloonArt_Model"}, - {"name": "Double Exposure Diffusion", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Fluid Art", "url": "Fictiverse/Stable_Diffusion_FluidArt_Model"}, - {"name": "GTA5 Artwork Diffusion", "url": "ItsJayQz/GTA5_Artwork_Diffusion"}, - {"name": "Marvel WhatIf Diffusion", "url": "ItsJayQz/Marvel_WhatIf_Diffusion"}, - {"name": "Naruto Diffuser", "url": "lambdalabs/sd-naruto-diffusers"}, - {"name": "Papercut", "url": "Fictiverse/Stable_Diffusion_PaperCut_Model"}, - {"name": "Pokemon Diffuser", "url": "lambdalabs/sd-pokemon-diffusers"}, - {"name": "Synthwave Punk 2", "url": "ItsJayQz/SynthwavePunk-v2"}, - {"name": "Valorant Diffusion", "url": "ItsJayQz/Valorant_Diffusion"}, - {"name": "Van Gogh Diffusion", "url": "dallinmackay/Van-Gogh-diffusion"}, - {"name": "Vectorartz Diffusion", "url": "coder119/Vectorartz_Diffusion"}, - {"name": "VoxelArt", "url": "Fictiverse/Stable_Diffusion_VoxelArt_Model"}, - {"name": "❤ ANIME MODELS ==========", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "7 Pa", "url": "AIARTCHAN/7pa"}, - {"name": "A Certain Model", "url": "JosephusCheung/ACertainModel"}, - {"name": "A Certain Thing", "url": "JosephusCheung/ACertainThing"}, - {"name": "A Certainity", "url": "JosephusCheung/ACertainty"}, - {"name": "Abyss Hell Hero", "url": "AIARTCHAN/AbyssHellHero"}, - {"name": "Abyss Maple 3", "url": "AIARTCHAN/AbyssMapleVer3"}, - {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"}, - {"name": "Abyss Orange Mix 4", "url": "sakistriker/AbyssOrangeMix3"}, - {"name": "Abyss Orange Mix", "url": "WarriorMama777/AbyssOrangeMix"}, - {"name": "AbyssHell 3", "url": "AIARTCHAN/AbyssHellVer3"}, - {"name": "All 526 Animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Anidosmix 3", "url": "AIARTCHAN/anidosmixV2"}, - {"name": "Anime Kawai Diffusion", "url": "Ojimi/anime-kawai-diffusion"}, - {"name": "Anireal 3D V2", "url": "circulus/sd-anireal-3d-v2"}, - {"name": "AnyLORA", "url": "kubanemil/AnyLORA"}, - {"name": "Anything 2.1", "url": "swl-models/anything-v2.1"}, - {"name": "Anything 3.0 Light", "url": "mm00/anything-v3.0-light"}, - {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"}, - {"name": "Anything 3.1", "url": "cag/anything-v3-1"}, - {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"}, - {"name": "Anything 4.0", "url": "andite/anything-v4.0"}, - {"name": "Anything 5", "url": "sakistriker/Anything_V5_PrtRE"}, - {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"}, - {"name": "Anything Else 4", "url": "stablediffusionapi/anythingelse-v4"}, - {"name": "Anything Else 5", "url": "stablediffusionapi/anything-v5"}, - {"name": "Arcane Diffusion", "url": "nitrosocke/Arcane-Diffusion"}, - {"name": "Archer Diffusion", "url": "nitrosocke/archer-diffusion"}, - {"name": "Asian Mix", "url": "D1b4l4p/AsianMix"}, - {"name": "Blood Orange Mix", "url": "WarriorMama777/BloodOrangeMix"}, - {"name": "CamelliaMix 2.5D","url": "stablediffusionapi/camelliamix25d"}, - {"name": "CamelliaMix Line","url": "stablediffusionapi/camelliamixline"}, - {"name": "CamelliaMix","url": "Powidl43/CamelliaMix"}, - {"name": "Cetusmix", "url": "stablediffusionapi/cetusmix"}, - {"name": "Chik Mix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chikmix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"}, - {"name": "Classic Anime", "url": 
"nitrosocke/classic-anim-diffusion"}, - {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"}, - {"name": "Cosmic Babes", "url": "stablediffusionapi/cosmic-babes"}, - {"name": "Counterfeit 1.0", "url": "gsdf/counterfeit-v1.0"}, - {"name": "Counterfeit 2", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"}, - {"name": "CuteSexyRobutts", "url": "andite/cutesexyrobutts-diffusion"}, - {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"}, - {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"}, - {"name": "Dash Sushi 25d", "url": "stablediffusionapi/dark-sushi-25d"}, - {"name": "DucHaiten Anime", "url": "DucHaiten/DucHaitenAnime"}, - {"name": "Eerie Orange Mix", "url": "WarriorMama777/EerieOrangeMix"}, - {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"}, - {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"}, - {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"}, - {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"}, - {"name": "Guweiz Diffusion", "url": "andite/guweiz-diffusion"}, - {"name": "Hiten Diffusion", "url": "andite/hiten-diffusion"}, - {"name": "Icomix 2", "url": "stablediffusionapi/icomix-2"}, - {"name": "InkPunk Diffusion", "url": "Envvi/Inkpunk-Diffusion"}, - {"name": "Mama Orange Mixs", "url": "WarriorMama777/OrangeMixs"}, - {"name": "Mashuu Diffusion", "url": "andite/mashuu-diffusion"}, - {"name": "Meainamis 8", "url": "sakistriker/MeinaMix_V8"}, - {"name": "Meina Alter", "url": "stablediffusionapi/meinaalter"}, - {"name": "Meina Pastel", "url": "stablediffusionapi/meinapastel"}, - {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"}, - {"name": "Mignon Diffusion", "url": "andite/mignon-diffusion"}, - {"name": "MikaPikazo Diffusion", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mikapikazo", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mix Pro V4", "url": "AIARTCHAN/MIX-Pro-V4"}, - {"name": "NeverEnding-Dream", "url": "Lykon/NeverEnding-Dream"}, - {"name": "Niji V5 Style 1", "url": "sakistriker/NijiV5style_V1"}, - {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"}, - {"name": "OpenNiji", "url": "Korakoe/OpenNiji"}, - {"name": "Pastel Mix", "url": "andite/pastel-mix"}, - {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"}, - {"name": "Piromizu Diffusion", "url": "andite/piromizu-diffusion"}, - {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"}, - {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"}, - {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"}, - {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"}, - {"name": "Rev Animated", "url": "coreml/coreml-ReV-Animated"}, - {"name": "Rev Animated", "url": "LottePeisch/RevAnimated-Diffusers"}, - {"name": "Something V 2.2","url": "NoCrypt/SomethingV2_2"}, - {"name": "Something V2","url": "NoCrypt/SomethingV2"}, - {"name": "Three Delicacy", "url": "stablediffusionapi/three-delicacy"}, - {"name": "Three Delicacy wonto", "url": "stablediffusionapi/three-delicacy-wonto"}, - {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"}, - {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"}, - {"name": "❤ REALISTIC PHOTO MODELS ==========", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "AmiIReal", "url": "stablediffusionapi/amireal"}, - 
{"name": "Analog Diffusion", "url": "wavymulder/Analog-Diffusion"}, - {"name": "Circulus 2.8", "url": "circulus/sd-photoreal-v2.8"}, - {"name": "Circulus Photoreal V2", "url": "circulus/sd-photoreal-real-v2"}, - {"name": "Claudfuen 1", "url": "claudfuen/photorealistic-fuen-v1"}, - {"name": "Collage Diffusion", "url": "wavymulder/collage-diffusion"}, - {"name": "Cyberrealistic", "url": "stablediffusionapi/cyberrealistic"}, - {"name": "Dreamful 2", "url": "Hius/DreamFul-V2"}, - {"name": "GakkiMix768", "url": "Sa1i/gakki-mix-768"}, - {"name": "Grimoeresigils", "url": "ECarbenia/grimoiresigils"}, - {"name": "HARDBlend", "url": "theintuitiveye/HARDblend"}, - {"name": "HassanBlend 1.4", "url": "hassanblend/hassanblend1.4"}, - {"name": "HassanBlend 1.5.1.2", "url": "hassanblend/HassanBlend1.5.1.2"}, - {"name": "Lomo Diffusion", "url": "wavymulder/lomo-diffusion"}, - {"name": "Model Shoot", "url": "wavymulder/modelshoot"}, - {"name": "Portrait Plus", "url": "wavymulder/portraitplus"}, - {"name": "QuinceMix", "url": "Hemlok/QuinceMix"}, - {"name": "Realistic Vision 1.4", "url": "SG161222/Realistic_Vision_V1.4"}, - {"name": "The Ally", "url": "stablediffusionapi/the-ally"}, - {"name": "Timeless Diffusion", "url": "wavymulder/timeless-diffusion"}, - {"name": "UltraSkin", "url": "VegaKH/Ultraskin"}, - {"name": "Wavyfusion", "url": "wavymulder/wavyfusion"}, - {"name": "❤ SEMI-REALISTIC MODELS ==========", "url": "stablediffusionapi/all-526"}, - {"name": "All 526", "url": "stablediffusionapi/all-526"}, - {"name": "All 526 animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Circulus Semi Real 2", "url": "circulus/sd-photoreal-semi-v2"}, - {"name": "Semi Real Mix", "url": "robotjung/SemiRealMix"}, - {"name": "SpyBG", "url": "stablediffusionapi/spybg"}, - {"name": "❤ STABLE DIFFUSION MODELS ==========", "url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 1.4","url": "CompVis/stable-diffusion-v1-4"}, - {"name": "Stable Diffusion 1.5","url": "runwayml/stable-diffusion-v1-5"}, - {"name": "Stable Diffusion 2.1","url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 2.1 Base","url": "stabilityai/stable-diffusion-2-1-base"}, - {"name": "Stable Diffusion 2.1 Unclip","url": "stabilityai/stable-diffusion-2-1-unclip"}, - {"name": "❤ SCI FI MODELS ==========", "url": "nitrosocke/Future-Diffusion"}, - {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"}, - {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"}, - {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"}, - {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"}, - {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"}, - {"name": "❤ 3D ART MODELS ==========", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten Art", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten ClassicAnime", "url": "DucHaiten/DH_ClassicAnime"}, - {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"}, - {"name": "DucHaiten Journey", "url": "DucHaiten/DucHaitenJourney"}, - {"name": "DucHaiten StyleLikeMe", "url": "DucHaiten/DucHaiten-StyleLikeMe"}, - {"name": "DucHaiten SuperCute", "url": "DucHaiten/DucHaitenSuperCute"}, - {"name": "Redshift Diffusion 768", "url": "nitrosocke/redshift-diffusion-768"}, - {"name": "Redshift Diffusion", "url": "nitrosocke/redshift-diffusion"}, -] - -current_model = models[0] - -text_gen = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link") - -models2 = [] -for 
model in models: - model_url = f"models/{model['url']}" - loaded_model = gr.Interface.load(model_url, live=True, preprocess=True) - models2.append(loaded_model) - - -def text_it(inputs, text_gen=text_gen): - return text_gen(inputs) - - -def set_model(current_model_index): - global current_model - current_model = models[current_model_index] - return gr.update(label=f"{current_model['name']}") - - -def send_it(inputs, model_choice): - proc = models2[model_choice] - return proc(inputs) - - -css = """""" - -with gr.Blocks(css=css) as myface: - gr.HTML( - """ - - - - - - - - - - - - - - - -""" - ) - - with gr.Row(): - with gr.Row(): - input_text = gr.Textbox(label="Prompt idea", lines=1) - # Model selection dropdown - model_name1 = gr.Dropdown( - label="Choose Model", - choices=[m["name"] for m in models], - type="index", - value=current_model["name"], - interactive=True, - ) - with gr.Row(): - see_prompts = gr.Button("Generate Prompts") - run = gr.Button("Generate Images", variant="primary") - with gr.Tab("Main"): - with gr.Row(): - output1 = gr.Image(label=f"{current_model['name']}") - output2 = gr.Image(label=f"{current_model['name']}") - output3 = gr.Image(label=f"{current_model['name']}") - output4 = gr.Image(label=f"{current_model['name']}") - with gr.Row(): - magic1 = gr.Textbox(lines=4) - magic2 = gr.Textbox(lines=4) - magic3 = gr.Textbox(lines=4) - magic4 = gr.Textbox(lines=4) - - with gr.Row(): - output5 = gr.Image(label=f"{current_model['name']}") - output6 = gr.Image(label=f"{current_model['name']}") - output7 = gr.Image(label=f"{current_model['name']}") - output8 = gr.Image(label=f"{current_model['name']}") - with gr.Row(): - magic5 = gr.Textbox(lines=4) - magic6 = gr.Textbox(lines=4) - magic7 = gr.Textbox(lines=4) - magic8 = gr.Textbox(lines=4) - - model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6, output7, output8]) - - run.click(send_it, inputs=[magic1, model_name1], outputs=[output1]) - run.click(send_it, inputs=[magic2, model_name1], outputs=[output2]) - run.click(send_it, inputs=[magic3, model_name1], outputs=[output3]) - run.click(send_it, inputs=[magic4, model_name1], outputs=[output4]) - run.click(send_it, inputs=[magic5, model_name1], outputs=[output5]) - run.click(send_it, inputs=[magic6, model_name1], outputs=[output6]) - run.click(send_it, inputs=[magic7, model_name1], outputs=[output7]) - run.click(send_it, inputs=[magic8, model_name1], outputs=[output8]) - - see_prompts.click(text_it, inputs=[input_text], outputs=[magic1]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic2]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic3]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic4]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic5]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic6]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic7]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic8]) - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff --git a/spaces/facebook/StyleNeRF/gui_utils/imgui_utils.py b/spaces/facebook/StyleNeRF/gui_utils/imgui_utils.py deleted file mode 100644 index 333024bd6999bf2b18a5cb96766c4da3798666a2..0000000000000000000000000000000000000000 --- a/spaces/facebook/StyleNeRF/gui_utils/imgui_utils.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import contextlib -import imgui - -#---------------------------------------------------------------------------- - -def set_default_style(color_scheme='dark', spacing=9, indent=23, scrollbar=27): - s = imgui.get_style() - s.window_padding = [spacing, spacing] - s.item_spacing = [spacing, spacing] - s.item_inner_spacing = [spacing, spacing] - s.columns_min_spacing = spacing - s.indent_spacing = indent - s.scrollbar_size = scrollbar - s.frame_padding = [4, 3] - s.window_border_size = 1 - s.child_border_size = 1 - s.popup_border_size = 1 - s.frame_border_size = 1 - s.window_rounding = 0 - s.child_rounding = 0 - s.popup_rounding = 3 - s.frame_rounding = 3 - s.scrollbar_rounding = 3 - s.grab_rounding = 3 - - getattr(imgui, f'style_colors_{color_scheme}')(s) - c0 = s.colors[imgui.COLOR_MENUBAR_BACKGROUND] - c1 = s.colors[imgui.COLOR_FRAME_BACKGROUND] - s.colors[imgui.COLOR_POPUP_BACKGROUND] = [x * 0.7 + y * 0.3 for x, y in zip(c0, c1)][:3] + [1] - -#---------------------------------------------------------------------------- - -@contextlib.contextmanager -def grayed_out(cond=True): - if cond: - s = imgui.get_style() - text = s.colors[imgui.COLOR_TEXT_DISABLED] - grab = s.colors[imgui.COLOR_SCROLLBAR_GRAB] - back = s.colors[imgui.COLOR_MENUBAR_BACKGROUND] - imgui.push_style_color(imgui.COLOR_TEXT, *text) - imgui.push_style_color(imgui.COLOR_CHECK_MARK, *grab) - imgui.push_style_color(imgui.COLOR_SLIDER_GRAB, *grab) - imgui.push_style_color(imgui.COLOR_SLIDER_GRAB_ACTIVE, *grab) - imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND, *back) - imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_HOVERED, *back) - imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_ACTIVE, *back) - imgui.push_style_color(imgui.COLOR_BUTTON, *back) - imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, *back) - imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, *back) - imgui.push_style_color(imgui.COLOR_HEADER, *back) - imgui.push_style_color(imgui.COLOR_HEADER_HOVERED, *back) - imgui.push_style_color(imgui.COLOR_HEADER_ACTIVE, *back) - imgui.push_style_color(imgui.COLOR_POPUP_BACKGROUND, *back) - yield - imgui.pop_style_color(14) - else: - yield - -#---------------------------------------------------------------------------- - -@contextlib.contextmanager -def item_width(width=None): - if width is not None: - imgui.push_item_width(width) - yield - imgui.pop_item_width() - else: - yield - -#---------------------------------------------------------------------------- - -def scoped_by_object_id(method): - def decorator(self, *args, **kwargs): - imgui.push_id(str(id(self))) - res = method(self, *args, **kwargs) - imgui.pop_id() - return res - return decorator - -#---------------------------------------------------------------------------- - -def button(label, width=0, enabled=True): - with grayed_out(not enabled): - clicked = imgui.button(label, width=width) - clicked = clicked and enabled - return clicked - -#---------------------------------------------------------------------------- - -def collapsing_header(text, visible=None, flags=0, default=False, enabled=True, show=True): - expanded = False - if show: - if default: - flags |= imgui.TREE_NODE_DEFAULT_OPEN 
- if not enabled: - flags |= imgui.TREE_NODE_LEAF - with grayed_out(not enabled): - expanded, visible = imgui.collapsing_header(text, visible=visible, flags=flags) - expanded = expanded and enabled - return expanded, visible - -#---------------------------------------------------------------------------- - -def popup_button(label, width=0, enabled=True): - if button(label, width, enabled): - imgui.open_popup(label) - opened = imgui.begin_popup(label) - return opened - -#---------------------------------------------------------------------------- - -def input_text(label, value, buffer_length, flags, width=None, help_text=''): - old_value = value - color = list(imgui.get_style().colors[imgui.COLOR_TEXT]) - if value == '': - color[-1] *= 0.5 - with item_width(width): - imgui.push_style_color(imgui.COLOR_TEXT, *color) - value = value if value != '' else help_text - changed, value = imgui.input_text(label, value, buffer_length, flags) - value = value if value != help_text else '' - imgui.pop_style_color(1) - if not flags & imgui.INPUT_TEXT_ENTER_RETURNS_TRUE: - changed = (value != old_value) - return changed, value - -#---------------------------------------------------------------------------- - -def drag_previous_control(enabled=True): - dragging = False - dx = 0 - dy = 0 - if imgui.begin_drag_drop_source(imgui.DRAG_DROP_SOURCE_NO_PREVIEW_TOOLTIP): - if enabled: - dragging = True - dx, dy = imgui.get_mouse_drag_delta() - imgui.reset_mouse_drag_delta() - imgui.end_drag_drop_source() - return dragging, dx, dy - -#---------------------------------------------------------------------------- - -def drag_button(label, width=0, enabled=True): - clicked = button(label, width=width, enabled=enabled) - dragging, dx, dy = drag_previous_control(enabled=enabled) - return clicked, dragging, dx, dy - -#---------------------------------------------------------------------------- - -def drag_hidden_window(label, x, y, width, height, enabled=True): - imgui.push_style_color(imgui.COLOR_WINDOW_BACKGROUND, 0, 0, 0, 0) - imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0) - imgui.set_next_window_position(x, y) - imgui.set_next_window_size(width, height) - imgui.begin(label, closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE)) - dragging, dx, dy = drag_previous_control(enabled=enabled) - imgui.end() - imgui.pop_style_color(2) - return dragging, dx, dy - -#---------------------------------------------------------------------------- diff --git a/spaces/facebook/StyleNeRF/viz/camera_widget.py b/spaces/facebook/StyleNeRF/viz/camera_widget.py deleted file mode 100644 index e0780da2b609f280742fe0d7192a22aeb99dce40..0000000000000000000000000000000000000000 --- a/spaces/facebook/StyleNeRF/viz/camera_widget.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved - - -import imgui -import dnnlib -from gui_utils import imgui_utils, imgui_window - - -class CameraWidget: - def __init__(self, viz): - self.viz = viz - self.camera_kwargs = dnnlib.EasyDict(yaw=0, pitch=0, fov=12, anim=False, speed=0.25) - self.camera_mode = False - self.output_nerf = False - - def set_camera(self, dv, du): - viz = self.viz - du, dv = -du / viz.font_size * 5e-2, -dv / viz.font_size * 5e-2 - if ((self.camera_kwargs.yaw + du) <= 1 and (self.camera_kwargs.yaw + du) >= -1 and - (self.camera_kwargs.pitch + dv) <= 1 and (self.camera_kwargs.pitch + dv) >=-1): - self.camera_kwargs.yaw += du - self.camera_kwargs.pitch += dv - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - if show: - imgui.text('Camera') - imgui.same_line(viz.label_w) - - _clicked, self.camera_mode = imgui.checkbox('Control viewpoint##enable', self.camera_mode) - imgui.same_line() - _clicked, self.output_nerf = imgui.checkbox('NeRF output##enable', self.output_nerf) - - viz.args.camera = (self.camera_kwargs.yaw, self.camera_kwargs.pitch, self.camera_kwargs.fov) - viz.args.output_lowres = self.output_nerf \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Bitdefender Internet Security 2019 Crack With Activation Key Free Download.md b/spaces/falterWliame/Face_Mask_Detection/Bitdefender Internet Security 2019 Crack With Activation Key Free Download.md deleted file mode 100644 index adb64a78e2e99d1276dc6971299e98109206e1b0..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Bitdefender Internet Security 2019 Crack With Activation Key Free Download.md +++ /dev/null @@ -1,11 +0,0 @@ -
            -

            Bitdefender Total Security comes with multi-layered security, and anti-malware protection ensures protection for all your browsing, including your online shopping and banking, and can filter out inappropriate content.

            -

            Bitdefender Internet Security 2019 Crack With Activation Key Free Download


            Download ::: https://urlca.com/2uDdBW



            -

            Bitdefender Internet Security 2019 crack includes Antivirus, Antispyware, Firewall, Bitdefender Internet Security 2019 Crack With Activation Key Free Download. Also, spam, phishing, and malware protection. Receive notifications as soon as threats are discovered, and have more control over what you want to share with friends.

            -

            Bitdefender Internet Security 2019 Crack With Activation Key Free Download, Bitdefender. Free version is really a malware and virus scanner for any computer running Microsoft Windows. This program relies on the “light” option, so it can perform the best possible scan on your PC.

            -

            Bitdefender Internet Security 2019 Crack + Activation Key Free Download Bitdefender Internet Security 2023 Crack + Activation Key Free Download This software is a legitimate and valid program, designed and created by Bitdefender. So, it is a kind of a turnkey solution which will give you complete and 100% protection.

            -

            The Setup Information page displays the current version of the software and whether the installation is protected. The Download button lets you select the desired version. In the following window, all the system and environment requirements are checked. Then, a summary of the license agreement and the free time of trial is displayed.

            -

            -

            Filter out inappropriate content Stop unwanted apps Monitor web browsing Monitor web searches Block malicious websites Block harmful apps Log in remotely to manage your child’s Internet safety Parental Control for Windows is also available for macOS, iOS and Android Block harmful websites on the Internet Get double the security for 2 devices Download Bitdefender Internet Security 2019 for Mac Parental Control for Mac has been updated to include the following features: Filter out inappropriate content Stop unwanted apps Monitor web browsing Monitor web searches Block malicious websites Block harmful apps Log in remotely to manage your child’s Internet safety Parental Control for Mac is also available for iOS and Android Block harmful websites on the Internet Get double the security for 2 devices Get more Internet security with Bitdefender Total Security for Mac Bitdefender Internet Security 2019 Crack with Activation Key Free Download

            Download Bitdefender Total Security 2019 Crack With Activation Key Free Download

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Que Le Diable Nous Emporte Torrent Gratuit.md b/spaces/falterWliame/Face_Mask_Detection/Que Le Diable Nous Emporte Torrent Gratuit.md deleted file mode 100644 index 7981a685f6f51b55e6ed30366e30f9265f5839df..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Que Le Diable Nous Emporte Torrent Gratuit.md +++ /dev/null @@ -1,7 +0,0 @@ - -

            CyberTrekker wp download 2016 1e56 ipfilter ferrari
            Barrett rj 2014 ncv2014 full psp iso audio en
            SHA256 css media queries 1.2.1 Janemario torrent 2013
            WOW 2013 FULL IL GUN DEllaVERDE della verde degli uccelli Il
            LOCK KEYGEN Para Xbox360 2008 gjurungk
            KUAR DIALOGUE DECOMPILATIONS
            TAYS DE TAYAS GARCIOSOBI CNY V3 ZAr CHINA

            -

            Que le Diable nous emporte Torrent Gratuit


            DOWNLOAD 🆗 https://urlca.com/2uDcoF



            -

            dan vodafone app nawabnama video mp4 hd
            Riesenznehmung 2017 torrent download
            Arcsoft Music Converter Pro 2.0.9.945 Exe
            Space Planet V2.0.1.48 DLCs Win32
            rar the last of us 2 xbox one
            XDC 2017 игра 1 листа game gold serial
            Forge Of War Season Three v1.5.3 KAZAKII (IL)
            Xbox 360 S avant jean 2015 zip torrent
            Download Bomberman 2 Full Download (Might be a bit
            Swedish Actor PSE Japan Version 1.1
            EXE Jabu AFRICA KDESEMA Fc12
            RU kots(ye russki)
            retro shapes xbox one
            RUSSIAN ERLEBACHTA DEER CUBE DOMA NMO REVIEW j
            SHARE THE WEATHER (NCR)
            Aardman Animations UglyFruit Hd Runtime Crack
            Multiplayer - Armed Forces 2007

            -

            Academy (Arts) Fair (2012) Registration Code For Download Film Happy End 9.rar
            Download Radiosmart Media Player by YuXin Zeng Free
            FilterMate Pro 15.1 Crack
            Download Férocement Download Orage
            Sainte-Croix Prise De Temps V0.8.0 Keygen Release
            softell libre 2020 ultimate full version
            Cryptography A Programmer's Cookbook jd3gj1w68
            mangaki 1.0.4 full crack
            Winamp 5 Premium 2020 Serial Number Registration Code Windows
            Bum! (game) 2014 Full Cracked Torrent English [P2P]
            Bum! (game) 2014 Full Cracked Torrent English [P2P]
            rPPt4y v4.0.0.12319 Full Download
            Q: VS 2010 and ASP.NET MVC 3 RC - Automatically re-generate the Views/Models for a static resource Is there a way to automatically re-generate the Views/Models classes for an MVC static resource (ie. an image)? Ideally, I would like to create a new data-only MVC project that was minimally modified and then just have it try to compile the markup files. I've tried toggling the "Create Empty Item" option but it does not recompile the Views (nor the models). Another way to ask this question (less productive) is "How to make sure my templates are recompiled?" If you have already done this, I'd love to know what trick you used. A: you can add the [RazorGenerator] attribute to a Class in the static resource, like this: [RazorGenerator( RazorGeneratorOption.RecompileTemplateIfChanged )] public class MyViewModel and then you can use that class in your view, like this: @model MyViewModel I know this is not the answer to your question - I think you wanted to minimize the change to the files you have now. But what I do in this situation is - check the version of a file and keep a list of all the files that changed. Then I just rerun the model generator and it checks the version, recreates the files it thinks have changed and I don't have to touch anything else. A: You can run the MVC3 project outside Visual Studio and copy the project files back to the Visual Studio project. There's nothing really special about doing this. A: I'm not sure I understand your question correctly. You can create a new MVC 3 project, then modify that project to add the image files. The changes are minimal. (This is not the most optimal solution, but it's a "quick and dirty" option. There are other ways, but they're more involved.) To make the changes you want (i.e., compile the models on demand), I would simply alter the project so that when compiling, it creates a ".cshtml.cache" and ".cs" files. (The ".cache" files are temporary, and are automatically created by the compiler for every file in the project.) Then, make sure your template is configured to compile views on-demand. I think you can change the project settings in the "Build Action" drop-down box.

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Dolphin Emulator APK How to Fix Common Issues and Enjoy GamecubeWii Games on Android.md b/spaces/fatiXbelha/sd/Dolphin Emulator APK How to Fix Common Issues and Enjoy GamecubeWii Games on Android.md deleted file mode 100644 index 4e8370ae5d57b8d57caab56985971324e70200b7..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Dolphin Emulator APK How to Fix Common Issues and Enjoy GamecubeWii Games on Android.md +++ /dev/null @@ -1,112 +0,0 @@ - -

            Dolphin Emulator Fix Apk: How to Download, Install, and Troubleshoot

            -

Dolphin emulator is free, open-source software that allows you to play GameCube and Wii games on your PC, Android device, or other platforms. It has many features and enhancements that make it a popular choice among gamers, but it can also run into problems or errors that need to be fixed. In this article, I will provide some information and tips on how to download, install, and troubleshoot dolphin emulator fix apk.

            -




            -

            Download

            -

            To download dolphin emulator fix apk for Android, you can use the following link:

            -

Dolphin Emulator - Download: https://dolphin-emu.org/download/

            -

            This link will take you to the official website of dolphin emulator, where you can find the latest beta versions and development versions of the software. The beta versions are more stable and tested than the development versions, but they may not have the newest features or improvements. The development versions are updated every day with the latest changes and fixes, but they may also have more bugs or glitches.

            -

            For this article, I will use the latest beta version as an example. You can choose the version that suits your needs and preferences. To download the apk file, click on the Android icon under the version number. The file size is about 17 MB.

            -

            Installation

            -

            To install dolphin emulator fix apk on Android, you need to follow these steps:

            -

            -
              -
1. Enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.
2. Locate the downloaded apk file on your device. You can use a file manager app or your browser's download history to find it.
3. Tap on the apk file to start the installation process. You may see a warning message about installing apps from unknown sources. Tap on Install anyway or Allow to proceed.
4. Wait for the installation to finish. You may see a confirmation message when it is done.
5. Launch dolphin emulator from your app drawer or home screen.
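If you prefer to install from a computer rather than on the device itself, the apk can also be sideloaded over adb. The snippet below is a minimal sketch and is not part of the original article: it assumes the Android platform tools (adb) are installed on the PC, that USB debugging is enabled on the phone, and that dolphin-emu.apk is a placeholder for whatever the downloaded file is actually called.

```python
import subprocess
from pathlib import Path

APK = Path("dolphin-emu.apk")  # placeholder: use the real name of the downloaded file

def sideload(apk: Path) -> None:
    """Install the apk on the first connected Android device via adb."""
    if not apk.is_file():
        raise FileNotFoundError(f"{apk} not found - download it first")
    # 'adb install -r' keeps existing app data if Dolphin is already installed.
    subprocess.run(["adb", "install", "-r", str(apk)], check=True)

if __name__ == "__main__":
    sideload(APK)
```

Either way, the result is the same as the manual steps above: Dolphin appears in the app drawer once the installation finishes.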
            -

            Here are some screenshots of the installation process:

Screenshot 1, Screenshot 2, Screenshot 3, Screenshot 4

            Features

            -

            Dolphin emulator fix apk has many features and benefits that make it a great choice for playing GameCube and Wii games on Android. Here are some of them:

            -
              -
• Compatibility: Dolphin emulator supports most of the GameCube and Wii games that are available. You can check the compatibility list on their website to see how well each game runs on the emulator. Some games may require additional settings or patches to work properly.
• Performance: Dolphin emulator can run games at full speed or even faster than the original consoles. You can adjust various settings to optimize the performance according to your device's capabilities and preferences. You can also enable turbo speed to fast-forward games or slow down speed to play them in slow motion.
• Graphics: Dolphin emulator allows you to enhance the graphics of the games by increasing the resolution, applying anti-aliasing, anisotropic filtering, texture scaling, and other post-processing effects. You can also use custom textures and shaders to improve the visuals of your favorite games. To access the graphics settings, go to Options > Graphics Settings in the emulator menu. You can choose from different video backends, such as OpenGL, Vulkan, or Software Renderer; each backend has its own advantages and disadvantages, depending on your device and game compatibility. You can also adjust the aspect ratio, window size, fullscreen mode, and other general options in the General tab. In the Enhancements tab, you can change the internal resolution, anti-aliasing, anisotropic filtering, and other options that affect the quality of the graphics. Be aware that increasing these settings may reduce the performance or cause graphical glitches in some games. In the Hacks tab, you can enable or disable hacks that can improve the performance or compatibility of some games; for example, you can enable Skip EFB Access from CPU to speed up games that use EFB effects, such as Super Mario Galaxy or Metroid Prime 3, although this may also break some effects or features in other games. In the Advanced tab, you can change advanced settings that are not recommended for casual users; they may have a significant impact on performance or compatibility, so use them with caution and only if you know what you are doing. (See the configuration sketch after this list.)
• Controllers: Dolphin emulator supports various types of controllers, such as GameCube controllers, Wii Remotes, Classic Controllers, Nunchuks, and even keyboard and mouse. You can configure your controllers in the Options > Controller Settings menu and choose to use real or emulated controllers for each port or slot. If you have a GameCube controller adapter, you can use it to connect your real GameCube controllers to your device via USB or Bluetooth; you need to enable Direct Connect in the Controller Settings and restart Dolphin for this to work. If you want to use real Wii Remotes, you need to pair them with your device via Bluetooth and select Real Wii Remote in the Controller Settings; you may also need to enable Continuous Scanning and Enable Speaker Data for better compatibility. If you want to use emulated controllers, you need to configure them with the Configure button for each port or slot. You can map your buttons and axes to any controller or keyboard that is recognized by your device, and adjust the sensitivity, deadzone, radius, and other options for each input. For Wii Remote emulation, you can also choose different extensions, such as Nunchuk or Classic Controller, and configure them separately.
• Multiplayer: Dolphin emulator supports multiplayer modes for both local and online play. For local multiplayer, you just need to configure multiple controllers for each port or slot in the Controller Settings; you can then play any game that supports local multiplayer with your friends on the same device or screen. For online multiplayer, you need to use the Netplay feature of Dolphin. Netplay allows you to set up online sessions for any game that supports local multiplayer mode, without the problems or limitations of the Nintendo Wi-Fi Connection or the hassle of the BBA. To use Netplay, go to Tools > Start Netplay in the emulator menu and choose to host or join a session. If you host a session, you select a game from your library and configure settings such as buffer size, latency reduction, and cheat codes; you will then get a room ID that you can share with other players who want to join. If you join a session, you enter the room ID of the host and wait for them to start the game; you also need to have the same game and version as the host for Netplay to work properly. Netplay requires a stable and fast internet connection for both host and clients, and all players must use the same Dolphin version and settings for optimal compatibility and performance.

              Troubleshooting

              -

              Despite its many advantages, dolphin emulator fix apk may also encounter some problems or errors that need to be fixed. Here are some common issues and solutions for dolphin emulator fix apk:

| Issue | Solution |
| --- | --- |
| Missing DLL files | If you see an error message about missing DLL files, such as MSVCP140.dll or VCRUNTIME140.dll, you need to install the Visual C++ Redistributable for Visual Studio 2015 (available from Microsoft's download site). After installing it, restart your device and try launching dolphin emulator again. |
| Controller settings not working | If your controller settings are not working properly, you may need to calibrate your controller or reset your configuration. To calibrate your controller, go to Settings > Devices > Bluetooth & other devices > Devices and printers on your device, right-click on your controller, and select Game controller settings. Then click on Properties, go to the Settings tab, click on Calibrate, and follow the instructions on the screen. To reset your configuration, go to Options > Controller Settings in dolphin emulator and click on Clear All at the bottom of the window, then reconfigure your controller as desired. |
| Game compatibility issues | If your game is not running well or at all on dolphin emulator, check the compatibility list on the Dolphin website to see if there are any known issues or solutions for that game. You can also try changing some settings in dolphin emulator, such as the video backend, graphics enhancements, or hacks, to see if they improve the situation. However, be careful not to change too many settings at once, as this may cause more problems or reduce performance. You can also check the forums or wiki pages for that game for more information or tips from other users. |

              Conclusion

              -

              Dolphin emulator fix apk is a powerful and versatile software that allows you to play GameCube and Wii games on your Android device. It has many features and benefits that make it a great choice for gamers who want to enjoy their favorite games with enhanced graphics, performance, controllers, and multiplayer modes. However, it may also have some problems or errors that need to be fixed. In this article, I have provided you with some information and tips on how to download, install, and troubleshoot dolphin emulator fix apk. I hope you find them useful and enjoy playing with dolphin emulator fix apk.

              -

              FAQs

              -

              Here are some frequently asked questions and answers about dolphin emulator fix apk:

              -
                -
              1. How do I update dolphin emulator fix apk?
              2. -

                To update dolphin emulator fix apk, you need to download the latest version of the apk file from the official website of dolphin emulator, using the same download link as before. Then, you need to uninstall the previous version of dolphin emulator fix apk from your device. To do this, go to Settings > Apps > Dolphin Emulator and tap on Uninstall. You may lose your settings and save data if you do this, so make sure to back them up first. You can use the Export All button in the Options > Configuration menu to export your settings to a file. You can also copy your save data from the Dolphin Emulator folder on your device's internal storage or SD card. After uninstalling the previous version, you can install the new version of dolphin emulator fix apk by following the same steps as before. You can then import your settings from the file you exported or copy your save data back to the Dolphin Emulator folder.

              3. How do I add games to dolphin emulator fix apk?
              4. -

                To add games to dolphin emulator fix apk, you need to have the game files in ISO or WBFS format on your device's internal storage or SD card. You can use a tool like Wii Backup Manager to convert your game discs to these formats. You can also download game files from various websites, but this may be illegal or unsafe, so do it at your own risk. Once you have the game files on your device, you need to launch dolphin emulator and tap on the plus icon at the top right corner of the screen. This will open a file browser where you can navigate to the folder where your game files are stored. You can then select one or more game files and tap on OK. This will add them to your game library in dolphin emulator. You can then tap on any game to start playing it.

                -
              5. How do I delete games from dolphin emulator fix apk?
              6. -

                To delete games from dolphin emulator fix apk, you need to long-press on any game in your game library and tap on Delete from the menu that appears. This will remove the game from your game library, but not from your device's storage. If you want to delete the game file from your device's storage, you need to use a file manager app or your device's settings to do so.

                -
              7. How do I change the language of dolphin emulator fix apk?
              8. -

                To change the language of dolphin emulator fix apk, you need to go to Options > Configuration in the emulator menu and tap on Interface Language. This will open a list of available languages that you can choose from. Tap on any language that you want and restart dolphin emulator for the changes to take effect.

                -
              9. How do I contact the developers of dolphin emulator fix apk?
              10. -

                To contact the developers of dolphin emulator fix apk, you can use one of the following methods:

                -

                401be4b1e0
                -
                -
                \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download and Listen to High Heels by Flo Rida and Walker Hayes (Lyrics).md b/spaces/fatiXbelha/sd/Download and Listen to High Heels by Flo Rida and Walker Hayes (Lyrics).md deleted file mode 100644 index bdf9c8eac8497563080b9ca38d76a7336ca305ef..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download and Listen to High Heels by Flo Rida and Walker Hayes (Lyrics).md +++ /dev/null @@ -1,158 +0,0 @@ -
                -

                High Heels Lyrics Mp3 Download: How to Enjoy the Latest Songs by Flo Rida and Walker Hayes

                -

                If you are a fan of country music, pop rap, or both, you might have heard of the latest collaboration between Flo Rida and Walker Hayes. The duo released a catchy song called "High Heels" in October 2022, and it has been gaining popularity ever since. The song is about their life experiences and challenges, and how they cope with them using humor and optimism. The song also features some catchy hooks and beats that will make you want to dance and sing along.

                -

                high heels lyrics mp3 download


                Download File ::: https://urllie.com/2uNAua



                -

                But how can you enjoy this song to the fullest? One way is to download the song in mp3 format, along with its lyrics, so that you can listen to it anytime, anywhere, and even sing along with it. In this article, we will show you how to do that in three easy steps. We will also give you some tips on how to enjoy high heels lyrics mp3 songs in different ways. So, let's get started!

                -

                Introduction

                -

                What are high heels lyrics mp3 download?

                -

                High heels lyrics mp3 download are files that contain both the audio and the lyrics of the song "High Heels" by Flo Rida and Walker Hayes. Mp3 is a common audio format that can be played on most devices, such as smartphones, tablets, computers, and speakers. Lyrics are the words of the song that are displayed on the screen or printed on paper. By downloading high heels lyrics mp3 files, you can enjoy the song in both ways: listening and singing.
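As a side note, lyrics can even travel inside the mp3 file itself: the ID3 tag format has a lyrics field that many players display. The short Python sketch below is only an illustration of that idea; the file name, the lyrics text, and the use of the third-party mutagen library are all assumptions, not something a downloaded file is guaranteed to contain.

```python
# Illustrative sketch: embed and read back unsynchronised lyrics in an mp3's ID3 tag.
# Assumes the third-party "mutagen" library, a local file named "high-heels.mp3",
# and that the file already carries an ID3 tag.
from mutagen.id3 import ID3, USLT

LYRICS = "Example lyric line 1\nExample lyric line 2"  # placeholder text

tags = ID3("high-heels.mp3")                                   # load the existing ID3 tag
tags.add(USLT(encoding=3, lang="eng", desc="", text=LYRICS))   # USLT = unsynchronised lyrics frame
tags.save()

# Reading the lyrics back:
for frame in ID3("high-heels.mp3").getall("USLT"):
    print(frame.text)
```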

                -

                Why are high heels lyrics mp3 download popular?

                -

                High heels lyrics mp3 download are popular because they offer many benefits for music lovers. Here are some of them:

                -
                  -
                • They allow you to listen to the song offline, without needing an internet connection or streaming service.
                • -
                • They save you time and money, as you don't have to pay for subscription fees or data charges.
                • -
                • They give you more control over your music collection, as you can choose which songs to download and where to store them.
                • -
                • They enhance your listening experience, as you can adjust the volume, speed, and quality of the audio.
                • -
                • They improve your singing skills, as you can learn the lyrics and practice your pronunciation and rhythm.
                • -
                • They increase your enjoyment of the song, as you can sing along with it and express your emotions.
                • -
                -

                How to download high heels lyrics mp3 songs

                -

                Step 1: Find a reliable website or app that offers high heels lyrics mp3 download

                -

                The first step to download high heels lyrics mp3 songs is to find a reliable website or app that offers this service. There are many websites and apps that claim to offer free or cheap downloads of music files, but not all of them are trustworthy.

                Some of them might be scams, viruses, or malware that can harm your device or steal your personal information. Others might have poor quality, incomplete, or inaccurate files that can ruin your listening experience. Therefore, you need to be careful and choose a website or app that is reputable, secure, and reliable.

                -

                flo rida high heels lyrics mp3 download
                -walker hayes high heels lyrics mp3 download
                -high heels song lyrics mp3 download
                -high heels flo rida walker hayes lyrics mp3 download
                -high heels lyrics video mp3 download
                -high heels country song lyrics mp3 download
                -high heels by flo rida lyrics mp3 download
                -high heels feat walker hayes lyrics mp3 download
                -high heels lyrics audio mp3 download
                -high heels rap song lyrics mp3 download
                -high heels with walker hayes lyrics mp3 download
                -high heels official lyrics mp3 download
                -high heels lyrics flo rida mp3 download
                -high heels polar records lyrics mp3 download
                -high heels chillypilly lyrics mp3 download
                -high heels album lyrics mp3 download
                -high heels spotify lyrics mp3 download
                -high heels tiktok song lyrics mp3 download
                -high heels genius lyrics mp3 download
                -high heels youtube lyrics mp3 download
                -high heels artist partner group lyrics mp3 download
                -high heels instagram lyrics mp3 download
                -high heels twitter lyrics mp3 download
                -high heels facebook lyrics mp3 download
                -high heels clubflo lyrics mp3 download
                -high heels new scientist lyrics mp3 download
                -high heels the sun lyrics mp3 download
                -high heels yahoo news lyrics mp3 download
                -high heels montana lyrics mp3 download
                -high heels cornell university lyrics mp3 download
                -high heels nasa lyrics mp3 download
                -high heels wikipedia lyrics mp3 download
                -high heels free lyrics mp3 download
                -high heels online lyrics mp3 download
                -high heels full song lyrics mp3 download
                -high heels remix lyrics mp3 download
                -high heels instrumental lyrics mp3 download
                -high heels karaoke lyrics mp3 download
                -high heels cover version lyrics mp3 download
                -high heels live performance lyrics mp3 download
                -high heels dance choreography lyrics mp3 download
                -high heels reaction video lyrics mp3 download
                -high heels behind the scenes lyrics mp3 download
                -high heels making of the song lyrics mp3 download
                -high heels interview with the artists lyrics mp3 download
                -high heels fan art and memes lyrics mp3 download
                -high heels trivia and facts lyrics mp3 download
                -high heels meaning and analysis lyrics mp3 download
                -high heels reviews and ratings lyrics mp3 download

                -

                One way to find a good website or app for high heels lyrics mp3 download is to check the reviews and ratings from other users. You can also look for features such as fast download speed, easy-to-use interface, multiple output options, and customer support. Additionally, you can use a trusted antivirus or firewall software to protect your device from any potential threats.

                -

                Some examples of websites and apps that offer high heels lyrics mp3 download

                -

                Based on our research, here are some examples of websites and apps that offer high heels lyrics mp3 download. However, we are not affiliated with or endorsed by any of them, and we recommend that you do your own research before using them.

                -
                  -
                • Any Video Converter Free: This is a free music downloader app for Windows and Mac that supports downloading high-def files up to 4K from over 100 sites, including YouTube, SoundCloud, and Facebook. It also has a built-in editor for clipping and merging audio or video files, and supports native ID3 tag editing. You can choose from various output formats, such as MP3, AC3, OGG, WMA, M4A, and more.
                • -
                • Musixmatch: lyrics finder: This is a free music player app for Android and iOS that has the world’s largest collection of song lyrics. It allows you to get instant time synced lyrics for YouTube, Spotify, Pandora, Apple Music, SoundCloud, Google Play Music, and more. You can also use it to get song translations on the go, identify songs playing around you with one tap, search for songs by title, artist, or lyrics, and share your favorite lyrics with your friends.
                • -
                • Best Free Music Player with Lyrics Downloader for Windows 11/10: This is a list of free music player software for Windows 11/10 that comes with a lyrics downloader. These audio players fetch lyrics of the songs from various online websites (LyricsFreak, AZLyrics, MetroLyrics, etc.) and display them on the screen. Some of them also let you edit lyrics, sync lyrics with music, save lyrics offline, and more.
                • -

                Step 2: Choose the song that you want to download

                -

                The next step to download high heels lyrics mp3 songs is to choose the song that you want to download. There are many songs that you can download with high heels lyrics mp3 download, but not all of them are the same. Some songs might have different versions, remixes, covers, or live performances. Some songs might have explicit or clean lyrics, depending on your preference. Some songs might have better quality, clarity, or accuracy than others. Therefore, you need to be careful and choose the song that suits your taste and needs.

                -

                One way to choose the song that you want to download is to listen to a preview or sample of the song before downloading it. You can also read the description, comments, or reviews of the song from other users. You can also compare the song with other similar songs from the same artist or genre. Additionally, you can use a trusted music app or website to discover new songs or recommendations based on your listening history or preferences.

                -

                Some examples of songs that you can download with high heels lyrics mp3 download

                -

                Based on our research, here are some examples of songs that you can download with high heels lyrics mp3 download. However, we are not affiliated with or endorsed by any of them, and we recommend that you do your own research before downloading them.

                -
                  -
                • High Heels by Flo Rida and Walker Hayes: This is the original version of the song that was released in October 2022. It has a duration of 3 minutes and 15 seconds, and a file size of 7.5 MB. It has a pop rap and country rap style, with upbeat tempo and catchy hooks. The lyrics are about their life experiences and challenges, and how they cope with them using humor and optimism.
                • -
                • High Heels (Remix) by Flo Rida and Walker Hayes feat. Nelly: This is a remix version of the song that was released in November 2022. It has a duration of 3 minutes and 30 seconds, and a file size of 8 MB. It features an additional verse by Nelly, who adds his own flair and personality to the song. The remix has a more energetic and danceable vibe, with enhanced beats and bass.
                • -
                • High Heels (Cover) by Maddie & Tae: This is a cover version of the song that was released in December 2022. It has a duration of 3 minutes and 10 seconds, and a file size of 7 MB. It is performed by Maddie & Tae, a female country duo who are known for their harmonies and storytelling. The cover has a more acoustic and organic feel, with guitar and piano accompaniment. The lyrics are slightly modified to fit their perspective and style.
                • -

                Step 3: Click on the download button and wait for the process to complete

                -

                The final step to download high heels lyrics mp3 songs is to click on the download button and wait for the process to complete. Depending on the website or app that you are using, the download button might be located in different places, such as below the song title, next to the play button, or on a separate page. You might also need to choose the output format, quality, and location of the file before downloading it. Once you click on the download button, the file will start to download and you will see a progress bar or a notification indicating the status of the download.

                -

                Downloading high heels lyrics mp3 songs is usually a quick and easy process, but sometimes it might encounter some problems or delays. For example, the download might fail due to network issues, server errors, or file corruption. The download might also take longer than expected due to slow internet speed, high traffic, or large file size. Therefore, you need to be patient and careful when downloading high heels lyrics mp3 songs.
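If you are comfortable with a little scripting, the same "click and wait" step can also be done programmatically, which makes the network problems mentioned above easier to spot and retry. This is only a generic sketch: the URL is a placeholder rather than a real download link, and it assumes the Python requests library is installed.

```python
# Generic file-download sketch with a timeout and simple error reporting.
# The URL below is a placeholder (assumption), not an actual download link.
import requests

URL = "https://example.com/high-heels.mp3"
OUT = "high-heels.mp3"

try:
    with requests.get(URL, stream=True, timeout=30) as resp:
        resp.raise_for_status()                      # fail fast on server errors
        with open(OUT, "wb") as f:
            for chunk in resp.iter_content(chunk_size=64 * 1024):
                f.write(chunk)                       # write the file piece by piece
    print("Saved", OUT)
except requests.RequestException as err:             # network issues, timeouts, bad status codes
    print("Download failed:", err)
```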

                -

                Some tips on how to download high heels lyrics mp3 songs safely and quickly

                -

                Here are some tips on how to download high heels lyrics mp3 songs safely and quickly:

                -
                  -
                • Make sure that you have a stable and fast internet connection, preferably using Wi-Fi or Ethernet cable.
                • -
                • Use a reputable and secure website or app that has positive reviews and ratings from other users.
                • -
                • Check the file size, format, quality, and source of the song before downloading it.
                • -
                • Avoid downloading multiple files at the same time, as this might slow down your device or cause errors.
                • -
                • Scan the downloaded file with a trusted antivirus or firewall software before opening it.
                • -
                • Delete any unwanted or duplicate files from your device to free up space and avoid confusion.
                • -
                -

                How to enjoy high heels lyrics mp3 songs

                -

                Listen to high heels lyrics mp3 songs on your device or online

                -

                One way to enjoy high heels lyrics mp3 songs is to listen to them on your device or online. You can use any music player app or software that supports mp3 files, such as Windows Media Player, iTunes, VLC Media Player, or Groove Music. You can also use any web browser that supports audio streaming, such as Chrome, Firefox, Safari, or Edge. You can listen to high heels lyrics mp3 songs on your device or online anytime, anywhere, and as many times as you want.

                -

                Some benefits of listening to high heels lyrics mp3 songs on your device or online

                -

                Here are some benefits of listening to high heels lyrics mp3 songs on your device or online:

                -
                  -
                • You can enjoy the song in high quality and clarity, without any interruptions or ads.
                • -
                • You can adjust the volume, speed, and equalizer of the song according to your preference.
                • -
                • You can create playlists of your favorite songs and shuffle them randomly or in order.
                • -
                • You can discover new songs or artists that are similar to high heels lyrics mp3 songs using recommendations or algorithms.
                • -
                • You can access the song offline, without needing an internet connection or streaming service.
                • -

                Sing along with high heels lyrics mp3 songs using karaoke or lyrics apps

                -

                Another way to enjoy high heels lyrics mp3 songs is to sing along with them using karaoke or lyrics apps. You can use any karaoke or lyrics app that supports mp3 files, such as Smule, Musixmatch, Lyrics Mania, or Genius. You can also use any web browser that supports karaoke or lyrics websites, such as SingSnap, Karaoke Version, or AZLyrics. You can sing along with high heels lyrics mp3 songs using karaoke or lyrics apps anytime, anywhere, and with anyone.

                -

                Some benefits of singing along with high heels lyrics mp3 songs using karaoke or lyrics apps

                -

                Here are some benefits of singing along with high heels lyrics mp3 songs using karaoke or lyrics apps:

                -
                  -
                • You can learn the lyrics and practice your pronunciation and rhythm of the song.
                • -
                • You can express your emotions and personality through your voice and gestures.
                • -
                • You can have fun and relax by singing your favorite songs and releasing stress.
                • -
                • You can improve your singing skills and confidence by getting feedback and scores from the app or website.
                • -
                • You can socialize and bond with your friends and family by singing together or competing with each other.
                • -
                -

                Share high heels lyrics mp3 songs with your friends and family

                -

                A third way to enjoy high heels lyrics mp3 songs is to share them with your friends and family. You can use any social media app or website that supports sharing music files, such as Facebook, Instagram, Twitter, WhatsApp, or Snapchat. You can also use any email app or website that supports sending music files, such as Gmail, Outlook, Yahoo Mail, or Hotmail. You can share high heels lyrics mp3 songs with your friends and family anytime, anywhere, and with anyone.

                -

                Some benefits of sharing high heels lyrics mp3 songs with your friends and family

                -

                Here are some benefits of sharing high heels lyrics mp3 songs with your friends and family:

                -
                  -
                • You can spread the joy and excitement of the song to others who might like it too.
                • -
                • You can show your appreciation and support for the artists who created the song.
                • -
                • You can start a conversation and exchange opinions about the song with others who have listened to it.
                • -
                • You can create memories and connections with others who share your musical taste and interest.
                • -
                • You can discover new songs or artists that others have shared with you.
                • -
                -

                Conclusion

                -

                In conclusion, high heels lyrics mp3 download are a great way to enjoy the latest songs by Flo Rida and Walker Hayes. By downloading high heels lyrics mp3 songs, you can listen to them offline, save time and money, control your music collection, enhance your listening experience, improve your singing skills, and increase your enjoyment of the song. You can also enjoy high heels lyrics mp3 songs in different ways, such as listening to them on your device or online, singing along with them using karaoke or lyrics apps, and sharing them with your friends and family. So, what are you waiting for? Download high heels lyrics mp3 songs today and have fun!

                -

                FAQs

                -

                Here are some frequently asked questions about high heels lyrics mp3 download:

                -
                  -
                1. Q: How much does it cost to download high heels lyrics mp3 songs?
                2. -
                3. A: It depends on the website or app that you are using. Some websites or apps offer free downloads of music files, while others charge a fee or require a subscription. You should check the terms and conditions of the website or app before downloading any files.
                4. -
                5. Q: How long does it take to download high heels lyrics mp3 songs?
                6. -
                7. A: It depends on the file size, internet speed, and traffic of the website or app that you are using. Generally, it takes a few minutes to download a song in mp3 format. However, it might take longer if the file is large, the internet is slow, or the website or app is busy.
                8. -
                9. Q: How many high heels lyrics mp3 songs can I download?
                10. -
                11. A: It depends on the storage space of your device and the website or app that you are using. Generally, you can download as many songs as you want as long as you have enough space on your device. However, some websites or apps might have a limit on how many files you can download per day or per month. You should check the terms and conditions of the website or app before downloading any files.
                12. -
                13. Q: How can I delete high heels lyrics mp3 songs from my device?
                14. -
                15. A: You can delete high heels lyrics mp3 songs from your device by following these steps:
                16. -
                17. A: - Locate the file on your device using a file manager app or software.
                18. -
                19. - Tap or click on the file and select the delete option.
                20. -
                21. - Confirm your action and wait for the file to be deleted.
                22. -
                23. Q: How can I share high heels lyrics mp3 songs with others?
                24. -
                25. A: You can share high heels lyrics mp3 songs with others by following these steps:
                26. -
                27. A: - Choose the song that you want to share from your device or online.
                28. -
                29. - Tap or click on the share option and select the app or website that you want to use.
                30. -
                31. - Enter the recipient's name, email, phone number, or username.
                32. -
                33. - Add a message or caption if you want.
                34. -
                35. - Tap or click on the send option and wait for the file to be sent.
                36. -

                401be4b1e0
                -
                -
                \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/examples/clue1.1/data_preprocessing/ocnli_preprocessing.py b/spaces/fclong/summary/fengshen/examples/clue1.1/data_preprocessing/ocnli_preprocessing.py deleted file mode 100644 index 344a8ea7b7049b9f4373ad4c36dc284c395b0034..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/clue1.1/data_preprocessing/ocnli_preprocessing.py +++ /dev/null @@ -1,60 +0,0 @@ -import json -from tqdm import tqdm -import os -import argparse - - -label2desc={'contradiction':'矛盾','neutral':'自然','entailment':'蕴含'} - -def load_data(file_path,is_training=False): - with open(file_path, 'r', encoding='utf8') as f: - lines = f.readlines() - result=[] - for line in tqdm(lines): - data = json.loads(line) - texta = data['sentence1'] - textb = data['sentence2'] - question = '' - choice = [v for k,v in label2desc.items()] - answer = label2desc[data['label']] if 'label' in data.keys() else '' - label = choice.index(answer) if 'label' in data.keys() else 0 - text_id = data['id'] if 'id' in data.keys() else 0 - result.append({'task_type':'自然语言推理', - 'texta':texta, - 'textb':textb, - 'question':question, - 'choice':choice, - 'answer':answer, - 'label':label, - 'id':text_id}) - for i in range(5): - print(result[i]) - return result - - -def save_data(data,file_path): - with open(file_path, 'w', encoding='utf8') as f: - for line in data: - json_data=json.dumps(line,ensure_ascii=False) - f.write(json_data+'\n') - - -if __name__=="__main__": - parser = argparse.ArgumentParser(description="train") - parser.add_argument("--data_path", type=str,default="") - parser.add_argument("--save_path", type=str,default="") - - args = parser.parse_args() - - - data_path = args.data_path - save_path = args.save_path - - if not os.path.exists(save_path): - os.makedirs(save_path) - - file_list = ['train','dev','test'] - for file in file_list: - file_path = os.path.join(data_path,file+'.json') - output_path = os.path.join(save_path,file+'.json') - save_data(load_data(file_path),output_path) diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/American Truck Simulator APK OBB The Most Realistic and Immersive Truck Simulation Game for Android Users.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/American Truck Simulator APK OBB The Most Realistic and Immersive Truck Simulation Game for Android Users.md deleted file mode 100644 index 57e257c150e77c049d1e4cd948d54d838e775dd6..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/American Truck Simulator APK OBB The Most Realistic and Immersive Truck Simulation Game for Android Users.md +++ /dev/null @@ -1,129 +0,0 @@ -
                -

                American Truck Simulator APK+OBB Download: How to Play the Best Trucking Game on Your Android Device

                -

                If you are a fan of simulation games, especially truck driving games, you might have heard of American Truck Simulator, one of the most popular and realistic trucking games ever made. But did you know that you can also play this game on your Android device? Yes, you read that right. You can enjoy the thrill of driving across the vast landscapes of America, delivering various cargoes, and building your own trucking empire on your smartphone or tablet. All you need is to download and install the American Truck Simulator APK+OBB files on your device. In this article, we will show you how to do that, as well as give you some tips and tricks for playing this amazing game on your Android device.

                -

                american truck simulator apk+obb download


                Download Zip: https://gohhs.com/2uPs3y



                -

                What is American Truck Simulator?

                -

                American Truck Simulator is a truck driving simulation game developed by SCS Software, the same studio behind the successful Euro Truck Simulator series. The game was released in 2016 for Windows, Mac, and Linux platforms, and has since received numerous updates and expansions that added more states, trucks, cargoes, and features to the game.

                -

                In American Truck Simulator, you take on the role of a driver for hire who enters the local freight market, making your way up to become an owner-operator, and eventually creating one of the largest transportation companies in the United States. You can choose from a variety of trucks from iconic American brands, such as Kenworth, Peterbilt, Volvo, Mack, Western Star, Freightliner, and more. You can also customize your truck with different cabins, chassis, paintjobs, accessories, and engines. You can deliver various cargoes across different states, such as California, Nevada, Arizona, New Mexico, Oregon, Washington, Utah, Idaho, Colorado, Wyoming, Montana, Texas, Oklahoma, Kansas, and more. You can also explore the map and discover famous landmarks and attractions along the way.

                -

                Features of American Truck Simulator

                -

                Some of the features that make American Truck Simulator stand out from other trucking games are:

                -
                  -
                • Realistic graphics and physics that create an immersive driving experience.
                • -
                • Authentic truck models that are officially licensed from real manufacturers.
                • -
                • Diverse cargoes that require different skills and strategies to deliver safely and efficiently.
                • -
                • Dynamic weather and day/night cycles that affect the road conditions and visibility.
                • -
                • Advanced AI traffic that reacts to your actions and creates realistic traffic situations.
                • -
                • Radio stations that play various genres of music and news from different states.
                • -
                • World of Trucks online platform that allows you to connect with other players and join events and competitions.
                • -
                • Steam Workshop support that enables you to download and install user-made mods that add more content and features to the game.
                • -
                -

                System Requirements for American Truck Simulator

                -

                Before you download and install American Truck Simulator APK+OBB on your Android device, you need to make sure that your device meets the minimum system requirements for running the game smoothly. You can check the PC version's official requirements at [7](https://www.systemrequirementslab.com/cyri/requirements/american-truck-simulator/13076) to get a sense of how demanding the game is.

                According to [1](https://android.stackexchange.com/questions/34958/what-are-the-minimum-hardware-specifications-for-android), the bare minimum hardware specifications for Android itself are listed below; keep in mind that a 3D game like this needs a far more capable device than these minimums:

                -

                american truck simulator mod apk+obb unlimited money
                -download american truck simulator usa for android
                -american truck simulator apk+obb free download latest version
                -how to install american truck simulator on android
                -american truck simulator apk+obb offline
                -american truck simulator 2023 apk+obb download
                -american truck simulator pro apk+obb
                -best american truck simulator games for android
                -american truck simulator apk+obb highly compressed
                -american truck simulator california apk+obb download
                -american truck simulator multiplayer apk+obb
                -download american truck simulator 2 for android
                -american truck simulator apk+obb rexdl
                -american truck simulator mod apk+obb all trucks unlocked
                -american truck simulator apk+obb revdl
                -american truck simulator new york apk+obb download
                -american truck simulator 18 wheeler apk+obb
                -download american truck simulator 3d for android
                -american truck simulator mod apk+obb unlimited fuel
                -american truck simulator apk+obb apkpure
                -american truck simulator alaska apk+obb download
                -american truck simulator online apk+obb
                -download american truck simulator hd for android
                -american truck simulator mod apk+obb no ads
                -american truck simulator apk+obb uptodown
                -american truck simulator texas apk+obb download
                -american truck simulator real apk+obb
                -download american truck simulator pro 2 for android
                -american truck simulator mod apk+obb all maps unlocked
                -american truck simulator apk+obb moddroid
                -american truck simulator florida apk+obb download
                -american truck simulator realistic apk+obb
                -download american truck simulator 2023 for android
                -american truck simulator mod apk+obb unlimited xp
                -american truck simulator apk+obb happymod
                -american truck simulator nevada apk+obb download
                -american truck simulator extreme apk+obb
                -download american truck simulator classic for android
                -american truck simulator mod apk+obb all features unlocked
                -american truck simulator apk+obb andropalace

                - - - - - - - - - - - - - -
| Processor | RAM | Storage | Architecture |
| --- | --- | --- | --- |
| 200 MHz | 32 MB | 32 MB | ARMv5 or higher |
                -

                To check your Android device's specifications, you can use an app called "Inware". It's a free app you can get from the Google Play Store, and it allows you to look at all of your device's specifications in great detail. You can also compare your device's specifications with other devices on websites like [3](https://www.gsmarena.com/) or [4](https://www.gsmarena.com/compare.php3).
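If you have a computer with the Android debug bridge (adb) installed, you can also read a few key specifications directly instead of installing an extra app. The snippet below is only a rough sketch of that idea; it assumes adb is on your PATH, USB debugging is enabled on the device, and Python is available.

```python
# Rough sketch: query basic device specs over adb (assumes adb + USB debugging).
import subprocess

def adb_shell(*args: str) -> str:
    """Run an `adb shell` command and return its trimmed output."""
    out = subprocess.run(["adb", "shell", *args],
                         capture_output=True, text=True, check=True)
    return out.stdout.strip()

print("Model:          ", adb_shell("getprop", "ro.product.model"))
print("Android version:", adb_shell("getprop", "ro.build.version.release"))
print("CPU ABI:        ", adb_shell("getprop", "ro.product.cpu.abi"))

# MemTotal is the first line of /proc/meminfo, reported in kB.
mem_kb = int(adb_shell("cat", "/proc/meminfo").splitlines()[0].split()[1])
print(f"Total RAM:       {mem_kb / 1024:.0f} MB")
```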

                -

                How to Download and Install American Truck Simulator APK+OBB on Your Android Device

                -

                Now that you have checked your device's specifications and made sure that it can run American Truck Simulator, you are ready to download and install the game on your device. Here are the steps you need to follow:

                -

                Step 1: Download the APK and OBB files from a trusted source

                -

                The first thing you need to do is to download the APK and OBB files of American Truck Simulator from a trusted source. You can find many websites that offer these files, but be careful not to download from shady or malicious sites that might harm your device or steal your data. One of the reliable sources that we recommend is [5](https://apkdone.com/american-truck-simulator/), where you can download the latest version of the game for free.

                -

                To download the files, go to [5](https://apkdone.com/american-truck-simulator/) and scroll down until you see the download buttons. Tap on the button that says "Download American Truck Simulator APK" and wait for the download to start. Then, tap on the button that says "Download American Truck Simulator OBB" and wait for the download to start. You should have two files in your device's download folder: one with the extension .apk and one with the extension .zip.

                -

                Step 2: Enable installation from unknown sources on your device

                -

                The next thing you need to do is to enable installation from unknown sources on your device. This is because Android devices normally do not allow installation of apps that are not from the Google Play Store, for security reasons. However, since you have downloaded the APK file from a trusted source, you can safely enable this option.

                -

                To enable installation from unknown sources, go to your device's settings and look for the security or privacy section. There, you should find an option that says "Install unknown apps" or "Allow installation from unknown sources". Tap on it and toggle it on. You might see a warning message that says installing apps from unknown sources might harm your device or data, but don't worry, as long as you have downloaded the files from a trusted source, you are safe.

                -

                Step 3: Install the APK file and extract the OBB file

                -

                The next thing you need to do is to install the APK file and extract the OBB file. The APK file is the application file that contains the game itself, while the OBB file is the data file that contains the game's assets, such as graphics, sounds, and maps.

                -

                To install the APK file, go to your device's download folder and tap on the file that has the extension .apk. You should see a prompt that asks you if you want to install this application. Tap on "Install" and wait for the installation to finish.

                -

                To extract the OBB file, you will need a file manager app that can handle zip files. You can use any app that you prefer, but one of the popular ones is [6](https://play.google.com/store/apps/details?id=com.rhmsoft.fm), which is free and easy to use. To extract the OBB file, open [6](https://play.google.com/store/apps/details?id=com.rhmsoft.fm) and go to your device's download folder. Tap on the file that has the extension .zip and select "Extract". Choose a destination folder where you want to extract the file, such as your device's internal storage or SD card. Wait for the extraction to finish.

                -

                Step 4: Move the OBB file to the correct folder

                -

                The next thing you need to do is to move the OBB file to the correct folder. The OBB file needs to be in a specific folder in order for the game to recognize it and load it properly. The folder name is usually the same as the game's package name, which you can find in the APK file's name or properties. For American Truck Simulator, the folder name is com.scssoft.americantrucksimulator.

                -

                To move the OBB file to the correct folder, open [6](https://play.google.com/store/apps/details?id=com.rhmsoft.fm) again and go to the destination folder where you extracted the OBB file. You should see a file named main.1.com.scssoft.americantrucksimulator.obb. Tap and hold on it and select "Cut". Then, go to your device's internal storage or SD card and look for a folder named "Android". Inside it, look for a folder named "obb". If you don't see it, create one by tapping on the "+" icon and naming it "obb". Inside the "obb" folder, look for a folder named com.scssoft.americantrucksimulator. If you don't see it, create one by tapping on the "+" icon and naming it com.scssoft.americantrucksimulator. Inside the com.scssoft.americantrucksimulator folder, tap on the "Paste" icon and wait for the OBB file to be moved.
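If you find the cut-and-paste dance fiddly, the same step can be scripted. The sketch below mirrors the instructions above in Python (for example from a Python app running on the device, or on a computer with the device's storage mounted). The download folder path and the zip file name are assumptions and will likely differ on your device; the obb folder and file names come from the steps above.

```python
# Sketch of the extract-and-move step: unzip the downloaded archive and move the
# .obb file into Android/obb/com.scssoft.americantrucksimulator.
# The Download path and zip name are assumptions -- adjust them to your device.
import shutil
import zipfile
from pathlib import Path

downloads = Path("/sdcard/Download")                         # assumed download folder
obb_zip   = downloads / "American Truck Simulator OBB.zip"   # assumed zip name
obb_name  = "main.1.com.scssoft.americantrucksimulator.obb"
obb_dir   = Path("/sdcard/Android/obb/com.scssoft.americantrucksimulator")

obb_dir.mkdir(parents=True, exist_ok=True)    # create Android/obb/<package> if missing

with zipfile.ZipFile(obb_zip) as archive:     # extract the downloaded archive
    archive.extractall(downloads)

shutil.move(str(downloads / obb_name), str(obb_dir / obb_name))
print("OBB in place:", (obb_dir / obb_name).exists())
```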

                -

                Step 5: Launch the game and enjoy

                -

                The final thing you need to do is to launch the game and enjoy. To launch the game, go to your device's app drawer and look for an icon that says "American Truck Simulator". Tap on it and wait for the game to load. You might see a splash screen that says "Checking license" or "Verifying files". This is normal and it means that the game is checking if you have a valid copy of the game. If everything goes well, you should see the game's main menu, where you can start a new game, load an existing game, change settings, or access other features.

                -

                Congratulations! You have successfully downloaded and installed American Truck Simulator APK+OBB on your Android device. Now you can enjoy driving across America in your own truck, delivering cargoes, earning money, and building your reputation.

                -

                Tips and Tricks for Playing American Truck Simulator on Your Android Device

                -

                Now that you have installed the game on your device, you might want to know some tips and tricks for playing it better and having more fun. Here are some of them:

                -

                Keep an eye out for cops and traffic laws

                -

                One of the challenges of driving a truck in American Truck Simulator is that you have to obey the traffic laws and avoid getting fined by cops. You have to follow the speed limit, stop at red lights and stop signs, yield to other vehicles, use your headlights at night or in bad weather, and avoid collisions or damage. If you break any of these rules, you might get pulled over by a cop and receive a ticket that will deduct money from your account. You can also get fined if you are late for delivery, damage your cargo, or abandon your job.

                -

                To avoid getting fined, you should keep an eye out for cops and traffic signs on the road. You can also use your GPS or map to see the speed limit and other information about your route. You can also adjust the difficulty settings of the game to make it easier or harder for yourself.

                -

                Use your turn signals and check your surroundings

                -

                Another challenge of driving a truck in American Truck Simulator is that you have to maneuver your large vehicle in tight spaces and busy roads. You have to make turns, change lanes, park, reverse, and overtake other vehicles without causing accidents or blocking traffic. To do this, you have to use your turn signals and check your surroundings before making any move.

                -

                To use your turn signals, you can tap on the left or right arrows on your screen or use the buttons on your steering wheel if you have one. To check your surroundings, you can use your mirrors or cameras on your screen or use the buttons on your steering wheel if you have one. You can also switch between different camera views by tapping on the camera icon on your screen or using the buttons on your steering wheel if you have one.

                -

                Pick the fastest and most profitable jobs

                -

                One of the goals of playing American Truck Simulator is to earn money and reputation by delivering cargoes across different states. You can find jobs by visiting freight markets or cargo depots on the map or by using your phone or computer in your truck's cabin. You can choose from different types of cargoes, such as cars, food, chemicals, machinery, and more. You can also see the distance, time, and reward for each job.

                -

                To make the most out of your time and money, you should pick the fastest and most profitable jobs that suit your skills and preferences. You should consider factors such as the weight, size, and fragility of the cargo, the road conditions and traffic, the weather and time of day, and the reputation and trustworthiness of the employer. You should also look for bonuses and incentives that might increase your earnings, such as urgent deliveries, long distances, special cargoes, or high-value cargoes.

                -

                Customize your truck and upgrade your skills

                -

                One of the fun aspects of playing American Truck Simulator is that you can customize your truck and upgrade your skills to improve your performance and efficiency. You can buy new trucks or modify your existing ones with different parts and accessories, such as engines, transmissions, tires, lights, horns, paintjobs, and more. You can also hire drivers and buy garages to expand your business and earn more money.

                -

                To customize your truck, you can visit truck dealers or service shops on the map or use your phone or computer in your truck's cabin. You can browse through different options and see how they affect your truck's stats and appearance. You can also sell or trade your old trucks if you want to get rid of them.

                -

                To upgrade your skills, you can visit the character screen on your phone or computer in your truck's cabin. You can see your level, experience points, and skill points. You can use your skill points to unlock different perks that will help you in various aspects of the game, such as fuel efficiency, cargo handling, long distance driving, hazardous materials transport, fragile cargo transport, just-in-time delivery, eco driving, high value cargo transport, heavy cargo transport, special cargo transport, and more.

                -

                Explore the map and discover landmarks

                -

                One of the best features of American Truck Simulator is that it has a huge and detailed map that covers many states of America. You can drive across different terrains and climates, such as deserts, mountains, forests, plains, coasts, and cities. You can also see many landmarks and attractions that are based on real-life locations, such as the Golden Gate Bridge, the Grand Canyon, Mount Rushmore, the Statue of Liberty, the Hollywood Sign, and more.

                -

                To explore the map and discover landmarks, you can use your GPS or map on your screen or use the buttons on your steering wheel if you have one. You can zoom in and out and see different icons that represent different places of interest. You can also use the photo mode to take pictures of your truck and the scenery. You can also use the free roam mode to drive around without any restrictions or objectives.

                -

                Conclusion

                -

                American Truck Simulator is a great game for anyone who loves simulation games or truck driving games. It offers a realistic and immersive experience that will make you feel like you are actually driving a truck across America. You can download and install American Truck Simulator APK+OBB on your Android device by following the steps we have shown you in this article. You can also use our tips and tricks to play the game better and have more fun.

                -

                We hope you enjoyed this article and found it helpful. If you have any questions or feedback about American Truck Simulator APK+OBB download or anything else related to this topic, please feel free to leave a comment below or contact us through our website. We would love to hear from you and help you with anything you need.

                -

                FAQs

                -

                Here are some of the frequently asked questions that people have about American Truck Simulator APK+OBB download and the game itself:

                -

                Q: Is American Truck Simulator APK+OBB download safe and legal?

                -

                A: Yes, American Truck Simulator APK+OBB download is safe and legal, as long as you download the files from a trusted source, such as [5](https://apkdone.com/american-truck-simulator/), and do not use any cheats or hacks that might violate the game's terms of service. However, we recommend that you also buy the original game from the official website or Steam if you can afford it, to support the developers and enjoy the full features and updates of the game.

                -

                Q: How much space does American Truck Simulator APK+OBB download take on my device?

                -

                A: American Truck Simulator APK+OBB download takes about 3.5 GB of space on your device, which includes the APK file (about 50 MB) and the OBB file (about 3.4 GB). You should make sure that you have enough free space on your device before downloading and installing the game.

                -

                Q: Can I play American Truck Simulator online with other players?

                -

                A: Yes, you can play American Truck Simulator online with other players, by using the World of Trucks platform. World of Trucks is an online service that connects players from all over the world and allows them to join events and competitions, share photos and videos, chat with each other, and more. You can access World of Trucks by creating a free account on their website or by using your existing Steam account. You can also link your World of Trucks account to your game profile in American Truck Simulator, by going to the online section on your phone or computer in your truck's cabin.

                -

                Q: Can I use mods in American Truck Simulator APK+OBB download?

                -

                A: Yes, you can use mods in American Truck Simulator APK+OBB download, by using the Steam Workshop support. Steam Workshop is a feature that allows users to create and share mods that add more content and features to the game, such as new trucks, trailers, cargoes, maps, skins, sounds, and more. You can access Steam Workshop by going to the mod manager section on your phone or computer in your truck's cabin. You can also browse and download mods from other websites, such as [8](https://atsmods.lt/) or [9](https://www.modland.net/american-truck-simulator-mods/), but be careful not to install any mods that might harm your device or data.

                -

                Q: How can I update American Truck Simulator APK+OBB download to the latest version?

                -

                A: To update American Truck Simulator APK+OBB download to the latest version, you need to download and install the new APK and OBB files from a trusted source, such as [5](https://apkdone.com/american-truck-simulator/), whenever they are available. You should also back up your game data before updating, in case something goes wrong or you want to revert to the previous version. You can back up your game data by copying the com.scssoft.americantrucksimulator folder from your device's internal storage or SD card to another location.
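
                If you prefer to script that backup step (for example from a terminal app on the phone, or from a computer with the device storage mounted), a minimal sketch of the folder copy is shown below. The folder name comes from this article; the parent paths are assumptions you will need to adjust for your own device.

```python
import shutil
from pathlib import Path

# Assumed locations -- the folder name is from this article; the parent
# directories are placeholders that depend on how your storage is mounted.
GAME_DATA = Path("/storage/emulated/0/Android/obb/com.scssoft.americantrucksimulator")
BACKUP_DIR = Path("/storage/emulated/0/Backups/com.scssoft.americantrucksimulator")

def backup_game_data(src: Path, dst: Path) -> None:
    """Copy the game's data folder to a backup location before updating."""
    if not src.exists():
        raise FileNotFoundError(f"Game data folder not found: {src}")
    shutil.copytree(src, dst, dirs_exist_ok=True)
    print(f"Backed up {src} -> {dst}")

backup_game_data(GAME_DATA, BACKUP_DIR)
```

                Copying the folder back to its original location restores the previous version's data if an update causes problems.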

                -
                -
                \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Black Hole Hero Vice Vegas APK - A Game that Combines Adventure Crime and Sci-Fi.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Black Hole Hero Vice Vegas APK - A Game that Combines Adventure Crime and Sci-Fi.md deleted file mode 100644 index b6fd0e048201abea18d053a3df85122715936882..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Black Hole Hero Vice Vegas APK - A Game that Combines Adventure Crime and Sci-Fi.md +++ /dev/null @@ -1,87 +0,0 @@ -
                -

                Black Hole Hero APK: A Free Open World Game for Android

                -

                If you are looking for a fun and adventurous game that lets you do anything you want in a huge city, then you should try Black Hole Hero APK. This game is a city simulator in third-person view, where you can drive a car, a motorbike, an airplane, and more. You can also fight against various star mafia gangsters from different countries, using your superpowers and weapons. You can even create black holes and suck everything into them. In this article, we will tell you more about this amazing game and how you can download and install it on your Android device.

                -

                What is Black Hole Hero APK?

                -

                Black Hole Hero APK is an Android game developed by HGames-Artworks, a studio that specializes in creating open world games with realistic graphics and physics. The game is inspired by popular superhero movies and comics, such as Spider-Man, Iron Man, and Avengers. You play as a cyborg who has the ability to create black holes and use them to destroy your enemies and the environment. You can also use other superpowers, such as flying, shooting lasers, throwing cars, and more. The game has a large map that covers the whole city of Vice Vegas, where you can explore different locations, such as skyscrapers, streets, parks, airports, and more. You can also interact with various characters and objects in the game, such as pedestrians, animals, vehicles, weapons, and more.

                -

                black hole hero apk


                Download Zip ✺✺✺ https://gohhs.com/2uPpi1



                -

                Features of Black Hole Hero APK

                -

                Black Hole Hero APK has many features that make it an exciting and enjoyable game to play. Here are some of them:

                -

                Explore the big city

                -

                The game has a huge open world map that covers the entire city of Vice Vegas. You can roam around freely and discover different places and secrets. You can also go off-roading in the mountains or fly over the city with an airplane. The game has realistic graphics and physics that make the city look alive and dynamic. You can see the day and night cycle, weather effects, traffic lights, pedestrians, animals, and more.

                -

                Drive various vehicles

                -

                The game has a wide range of vehicles that you can drive or steal in the game. You can choose from cars, motorbikes, trucks, buses, helicopters, tanks, jets, and more. Each vehicle has its own characteristics and performance. You can also customize your vehicles with different colors, stickers, wheels, and more. You can also perform stunts on a BMX bike or find an ultimate F-90 tank or devastating battle helicopter.

                -

                Fight against star mafia gangsters

                -

                The game has a lot of action and combat elements that make it thrilling and challenging. You will face various star mafia gangsters from America, Russia, China, Mexico, Japan, and more. They will try to stop you from completing your missions or just attack you randomly. You will have to use your superpowers and weapons to fight them back. You can also use your black holes to suck them in or throw them away.

                -

                Use superpowers and weapons

                -

                The game gives you the opportunity to use your superpowers and weapons to have fun and cause chaos in the city. You can create black holes that can suck everything into them, including buildings, vehicles, people, and animals. You can also fly over the city with your jetpack or shoot lasers from your eyes. You can also use other powers, such as super strength, speed, and agility. The game also has a variety of weapons that you can use or find in the game, such as guns, grenades, rockets, swords, hammers, and more.

              -

              Pros and cons of Black Hole Hero APK

              -

              Black Hole Hero APK is a game that has many advantages and disadvantages. Here are some of them:

              -

              Pros

              -
                -
              • The game is free to download and play. You don't need to pay anything to enjoy the game.
              • -
              • The game has high-quality graphics and sound effects that make the game realistic and immersive.
              • -
              • The game has a lot of content and variety that make the game fun and interesting. You can do many things in the game, such as exploring, driving, fighting, using superpowers, etc.
              • -
              • The game has a simple and intuitive control system that makes the game easy to play. You can use the virtual joystick, buttons, or tilt your device to control your character.
              • -
              • The game has a lot of humor and satire that make the game entertaining and amusing. You can see many references and parodies of popular culture, such as movies, comics, games, etc.
              • -
              -

              Cons

              -
                -
              • The game may have some bugs and glitches that may affect the gameplay and performance. You may experience crashes, freezes, lags, or errors in the game.
              • -
              • The game may have some ads and in-app purchases that may annoy or tempt you. You may see some ads pop up on your screen or some items that require real money to buy.
              • -
              • The game may have some violence and gore that may not be suitable for everyone. You may see some blood, explosions, deaths, or injuries in the game.
              • -
              • The game may have some repetitive and boring aspects that may make the game dull and tedious. You may do some tasks or missions that are similar or easy in the game.
              • -
              • The game may have some ethical and moral issues that may make the game controversial or offensive. You may do some actions or behaviors that are immoral or illegal in the game.
              • -
              -

              Conclusion

              -

              Black Hole Hero APK is a free open world game for Android that lets you do anything you want in a huge city. You can drive various vehicles, fight against star mafia gangsters, use superpowers and weapons, and create black holes. The game has many strengths, such as realistic graphics and physics, sound effects, humor, and plenty of content and variety. It also has some drawbacks, such as bugs, glitches, ads, in-app purchases, violence and gore, occasional repetition, and some ethical concerns. The game is a great choice for anyone who loves superhero games or open world games. You can download and install the game from the official website or use this link: [https://black-hole-hero.en.uptodown.com/android]. We hope you enjoyed this article and found it helpful. Thank you for reading!

              -

              FAQs

              -

              Here are some frequently asked questions about Black Hole Hero APK:

              -

              black hole hero vice vegas apk download
              -black hole hero mod apk unlimited money
              -black hole hero game free download
              -black hole hero apk latest version
              -black hole hero vice vegas mod apk
              -black hole hero apk pure
              -black hole hero game online
              -black hole hero vice vegas cheats
              -black hole hero game play store
              -black hole hero vice vegas hack
              -black hole hero apk for pc
              -black hole hero mod apk android 1
              -black hole hero game review
              -black hole hero vice vegas gameplay
              -black hole hero apk obb
              -black hole hero mod apk rexdl
              -black hole hero game trailer
              -black hole hero vice vegas tips
              -black hole hero apk uptodown
              -black hole hero mod apk revdl
              -black hole hero game wiki
              -black hole hero vice vegas missions
              -black hole hero apk no ads
              -black hole hero mod apk happymod
              -black hole hero game download for android
              -black hole hero vice vegas offline
              -black hole hero apk old version
              -black hole hero mod apk unlimited gems
              -black hole hero game size
              -black hole hero vice vegas update
              -black hole hero apk mirror
              -black hole hero mod apk all unlocked
              -black hole hero game system requirements
              -black hole hero vice vegas weapons
              -black hole hero apk mob.org
              -black hole hero mod apk an1.com
              -black hole hero game rating
              -black hole hero vice vegas cars
              -black hole hero apk data download
              -black hole hero mod apk 1.2.0

              -
                -
              1. Q: Is Black Hole Hero APK safe to download and install?
              2. -A: Yes, Black Hole Hero APK is safe to download and install. The game does not contain any viruses or malware that can harm your device or data. However, you should always download the game from a trusted source and scan it with an antivirus before installing it.
              3. Q: Is Black Hole Hero APK compatible with my device?
              4. -A: Black Hole Hero APK is compatible with most Android devices that have Android 5.0 or higher. However, some devices may not support the game due to hardware limitations or software issues. You can check the compatibility of your device on the official website or on Google Play Store.
              5. Q: How can I update Black Hole Hero APK?
              6. -A: You can update Black Hole Hero APK by downloading and installing the latest version of the game from the official website or Google Play Store. You can also enable the automatic update option in your device settings to get the updates automatically.
              7. Q: How can I contact the developer of Black Hole Hero APK?
              8. -A: You can contact the developer of Black Hole Hero APK by sending an email to hgames.artworks@gmail.com or by visiting their Facebook page at [https://www.facebook.com/hgamesartworks]. You can also leave a comment or review on Google Play Store or on their website.
              9. Q: How can I support the developer of Black Hole Hero APK?
              10. -A: You can support the developer of Black Hole Hero APK by rating and reviewing the game on Google Play Store or on their website. You can also share the game with your friends and family on social media or other platforms. You can also buy some items or remove ads in the game with real money if you want to.

              -
              -
              \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download HappyMod APK 2.9.7 - The best modded apk store with fast and easy downloads.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download HappyMod APK 2.9.7 - The best modded apk store with fast and easy downloads.md deleted file mode 100644 index 333fbd1c9a9f664da3eea107f871577f17761380..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download HappyMod APK 2.9.7 - The best modded apk store with fast and easy downloads.md +++ /dev/null @@ -1,144 +0,0 @@ - -

              What is HappyMod and How to Download It?

              -

              If you are an Android user who loves to play games and use apps on your device, you might have heard of HappyMod. HappyMod is a popular app store that allows you to download modded versions of apps and games for free. But what exactly is HappyMod, how does it work, and how can you download it? In this article, we will answer these questions and more.

              -

              apk happymod indir


              Download Zip ::: https://gohhs.com/2uPulj



              -

              What is HappyMod?

              -

              HappyMod is an app store that provides modded or modified versions of apps and games for Android devices. Modded apps and games are those that have been altered or hacked by developers or users to provide extra features, such as unlimited money, coins, gems, lives, unlocked levels, premium subscriptions, etc. With HappyMod, you can access thousands of modded apps and games in various categories, such as action, adventure, arcade, casual, simulation, strategy, etc.

              -

              Features of HappyMod

              -

              HappyMod offers many features that make it a popular choice among Android users who want to enjoy modded apps and games. Some of these features are:

              -
                -
              • Fast and safe downloads: HappyMod provides fast download speeds for modded apps and games. All the mods are scanned for viruses and malware before they are uploaded to the app store. You can also pause and resume the download progress at any time.
              • -
              • Multiple versions: HappyMod provides different versions of mods for one app or game. You can choose the version that suits your preferences and device compatibility. You can also see the user ratings and comments for each mod to help you decide which one to download.
              • -
              • User-friendly interface: HappyMod has a simple and elegant design that makes it easy to navigate and use. You can search for your favorite apps and games by name, category, or popularity. You can also see the latest updates, hot mods, and recommended mods on the home page.
              • -
              • Community support: HappyMod has a large community of users and developers who upload, share, request, and test mods. You can join the community and interact with other mod lovers. You can also request mods for apps and games that are not available in the app store.
              • -
              -

              Risks of Using HappyMod

              -

              While HappyMod has many advantages, it also has some risks that you should be aware of before using it. Some of these risks are:

              -
                -
              • Potential legal issues: Modded apps and games may violate the intellectual property rights of the original developers or publishers. Downloading and using them may be considered as piracy or illegal activity in some countries or regions. You may face legal consequences if you are caught using modded apps and games without permission.
              • -
              • Potential compatibility issues: Modded apps and games may not work properly on your device due to different hardware specifications, software versions, or system settings. They may cause crashes, errors, glitches, or performance issues on your device. They may also conflict with other apps or games that you have installed on your device.
              • -
              • Potential security issues: Modded apps and games may contain malicious code or hidden functions that may harm your device or data. They may access your personal information, such as contacts, photos, messages, location, etc., without your consent. They may also expose your device to viruses, malware, spyware, adware, etc., that may compromise your privacy and security.
              • -
              -

              Alternatives to HappyMod

              -

              If you are not comfortable with using HappyMod or want to try other options for downloading modded apps and games, there are some alternatives that you can use. Some of these alternatives are:

              -
                -

                -

                apk happymod indir android
                -apk happymod indir ücretsiz
                -apk happymod indir son sürüm
                -apk happymod indir 2023
                -apk happymod indir nasıl yapılır
                -apk happymod indir pc
                -apk happymod indir ios
                -apk happymod indir hileli
                -apk happymod indir oyunlar
                -apk happymod indir minecraft
                -apk happymod indir roblox
                -apk happymod indir pubg
                -apk happymod indir free fire
                -apk happymod indir among us
                -apk happymod indir brawl stars
                -apk happymod indir clash of clans
                -apk happymod indir subway surfers
                -apk happymod indir gta 5
                -apk happymod indir fortnite
                -apk happymod indir dream league soccer
                -apk happymod indir pes 2023
                -apk happymod indir fifa 2023
                -apk happymod indir candy crush saga
                -apk happymod indir tiktok
                -apk happymod indir instagram
                -apk happymod indir whatsapp
                -apk happymod indir facebook
                -apk happymod indir youtube
                -apk happymod indir spotify
                -apk happymod indir netflix
                -apk happymod indir zoom
                -apk happymod indir google play store
                -apk happymod indir keyword tool app[^1^]
                -apk happymod indir modlu uygulamalar
                -apk happymod indir güvenli mi
                -apk happymod indir nasıl kullanılır
                -apk happymod indir ne işe yarar
                -apk happymod indir yorumlar
                -apk happymod indir inceleme
                -apk happymod indir video
                -apk happymod indir linki
                -apk happymod indir türkçe
                -apk happymod indir en iyi modlar
                -apk happymod indir yeni güncelleme
                -apk happymod indir sorun çözme
                -apk happymod indir destek
                -apk happymod indir iletişim
                -apk happymod indir hakkında bilgi
                -apk happymod indir avantajları ve dezavantajları

                -
                  -
                • F-Droid: F-Droid is an app store that provides free and open source software (FOSS) applications for Android devices. F-Droid respects your privacy and freedom and does not track or collect any data from your device. You can find apps in various categories, such as games, graphics, internet, money, multimedia, etc. You can also browse apps by tags, licenses, or collections. F-Droid is community-maintained and allows you to submit or request apps that are not available in the app store .
                • -
                • ACMarket: ACMarket is another app store that provides modded apps and games for Android devices. ACMarket has a large collection of mods in different genres, such as action, arcade, casual, puzzle, racing, role-playing, etc. You can also find apps that are not available on the Google Play Store, such as Spotify Premium, Netflix Mod, etc. ACMarket has a user-friendly interface and fast download speeds. You can also customize the app store according to your preferences .
                • -
                • Aptoide: Aptoide is an app store that allows you to create your own personalized app store with the apps that you want. You can also browse and download apps from other users' app stores or from the official Aptoide app store. Aptoide has millions of apps and games in various categories, such as media, entertainment, education, lifestyle, productivity, etc. You can also find modded apps and games on Aptoide. Aptoide has a social aspect that lets you follow other users and see their recommendations .
                • -
                -

                How to Download HappyMod?

                -

                If you want to download HappyMod on your Android device, you have two options: download it from the official website or download it from an alternative source. Here are the steps for both options:

                -

                Download HappyMod from the Official Website

                -

                The official website of HappyMod is [happymod.com](https://www.happymod.com/). You can visit this website from your device's browser and follow these steps:

                -
                  -
                1. Tap on the Download button on the home page.
                2. -
                3. A pop-up window will appear asking you to confirm the download. Tap on OK.
                4. -
                5. The HappyMod APK file will start downloading on your device.
                6. -
                7. Once the download is complete, tap on the APK file to install it.
                8. -
                9. You may need to enable Unknown Sources in your device's settings to allow the installation of third-party apps.
                10. -
                11. Follow the instructions on the screen to complete the installation.
                12. -
                13. You can now open HappyMod and enjoy modded apps and games.
                14. -
                -

                Download HappyMod from an Alternative Source

                -

                If you cannot access the official website of HappyMod or want to try a different source, you can use one of the many websites that offer HappyMod APK files. Some examples are [apkpure.com](https://apkpure.com/happymod/com.happymod.apk), [apkdone.com](https://apkdone.com/happymod/), [apkfab.com](https://apkfab.com/happymod/com.happymod.apk), etc. You can visit any of these websites from your device's browser and follow these steps:

                -
                  -
                1. Search for HappyMod in the website's search bar or browse through the categories.
                2. -
                3. Select the version of HappyMod that you want to download and tap on the Download button.
                4. -
                5. A pop-up window will appear asking you to confirm the download. Tap on OK.
                6. -
                7. The HappyMod APK file will start downloading on your device.
                8. -
                9. Once the download is complete, tap on the APK file to install it.
                10. -
                11. You may need to enable Unknown Sources in your device's settings to allow the installation of third-party apps.
                12. -
                13. Follow the instructions on the screen to complete the installation.
                14. -
                15. You can now open HappyMod and enjoy modded apps and games.
                16. -
                -

                Install and Use HappyMod on Your Device

                -

                After you have downloaded and installed HappyMod on your device, you can start using it to download modded apps and games. Here are some tips on how to use HappyMod:

                -
                  -
                • To find modded apps and games, you can use the search bar at the top of the app or browse through the categories at the bottom of the app.
                • -

                  -
                    -
                  • To download a mod, tap on the Download button next to the mod name. You can see the details of the mod, such as the size, version, rating, comments, etc., before downloading it.
                  • -
                  • To install a mod, tap on the Install button after the download is complete. You may need to enable Unknown Sources in your device's settings to allow the installation of third-party apps.
                  • -
                  • To use a mod, tap on the Open button after the installation is complete. You can also find the mod in your device's app drawer or home screen.
                  • -
                  • To update a mod, tap on the Update button next to the mod name. You can also check for updates manually by tapping on the Menu button at the top left corner of the app and selecting Check for Updates.
                  • -
                  • To delete a mod, tap and hold on the mod name and select Uninstall. You can also delete mods from your device's settings or file manager.
                  • -
                  -

                  Conclusion

                  -

                  HappyMod is an app store that allows you to download modded apps and games for free. It has many features, such as fast and safe downloads, multiple versions, user-friendly interface, and community support. However, it also has some risks, such as potential legal issues, compatibility issues, and security issues. Therefore, you should use HappyMod at your own risk and discretion. You can download HappyMod from its official website or from an alternative source. You can also use other app stores that provide modded apps and games, such as F-Droid, ACMarket, or Aptoide. We hope this article has helped you understand what HappyMod is and how to download it.

                  -

                  Summary of the Main Points

                  -

                  Here is a summary of the main points of this article:

                  -
                    -
                  • HappyMod is an app store that provides modded apps and games for Android devices.
                  • -
                  • Modded apps and games are those that have been altered or hacked to provide extra features.
                  • -
                  • HappyMod has many features, such as fast and safe downloads, multiple versions, user-friendly interface, and community support.
                  • -
                  • HappyMod also has some risks, such as potential legal issues, compatibility issues, and security issues.
                  • -
                  • You can download HappyMod from its official website or from an alternative source.
                  • -
                  • You can also use other app stores that provide modded apps and games, such as F-Droid, ACMarket, or Aptoide.
                  • -
                  -

                  FAQs

                  -

                  Here are some frequently asked questions about HappyMod:

                  -
                    -
                  1. Is HappyMod safe to use?
                  2. -

                    HappyMod claims to scan all the mods for viruses and malware before uploading them to the app store. However, there is no guarantee that all the mods are safe and secure. Some mods may contain malicious code or hidden functions that may harm your device or data. Therefore, you should use HappyMod at your own risk and discretion. You should also install a reliable antivirus app on your device and scan the mods before installing them.

                    -
                  3. Is HappyMod legal to use?
                  4. -

                    HappyMod may violate the intellectual property rights of the original developers or publishers of the apps and games that it provides. Downloading and using modded apps and games may be considered as piracy or illegal activity in some countries or regions. You may face legal consequences if you are caught using modded apps and games without permission. Therefore, you should use HappyMod only for educational or personal purposes and respect the rights of the original developers or publishers.

                    -
                  5. Does HappyMod require root access?
                  6. -

                    No, HappyMod does not require root access to work on your device. You can download and install HappyMod without rooting your device. However, some mods may require root access to function properly. You should check the requirements of each mod before downloading and installing it.

                    -
                  7. How can I request mods for apps and games that are not available in HappyMod?
                  8. -

                    You can request mods for apps and games that are not available in HappyMod by joining the community and interacting with other users and developers. You can find the community section in the Menu button at the top left corner of the app. You can also visit [happymod.com/forum](https://www.happymod.com/forum/) to post your requests or suggestions.

                    -
                  9. How can I contact HappyMod if I have any questions or problems?
                  10. -


                    -

                    You can contact HappyMod by sending an email to [support@happymod.com](mailto:support@happymod.com) or by visiting [happymod.com/contact](https://www.happymod.com/contact/) to fill out a contact form. You can also follow HappyMod on social media platforms, such as Facebook, Twitter, Instagram, YouTube, etc., to get the latest news and updates.

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Melon Playground 2.0 APK for Android A Fun and Creative Simulation Game.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Melon Playground 2.0 APK for Android A Fun and Creative Simulation Game.md deleted file mode 100644 index 992241423352cf5805e96e6647b86cb7a44403db..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Melon Playground 2.0 APK for Android A Fun and Creative Simulation Game.md +++ /dev/null @@ -1,122 +0,0 @@ - -

                    Melon Playground 2.0 APK: A Fun and Creative Sandbox Game for Android

                    -

                    Do you love playing sandbox games where you can unleash your imagination and create your own scenarios? If yes, then you might want to check out Melon Playground 2.0 APK, a fun and creative sandbox game for Android devices. In this article, we will tell you what Melon Playground 2.0 APK is, what are its features, and how to download and install it on your device.

                    -

                    What is Melon Playground 2.0 APK?

                    -

                    A simple sandbox game where you can create your own scenarios

                    -

                    Melon Playground 2.0 APK is a simple sandbox game where you can create your own scenarios using a wide variety of items. You can place items on the scene, move them around, resize them, rotate them, and interact with them in different ways. You can also change the camera angle and zoom in and out to get a better view of your creations.

                    -

                    melon playground 2.0 apk


                    Download File ✶✶✶ https://gohhs.com/2uPvng



                    -

                    A sequel to the popular Melon Sandbox game by playducky.com

                    -

                    Melon Playground 2.0 APK is a sequel to the popular Melon Sandbox game by playducky.com, a developer that specializes in simulation games. The first Melon Sandbox game was released in 2021 and has over 10 million downloads on Google Play Store. It received positive reviews from users who praised its simplicity, variety, and physics-based gameplay.

                    -

                    A free and easy-to-download game for Android devices

                    -

                    Melon Playground 2.0 APK is a free game that you can download and play on your Android devices. You don't need to pay anything to enjoy this game, although you can purchase some optional in-game items with real money if you want to. You also don't need to worry about compatibility issues, as this game works on most Android devices running Android 6.0 or higher.

                    -

                    What are the features of Melon Playground 2.0 APK?

                    -

                    A wide variety of items to use and interact with

                    -

                    Melee weapons, guns, barrels, explosives, vehicles, animals, and more

                    -


                    One of the best features of Melon Playground 2.0 APK is the wide variety of items that you can use and interact with. You can choose from different categories of items, such as melee weapons, guns, barrels, explosives, vehicles, animals, and more. You can use these items to create different scenarios, such as fights, races, explosions, or just have fun with them. You can also combine different items to create new effects and outcomes.

                    -

                    Physics-based gameplay and realistic ragdoll effects

                    -

                    Another feature of Melon Playground 2.0 APK is the physics-based gameplay and realistic ragdoll effects. The game uses a realistic physics engine that simulates the behavior of the items and the environment. You can see how the items react to gravity, friction, collisions, and other forces. You can also see how the characters and animals behave when they are hit by the items or fall from a height. They will flop around and bounce off the ground in a hilarious way.

                    -

                    A user-friendly interface and intuitive controls

                    -

                    Drag and drop items from the menu to the scene

                    -

                    Melon Playground 2.0 APK has a user-friendly interface and intuitive controls that make it easy for anyone to play. You can access the menu by tapping on the icon on the top left corner of the screen. From there, you can select the category and the item that you want to use. Then, you can drag and drop the item from the menu to the scene. You can place as many items as you want on the scene.

                    -

                    melon playground 2 sandbox apk
                    -melon playground 2 mod apk
                    -melon playground 2 download apk
                    -melon playground 2 apk android
                    -melon playground 2 apk free
                    -melon playground 2 apk latest version
                    -melon playground 2 apk update
                    -melon playground 2 apk offline
                    -melon playground 2 apk online
                    -melon playground 2 apk hack
                    -melon playground 2 apk unlimited money
                    -melon playground 2 apk no ads
                    -melon playground 2 apk full version
                    -melon playground 2 apk premium
                    -melon playground 2 apk pro
                    -melon playground 2 apk cracked
                    -melon playground 2 apk unlocked
                    -melon playground 2 apk for pc
                    -melon playground 2 apk for ios
                    -melon playground 2 apk for windows
                    -melon playground 2 ragdoll physics apk
                    -melon playground 2 simulation game apk
                    -melon playground 2 sandbox game apk
                    -melon playground 2 fun game apk
                    -melon playground 2 creative game apk
                    -melon playground 2 by playducky.com apk
                    -melon playground 2 by yokieee apk
                    -melonplayground 2 by alex touron apk
                    -melmod for melon playground 2 apk
                    -guide for melon playground 2 apk
                    -tips for melon playground 2 apk
                    -tricks for melon playground 2 apk
                    -cheats for melon playground 2 apk
                    -codes for melon playground 2 apk
                    -skins for melon playground 2 apk
                    -maps for melon playground 2 apk
                    -weapons for melon playground 2 apk
                    -vehicles for melon playground 2 apk
                    -characters for melon playground 2 apk
                    -items for melon playground 2 apk
                    -how to install melon playground 2 apk
                    -how to play melon playground 2 apk
                    -how to update melon playground 2 apk
                    -how to uninstall melon playground 2 apk
                    -how to hack melon playground 2 apk
                    -how to get money in melon playground 2 apk
                    -how to remove ads in melon playground 2 apk
                    -how to unlock everything in melon playground 2 apk
                    -how to create your own world in melon playground 2 apk

                    -

                    Tap and hold items to move, rotate, or resize them

                    -

                    You can also tap and hold on any item on the scene to move, rotate, or resize it. You can drag the item to any position that you want. You can also use two fingers to rotate or resize the item. You can make the item bigger or smaller, or change its orientation. You can also delete any item by tapping on the trash icon that appears when you select it.

                    -

                    Swipe the screen to change the camera angle or zoom in and out

                    -

                    You can also swipe the screen to change the camera angle or zoom in and out. You can swipe left or right to rotate the camera around the scene. You can swipe up or down to tilt the camera up or down. You can also pinch in or out to zoom in or out of the scene. You can get a closer look at your creations or get a wider view of the whole scene.

                    -

                    A sandbox mode and a challenge mode

                    -

                    Sandbox mode: Create your own scenarios and have fun with no limits

                    -

                    Melon Playground 2.0 APK has two modes that you can choose from: sandbox mode and challenge mode. In sandbox mode, you can create your own scenarios and have fun with no limits. You can use any item that you want and place it anywhere on the scene. You can also interact with the items and see what happens. You can create funny, crazy, or epic scenarios with no rules or restrictions.

                    -

                    Challenge mode: Complete various tasks and earn coins to unlock more items

                    -

                    In challenge mode, you can complete various tasks and earn coins to unlock more items. The tasks are simple but fun, such as hitting a target with a gun, blowing up a barrel with an explosive, or driving a car through an obstacle course. You will get coins for completing each task successfully. You can use these coins to buy more items from the shop. The more items you have, the more scenarios you can create.

                    -

                    How to download and install Melon Playground 2.0 APK?

                    -

                    Download the APK file from a trusted source

                    -

                    Use the link provided in this article or search for "melon playground 2 apk" on APKCombo.com or other similar websites

                    -


                    If you want to download and install Melon Playground 2.0 APK on your device, you need to get the APK file from a trusted source. You can use the link provided in this article or search for "melon playground 2 apk" on APKCombo.com or other similar websites. APKCombo.com is a reliable website that offers safe and fast downloads of APK files for various Android apps and games. You can also find the latest versions and updates of your favorite apps and games on this website.

                    -

                    Make sure you have enough storage space on your device and a stable internet connection

                    -

                    Before you download the APK file, make sure you have enough storage space on your device and a stable internet connection. The APK file size of Melon Playground 2.0 APK is about 100 MB, so you need to have at least that much free space on your device. You also need to have a good internet connection to download the file without any interruptions or errors.

                    -

                    Enable unknown sources on your device settings

                    -

                    Go to Settings > Security > Unknown Sources and toggle it on

                    -

                    After you download the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Don't worry, this is just a precautionary measure and you can ignore it if you trust the source of the APK file.

                    -

                    This will allow you to install apps from sources other than the Google Play Store

                    -

                    Enabling unknown sources will allow you to install apps from sources other than the Google Play Store. This is useful if you want to install apps that are not available on the Google Play Store, or if you want to install older or modified versions of apps that are not supported by the Google Play Store. However, you should be careful when installing apps from unknown sources, as some of them may contain malware or viruses that can harm your device or steal your data. Always download APK files from trusted and reputable websites, and scan them with an antivirus app before installing them.
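
                    Besides scanning with an antivirus app, you can also check that a downloaded APK was not corrupted or tampered with in transit by comparing its SHA-256 hash with a checksum published by the download site, when one is available. The file name and expected hash below are placeholders, not real values; this only verifies integrity against whatever checksum you trust, it does not by itself prove the APK is safe.

```python
import hashlib
from pathlib import Path

# Placeholder values -- replace with your downloaded file and the checksum
# published by the site you downloaded from (if it provides one).
APK_PATH = Path("melon-playground-2.0.apk")
EXPECTED_SHA256 = "0" * 64

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MB chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

checksum = sha256_of(APK_PATH)
print("SHA-256:", checksum)
print("Matches expected checksum:", checksum == EXPECTED_SHA256)
```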

                    -

                    Install the APK file on your device

                    -

                    Locate the downloaded APK file on your file manager or downloads folder and tap on it

                    -

                    Once you have enabled unknown sources, you can install the APK file on your device. To do this, locate the downloaded APK file on your file manager or downloads folder and tap on it. You may see a pop-up window that asks you to confirm the installation. Tap on Install and wait for the installation to finish.

                    -

                    Follow the instructions on the screen and wait for the installation to finish

                    -

                    After you tap on Install, follow the instructions on the screen and wait for the installation to finish. It may take a few seconds or minutes depending on your device and internet speed. You may see a progress bar that shows how much of the installation is done. When the installation is complete, you will see a message that says App installed. Tap on Open to launch the app or Done to exit the installer.
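
                    If you downloaded the APK to a computer rather than directly to the phone, one alternative to the on-device steps above is to install it over USB with adb (Android Debug Bridge), assuming USB debugging is enabled on the phone and adb is installed on the computer. This is not the method described in this article, just a sketch of a possible alternative; the file name is a placeholder.

```python
import subprocess
from pathlib import Path

# Placeholder file name -- point this at the APK you actually downloaded.
APK_PATH = Path("melon-playground-2.0.apk")

def adb_install(apk: Path) -> None:
    """Install an APK on a USB-connected device via `adb install -r`
    (-r reinstalls the app while keeping its existing data)."""
    if not apk.is_file():
        raise FileNotFoundError(apk)
    result = subprocess.run(
        ["adb", "install", "-r", str(apk)],
        capture_output=True,
        text=True,
    )
    print(result.stdout or result.stderr)

adb_install(APK_PATH)
```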

                    -

                    Enjoy playing Melon Playground 2.0 APK on your device

                    -

                    Congratulations! You have successfully downloaded and installed Melon Playground 2.0 APK on your device. You can now enjoy playing this fun and creative sandbox game on your Android device. You can create your own scenarios using various items, interact with them in different ways, complete challenges, earn coins, unlock more items, and have fun with no limits.

                    -

                    Conclusion

                    -

                    Melon Playground 2.0 APK is a fun and creative sandbox game for Android devices that lets you create your own scenarios using a wide variety of items. You can use melee weapons, guns, barrels, explosives, vehicles, animals, and more to create funny, crazy, or epic scenarios with no rules or restrictions. You can also enjoy physics-based gameplay and realistic ragdoll effects that make the game more entertaining and hilarious. You can also choose between sandbox mode and challenge mode depending on your mood and preference.

                    -

                    If you want to download and install Melon Playground 2.0 APK on your device, you can follow the steps mentioned in this article. You need to get the APK file from a trusted source, enable unknown sources on your device settings, and install the APK file on your device. It's easy and fast, and you don't need to pay anything to play this game.

                    -


                    Melon Playground 2.0 APK is a great game for anyone who loves sandbox games where they can unleash their imagination and creativity. It's also a great game for anyone who wants to have some fun and laughter with physics-based gameplay and realistic ragdoll effects. If you are looking for a simple but entertaining game that you can play on your Android device, you should definitely give Melon Playground 2.0 APK a try.

                    -

                    FAQs

                    -

                    Here are some frequently asked questions about Melon Playground 2.0 APK:

                    -

                    Is Melon Playground 2.0 APK safe to download and install?

                    -

                    Yes, Melon Playground 2.0 APK is safe to download and install, as long as you get the APK file from a trusted source. You can use the link provided in this article or search for "melon playground 2 apk" on APKCombo.com or other similar websites. These websites offer safe and fast downloads of APK files for various Android apps and games. However, you should always scan the APK file with an antivirus app before installing it, just to be on the safe side.

                    -

                    Is Melon Playground 2.0 APK available on Google Play Store?

                    -

                    No, Melon Playground 2.0 APK is not available on Google Play Store, as it is a modified version of the original Melon Sandbox game by playducky.com. The original game is available on Google Play Store, but it has fewer items and features than the modified version. If you want to enjoy the full experience of Melon Playground 2.0 APK, you need to download and install the APK file from a trusted source.

                    -

                    What are the differences between Melon Playground 2.0 APK and Melon Sandbox?

                    -

                    Melon Playground 2.0 APK is a modified version of the original Melon Sandbox game by playducky.com. The modified version has more items and features than the original version, such as:

                    -
                      -
                    • More categories of items, such as melee weapons, guns, barrels, explosives, vehicles, animals, and more
                    • -
                    • More realistic physics and ragdoll effects
                    • -
                    • A challenge mode where you can complete various tasks and earn coins to unlock more items
                    • -
                    • A user-friendly interface and intuitive controls
                    • -
                    • A better graphics and sound quality
                    • -
                    -

                    The original version of Melon Sandbox is still a fun and creative sandbox game, but it has fewer items and features than the modified version.

                    -

                    How can I get more coins in Melon Playground 2.0 APK?

                    -

                    You can get more coins in Melon Playground 2.0 APK by completing various tasks in challenge mode. The tasks are simple but fun, such as hitting a target with a gun, blowing up a barrel with an explosive, or driving a car through an obstacle course. You will get coins for completing each task successfully. You can use these coins to buy more items from the shop.

                    -

                    Can I share my creations with other players in Melon Playground 2.0 APK?

                    -

                    Unfortunately, no. Melon Playground 2.0 APK does not have a feature that allows you to share your creations with other players online. However, you can still share your creations with your friends or family by taking screenshots or recording videos of your scenarios and sending them via social media or messaging apps.

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/engine.io-parser/build/cjs/decodePacket.browser.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/engine.io-parser/build/cjs/decodePacket.browser.js deleted file mode 100644 index fb8b7abefd06909e1dc4eef1d0e2680ddcc2bda8..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/engine.io-parser/build/cjs/decodePacket.browser.js +++ /dev/null @@ -1,51 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const commons_js_1 = require("./commons.js"); -const base64_arraybuffer_js_1 = require("./contrib/base64-arraybuffer.js"); -const withNativeArrayBuffer = typeof ArrayBuffer === "function"; -const decodePacket = (encodedPacket, binaryType) => { - if (typeof encodedPacket !== "string") { - return { - type: "message", - data: mapBinary(encodedPacket, binaryType) - }; - } - const type = encodedPacket.charAt(0); - if (type === "b") { - return { - type: "message", - data: decodeBase64Packet(encodedPacket.substring(1), binaryType) - }; - } - const packetType = commons_js_1.PACKET_TYPES_REVERSE[type]; - if (!packetType) { - return commons_js_1.ERROR_PACKET; - } - return encodedPacket.length > 1 - ? { - type: commons_js_1.PACKET_TYPES_REVERSE[type], - data: encodedPacket.substring(1) - } - : { - type: commons_js_1.PACKET_TYPES_REVERSE[type] - }; -}; -const decodeBase64Packet = (data, binaryType) => { - if (withNativeArrayBuffer) { - const decoded = (0, base64_arraybuffer_js_1.decode)(data); - return mapBinary(decoded, binaryType); - } - else { - return { base64: true, data }; // fallback for old browsers - } -}; -const mapBinary = (data, binaryType) => { - switch (binaryType) { - case "blob": - return data instanceof ArrayBuffer ? 
new Blob([data]) : data; - case "arraybuffer": - default: - return data; // assuming the data is already an ArrayBuffer - } -}; -exports.default = decodePacket; diff --git a/spaces/fffiloni/video_frame_interpolation/style.css b/spaces/fffiloni/video_frame_interpolation/style.css deleted file mode 100644 index 0a9af829f6638befb44d31d13d1a3a2a68733b4f..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/video_frame_interpolation/style.css +++ /dev/null @@ -1,3 +0,0 @@ -.h-60 { - height: 35rem; -} \ No newline at end of file diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/__init__.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/__init__.py deleted file mode 100644 index 5c672525c51e14c4f3e5ccf9ee9467480e4d2c65..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Import the envs module so that envs register themselves -import gym_minigrid.envs -import gym_minigrid.social_ai_envs - -# Import wrappers so it's accessible when installing with pip -import gym_minigrid.wrappers diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/diverseexit.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/diverseexit.py deleted file mode 100644 index 2634484f8a32e7492d57338ac7535195240c80ae..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/diverseexit.py +++ /dev/null @@ -1,584 +0,0 @@ -import numpy as np - -from gym_minigrid.minigrid import * -from gym_minigrid.register import register - -import time -from collections import deque - -class TeacherPeer(NPC): - """ - A dancing NPC that the agent has to copy - """ - - def __init__(self, color, name, env, npc_type=0, knowledgeable=False, easier=False, idl=False): - super().__init__(color) - self.name = name - self.npc_dir = 1 # NPC initially looks downward - self.npc_type = npc_type - self.env = env - self.knowledgeable = knowledgeable - self.npc_actions = [] - self.dancing_step_idx = 0 - self.actions = MiniGridEnv.Actions - self.add_npc_direction = True - self.available_moves = [self.rotate_left, self.rotate_right, self.go_forward, self.toggle_action] - self.was_introduced_to = False - self.easier = easier - assert not self.easier - self.idl = idl - - self.must_eye_contact = True if (self.npc_type // 3) % 2 == 0 else False - self.wanted_intro_utterances = [ - EasyTeachingGamesGrammar.construct_utterance([2, 2]), - EasyTeachingGamesGrammar.construct_utterance([0, 1]) - ] - self.wanted_intro_utterance = self.wanted_intro_utterances[0] if (self.npc_type // 3) // 2 == 0 else self.wanted_intro_utterances[1] - if self.npc_type % 3 == 0: - # must be far, must not poke - self.must_be_poked = False - self.must_be_close = False - - elif self.npc_type % 3 == 1: - # must be close, must not poke - self.must_be_poked = False - self.must_be_close = True - - elif self.npc_type % 3 == 2: - # must be close, must poke - self.must_be_poked = True - self.must_be_close = True - - else: - raise ValueError("npc tyep {} unknown". 
format(self.npc_type)) - - # print("Peer type: ", self.npc_type) - # print("Peer conf: ", self.wanted_intro_utterance, self.must_eye_contact, self.must_be_close, self.must_be_poked) - - - if self.must_be_poked and not self.must_be_close: - raise ValueError("Must be poked means it must be close also.") - - self.poked = False - - self.exited = False - self.joint_attention_achieved = False - - def toggle(self, env, pos): - """Method to trigger/toggle an action this object performs""" - self.poked = True - return True - - def is_introduction_state_ok(self): - if (self.must_be_poked and self.introduction_state["poked"]) or ( - not self.must_be_poked and not self.introduction_state["poked"]): - if (self.must_be_close and self.introduction_state["close"]) or ( - not self.must_be_close and not self.introduction_state["close"]): - if (self.must_eye_contact and self.introduction_state["eye_contact"]) or ( - not self.must_eye_contact and not self.introduction_state["eye_contact"] - ): - if self.introduction_state["intro_utterance"] == self.wanted_intro_utterance: - return True - - return False - - def can_overlap(self): - # If the NPC is hidden, agent can overlap on it - return self.env.hidden_npc - - def encode(self, nb_dims=3): - if self.env.hidden_npc: - if nb_dims == 3: - return (1, 0, 0) - elif nb_dims == 4: - return (1, 0, 0, 0) - else: - return super().encode(nb_dims=nb_dims) - - def step(self, agent_utterance): - super().step() - - if self.knowledgeable: - if self.easier: - raise DeprecationWarning() - # wanted_dir = self.compute_wanted_dir(self.env.agent_pos) - # action = self.compute_turn_action(wanted_dir) - # action() - # if not self.was_introduced_to and (agent_utterance in self.wanted_intro_utterances): - # self.was_introduced_to = True - # self.introduction_state = { - # "poked": self.poked, - # "close": self.is_near_agent(), - # "eye_contact": self.is_eye_contact(), - # "correct_intro_utterance": agent_utterance == self.wanted_intro_utterance - # } - # if self.is_introduction_state_ok(): - # utterance = "Go to the {} door \n".format(self.env.target_color) - # return utterance - - else: - wanted_dir = self.compute_wanted_dir(self.env.agent_pos) - action = self.compute_turn_action(wanted_dir) - action() - if not self.was_introduced_to and (agent_utterance in self.wanted_intro_utterances): - self.was_introduced_to = True - self.introduction_state = { - "poked": self.poked, - "close": self.is_near_agent(), - "eye_contact": self.is_eye_contact(), - "intro_utterance": agent_utterance, - } - if not self.is_introduction_state_ok(): - if self.idl: - if self.env.hidden_npc: - return None - else: - return "I don't like that \n" - else: - return None - - if self.is_eye_contact() and self.was_introduced_to: - - if self.is_introduction_state_ok(): - utterance = "Go to the {} door \n".format(self.env.target_color) - if self.env.hidden_npc: - return None - else: - return utterance - else: - # no utterance - return None - - else: - self.env._rand_elem(self.available_moves)() - return None - - - def render(self, img): - c = COLORS[self.color] - - npc_shapes = [] - # Draw eyes - - if self.npc_type % 3 == 0: - npc_shapes.append(point_in_circle(cx=0.70, cy=0.50, r=0.10)) - npc_shapes.append(point_in_circle(cx=0.30, cy=0.50, r=0.10)) - # Draw mouth - npc_shapes.append(point_in_rect(0.20, 0.80, 0.72, 0.81)) - # Draw top hat - npc_shapes.append(point_in_rect(0.30, 0.70, 0.05, 0.28)) - - elif self.npc_type % 3 == 1: - npc_shapes.append(point_in_circle(cx=0.70, cy=0.50, r=0.10)) - 
npc_shapes.append(point_in_circle(cx=0.30, cy=0.50, r=0.10)) - # Draw mouth - npc_shapes.append(point_in_rect(0.20, 0.80, 0.72, 0.81)) - # Draw bottom hat - npc_shapes.append(point_in_triangle((0.15, 0.28), - (0.85, 0.28), - (0.50, 0.05))) - elif self.npc_type % 3 == 2: - npc_shapes.append(point_in_circle(cx=0.70, cy=0.50, r=0.10)) - npc_shapes.append(point_in_circle(cx=0.30, cy=0.50, r=0.10)) - # Draw mouth - npc_shapes.append(point_in_rect(0.20, 0.80, 0.72, 0.81)) - # Draw bottom hat - npc_shapes.append(point_in_triangle((0.15, 0.28), - (0.85, 0.28), - (0.50, 0.05))) - # Draw top hat - npc_shapes.append(point_in_rect(0.30, 0.70, 0.05, 0.28)) - - - # todo: move this to super function - # todo: super.render should be able to take the npc_shapes and then rotate them - - if hasattr(self, "npc_dir"): - # Pre-rotation to ensure npc_dir = 1 means NPC looks downwards - npc_shapes = [rotate_fn(v, cx=0.5, cy=0.5, theta=-1 * (math.pi / 2)) for v in npc_shapes] - # Rotate npc based on its direction - npc_shapes = [rotate_fn(v, cx=0.5, cy=0.5, theta=(math.pi / 2) * self.npc_dir) for v in npc_shapes] - - # Draw shapes - for v in npc_shapes: - fill_coords(img, v, c) - -# class EasyTeachingGamesSmallGrammar(object): -# -# templates = ["Where is", "Open", "What is"] -# things = ["sesame", "the exit", "the password"] -# -# grammar_action_space = spaces.MultiDiscrete([len(templates), len(things)]) -# -# @classmethod -# def construct_utterance(cls, action): -# if all(np.isnan(action)): -# return "" -# return cls.templates[int(action[0])] + " " + cls.things[int(action[1])] + " " - - -class EasyTeachingGamesGrammar(object): - - templates = ["Where is", "Open", "Which is", "How are"] - things = [ - "sesame", "the exit", "the correct door", "you", "the ceiling", "the window", "the entrance", "the closet", - "the drawer", "the fridge", "the floor", "the lamp", "the trash can", "the chair", "the bed", "the sofa" - ] - - grammar_action_space = spaces.MultiDiscrete([len(templates), len(things)]) - - @classmethod - def construct_utterance(cls, action): - if all(np.isnan(action)): - return "" - return cls.templates[int(action[0])] + " " + cls.things[int(action[1])] + " " - - -class EasyTeachingGamesEnv(MultiModalMiniGridEnv): - """ - Environment in which the agent is instructed to go to a given object - named using an English text string - """ - - def __init__( - self, - size=5, - diminished_reward=True, - step_penalty=False, - knowledgeable=False, - hard_password=False, - max_steps=50, - n_switches=3, - peer_type=None, - no_turn_off=False, - easier=False, - idl=False, - hidden_npc = False, - ): - assert size >= 5 - self.empty_symbol = "NA \n" - self.diminished_reward = diminished_reward - self.step_penalty = step_penalty - self.knowledgeable = knowledgeable - self.hard_password = hard_password - self.n_switches = n_switches - self.peer_type = peer_type - self.no_turn_off = no_turn_off - self.easier = easier - self.idl = idl - self.hidden_npc = hidden_npc - - super().__init__( - grid_size=size, - max_steps=max_steps, - # Set this to True for maximum speed - see_through_walls=True, - actions=MiniGridEnv.Actions, - action_space=spaces.MultiDiscrete([ - len(MiniGridEnv.Actions), - *EasyTeachingGamesGrammar.grammar_action_space.nvec - ]), - add_npc_direction=True - ) - - print({ - "size": size, - "diminished_reward": diminished_reward, - "step_penalty": step_penalty, - }) - - - def _gen_grid(self, width, height): - # Create the grid - self.grid = Grid(width, height, nb_obj_dims=4) - - # Randomly vary the room width and 
height - width = self._rand_int(5, width+1) - height = self._rand_int(5, height+1) - - self.wall_x = width - 1 - self.wall_y = height - 1 - - # Generate the surrounding walls - self.grid.wall_rect(0, 0, width, height) - - self.door_pos = [] - self.door_front_pos = [] # Remembers positions in front of door to avoid setting wizard here - - self.door_pos.append((self._rand_int(2, width-2), 0)) - self.door_front_pos.append((self.door_pos[-1][0], self.door_pos[-1][1]+1)) - - self.door_pos.append((self._rand_int(2, width-2), height-1)) - self.door_front_pos.append((self.door_pos[-1][0], self.door_pos[-1][1] - 1)) - - self.door_pos.append((0, self._rand_int(2, height-2))) - self.door_front_pos.append((self.door_pos[-1][0] + 1, self.door_pos[-1][1])) - - self.door_pos.append((width-1, self._rand_int(2, height-2))) - self.door_front_pos.append((self.door_pos[-1][0] - 1, self.door_pos[-1][1])) - - # Generate the door colors - self.door_colors = [] - while len(self.door_colors) < len(self.door_pos): - color = self._rand_elem(COLOR_NAMES) - if color in self.door_colors: - continue - self.door_colors.append(color) - - # Place the doors in the grid - for idx, pos in enumerate(self.door_pos): - color = self.door_colors[idx] - self.grid.set(*pos, Door(color)) - - # Select a random target door - self.doorIdx = self._rand_int(0, len(self.door_pos)) - self.target_pos = self.door_pos[self.doorIdx] - self.target_color = self.door_colors[self.doorIdx] - - # Set a randomly coloured Dancer NPC - color = self._rand_elem(COLOR_NAMES) - - if self.peer_type is None: - self.current_peer_type = self._rand_int(0, 12) - else: - self.current_peer_type = self.peer_type - - self.peer = TeacherPeer( - color, - ["Bobby", "Robby", "Toby"][self.current_peer_type % 3], - self, - knowledgeable=self.knowledgeable, - npc_type=self.current_peer_type, - easier=self.easier, - idl=self.idl - ) - - # height -2 so its not in front of the buttons in the way - while True: - peer_pos = np.array((self._rand_int(1, width - 1), self._rand_int(1, height - 2))) - - if ( - # not in front of any door - not tuple(peer_pos) in self.door_front_pos - ) and ( - # no_close npc is not in the middle of the 5x5 env - not (not self.peer.must_be_close and (width == 5 and height == 5) and all(peer_pos == (2, 2))) - ): - break - - self.grid.set(*peer_pos, self.peer) - self.peer.init_pos = peer_pos - self.peer.cur_pos = peer_pos - - # Randomize the agent's start position and orientation - self.place_agent(size=(width, height)) - - # Generate the mission string - self.mission = 'exit the room' - - # Dummy beginning string - self.beginning_string = "This is what you hear. 
\n" - self.utterance = self.beginning_string - - # utterance appended at the end of each step - self.utterance_history = "" - - # used for rendering - self.conversation = self.utterance - self.outcome_info = None - - - def step(self, action): - p_action = action[0] - utterance_action = action[1:] - - obs, reward, done, info = super().step(p_action) - - if p_action == self.actions.done: - done = True - - peer_utterance = EasyTeachingGamesGrammar.construct_utterance(utterance_action) - peer_reply = self.peer.step(peer_utterance) - - if peer_reply is not None: - self.utterance += "{}: {} \n".format(self.peer.name, peer_reply) - self.conversation += "{}: {} \n".format(self.peer.name, peer_reply) - - if all(self.agent_pos == self.target_pos): - done = True - reward = self._reward() - - elif tuple(self.agent_pos) in self.door_pos: - done = True - - # discount - if self.step_penalty: - reward = reward - 0.01 - - if self.hidden_npc: - # all npc are hidden - assert np.argwhere(obs['image'][:,:,0] == OBJECT_TO_IDX['npc']).size == 0 - assert "{}:".format(self.peer.name) not in self.utterance - - # fill observation with text - self.append_existing_utterance_to_history() - obs = self.add_utterance_to_observation(obs) - self.reset_utterance() - - if done: - if reward > 0: - self.outcome_info = "SUCCESS: agent got {} reward \n".format(np.round(reward, 1)) - else: - self.outcome_info = "FAILURE: agent got {} reward \n".format(reward) - - return obs, reward, done, info - - def _reward(self): - if self.diminished_reward: - return super()._reward() - else: - return 1.0 - - def render(self, *args, **kwargs): - obs = super().render(*args, **kwargs) - self.window.clear_text() # erase previous text - - self.window.set_caption(self.conversation, self.peer.name) - - self.window.ax.set_title("correct door: {}".format(self.target_color), loc="left", fontsize=10) - if self.outcome_info: - color = None - if "SUCCESS" in self.outcome_info: - color = "lime" - elif "FAILURE" in self.outcome_info: - color = "red" - self.window.add_text(*(0.01, 0.85, self.outcome_info), - **{'fontsize':15, 'color':color, 'weight':"bold"}) - - self.window.show_img(obs) # re-draw image to add changes to window - return obs - - -# # must be far, must not poke -# class EasyTeachingGames8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=0) -# -# # must be close, must not poke -# class EasyTeachingGamesClose8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=1) -# -# # must be close, must poke -# class EasyTeachingGamesPoke8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=2) -# -# # 100 multi -# class EasyTeachingGamesMulti8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=None) -# -# -# -# # speaking 50 steps -# register( -# id='MiniGrid-EasyTeachingGames-8x8-v0', -# entry_point='gym_minigrid.envs:EasyTeachingGames8x8Env' -# ) -# -# # demonstrating 50 steps -# register( -# id='MiniGrid-EasyTeachingGamesPoke-8x8-v0', -# entry_point='gym_minigrid.envs:EasyTeachingGamesPoke8x8Env' -# ) -# -# # demonstrating 50 steps -# register( -# id='MiniGrid-EasyTeachingGamesClose-8x8-v0', -# entry_point='gym_minigrid.envs:EasyTeachingGamesClose8x8Env' -# ) -# -# # speaking 50 steps -# register( -# id='MiniGrid-EasyTeachingGamesMulti-8x8-v0', -# 
entry_point='gym_minigrid.envs:EasyTeachingGamesMulti8x8Env' -# ) - -# # must be far, must not poke -# class EasierTeachingGames8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=0, easier=True) -# -# # must be close, must not poke -# class EasierTeachingGamesClose8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=1, easier=True) -# -# # must be close, must poke -# class EasierTeachingGamesPoke8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=2, easier=True) -# -# # 100 multi -# class EasierTeachingGamesMulti8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=None, easier=True) -# -# # Multi Many -# class ManyTeachingGamesMulti8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=None, easier=False, many=True) -# -# class ManyTeachingGamesMultiIDL8x8Env(EasyTeachingGamesEnv): -# def __init__(self): -# super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=None, easier=False, many=True, idl=True) - - -# # speaking 50 steps -# register( -# id='MiniGrid-EasierTeachingGames-8x8-v0', -# entry_point='gym_minigrid.envs:EasierTeachingGames8x8Env' -# ) -# -# # demonstrating 50 steps -# register( -# id='MiniGrid-EasierTeachingGamesPoke-8x8-v0', -# entry_point='gym_minigrid.envs:EasierTeachingGamesPoke8x8Env' -# ) -# -# # demonstrating 50 steps -# register( -# id='MiniGrid-EasierTeachingGamesClose-8x8-v0', -# entry_point='gym_minigrid.envs:EasierTeachingGamesClose8x8Env' -# ) -# -# # speaking 50 steps -# register( -# id='MiniGrid-EasierTeachingGamesMulti-8x8-v0', -# entry_point='gym_minigrid.envs:EasierTeachingGamesMulti8x8Env' -# ) -# -# # speaking 50 steps -# register( -# id='MiniGrid-ManyTeachingGamesMulti-8x8-v0', -# entry_point='gym_minigrid.envs:ManyTeachingGamesMulti8x8Env' -# ) -# -# # speaking 50 steps -# register( -# id='MiniGrid-ManyTeachingGamesMultiIDL-8x8-v0', -# entry_point='gym_minigrid.envs:ManyTeachingGamesMultiIDL8x8Env' -# ) - -# Multi Many -class DiverseExit8x8Env(EasyTeachingGamesEnv): - def __init__(self, **kwargs): - super().__init__(size=8, knowledgeable=True, max_steps=50, peer_type=None, easier=False, **kwargs) - -# speaking 50 steps -register( - id='MiniGrid-DiverseExit-8x8-v0', - entry_point='gym_minigrid.envs:DiverseExit8x8Env' -) - diff --git a/spaces/fuckyoudeki/AutoGPT/CODE_OF_CONDUCT.md b/spaces/fuckyoudeki/AutoGPT/CODE_OF_CONDUCT.md deleted file mode 100644 index d2331b4c60b9fb27f06953273355dcf53b8d4321..0000000000000000000000000000000000000000 --- a/spaces/fuckyoudeki/AutoGPT/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,40 +0,0 @@ -# Code of Conduct for auto-gpt - -## 1. Purpose - -The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct. - -## 2. Scope - -This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project. - -## 3. 
Our Standards - -We encourage the following behavior: - -* Being respectful and considerate to others -* Actively seeking diverse perspectives -* Providing constructive feedback and assistance -* Demonstrating empathy and understanding - -We discourage the following behavior: - -* Harassment or discrimination of any kind -* Disrespectful, offensive, or inappropriate language or content -* Personal attacks or insults -* Unwarranted criticism or negativity - -## 4. Reporting and Enforcement - -If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary. - -Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations. - -## 5. Acknowledgements - -This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html). - -## 6. Contact - -If you have any questions or concerns, please contact the project maintainers. - diff --git a/spaces/fuckyoudeki/AutoGPT/autogpt/processing/__init__.py b/spaces/fuckyoudeki/AutoGPT/autogpt/processing/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/giswqs/solara-template/pages/00_home.py b/spaces/giswqs/solara-template/pages/00_home.py deleted file mode 100644 index ac56a53ddca8563b62c422b951ba37e2fcaff5c0..0000000000000000000000000000000000000000 --- a/spaces/giswqs/solara-template/pages/00_home.py +++ /dev/null @@ -1,20 +0,0 @@ -import solara - - -@solara.component -def Page(): - with solara.Column(align="center"): - markdown = """ - ## A Solara Template for Geospatial Applications - - ### Introduction - - **A collection of [Solara](https://github.com/widgetti/solara) web apps for geospatial applications.** - - - Web App: - - GitHub: - - Hugging Face: - - """ - - solara.Markdown(markdown) diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Mortal Kombat Vs Dc Universe PC Game ISO.175 The Ultimate Fighting Experience on Your Computer.md b/spaces/gotiQspiryo/whisper-ui/examples/Mortal Kombat Vs Dc Universe PC Game ISO.175 The Ultimate Fighting Experience on Your Computer.md deleted file mode 100644 index 3f210a27d6c18bc818c7b9ff207e941e20115720..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Mortal Kombat Vs Dc Universe PC Game ISO.175 The Ultimate Fighting Experience on Your Computer.md +++ /dev/null @@ -1,5 +0,0 @@ -
                    -


                    Mortal Kombat: Shaolin Monks is an action-adventure game in the Mortal Kombat series. It was developed and published by Midway for the PlayStation 2 and Xbox and was released on September 16, 2005, in the United States. In October 2004, the president of Midway, David F. Zucker, called the release of Shaolin Monks the "first step toward delivering something that Mortal Kombat fans have been calling for: a new game set in the Mortal Kombat universe every year."

                    -

                    Mortal Kombat Vs Dc Universe PC Game ISO.175


                    DOWNLOAD ★★★ https://urlgoal.com/2uyMkp



                    -
                    -
                    \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/criss/download_and_preprocess_tatoeba.sh b/spaces/gradio/HuBERT/examples/criss/download_and_preprocess_tatoeba.sh deleted file mode 100644 index 7ed64f017d5e62695ba73745c840507b994abc0f..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/criss/download_and_preprocess_tatoeba.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -SPM_ENCODE=flores/scripts/spm_encode.py -DATA=data_tmp -SPM_MODEL=criss_checkpoints/sentence.bpe.model -DICT=criss_checkpoints/dict.txt - -if [[ -f flores ]]; then - echo "flores already cloned" -else - git clone https://github.com/facebookresearch/flores -fi -if [[ -f LASER ]]; then - echo "LASER already cloned" -else - git clone https://github.com/facebookresearch/LASER -fi -mkdir -p data_tmp -declare -A lang_tatoeba_map=( ["ar_AR"]="ara" ["de_DE"]="deu" ["es_XX"]="spa" ["et_EE"]="est" ["fi_FI"]="fin" ["fr_XX"]="fra" ["hi_IN"]="hin" ["it_IT"]="ita" ["ja_XX"]="jpn" ["ko_KR"]="kor" ["kk_KZ"]="kaz" ["nl_XX"]="nld" ["ru_RU"]="rus" ["tr_TR"]="tur" ["vi_VN"]="vie" ["zh_CN"]="cmn") -for lang in ar_AR de_DE es_XX et_EE fi_FI fr_XX hi_IN it_IT ja_XX kk_KZ ko_KR nl_XX ru_RU tr_TR vi_VN zh_CN; do - lang_tatoeba=${lang_tatoeba_map[$lang]} - echo $lang_tatoeba - datadir=$DATA/${lang}-en_XX-tatoeba - rm -rf $datadir - mkdir -p $datadir - TEST_PREFIX=LASER/data/tatoeba/v1/tatoeba - python $SPM_ENCODE \ - --model ${SPM_MODEL} \ - --output_format=piece \ - --inputs ${TEST_PREFIX}.${lang_tatoeba}-eng.${lang_tatoeba} ${TEST_PREFIX}.${lang_tatoeba}-eng.eng \ - --outputs $datadir/test.bpe.${lang}-en_XX.${lang} $datadir/test.bpe.${lang}-en_XX.en_XX - - # binarize data - fairseq-preprocess \ - --source-lang ${lang} --target-lang en_XX \ - --testpref $datadir/test.bpe.${lang}-en_XX \ - --destdir $datadir \ - --srcdict ${DICT} \ - --joined-dictionary \ - --workers 4 -done diff --git a/spaces/gradio/longformer/tvm/_ffi/node_generic.py b/spaces/gradio/longformer/tvm/_ffi/node_generic.py deleted file mode 100644 index f0e800eb0be3258ca45725268664336a808af19f..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/tvm/_ffi/node_generic.py +++ /dev/null @@ -1,116 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -"""Common implementation of Node generic related logic""" -# pylint: disable=unused-import -from __future__ import absolute_import - -from numbers import Number, Integral -# from .. 
import _api_internal -from .base import string_types - -# Node base class -_CLASS_NODE_BASE = None - -def _set_class_node_base(cls): - global _CLASS_NODE_BASE - _CLASS_NODE_BASE = cls - - -def _scalar_type_inference(value): - if hasattr(value, 'dtype'): - dtype = str(value.dtype) - elif isinstance(value, bool): - dtype = 'bool' - elif isinstance(value, float): - # We intentionally convert the float to float32 since it's more common in DL. - dtype = 'float32' - elif isinstance(value, int): - # We intentionally convert the python int to int32 since it's more common in DL. - dtype = 'int32' - else: - raise NotImplementedError('Cannot automatically inference the type.' - ' value={}'.format(value)) - return dtype - - -class NodeGeneric(object): - """Base class for all classes that can be converted to node.""" - def asnode(self): - """Convert value to node""" - raise NotImplementedError() - - -def convert_to_node(value): - """Convert a python value to corresponding node type. - - Parameters - ---------- - value : str - The value to be inspected. - - Returns - ------- - node : Node - The corresponding node value. - """ - if isinstance(value, _CLASS_NODE_BASE): - return value - if isinstance(value, bool): - return const(value, 'uint1x1') - if isinstance(value, Number): - return const(value) - if isinstance(value, string_types): - return _api_internal._str(value) - if isinstance(value, (list, tuple)): - value = [convert_to_node(x) for x in value] - return _api_internal._Array(*value) - if isinstance(value, dict): - vlist = [] - for item in value.items(): - if (not isinstance(item[0], _CLASS_NODE_BASE) and - not isinstance(item[0], string_types)): - raise ValueError("key of map must already been a container type") - vlist.append(item[0]) - vlist.append(convert_to_node(item[1])) - return _api_internal._Map(*vlist) - if isinstance(value, NodeGeneric): - return value.asnode() - if value is None: - return None - - raise ValueError("don't know how to convert type %s to node" % type(value)) - - -def const(value, dtype=None): - """Construct a constant value for a given type. - - Parameters - ---------- - value : int or float - The input value - - dtype : str or None, optional - The data type. - - Returns - ------- - expr : Expr - Constant expression corresponds to the value. - """ - if dtype is None: - dtype = _scalar_type_inference(value) - return _api_internal._const(value, dtype) diff --git a/spaces/gradio/stt_or_tts/README.md b/spaces/gradio/stt_or_tts/README.md deleted file mode 100644 index 647fa5e99217cbe0d26a5da899c468cfbc58d6b6..0000000000000000000000000000000000000000 --- a/spaces/gradio/stt_or_tts/README.md +++ /dev/null @@ -1,12 +0,0 @@ - ---- -title: stt_or_tts -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 4.1.2 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/gulabpatel/GFP_GAN/gfpgan/weights/README.md b/spaces/gulabpatel/GFP_GAN/gfpgan/weights/README.md deleted file mode 100644 index 4d7b7e642591ef88575d9e6c360a4d29e0cc1a4f..0000000000000000000000000000000000000000 --- a/spaces/gulabpatel/GFP_GAN/gfpgan/weights/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Weights - -Put the downloaded weights to this folder. 
diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/build/lib/nvdiffrast/tensorflow/plugin_loader.py b/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/build/lib/nvdiffrast/tensorflow/plugin_loader.py deleted file mode 100644 index d428c55de2194e42be331b1cad1b2162709a4cd4..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/build/lib/nvdiffrast/tensorflow/plugin_loader.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import glob -import os -import re -import uuid -import hashlib -import tempfile -import shutil -import tensorflow as tf -from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module - -#---------------------------------------------------------------------------- -# Global options. - -_nvdiffrast_cache_dir = None - -def set_cache_dir(path: str) -> None: - '''Set CUDA kernel compilation temp dir. - - If `set_cache_dir` is not called, the cache directory will default to - one of the below: - - - Value of NVDIFFRAST_CACHE_DIR env var, if set - - $HOME/.cache/nvdiffrast if HOME env var is set - - $USERPROFILE/.cache/nvdiffrast if USERPROFILE is set. - - Args: - path: Where to save CUDA kernel build temporaries - ''' - global _nvdiffrast_cache_dir - _nvdiffrast_cache_dir = path - -def make_cache_dir_path(*paths: str) -> str: - if _nvdiffrast_cache_dir is not None: - return os.path.join(_nvdiffrast_cache_dir, *paths) - if 'NVDIFFRAST_CACHE_DIR' in os.environ: - return os.path.join(os.environ['NVDIFFRAST_CACHE_DIR'], *paths) - if 'HOME' in os.environ: - return os.path.join(os.environ['HOME'], '.cache', 'nvdiffrast', *paths) - if 'USERPROFILE' in os.environ: - return os.path.join(os.environ['USERPROFILE'], '.cache', 'nvdiffrast', *paths) - return os.path.join(tempfile.gettempdir(), '.cache', 'nvdiffrast', *paths) - -cuda_cache_version_tag = 'v1' -do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe! -verbose = True # Print status messages to stdout. - -#---------------------------------------------------------------------------- -# Internal helper funcs. 
- -def _find_compiler_bindir(): - hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/Enterprise/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True) - if hostx64_paths != []: - return hostx64_paths[0] - hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True) - if hostx64_paths != []: - return hostx64_paths[0] - hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True) - if hostx64_paths != []: - return hostx64_paths[0] - hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True) - if hostx64_paths != []: - return hostx64_paths[0] - vc_bin_dir = 'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin' - if os.path.isdir(vc_bin_dir): - return vc_bin_dir - return None - -def _get_compute_cap(device): - caps_str = device.physical_device_desc - m = re.search('compute capability: (\\d+).(\\d+)', caps_str) - major = m.group(1) - minor = m.group(2) - return (major, minor) - -def _get_cuda_gpu_arch_string(): - gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU'] - if len(gpus) == 0: - raise RuntimeError('No GPU devices found') - (major, minor) = _get_compute_cap(gpus[0]) - return 'sm_%s%s' % (major, minor) - -def _run_cmd(cmd): - with os.popen(cmd) as pipe: - output = pipe.read() - status = pipe.close() - if status is not None: - raise RuntimeError('NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output)) - -def _prepare_nvcc_cli(opts): - cmd = 'nvcc ' + opts.strip() - cmd += ' --disable-warnings' - cmd += ' --include-path "%s"' % tf.sysconfig.get_include() - cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src') - cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl') - cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive') - - compiler_bindir = _find_compiler_bindir() - if compiler_bindir is None: - # Require that _find_compiler_bindir succeeds on Windows. Allow - # nvcc to use whatever is the default on Linux. - if os.name == 'nt': - raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__) - else: - cmd += ' --compiler-bindir "%s"' % compiler_bindir - cmd += ' 2>&1' - return cmd - -#---------------------------------------------------------------------------- -# Main entry point. - -_plugin_cache = dict() - -def get_plugin(cuda_file, extra_nvcc_options=[]): - cuda_file_base = os.path.basename(cuda_file) - cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base) - - # Already in cache? - if cuda_file in _plugin_cache: - return _plugin_cache[cuda_file] - - # Setup plugin. - if verbose: - print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True) - try: - # Hash CUDA source. - md5 = hashlib.md5() - with open(cuda_file, 'rb') as f: - md5.update(f.read()) - md5.update(b'\n') - - # Hash headers included by the CUDA code by running it through the preprocessor. - if not do_not_hash_included_headers: - if verbose: - print('Preprocessing... 
', end='', flush=True) - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext) - _run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))) - with open(tmp_file, 'rb') as f: - bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros - good_file_str = ('"' + cuda_file_base + '"').encode('utf-8') - for ln in f: - if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas - ln = ln.replace(bad_file_str, good_file_str) - md5.update(ln) - md5.update(b'\n') - - # Select compiler options. - compile_opts = '' - if os.name == 'nt': - compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib') - compile_opts += ' --library-path="%s"' % (os.path.dirname(__file__) + r"\..\lib") # Find libraries during compilation. - elif os.name == 'posix': - compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so') - compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\'' - else: - assert False # not Windows or Linux, w00t? - compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string() - compile_opts += ' --use_fast_math' - for opt in extra_nvcc_options: - compile_opts += ' ' + opt - nvcc_cmd = _prepare_nvcc_cli(compile_opts) - - # Hash build configuration. - md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n') - md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n') - md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n') - - # Compile if not already compiled. - bin_file_ext = '.dll' if os.name == 'nt' else '.so' - cuda_cache_path = make_cache_dir_path() - bin_file = os.path.join(make_cache_dir_path(), cuda_file_name + '_' + md5.hexdigest() + bin_file_ext) - if not os.path.isfile(bin_file): - if verbose: - print('Compiling... ', end='', flush=True) - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext) - _run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)) - os.makedirs(cuda_cache_path, exist_ok=True) - intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext) - shutil.copyfile(tmp_file, intermediate_file) - os.rename(intermediate_file, bin_file) # atomic - - # Load. - if verbose: - print('Loading... ', end='', flush=True) - plugin = tf.load_op_library(bin_file) - - # Add to cache. - _plugin_cache[cuda_file] = plugin - if verbose: - print('Done.', flush=True) - return plugin - - except: - if verbose: - print('Failed!', flush=True) - raise - -#---------------------------------------------------------------------------- diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/build/lib/nvdiffrast/torch/torch_texture.cpp b/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/build/lib/nvdiffrast/torch/torch_texture.cpp deleted file mode 100644 index 2257f566623495c7044ea3f532ef00e327477dc7..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/build/lib/nvdiffrast/torch/torch_texture.cpp +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
-// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include "torch_common.inl" -#include "torch_types.h" -#include "../common/common.h" -#include "../common/texture.h" -#include - -//------------------------------------------------------------------------ -// Kernel prototypes. - -void MipBuildKernel1 (const TextureKernelParams p); -void MipBuildKernel2 (const TextureKernelParams p); -void MipBuildKernel4 (const TextureKernelParams p); -void TextureFwdKernelNearest1 (const TextureKernelParams p); -void TextureFwdKernelNearest2 (const TextureKernelParams p); -void TextureFwdKernelNearest4 (const TextureKernelParams p); -void TextureFwdKernelLinear1 (const TextureKernelParams p); -void TextureFwdKernelLinear2 (const TextureKernelParams p); -void TextureFwdKernelLinear4 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapNearest1 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapNearest2 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapNearest4 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapLinear1 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapLinear2 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapLinear4 (const TextureKernelParams p); -void TextureFwdKernelCubeNearest1 (const TextureKernelParams p); -void TextureFwdKernelCubeNearest2 (const TextureKernelParams p); -void TextureFwdKernelCubeNearest4 (const TextureKernelParams p); -void TextureFwdKernelCubeLinear1 (const TextureKernelParams p); -void TextureFwdKernelCubeLinear2 (const TextureKernelParams p); -void TextureFwdKernelCubeLinear4 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapNearest1 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapNearest2 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapNearest4 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapLinear1 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapLinear2 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapLinear4 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapNearestBO1 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapNearestBO2 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapNearestBO4 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapLinearBO1 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapLinearBO2 (const TextureKernelParams p); -void TextureFwdKernelLinearMipmapLinearBO4 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapNearestBO1 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapNearestBO2 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapNearestBO4 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapLinearBO1 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapLinearBO2 (const TextureKernelParams p); -void TextureFwdKernelCubeLinearMipmapLinearBO4 (const TextureKernelParams p); -void MipGradKernel1 (const TextureKernelParams p); -void MipGradKernel2 (const TextureKernelParams p); -void MipGradKernel4 (const TextureKernelParams p); -void TextureGradKernelNearest 
(const TextureKernelParams p); -void TextureGradKernelLinear (const TextureKernelParams p); -void TextureGradKernelLinearMipmapNearest (const TextureKernelParams p); -void TextureGradKernelLinearMipmapLinear (const TextureKernelParams p); -void TextureGradKernelCubeNearest (const TextureKernelParams p); -void TextureGradKernelCubeLinear (const TextureKernelParams p); -void TextureGradKernelCubeLinearMipmapNearest (const TextureKernelParams p); -void TextureGradKernelCubeLinearMipmapLinear (const TextureKernelParams p); -void TextureGradKernelLinearMipmapNearestBO (const TextureKernelParams p); -void TextureGradKernelLinearMipmapLinearBO (const TextureKernelParams p); -void TextureGradKernelCubeLinearMipmapNearestBO (const TextureKernelParams p); -void TextureGradKernelCubeLinearMipmapLinearBO (const TextureKernelParams p); - -//------------------------------------------------------------------------ -// Modeselektor. - -static void set_modes(TextureKernelParams& p, int filter_mode, int boundary_mode, int max_mip_level) -{ - // Mip and filter modes. - p.filterMode = filter_mode; - NVDR_CHECK(p.filterMode >= 0 && p.filterMode < TEX_MODE_COUNT, "filter_mode unsupported"); - p.enableMip = (p.filterMode == TEX_MODE_LINEAR_MIPMAP_NEAREST || p.filterMode == TEX_MODE_LINEAR_MIPMAP_LINEAR); - - // Mip level clamp. - if (p.enableMip) - { - p.mipLevelLimit = max_mip_level; - NVDR_CHECK(p.mipLevelLimit >= -1, "invalid max_mip_level"); - } - - // Boundary mode. - p.boundaryMode = boundary_mode; - NVDR_CHECK(p.boundaryMode >= 0 && p.boundaryMode < TEX_BOUNDARY_MODE_COUNT, "boundary_mode unsupported"); -} - -//------------------------------------------------------------------------ -// Mipmap construction. - -TextureMipWrapper texture_construct_mip(torch::Tensor tex, int max_mip_level, bool cube_mode) -{ - const at::cuda::OptionalCUDAGuard device_guard(device_of(tex)); - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - TextureKernelParams p = {}; // Initialize all fields to zero. - p.mipLevelLimit = max_mip_level; - p.boundaryMode = cube_mode ? TEX_BOUNDARY_MODE_CUBE : TEX_BOUNDARY_MODE_WRAP; - NVDR_CHECK(p.mipLevelLimit >= -1, "invalid max_mip_level"); - - // Check inputs. - NVDR_CHECK_DEVICE(tex); - NVDR_CHECK_CONTIGUOUS(tex); - NVDR_CHECK_F32(tex); - - // Populate parameters and sanity check tex shape. - if (!cube_mode) - { - NVDR_CHECK(tex.sizes().size() == 4 && tex.size(0) > 0 && tex.size(1) > 0 && tex.size(2) > 0 && tex.size(3) > 0, "tex must have shape[>0, >0, >0, >0]"); - } - else - { - NVDR_CHECK(tex.sizes().size() == 5 && tex.size(0) > 0 && tex.size(1) == 6 && tex.size(2) > 0 && tex.size(3) > 0 && tex.size(4) > 0, "tex must have shape[>0, 6, >0, >0, >0] in cube map mode"); - NVDR_CHECK(tex.size(2) == tex.size(3), "texture shape must be square in cube map mode"); - } - p.texDepth = tex.size(0); - p.texHeight = tex.size(cube_mode ? 2 : 1); - p.texWidth = tex.size(cube_mode ? 3 : 2); - p.channels = tex.size(cube_mode ? 4 : 3); - - // Set texture pointer. - p.tex[0] = tex.data_ptr(); - - // Generate mip offsets and calculate total size. - int mipOffsets[TEX_MAX_MIP_LEVEL]; - int mipTotal = calculateMipInfo(NVDR_CTX_PARAMS, p, mipOffsets); - - // Allocate and set mip tensor. - torch::TensorOptions opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA); - torch::Tensor mip = torch::empty({mipTotal}, opts); - float* pmip = mip.data_ptr(); - for (int i=1; i <= p.mipLevelMax; i++) - p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels. 
- - // Choose kernel variants based on channel count. - void* args[] = {&p}; - int channel_div_idx = 0; - if (!(p.channels & 3)) - channel_div_idx = 2; // Channel count divisible by 4. - else if (!(p.channels & 1)) - channel_div_idx = 1; // Channel count divisible by 2. - - // Build mip levels. - for (int i=1; i <= p.mipLevelMax; i++) - { - int2 ms = mipLevelSize(p, i); - int3 sz = make_int3(ms.x, ms.y, p.texDepth); - dim3 blockSize = getLaunchBlockSize(TEX_FWD_MAX_MIP_KERNEL_BLOCK_WIDTH, TEX_FWD_MAX_MIP_KERNEL_BLOCK_HEIGHT, sz.x, sz.y); - dim3 gridSize = getLaunchGridSize(blockSize, sz.x, sz.y, sz.z * (cube_mode ? 6 : 1)); - p.mipLevelOut = i; - - void* build_func_tbl[3] = { (void*)MipBuildKernel1, (void*)MipBuildKernel2, (void*)MipBuildKernel4 }; - NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel(build_func_tbl[channel_div_idx], gridSize, blockSize, args, 0, stream)); - } - - // Return the mip tensor in a wrapper. - TextureMipWrapper mip_wrapper; - mip_wrapper.mip = mip; - mip_wrapper.max_mip_level = max_mip_level; - mip_wrapper.texture_size = tex.sizes().vec(); - mip_wrapper.cube_mode = cube_mode; - return mip_wrapper; -} - -//------------------------------------------------------------------------ -// Forward op. - -torch::Tensor texture_fwd_mip(torch::Tensor tex, torch::Tensor uv, torch::Tensor uv_da, torch::Tensor mip_level_bias, TextureMipWrapper mip_wrapper, std::vector mip_stack, int filter_mode, int boundary_mode) -{ - const at::cuda::OptionalCUDAGuard device_guard(device_of(tex)); - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - TextureKernelParams p = {}; // Initialize all fields to zero. - bool has_mip_stack = (mip_stack.size() > 0); - torch::Tensor& mip_w = mip_wrapper.mip; // Unwrap. - int max_mip_level = has_mip_stack ? mip_stack.size() : mip_wrapper.max_mip_level; - set_modes(p, filter_mode, boundary_mode, max_mip_level); - - // See if we have these tensors or not. - bool has_uv_da = uv_da.defined() && uv_da.nbytes(); - bool has_mip_level_bias = mip_level_bias.defined() && mip_level_bias.nbytes(); - - if (p.enableMip) - { - NVDR_CHECK(has_uv_da || has_mip_level_bias, "mipmapping filter mode requires uv_da and/or mip_level_bias input"); - NVDR_CHECK(has_mip_stack || mip_w.defined(), "mipmapping filter mode requires mip wrapper or mip stack input"); - } - - // Check inputs. - NVDR_CHECK_DEVICE(tex, uv); - NVDR_CHECK_CONTIGUOUS(tex, uv); - NVDR_CHECK_F32(tex, uv); - if (p.enableMip) - { - if (has_mip_stack) - { - TORCH_CHECK(at::cuda::check_device(mip_stack), __func__, "(): Mip stack inputs must reside on the correct GPU device"); - nvdr_check_contiguous(mip_stack, __func__, "(): Mip stack inputs must be contiguous tensors"); - nvdr_check_f32(mip_stack, __func__, "(): Mip stack inputs must be float32 tensors"); - } - else - { - NVDR_CHECK_DEVICE(mip_w); - NVDR_CHECK_CONTIGUOUS(mip_w); - NVDR_CHECK_F32(mip_w); - } - if (has_uv_da) - { - NVDR_CHECK_DEVICE(uv_da); - NVDR_CHECK_CONTIGUOUS(uv_da); - NVDR_CHECK_F32(uv_da); - } - if (has_mip_level_bias) - { - NVDR_CHECK_DEVICE(mip_level_bias); - NVDR_CHECK_CONTIGUOUS(mip_level_bias); - NVDR_CHECK_F32(mip_level_bias); - } - } - - // Sanity checks and state setters. 
- bool cube_mode = (boundary_mode == TEX_BOUNDARY_MODE_CUBE); - if (!cube_mode) - { - NVDR_CHECK(tex.sizes().size() == 4 && tex.size(0) > 0 && tex.size(1) > 0 && tex.size(2) > 0 && tex.size(3) > 0, "tex must have shape[>0, >0, >0, >0]"); - NVDR_CHECK(uv.sizes().size() == 4 && uv.size(0) > 0 && uv.size(1) > 0 && uv.size(2) > 0 && uv.size(3) == 2, "uv must have shape [>0, >0, >0, 2]"); - p.texHeight = tex.size(1); - p.texWidth = tex.size(2); - p.channels = tex.size(3); - } - else - { - NVDR_CHECK(tex.sizes().size() == 5 && tex.size(0) > 0 && tex.size(1) == 6 && tex.size(2) > 0 && tex.size(3) > 0 && tex.size(4) > 0, "tex must have shape[>0, 6, >0, >0, >0] in cube map mode"); - NVDR_CHECK(uv.sizes().size() == 4 && uv.size(0) > 0 && uv.size(1) > 0 && uv.size(2) > 0 && uv.size(3) == 3, "uv must have shape [>0, >0, >0, 3] in cube map mode"); - NVDR_CHECK(tex.size(2) == tex.size(3), "texture shape must be square in cube map mode"); - p.texHeight = tex.size(2); - p.texWidth = tex.size(3); - p.channels = tex.size(4); - } - NVDR_CHECK(tex.size(0) == 1 || tex.size(0) == uv.size(0), "minibatch size mismatch between inputs tex, uv"); - NVDR_CHECK(p.texWidth <= (1 << TEX_MAX_MIP_LEVEL) && p.texHeight <= (1 << TEX_MAX_MIP_LEVEL), "texture size too large"); - p.n = uv.size(0); - p.imgHeight = uv.size(1); - p.imgWidth = uv.size(2); - p.texDepth = tex.size(0); - if (p.enableMip) - { - if (has_uv_da) - { - if (!cube_mode) - NVDR_CHECK(uv_da.sizes().size() == 4 && uv_da.size(0) == p.n && uv_da.size(1) == p.imgHeight && uv_da.size(2) == p.imgWidth && uv_da.size(3) == 4, "uv_da must have shape [minibatch_size, height, width, 4]"); - else - NVDR_CHECK(uv_da.sizes().size() == 4 && uv_da.size(0) == p.n && uv_da.size(1) == p.imgHeight && uv_da.size(2) == p.imgWidth && uv_da.size(3) == 6, "uv_da must have shape [minibatch_size, height, width, 6] in cube map mode"); - } - if (has_mip_level_bias) - NVDR_CHECK(mip_level_bias.sizes().size() == 3 && mip_level_bias.size(0) == p.n && mip_level_bias.size(1) == p.imgHeight && mip_level_bias.size(2) == p.imgWidth, "mip_level_bias must have shape [minibatch_size, height, width]"); - } - - // Get input pointers. - p.tex[0] = tex.data_ptr(); - p.uv = uv.data_ptr(); - p.uvDA = (p.enableMip && has_uv_da) ? uv_da.data_ptr() : NULL; - p.mipLevelBias = (p.enableMip && has_mip_level_bias) ? mip_level_bias.data_ptr() : NULL; - - // Allocate output tensor. - torch::TensorOptions opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA); - torch::Tensor out = torch::empty({p.n, p.imgHeight, p.imgWidth, p.channels}, opts); - p.out = out.data_ptr(); - - // Choose kernel variants based on channel count. - void* args[] = {&p}; - int channel_div_idx = 0; - if (!(p.channels & 3)) - channel_div_idx = 2; // Channel count divisible by 4. - else if (!(p.channels & 1)) - channel_div_idx = 1; // Channel count divisible by 2. - - // Mip-related setup. - float* pmip = 0; - if (p.enableMip) - { - if (has_mip_stack) - { - // Custom mip stack supplied. Check that sizes match and assign. 
- p.mipLevelMax = max_mip_level; - for (int i=1; i <= p.mipLevelMax; i++) - { - torch::Tensor& t = mip_stack[i-1]; - int2 sz = mipLevelSize(p, i); - if (!cube_mode) - NVDR_CHECK(t.sizes().size() == 4 && t.size(0) == tex.size(0) && t.size(1) == sz.y && t.size(2) == sz.x && t.size(3) == p.channels, "mip level size mismatch in custom mip stack"); - else - NVDR_CHECK(t.sizes().size() == 5 && t.size(0) == tex.size(0) && t.size(1) == 6 && t.size(2) == sz.y && t.size(3) == sz.x && t.size(4) == p.channels, "mip level size mismatch in mip stack"); - if (sz.x == 1 && sz.y == 1) - NVDR_CHECK(i == p.mipLevelMax, "mip level size mismatch in mip stack"); - p.tex[i] = t.data_ptr(); - } - } - else - { - // Generate mip offsets, check mipmap size, and set mip data pointer. - int mipOffsets[TEX_MAX_MIP_LEVEL]; - int mipTotal = calculateMipInfo(NVDR_CTX_PARAMS, p, mipOffsets); - NVDR_CHECK(tex.sizes() == mip_wrapper.texture_size && cube_mode == mip_wrapper.cube_mode, "mip does not match texture size"); - NVDR_CHECK(mip_w.sizes().size() == 1 && mip_w.size(0) == mipTotal, "wrapped mip tensor size mismatch"); - pmip = mip_w.data_ptr(); - for (int i=1; i <= p.mipLevelMax; i++) - p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels. - } - } - - // Verify that buffers are aligned to allow float2/float4 operations. Unused pointers are zero so always aligned. - if (!cube_mode) - NVDR_CHECK(!((uintptr_t)p.uv & 7), "uv input tensor not aligned to float2"); - if ((p.channels & 3) == 0) - { - for (int i=0; i <= p.mipLevelMax; i++) - NVDR_CHECK(!((uintptr_t)p.tex[i] & 15), "tex or mip input tensor not aligned to float4"); - NVDR_CHECK(!((uintptr_t)p.out & 15), "out output tensor not aligned to float4"); - NVDR_CHECK(!((uintptr_t)pmip & 15), "mip input tensor not aligned to float4"); - } - if ((p.channels & 1) == 0) - { - for (int i=0; i <= p.mipLevelMax; i++) - NVDR_CHECK(!((uintptr_t)p.tex[i] & 7), "tex or mip input tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)p.out & 7), "out output tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)pmip & 7), "mip input tensor not aligned to float2"); - } - if (!cube_mode) - NVDR_CHECK(!((uintptr_t)p.uvDA & 15), "uv_da input tensor not aligned to float4"); - else - NVDR_CHECK(!((uintptr_t)p.uvDA & 7), "uv_da input tensor not aligned to float2"); - - // Choose launch parameters for texture lookup kernel. - dim3 blockSize = getLaunchBlockSize(TEX_FWD_MAX_KERNEL_BLOCK_WIDTH, TEX_FWD_MAX_KERNEL_BLOCK_HEIGHT, p.imgWidth, p.imgHeight); - dim3 gridSize = getLaunchGridSize(blockSize, p.imgWidth, p.imgHeight, p.n); - - // Choose kernel based on filter mode, cube mode, bias-only mode, and datatype. 
- void* func_tbl[TEX_MODE_COUNT * 2 * 2 * 3] = { - (void*)TextureFwdKernelNearest1, - (void*)TextureFwdKernelNearest2, - (void*)TextureFwdKernelNearest4, - (void*)TextureFwdKernelLinear1, - (void*)TextureFwdKernelLinear2, - (void*)TextureFwdKernelLinear4, - (void*)TextureFwdKernelLinearMipmapNearest1, - (void*)TextureFwdKernelLinearMipmapNearest2, - (void*)TextureFwdKernelLinearMipmapNearest4, - (void*)TextureFwdKernelLinearMipmapLinear1, - (void*)TextureFwdKernelLinearMipmapLinear2, - (void*)TextureFwdKernelLinearMipmapLinear4, - (void*)TextureFwdKernelCubeNearest1, - (void*)TextureFwdKernelCubeNearest2, - (void*)TextureFwdKernelCubeNearest4, - (void*)TextureFwdKernelCubeLinear1, - (void*)TextureFwdKernelCubeLinear2, - (void*)TextureFwdKernelCubeLinear4, - (void*)TextureFwdKernelCubeLinearMipmapNearest1, - (void*)TextureFwdKernelCubeLinearMipmapNearest2, - (void*)TextureFwdKernelCubeLinearMipmapNearest4, - (void*)TextureFwdKernelCubeLinearMipmapLinear1, - (void*)TextureFwdKernelCubeLinearMipmapLinear2, - (void*)TextureFwdKernelCubeLinearMipmapLinear4, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - (void*)TextureFwdKernelLinearMipmapNearestBO1, - (void*)TextureFwdKernelLinearMipmapNearestBO2, - (void*)TextureFwdKernelLinearMipmapNearestBO4, - (void*)TextureFwdKernelLinearMipmapLinearBO1, - (void*)TextureFwdKernelLinearMipmapLinearBO2, - (void*)TextureFwdKernelLinearMipmapLinearBO4, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - (void*)TextureFwdKernelCubeLinearMipmapNearestBO1, - (void*)TextureFwdKernelCubeLinearMipmapNearestBO2, - (void*)TextureFwdKernelCubeLinearMipmapNearestBO4, - (void*)TextureFwdKernelCubeLinearMipmapLinearBO1, - (void*)TextureFwdKernelCubeLinearMipmapLinearBO2, - (void*)TextureFwdKernelCubeLinearMipmapLinearBO4, - }; - - // Function index. - int func_idx = p.filterMode; - if (cube_mode) - func_idx += TEX_MODE_COUNT; // Cube variant. - if (p.enableMip && !has_uv_da) - func_idx += TEX_MODE_COUNT * 2; // Bias-only variant. - func_idx = func_idx * 3 + channel_div_idx; // Choose vector size. - - // Launch kernel. - NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel(func_tbl[func_idx], gridSize, blockSize, args, 0, stream)); - - // Return output tensor. - return out; -} - -// Version without mipmaps. -torch::Tensor texture_fwd(torch::Tensor tex, torch::Tensor uv, int filter_mode, int boundary_mode) -{ - torch::Tensor empty_tensor; - std::vector empty_vector; - return texture_fwd_mip(tex, uv, empty_tensor, empty_tensor, TextureMipWrapper(), empty_vector, filter_mode, boundary_mode); -} - -//------------------------------------------------------------------------ -// Gradient op. - -std::tuple > texture_grad_linear_mipmap_linear(torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, torch::Tensor uv_da, torch::Tensor mip_level_bias, TextureMipWrapper mip_wrapper, std::vector mip_stack, int filter_mode, int boundary_mode) -{ - const at::cuda::OptionalCUDAGuard device_guard(device_of(tex)); - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - TextureKernelParams p = {}; // Initialize all fields to zero. - bool has_mip_stack = (mip_stack.size() > 0); - torch::Tensor& mip_w = mip_wrapper.mip; // Unwrap. - int max_mip_level = has_mip_stack ? mip_stack.size() : mip_wrapper.max_mip_level; - set_modes(p, filter_mode, boundary_mode, max_mip_level); - - // See if we have these tensors or not. 
- bool has_uv_da = uv_da.defined() && uv_da.nbytes(); - bool has_mip_level_bias = mip_level_bias.defined() && mip_level_bias.nbytes(); - - if (p.enableMip) - { - NVDR_CHECK(has_uv_da || has_mip_level_bias, "mipmapping filter mode requires uv_da and/or mip_level_bias input"); - NVDR_CHECK(has_mip_stack || mip_w.defined(), "mipmapping filter mode requires mip wrapper or mip stack input"); - } - - // Check inputs. - NVDR_CHECK_DEVICE(tex, uv); - NVDR_CHECK_CONTIGUOUS(tex, uv); - NVDR_CHECK_F32(tex, uv); - if (p.enableMip) - { - if (has_mip_stack) - { - TORCH_CHECK(at::cuda::check_device(mip_stack), __func__, "(): Mip stack inputs must reside on the correct GPU device"); - nvdr_check_contiguous(mip_stack, __func__, "(): Mip stack inputs must be contiguous tensors"); - nvdr_check_f32(mip_stack, __func__, "(): Mip stack inputs must be float32 tensors"); - } - else - { - NVDR_CHECK_DEVICE(mip_w); - NVDR_CHECK_CONTIGUOUS(mip_w); - NVDR_CHECK_F32(mip_w); - } - if (has_uv_da) - { - NVDR_CHECK_DEVICE(uv_da); - NVDR_CHECK_CONTIGUOUS(uv_da); - NVDR_CHECK_F32(uv_da); - } - if (has_mip_level_bias) - { - NVDR_CHECK_DEVICE(mip_level_bias); - NVDR_CHECK_CONTIGUOUS(mip_level_bias); - NVDR_CHECK_F32(mip_level_bias); - } - } - - // Sanity checks and state setters. - bool cube_mode = (boundary_mode == TEX_BOUNDARY_MODE_CUBE); - if (!cube_mode) - { - NVDR_CHECK(tex.sizes().size() == 4 && tex.size(0) > 0 && tex.size(1) > 0 && tex.size(2) > 0 && tex.size(3) > 0, "tex must have shape[>0, >0, >0, >0]"); - NVDR_CHECK(uv.sizes().size() == 4 && uv.size(0) > 0 && uv.size(1) > 0 && uv.size(2) > 0 && uv.size(3) == 2, "uv must have shape [>0, >0, >0, 2]"); - p.texHeight = tex.size(1); - p.texWidth = tex.size(2); - p.channels = tex.size(3); - } - else - { - NVDR_CHECK(tex.sizes().size() == 5 && tex.size(0) > 0 && tex.size(1) == 6 && tex.size(2) > 0 && tex.size(3) > 0 && tex.size(4) > 0, "tex must have shape[>0, 6, >0, >0, >0] in cube map mode"); - NVDR_CHECK(uv.sizes().size() == 4 && uv.size(0) > 0 && uv.size(1) > 0 && uv.size(2) > 0 && uv.size(3) == 3, "uv must have shape [>0, >0, >0, 3] in cube map mode"); - NVDR_CHECK(tex.size(2) == tex.size(3), "texture shape must be square in cube map mode"); - p.texHeight = tex.size(2); - p.texWidth = tex.size(3); - p.channels = tex.size(4); - } - NVDR_CHECK(tex.size(0) == 1 || tex.size(0) == uv.size(0), "minibatch size mismatch between inputs tex, uv"); - NVDR_CHECK(p.texWidth <= (1 << TEX_MAX_MIP_LEVEL) && p.texHeight <= (1 << TEX_MAX_MIP_LEVEL), "texture size too large"); - p.n = uv.size(0); - p.imgHeight = uv.size(1); - p.imgWidth = uv.size(2); - p.texDepth = tex.size(0); - if (p.enableMip) - { - if (has_uv_da) - { - if (!cube_mode) - NVDR_CHECK(uv_da.sizes().size() == 4 && uv_da.size(0) == p.n && uv_da.size(1) == p.imgHeight && uv_da.size(2) == p.imgWidth && uv_da.size(3) == 4, "uv_da must have shape [minibatch_size, height, width, 4]"); - else - NVDR_CHECK(uv_da.sizes().size() == 4 && uv_da.size(0) == p.n && uv_da.size(1) == p.imgHeight && uv_da.size(2) == p.imgWidth && uv_da.size(3) == 6, "uv_da must have shape [minibatch_size, height, width, 6] in cube map mode"); - } - if (has_mip_level_bias) - NVDR_CHECK(mip_level_bias.sizes().size() == 3 && mip_level_bias.size(0) == p.n && mip_level_bias.size(1) == p.imgHeight && mip_level_bias.size(2) == p.imgWidth, "mip_level_bias must have shape [minibatch_size, height, width]"); - } - NVDR_CHECK(dy.sizes().size() == 4 && dy.size(0) == p.n && dy.size(1) == p.imgHeight && dy.size(2) == p.imgWidth && dy.size(3) == p.channels, "dy must have 
shape [minibatch_size, height, width, channels]"); - - // Get contiguous version of dy. - torch::Tensor dy_ = dy.contiguous(); - - // Get input pointers. - p.tex[0] = tex.data_ptr(); - p.uv = uv.data_ptr(); - p.dy = dy_.data_ptr(); - p.uvDA = (p.enableMip && has_uv_da) ? uv_da.data_ptr() : NULL; - p.mipLevelBias = (p.enableMip && has_mip_level_bias) ? mip_level_bias.data_ptr() : NULL; - - // Allocate output tensor for tex gradient. - torch::Tensor grad_tex = torch::zeros_like(tex); - p.gradTex[0] = grad_tex.data_ptr(); - - // Allocate output tensor for uv gradient. - torch::Tensor grad_uv; - torch::Tensor grad_uv_da; - torch::Tensor grad_mip_level_bias; - if (p.filterMode != TEX_MODE_NEAREST) - { - grad_uv = torch::empty_like(uv); - p.gradUV = grad_uv.data_ptr(); - - // Gradients for things affecting mip level. - if (p.filterMode == TEX_MODE_LINEAR_MIPMAP_LINEAR) - { - // Allocate output tensor for uv_da gradient. - if (has_uv_da) - { - grad_uv_da = torch::empty_like(uv_da); - p.gradUVDA = grad_uv_da.data_ptr(); - } - - // Allocate output tensor for mip_level_bias gradient. - if (has_mip_level_bias) - { - grad_mip_level_bias = torch::empty_like(mip_level_bias); - p.gradMipLevelBias = grad_mip_level_bias.data_ptr(); - } - } - } - - // Choose kernel variants based on channel count. - int channel_div_idx = 0; - if (!(p.channels & 3)) - channel_div_idx = 2; // Channel count divisible by 4. - else if (!(p.channels & 1)) - channel_div_idx = 1; // Channel count divisible by 2. - - // Mip-related setup. - torch::Tensor grad_mip; - std::vector grad_mip_stack; - float* pmip = 0; - float* pgradMip = 0; - if (p.enableMip) - { - if (has_mip_stack) - { - // Custom mip stack supplied. Check that sizes match, assign, construct gradient tensors. - p.mipLevelMax = max_mip_level; - for (int i=1; i <= p.mipLevelMax; i++) - { - torch::Tensor& t = mip_stack[i-1]; - int2 sz = mipLevelSize(p, i); - if (!cube_mode) - NVDR_CHECK(t.sizes().size() == 4 && t.size(0) == tex.size(0) && t.size(1) == sz.y && t.size(2) == sz.x && t.size(3) == p.channels, "mip level size mismatch in mip stack"); - else - NVDR_CHECK(t.sizes().size() == 5 && t.size(0) == tex.size(0) && t.size(1) == 6 && t.size(2) == sz.y && t.size(3) == sz.x && t.size(4) == p.channels, "mip level size mismatch in mip stack"); - if (sz.x == 1 && sz.y == 1) - NVDR_CHECK(i == p.mipLevelMax, "mip level size mismatch in mip stack"); - - torch::Tensor g = torch::zeros_like(t); - grad_mip_stack.push_back(g); - - p.tex[i] = t.data_ptr(); - p.gradTex[i] = g.data_ptr(); - } - } - else - { - // Generate mip offsets and get space for temporary mip gradients. - int mipOffsets[TEX_MAX_MIP_LEVEL]; - int mipTotal = calculateMipInfo(NVDR_CTX_PARAMS, p, mipOffsets); - NVDR_CHECK(tex.sizes() == mip_wrapper.texture_size && cube_mode == mip_wrapper.cube_mode, "mip does not match texture size"); - NVDR_CHECK(mip_w.sizes().size() == 1 && mip_w.size(0) == mipTotal, "mip tensor size mismatch"); - grad_mip = torch::zeros_like(mip_w); - pmip = (float*)mip_w.data_ptr(); - pgradMip = grad_mip.data_ptr(); - for (int i=1; i <= p.mipLevelMax; i++) - { - p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels. - p.gradTex[i] = pgradMip + mipOffsets[i]; // Pointers to mip gradients. - } - } - } - - // Verify that buffers are aligned to allow float2/float4 operations. Unused pointers are zero so always aligned. 
- if (!cube_mode) - { - NVDR_CHECK(!((uintptr_t)p.uv & 7), "uv input tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)p.gradUV & 7), "grad_uv output tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)p.uvDA & 15), "uv_da input tensor not aligned to float4"); - NVDR_CHECK(!((uintptr_t)p.gradUVDA & 15), "grad_uv_da output tensor not aligned to float4"); - } - else - { - NVDR_CHECK(!((uintptr_t)p.uvDA & 7), "uv_da input tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)p.gradUVDA & 7), "grad_uv_da output tensor not aligned to float2"); - } - if ((p.channels & 3) == 0) - { - for (int i=0; i <= p.mipLevelMax; i++) - { - NVDR_CHECK(!((uintptr_t)p.tex[i] & 15), "tex or mip input tensor not aligned to float4"); - NVDR_CHECK(!((uintptr_t)p.gradTex[i] & 15), "grad_tex output tensor not aligned to float4"); - } - NVDR_CHECK(!((uintptr_t)p.dy & 15), "dy input tensor not aligned to float4"); - NVDR_CHECK(!((uintptr_t)pmip & 15), "mip input tensor not aligned to float4"); - NVDR_CHECK(!((uintptr_t)pgradMip & 15), "internal mip gradient tensor not aligned to float4"); - } - if ((p.channels & 1) == 0) - { - for (int i=0; i <= p.mipLevelMax; i++) - { - NVDR_CHECK(!((uintptr_t)p.tex[i] & 7), "tex or mip input tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)p.gradTex[i] & 7), "grad_tex output tensor not aligned to float2"); - } - NVDR_CHECK(!((uintptr_t)p.dy & 7), "dy output tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)pmip & 7), "mip input tensor not aligned to float2"); - NVDR_CHECK(!((uintptr_t)pgradMip & 7), "internal mip gradient tensor not aligned to float2"); - } - - // Choose launch parameters for main gradient kernel. - void* args[] = {&p}; - dim3 blockSize = getLaunchBlockSize(TEX_GRAD_MAX_KERNEL_BLOCK_WIDTH, TEX_GRAD_MAX_KERNEL_BLOCK_HEIGHT, p.imgWidth, p.imgHeight); - dim3 gridSize = getLaunchGridSize(blockSize, p.imgWidth, p.imgHeight, p.n); - - void* func_tbl[TEX_MODE_COUNT * 2 * 2] = { - (void*)TextureGradKernelNearest, - (void*)TextureGradKernelLinear, - (void*)TextureGradKernelLinearMipmapNearest, - (void*)TextureGradKernelLinearMipmapLinear, - (void*)TextureGradKernelCubeNearest, - (void*)TextureGradKernelCubeLinear, - (void*)TextureGradKernelCubeLinearMipmapNearest, - (void*)TextureGradKernelCubeLinearMipmapLinear, - NULL, - NULL, - (void*)TextureGradKernelLinearMipmapNearestBO, - (void*)TextureGradKernelLinearMipmapLinearBO, - NULL, - NULL, - (void*)TextureGradKernelCubeLinearMipmapNearestBO, - (void*)TextureGradKernelCubeLinearMipmapLinearBO, - }; - - // Function index. - int func_idx = p.filterMode; - if (cube_mode) - func_idx += TEX_MODE_COUNT; // Cube variant. - if (p.enableMip && !has_uv_da) - func_idx += TEX_MODE_COUNT * 2; // Bias-only variant. - - // Launch main gradient kernel. - NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel(func_tbl[func_idx], gridSize, blockSize, args, 0, stream)); - - // Launch kernel to pull gradients from mip levels. Don't do this if mip stack was supplied - individual level gradients are already there. - if (p.enableMip && !has_mip_stack) - { - dim3 blockSize = getLaunchBlockSize(TEX_GRAD_MAX_MIP_KERNEL_BLOCK_WIDTH, TEX_GRAD_MAX_MIP_KERNEL_BLOCK_HEIGHT, p.texWidth, p.texHeight); - dim3 gridSize = getLaunchGridSize(blockSize, p.texWidth, p.texHeight, p.texDepth * (cube_mode ? 
6 : 1)); - int sharedBytes = blockSize.x * blockSize.y * p.channels * sizeof(float); - - void* mip_grad_func_tbl[3] = { (void*)MipGradKernel1, (void*)MipGradKernel2, (void*)MipGradKernel4 }; - NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel(mip_grad_func_tbl[channel_div_idx], gridSize, blockSize, args, sharedBytes, stream)); - } - - // Return output tensors. - return std::tuple >(grad_tex, grad_uv, grad_uv_da, grad_mip_level_bias, grad_mip_stack); -} - -// Version for nearest filter mode. -torch::Tensor texture_grad_nearest(torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, int filter_mode, int boundary_mode) -{ - torch::Tensor empty_tensor; - std::vector empty_vector; - std::tuple > result = texture_grad_linear_mipmap_linear(tex, uv, dy, empty_tensor, empty_tensor, TextureMipWrapper(), empty_vector, filter_mode, boundary_mode); - return std::get<0>(result); -} - -// Version for linear filter mode. -std::tuple texture_grad_linear(torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, int filter_mode, int boundary_mode) -{ - torch::Tensor empty_tensor; - std::vector empty_vector; - std::tuple > result = texture_grad_linear_mipmap_linear(tex, uv, dy, empty_tensor, empty_tensor, TextureMipWrapper(), empty_vector, filter_mode, boundary_mode); - return std::tuple(std::get<0>(result), std::get<1>(result)); -} - -// Version for linear-mipmap-nearest mode. -std::tuple > texture_grad_linear_mipmap_nearest(torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, torch::Tensor uv_da, torch::Tensor mip_level_bias, TextureMipWrapper mip_wrapper, std::vector mip_stack, int filter_mode, int boundary_mode) -{ - std::tuple > result = texture_grad_linear_mipmap_linear(tex, uv, dy, uv_da, mip_level_bias, mip_wrapper, mip_stack, filter_mode, boundary_mode); - return std::tuple >(std::get<0>(result), std::get<1>(result), std::get<4>(result)); -} - -//------------------------------------------------------------------------ diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/criteria/l2_loss.py b/spaces/gyugnsu/DragGan-Inversion/PTI/criteria/l2_loss.py deleted file mode 100644 index c7ac2753b02dfa9d21ccf03fa3b87b9d6fc3f01d..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/criteria/l2_loss.py +++ /dev/null @@ -1,8 +0,0 @@ -import torch - -l2_criterion = torch.nn.MSELoss(reduction='mean') - - -def l2_loss(real_images, generated_images): - loss = l2_criterion(real_images, generated_images) - return loss diff --git a/spaces/hasibzunair/fifa-tryon-demo/u2net_train.py b/spaces/hasibzunair/fifa-tryon-demo/u2net_train.py deleted file mode 100644 index 8f19491feadcb2e37580ab9e4eeb82e1fdbfbbc9..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/u2net_train.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import torch -import torchvision -from torch.autograd import Variable -import torch.nn as nn -import torch.nn.functional as F - -from torch.utils.data import Dataset, DataLoader -from torchvision import transforms, utils -import torch.optim as optim -import torchvision.transforms as standard_transforms - -import numpy as np -import glob -import os - -from data_loader import Rescale -from data_loader import RescaleT -from data_loader import RandomCrop -from data_loader import ToTensor -from data_loader import ToTensorLab -from data_loader import SalObjDataset - -from model import U2NET -from model import U2NETP - -# ------- 1. 
define loss function -------- - -bce_loss = nn.BCELoss(size_average=True) - -def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v): - - loss0 = bce_loss(d0,labels_v) - loss1 = bce_loss(d1,labels_v) - loss2 = bce_loss(d2,labels_v) - loss3 = bce_loss(d3,labels_v) - loss4 = bce_loss(d4,labels_v) - loss5 = bce_loss(d5,labels_v) - loss6 = bce_loss(d6,labels_v) - - loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 - print("l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"%(loss0.data.item(),loss1.data.item(),loss2.data.item(),loss3.data.item(),loss4.data.item(),loss5.data.item(),loss6.data.item())) - - return loss0, loss - - -# ------- 2. set the directory of training dataset -------- - -model_name = 'u2net' #'u2netp' - -data_dir = os.path.join(os.getcwd(), 'train_data' + os.sep) -tra_image_dir = os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'im_aug' + os.sep) -tra_label_dir = os.path.join('DUTS', 'DUTS-TR', 'DUTS-TR', 'gt_aug' + os.sep) - -image_ext = '.jpg' -label_ext = '.png' - -model_dir = os.path.join(os.getcwd(), 'saved_models', model_name + os.sep) - -epoch_num = 100000 -batch_size_train = 12 -batch_size_val = 1 -train_num = 0 -val_num = 0 - -tra_img_name_list = glob.glob(data_dir + tra_image_dir + '*' + image_ext) - -tra_lbl_name_list = [] -for img_path in tra_img_name_list: - img_name = img_path.split(os.sep)[-1] - - aaa = img_name.split(".") - bbb = aaa[0:-1] - imidx = bbb[0] - for i in range(1,len(bbb)): - imidx = imidx + "." + bbb[i] - - tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext) - -print("---") -print("train images: ", len(tra_img_name_list)) -print("train labels: ", len(tra_lbl_name_list)) -print("---") - -train_num = len(tra_img_name_list) - -salobj_dataset = SalObjDataset( - img_name_list=tra_img_name_list, - lbl_name_list=tra_lbl_name_list, - transform=transforms.Compose([ - RescaleT(320), - RandomCrop(288), - ToTensorLab(flag=0)])) -salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True, num_workers=1) - -# ------- 3. define model -------- -# define the net -if(model_name=='u2net'): - net = U2NET(3, 1) -elif(model_name=='u2netp'): - net = U2NETP(3,1) - -if torch.cuda.is_available(): - net.cuda() - -# ------- 4. define optimizer -------- -print("---define optimizer...") -optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0) - -# ------- 5. 
training process -------- -print("---start training...") -ite_num = 0 -running_loss = 0.0 -running_tar_loss = 0.0 -ite_num4val = 0 -save_frq = 2000 # save the model every 2000 iterations - -for epoch in range(0, epoch_num): - net.train() - - for i, data in enumerate(salobj_dataloader): - ite_num = ite_num + 1 - ite_num4val = ite_num4val + 1 - - inputs, labels = data['image'], data['label'] - - inputs = inputs.type(torch.FloatTensor) - labels = labels.type(torch.FloatTensor) - - # wrap them in Variable - if torch.cuda.is_available(): - inputs_v, labels_v = Variable(inputs.cuda(), requires_grad=False), Variable(labels.cuda(), - requires_grad=False) - else: - inputs_v, labels_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False) - - # y zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - d0, d1, d2, d3, d4, d5, d6 = net(inputs_v) - loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v) - - loss.backward() - optimizer.step() - - # # print statistics - running_loss += loss.data.item() - running_tar_loss += loss2.data.item() - - # del temporary outputs and loss - del d0, d1, d2, d3, d4, d5, d6, loss2, loss - - print("[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f " % ( - epoch + 1, epoch_num, (i + 1) * batch_size_train, train_num, ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val)) - - if ite_num % save_frq == 0: - - torch.save(net.state_dict(), model_dir + model_name+"_bce_itr_%d_train_%3f_tar_%3f.pth" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val)) - running_loss = 0.0 - running_tar_loss = 0.0 - net.train() # resume train - ite_num4val = 0 - diff --git a/spaces/hcapp/sd-dreambooth-library-herge-style/README.md b/spaces/hcapp/sd-dreambooth-library-herge-style/README.md deleted file mode 100644 index 6c5cb57db95c420373874f076cfc7cc7b9d1bc2e..0000000000000000000000000000000000000000 --- a/spaces/hcapp/sd-dreambooth-library-herge-style/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Sd Dreambooth Library Herge Style -emoji: 🔥 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/docstore/__init__.py b/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/docstore/__init__.py deleted file mode 100644 index 6250d5c3aaf5de06e7daa358a513205f302527c2..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/docstore/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Wrappers on top of docstores.""" -from streamlit_langchain_chat.customized_langchain.docstore.in_memory import InMemoryDocstore - - -__all__ = [ - "InMemoryDocstore", -] diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_early_boundary_4.sh b/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_early_boundary_4.sh deleted file mode 100644 index f1ad9e623c1d44bb9fc92eeb0816e16ab69ff82d..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_early_boundary_4.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 --gres=gpu:1 --time=24:00:00 -#SBATCH 
--job-name=Task505_glacier_mtl_early_boundary_4 - -export data_raw="/home/woody/iwi5/iwi5039h/data_raw" -export nnUNet_raw_data_base="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_raw_data_base/" -export nnUNet_preprocessed="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_preprocessed/" -export RESULTS_FOLDER="/home/woody/iwi5/iwi5039h/nnUNet_data/RESULTS_FOLDER" - -cd nnunet_glacer -pwd -conda activate nnunet - -#python3 nnunet/dataset_conversion/Task504_Glacier_mtl_recon.py -data_percentage 100 -base $data_raw -#python3 nnunet/experiment_planning/nnUNet_plan_and_preprocess.py -t 504 -pl3d None -pl2d ExperimentPlanner2D_mtl - -python3 nnunet/run/run_training.py 2d nnUNetTrainerMTLearly_boundary 505 4 -p nnUNetPlans_mtl --disable_postprocessing_on_folds -python3 nnunet/inference/predict_simple.py -i $nnUNet_raw_data_base/nnUNet_raw_data/Task505_Glacier_mtl_boundary/imagesTs -o $RESULTS_FOLDER/test_predictions/Task505_Glacier_mtl_boundary/early/fold_0 -t 505 -m 2d -f 4 -p nnUNetPlans_mtl -tr nnUNetTrainerMTLearly_boundary -python3 nnunet/dataset_conversion/Task505_Glacier_mtl_recon_reverse.py -i $RESULTS_FOLDER/test_predictions/Task505_Glacier_mtl_boundary/early/fold_4 -python3 ./evaluate_nnUNet.py --predictions $RESULTS_FOLDER/test_predictions/Task505_Glacier_mtl_boundary/early/fold_4/pngs --labels_fronts $data_raw/fronts/test --labels_zones $data_raw/zones/test --sar_images $data_raw/sar_images/test diff --git a/spaces/housexu123/bingo-2.0/src/lib/isomorphic/browser.ts b/spaces/housexu123/bingo-2.0/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/huggingface-projects/auto-retrain/Dockerfile b/spaces/huggingface-projects/auto-retrain/Dockerfile deleted file mode 100644 index 3b5cd4fad0c2dab1e93878be8e1d7e055fcc0b27..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/auto-retrain/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.9 - -RUN useradd -m -u 1000 user -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -WORKDIR $HOME/app - -COPY --chown=user requirements.txt requirements.txt -RUN pip install --no-cache-dir --upgrade -r requirements.txt - -COPY --chown=user . . - -CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "7860"] diff --git a/spaces/huggingface-projects/diffuse-the-rest/build/index.html b/spaces/huggingface-projects/diffuse-the-rest/build/index.html deleted file mode 100644 index c61ee51385d7b97e0bf831277b58b611c7f36dc3..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/diffuse-the-rest/build/index.html +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - - - - - - - - - - -
                    - - - -
                    -

Loading…

                    -

                    █▒▒▒▒▒▒▒▒▒

                    -
                    -
                    - - - -
                    - - diff --git a/spaces/huggingface/HuggingDiscussions/index.html b/spaces/huggingface/HuggingDiscussions/index.html deleted file mode 100644 index deeb2e5366c531b699eec49ac53d039ef4cc6f9f..0000000000000000000000000000000000000000 --- a/spaces/huggingface/HuggingDiscussions/index.html +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - Hugging Discussions - Introducing Pull Requests and Discussions 🥳 - - - - - - - - - -
                    -

                    Hugging Face Hub Discussions

                    -

                    Let's discuss and improve the Hub's latest features

                    -

                    Go to the community tab to get started

                    - -

                    [FEEDBACK] Follow

                    1
                    #14 opened about 7 hours ago - by - victor

                    [FEEDBACK] Collections

                    20
                    #12 opened about 2 months ago - by - victor

                    [FEEDBACK] Notifications

                    41
                    #6 opened over 1 year ago - by - victor
                    -
                    - - diff --git a/spaces/hungln1102/emotion_classification_surreynlp_2023/app.py b/spaces/hungln1102/emotion_classification_surreynlp_2023/app.py deleted file mode 100644 index 1f722ebfb82048c4ea25e0db23137cdd8d31d0e9..0000000000000000000000000000000000000000 --- a/spaces/hungln1102/emotion_classification_surreynlp_2023/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import streamlit as st - -from emotion_model import emotion_predict -from datetime import datetime -import logging - -name = st.text_input("Please enter your sentence here:") -result = "" -result_check = "" -logging.basicConfig(filename='example.log', encoding='utf-8', level=logging.DEBUG) - -if (st.button('Submit')): - result = name.title() - try: - result_check = emotion_predict(result) - except Exception as E: - result_check = "Error" - print(E) - st.success(result_check) - -user_response = st.text_input("Please give your emotion that you are thinking is correct") -if (st.button('Submit Feedback')): - if user_response: - st.info("Thank you for contributing") - logging.info(f"{result}, {result_check}, {user_response}, {datetime.now()}") - else: - st.info("Thank you for using") - logging.info(f"{result}, {result_check}, {datetime.now()}") diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/configs/base.py b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/configs/base.py deleted file mode 100644 index 78e4b36a9142b649ec39a8c59331bb2557f2ad57..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/configs/base.py +++ /dev/null @@ -1,56 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = "ms1mv3_arcface_r50" - -config.dataset = "ms1m-retinaface-t1" -config.embedding_size = 512 -config.sample_rate = 1 -config.fp16 = False -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -if config.dataset == "emore": - config.rec = "/train_tmp/faces_emore" - config.num_classes = 85742 - config.num_image = 5822653 - config.num_epoch = 16 - config.warmup_epoch = -1 - config.decay_epoch = [8, 14, ] - config.val_targets = ["lfw", ] - -elif config.dataset == "ms1m-retinaface-t1": - config.rec = "/train_tmp/ms1m-retinaface-t1" - config.num_classes = 93431 - config.num_image = 5179510 - config.num_epoch = 25 - config.warmup_epoch = -1 - config.decay_epoch = [11, 17, 22] - config.val_targets = ["lfw", "cfp_fp", "agedb_30"] - -elif config.dataset == "glint360k": - config.rec = "/train_tmp/glint360k" - config.num_classes = 360232 - config.num_image = 17091657 - config.num_epoch = 20 - config.warmup_epoch = -1 - config.decay_epoch = [8, 12, 15, 18] - config.val_targets = ["lfw", "cfp_fp", "agedb_30"] - -elif config.dataset == "webface": - config.rec = "/train_tmp/faces_webface_112x112" - config.num_classes = 10572 - config.num_image = "forget" - config.num_epoch = 34 - config.warmup_epoch = -1 - config.decay_epoch = [20, 28, 32] - config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/iccv23-diffusers-demo/stable-diffusion-image-variations/README.md b/spaces/iccv23-diffusers-demo/stable-diffusion-image-variations/README.md deleted file mode 100644 index 5e316aef93dd637e2d41672a74a5a4e18fa4039a..0000000000000000000000000000000000000000 --- 
a/spaces/iccv23-diffusers-demo/stable-diffusion-image-variations/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion Image Variations -emoji: 🖼️ -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/indichealth/indic-health-demo/utils/train_utils.py b/spaces/indichealth/indic-health-demo/utils/train_utils.py deleted file mode 100644 index 875692d4de6897b37d64871114cfd1c5de751354..0000000000000000000000000000000000000000 --- a/spaces/indichealth/indic-health-demo/utils/train_utils.py +++ /dev/null @@ -1,168 +0,0 @@ -from torch.utils.data import SequentialSampler, DataLoader -from tqdm import tqdm -from seqeval.metrics import f1_score, classification_report, performance_measure as confusion_matrix -import torch -import torch.nn.functional as F - -# from sklearn.metrics import confusion_matrix - - -def add_xlmr_args(parser): - """ - Adds training and validation arguments to the passed parser - """ - - parser.add_argument("--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the .tsv files (or other data files) for the task.") - parser.add_argument("--pretrained_path", default=None, type=str, required=True, - help="pretrained XLM-Roberta model path") - parser.add_argument("--task_name", - default=None, - type=str, - required=True, - help="The name of the task to train.") - parser.add_argument("--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.") - # Other parameters - parser.add_argument("--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from s3") - parser.add_argument("--max_seq_length", - default=128, - type=int, - help="The maximum total input sequence length after WordPiece tokenization. \n" - "Sequences longer than this will be truncated, and sequences shorter \n" - "than this will be padded.") - parser.add_argument("--do_train", - action='store_true', - help="Whether to run training.") - parser.add_argument("--do_eval", - action='store_true', - help="Whether to run eval or not.") - parser.add_argument("--eval_on", - default="dev", - help="Whether to run eval on the dev set or test set.") - parser.add_argument("--do_lower_case", - action='store_true', - help="Set this flag if you are using an uncased model.") - parser.add_argument("--train_batch_size", - default=32, - type=int, - help="Total batch size for training.") - parser.add_argument("--eval_batch_size", - default=32, - type=int, - help="Total batch size for eval.") - parser.add_argument("--learning_rate", - default=5e-5, - type=float, - help="The initial learning rate for Adam.") - parser.add_argument("--num_train_epochs", - default=3, - type=int, - help="Total number of training epochs to perform.") - parser.add_argument("--warmup_proportion", - default=0.1, - type=float, - help="Proportion of training to perform linear learning rate warmup for. 
" - "E.g., 0.1 = 10%% of training.") - parser.add_argument("--weight_decay", default=0.01, type=float, - help="Weight deay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, - help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, - help="Max gradient norm.") - parser.add_argument("--no_cuda", - action='store_true', - help="Whether not to use CUDA when available") - parser.add_argument('--seed', - type=int, - default=42, - help="random seed for initialization") - parser.add_argument('--gradient_accumulation_steps', - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.") - parser.add_argument('--fp16', - action='store_true', - help="Whether to use 16-bit float precision instead of 32-bit") - parser.add_argument('--fp16_opt_level', type=str, default='O1', - help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." - "See details at https://nvidia.github.io/apex/amp.html") - parser.add_argument('--loss_scale', - type=float, default=0, - help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" - "0 (default value): dynamic loss scaling.\n" - "Positive power of 2: static loss scaling value.\n") - parser.add_argument('--dropout', - type=float, default=0.3, - help = "training dropout probability") - - parser.add_argument('--freeze_model', - action='store_true', default=False, - help = "whether to freeze the XLM-R base model and train only the classification heads") - - return parser - - -def evaluate_model(model, eval_dataset, xlmr_label_list, batch_size, device): - """ - Evaluates an NER model on the eval_dataset provided. - Returns: - F1_score: Macro-average f1_score on the evaluation dataset. 
- Report: detailed classification report - """ - - # Run prediction for full data - eval_sampler = SequentialSampler(eval_dataset) - eval_dataloader = DataLoader( - eval_dataset, sampler=eval_sampler, batch_size=batch_size) - - model.eval() # turn of dropout - - y_true = [] - y_pred = [] - - label_map = {i: label for i, label in enumerate(xlmr_label_list, 1)} - - for input_ids, label_ids, l_mask, valid_ids in eval_dataloader: - - input_ids = input_ids.to(device) - label_ids = label_ids.to(device) - - valid_ids = valid_ids.to(device) - l_mask = l_mask.to(device) - - with torch.no_grad(): - logits = model(input_ids, labels=None, labels_mask=None, - valid_mask=valid_ids) - - logits = torch.argmax(logits, dim=2) - logits = logits.detach().cpu().numpy() - label_ids = label_ids.cpu().numpy() - - for i, cur_label in enumerate(label_ids): - temp_1 = [] - temp_2 = [] - - for j, m in enumerate(cur_label): - if valid_ids[i][j]: # if it's a valid label - temp_1.append(label_map[m]) - temp_2.append(label_map[logits[i][j]]) - - assert len(temp_1) == len(temp_2) - y_true.append(temp_1) - y_pred.append(temp_2) - - #report = classification_report(y_true, y_pred, digits=4) - #cm = confusion_matrix(y_true, y_pred) - #f1 = f1_score(y_true, y_pred, average='Macro') - - return y_pred diff --git a/spaces/innnky/nyaru-svc2.0-advanced/README.md b/spaces/innnky/nyaru-svc2.0-advanced/README.md deleted file mode 100644 index e6dad7bb7ff2c77230b4ba09997a52e9ab14edf3..0000000000000000000000000000000000000000 --- a/spaces/innnky/nyaru-svc2.0-advanced/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Nyaru Svc2.0 Advanced -emoji: 🌍 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Analog Electronics Jb Gupta Pdf REPACK Free Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Analog Electronics Jb Gupta Pdf REPACK Free Download.md deleted file mode 100644 index abec6eda768397a5dbea2c14c9a42d553fec7567..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Analog Electronics Jb Gupta Pdf REPACK Free Download.md +++ /dev/null @@ -1,366 +0,0 @@ - -

                    Analog Electronics JB Gupta PDF Free Download: A Review of the Book

                    -

                    Analog electronics is a branch of electronics that deals with the design and analysis of circuits that use continuous signals such as voltage and current. Analog electronics is essential for understanding and working with various devices and systems such as amplifiers, filters, oscillators, modulators, demodulators, sensors, transducers and more. If you are looking for a comprehensive and easy-to-follow book on analog electronics, you might want to check out Analog Electronics by JB Gupta. In this article, we will give you an overview of Analog Electronics JB Gupta PDF free download, its contents, features, benefits and limitations.

                    -

                    analog electronics jb gupta pdf free download


DOWNLOAD ⇒ https://urlin.us/2uEwKk



                    -

                    What is Analog Electronics by JB Gupta?

                    -

                    Analog Electronics by JB Gupta is a book that covers the fundamentals and applications of analog electronics in a clear and concise manner. It is written by JB Gupta, a professor of electrical engineering at Delhi College of Engineering. The book was first published in 2011 by S.K. Kataria & Sons and has since been updated and revised several times. The book is suitable for undergraduate students of electrical, electronics and communication engineering as well as for professionals and hobbyists who want to learn more about analog electronics.

                    -

                    What are the contents of Analog Electronics by JB Gupta?

                    -

                    Analog Electronics by JB Gupta has 404 pages divided into 14 chapters. The chapters are:

                    -
                      -
                    • Chapter 1: Introduction to Analog Electronics
                    • -
                    • Chapter 2: Diodes and Applications
                    • -
                    • Chapter 3: Bipolar Junction Transistors
                    • -
                    • Chapter 4: Field Effect Transistors
                    • -
                    • Chapter 5: Transistor Biasing and Stabilization
                    • -
                    • Chapter 6: Small Signal Amplifiers
                    • -
                    • Chapter 7: Large Signal Amplifiers
                    • -
                    • Chapter 8: Feedback Amplifiers
                    • -
                    • Chapter 9: Oscillators
                    • -
                    • Chapter 10: Operational Amplifiers
                    • -
                    • Chapter 11: Linear Integrated Circuits
                    • -
                    • Chapter 12: Wave Shaping Circuits
                    • -
                    • Chapter 13: Modulation and Demodulation Circuits
                    • -
                    • Chapter 14: Power Supplies
                    • -
                    -

                    The book also has appendices that cover topics such as number systems, Boolean algebra, Karnaugh maps, Laplace transforms, Fourier series and transforms, Bode plots, filter design and more. The book also has a glossary of terms, a list of symbols and abbreviations, a bibliography and an index.

                    -

                    -

                    What are the features of Analog Electronics by JB Gupta?

                    -

                    Analog Electronics by JB Gupta has many features that make it a useful and user-friendly book for learning analog electronics. Some of the main features are:

                    -
                      -
                    • Simplified language and style that makes the concepts easy to understand.
                    • -
                    • Numerous examples, diagrams, tables and graphs that illustrate the theory and applications.
                    • -
                    • Solved problems at the end of each chapter that help the readers to test their knowledge and skills.
                    • -
                    • Objective type questions with answers that help the readers to prepare for competitive exams.
                    • -
                    • Review questions at the end of each chapter that help the readers to revise the concepts.
                    • -
                    • Experiments at the end of each chapter that help the readers to gain practical experience.
                    • -
                    -

                    What are the benefits of Analog Electronics by JB Gupta?

                    -

                    Analog Electronics by JB Gupta has many benefits that make it a valuable book for learning analog electronics. Some of the main benefits are:

                    -
                      -
                    • Comprehensive coverage of analog electronics from basics to advanced topics.
                    • -
                    • Clear explanation of concepts with real-life examples and applications.
                    • -
                    • Systematic approach to problem solving with step-by-step solutions.
                    • -
                    • Updated information on latest developments and trends in analog electronics.
                    • -
                    • Affordable price and easy availability in PDF format.
                    • -
                    - -

                    What are the limitations of Analog Electronics by JB Gupta?

                    - -

                    Analog Electronics by JB Gupta also has some limitations that you should be aware of before downloading it. Some of the main limitations are:

                    - -
                      - -
                    • Limited availability in print format. The book is mostly available in PDF format online.
                    • - -
                    • Possible errors or typos in some sections or problems. The book may have some minor mistakes or inaccuracies that need to be corrected or verified.
                    • - -
                    • Lack of color illustrations or animations. The book only has black-and-white diagrams and graphs that may not be very appealing or engaging for some readers.
                    • - -
                    - -

                    How to download Analog Electronics by JB Gupta PDF for free?

                    - -

                    If you want to download Analog Electronics by JB Gupta PDF for free, you have two options:

                    - -
                      - -
                    1. You can download it from Google Drive by clicking here. This is a PDF file that contains the objective type questions from the book along with their answers.
                    2. - -
                    3. You can download it from Google Books by clicking here. This is a preview version that allows you to read some pages from the book online.
                    4. - -
                    - -

                    After downloading Analog Electronics by JB Gupta PDF for free, you can read it on your computer or mobile device using any PDF reader software. You can also print it or share it with others if you want.

                    - -

                    Conclusion

                    - -

                    Analog Electronics by JB Gupta PDF free download is a good option for anyone who wants to learn analog electronics in a simple and comprehensive way. It is a book that covers the fundamentals and applications of analog electronics in a clear and concise manner. It is written by JB Gupta, a professor of electrical engineering at Delhi College of Engineering. The book has many features such as simplified language, numerous examples, solved problems, objective type questions, review questions, experiments -and more -that make it a useful -and user-friendly -book for learning analog electronics. -It also has some limitations such as limited availability in print format, -possible errors or typos, -lack of color illustrations -or animations -that you should consider before downloading it. -If you want to download Analog Electronics by JB Gupta PDF for free, -you can either get it from Google Drive -or Google Books. -You can then read it on your computer or mobile device -using any PDF reader software. -You can also print it or share it with others if you want.

                    - - -- The applications and advantages of analog electronics in various fields and industries. -- The comparison and contrast of analog electronics and digital electronics. -- The future trends and developments of analog electronics and its impact on society and technology. -

                    What are the applications and advantages of analog electronics?

                    -

                    Analog electronics has many applications and advantages in various fields and industries. Some of them are:

                    -
                      -
• Communication: Analog electronics is used to transmit and receive analog signals such as voice, music, video and radio waves. Analog circuits also modulate and demodulate these signals, impressing a message onto a carrier for transmission and recovering it again at the receiver (a minimal modulation sketch follows this list).
                    • -
                    • Measurement: Analog electronics is used to measure physical quantities such as voltage, current, resistance, temperature, pressure, light, sound and more. Analog electronics can also amplify, filter, convert and display these quantities using devices such as meters, oscilloscopes, multimeters and sensors.
                    • -
                    • Control: Analog electronics is used to control the operation and performance of various systems and devices such as motors, generators, robots, machines, vehicles and more. Analog electronics can also provide feedback, stability and optimization using devices such as controllers, regulators, comparators and timers.
                    • -
                    • Entertainment: Analog electronics is used to create and enjoy various forms of entertainment such as music, movies, games and more. Analog electronics can also produce and process sound and image signals using devices such as speakers, microphones, cameras, recorders and players.
                    • -
                    • Education: Analog electronics is used to teach and learn the principles and concepts of electronics and engineering. Analog electronics can also provide hands-on experience and practical skills using devices such as breadboards, kits, circuits and experiments.
                    • -
                    -
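To make the modulation point in the Communication item above concrete, here is a minimal Python sketch of amplitude modulation. It is only an illustration: the sample rate, frequencies and modulation index are assumed values chosen for the example, not anything taken from the book or this article.

```python
import numpy as np

# Minimal amplitude-modulation (AM) sketch: impress a 1 kHz message onto a
# 20 kHz carrier. All numbers below are illustrative assumptions.
fs = 200_000                              # sample rate in Hz
t = np.arange(0, 0.005, 1 / fs)           # 5 ms of time samples
message = np.sin(2 * np.pi * 1_000 * t)   # message signal x(t)
carrier = np.cos(2 * np.pi * 20_000 * t)  # carrier c(t)
m = 0.5                                   # modulation index
am = (1 + m * message) * carrier          # transmitted AM waveform
# Envelope detection (the demodulation step) starts by rectifying the
# received waveform; a low-pass filter would then recover 1 + m*x(t).
envelope = np.abs(am)
```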

                    Analog electronics has many advantages over digital electronics such as:

                    -
                      -
                    • Simplicity: Analog electronics is simpler and easier to understand and design than digital electronics. Analog electronics does not require complex algorithms or programming languages to operate.
                    • -
                    • Cost: Analog electronics is cheaper and more affordable than digital electronics. Analog electronics does not require expensive components or equipment to function.
                    • -
                    • Reliability: Analog electronics is more reliable and robust than digital electronics. Analog electronics does not suffer from noise or interference that can affect the quality or accuracy of digital signals.
                    • -
                    • Flexibility: Analog electronics is more flexible and adaptable than digital electronics. Analog electronics can handle a wide range of signals with different frequencies, amplitudes and shapes without losing information or resolution.
                    • -
                    - -

                    How to compare and contrast analog electronics and digital electronics?

                    - -

                    Analog electronics and digital electronics are two different types of electronics that have their own characteristics, advantages and disadvantages. To compare and contrast analog electronics and digital electronics, we can use the following criteria:

                    - - - - - - - - - - - - - - - -
| Criteria | Analog Electronics | Digital Electronics |
|---|---|---|
| Type of signal | Continuous signal that varies smoothly over time | Discrete signal that has only two values (0 or 1) at any time |
| Representation of information | Physical quantity such as voltage or current | Binary code such as bits or bytes |
| Processing of information | Analog circuit that uses components such as resistors, capacitors, inductors, diodes, transistors etc. | Digital circuit that uses components such as logic gates, flip-flops, registers etc. |
| Accuracy of information | Depends on the quality of the signal and the components | Depends on the number of bits used to represent the signal (see the note after this table) |
| Noise immunity | Low immunity to noise or interference that can distort the signal | High immunity to noise or interference that can be filtered out by thresholding |
| Power consumption | Low power consumption that depends on the current flow | High power consumption that depends on the switching frequency |
| Simplicity | Simpler and easier to understand and design | More complex and difficult to understand and design |
| Cost | Cheaper and more affordable | More expensive and less affordable |
| Reliability | More reliable and robust | Less reliable and fragile |
| Flexibility | More flexible and adaptable | Less flexible and rigid |
| Applications | Communication, measurement, control, entertainment etc. | Data processing, computation, storage etc. |
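A brief note on the "Accuracy of information" row: as a standard textbook result (not something stated in the original article), an ideal uniform quantizer driven by a full-scale sine wave has a signal-to-quantization-noise ratio of roughly

$$\mathrm{SQNR} \approx 6.02\,N + 1.76\ \text{dB}$$

for an N-bit representation, i.e. about 50 dB at 8 bits and about 98 dB at 16 bits. This is why digital accuracy is said to depend on word length, while analog accuracy is limited by component tolerances and noise.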
                    - -

                    What are the future trends and developments of analog electronics?

                    - -

Analog electronics is not obsolete or outdated. It is still relevant and important in many fields and industries. Analog electronics is also evolving and improving, with new trends and developments that aim to enhance its performance and functionality. Some of these trends and developments are:

                    - -
                      - -
• Nanoelectronics: The use of nanoscale materials and devices to create analog circuits and systems that have higher speed, lower power consumption, higher density, better integration and new functionalities.
                    • - -
• Bioelectronics: The use of biological materials and processes to create analog circuits and systems that have higher sensitivity, selectivity, compatibility, biodegradability and self-assembly.
                    • - -
• Mixed-signal Electronics: The use of both analog and digital signals to create circuits and systems that have higher performance, efficiency, accuracy, complexity and versatility.
                    • - -
• Analog Artificial Intelligence: The use of analog circuits and systems to emulate the functions and capabilities of natural intelligence such as learning, adaptation, optimization, recognition, decision making etc.
                    • - -
• Analog Quantum Computing: The use of quantum phenomena such as superposition, entanglement, tunneling etc. to create analog circuits and systems that have higher speed, parallelism, scalability, security and computational power.
                    • - -
                    - -

                    Conclusion

                    - -

Analog Electronics JB Gupta PDF free download is a good option for anyone who wants to learn analog electronics in a simple and comprehensive way. It is a book that covers the fundamentals and applications of analog electronics in a clear and concise manner. It is written by JB Gupta, a professor of electrical engineering at Delhi College of Engineering. The book has many features such as simplified language, numerous examples, solved problems, objective type questions, review questions, experiments and more that make it a useful and user-friendly book for learning analog electronics. It also has some limitations such as limited availability in print format, possible errors or typos, and a lack of color illustrations or animations that you should consider before downloading it. If you want to download Analog Electronics by JB Gupta PDF for free, you can either get it from Google Drive or Google Books. You can then read it on your computer or mobile device using any PDF reader software. You can also print it or share it with others if you want. To use Analog Electronics by JB Gupta, you need to follow some basic steps such as downloading, opening, reading, solving, answering, reviewing and experimenting with the book. You can also follow some tips and tricks such as reading sequentially, reviewing previous chapters, making notes, practicing more and seeking help from online resources to improve your learning experience and results.

Analog electronics has many applications and advantages in various fields and industries. It also has some characteristics, advantages and disadvantages that distinguish it from digital electronics. You can compare and contrast analog electronics and digital electronics using different criteria. Analog electronics is also evolving and improving with new trends and developments that aim to enhance its performance and functionality. Analog electronics is not obsolete or outdated. It is still relevant and important in many fields and industries.

                    -


                    3cee63e6c2
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocom Cars CDP PRO 2.10.3 MultiLanguage.torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Autocom Cars CDP PRO 2.10.3 MultiLanguage.torrent.md deleted file mode 100644 index aec1dc9c3d9832053b880793268ddd8f4972e769..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocom Cars CDP PRO 2.10.3 MultiLanguage.torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Autocom Cars CDP PRO 2.10.3 MultiLanguage.torrent


                    Download File >>> https://urlin.us/2uEx3W



                    -
                    - 8a78ff9644
                    -
                    -
                    -

                    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Download Asus Thermal Radar 2 43.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Download Asus Thermal Radar 2 43.md deleted file mode 100644 index d4c68dcdc1861a2d4e172f73a45c225219133946..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Download Asus Thermal Radar 2 43.md +++ /dev/null @@ -1,15 +0,0 @@ -

                    download asus thermal radar 2 43


DOWNLOAD ✶✶✶ https://urlin.us/2uEyHk



                    -
                    -Sep 26, 2016 — Download BEST Asus Thermal Radar 2 43. Download Asus Thermal Radar 2 43 ❎ DOWNLOAD: ••• asus thermal imaging radar. Asus Tf700t Firmware. -May 8, 2016 — Download Asus Radar 2 Windows 7 Driver For Windows XP Asus Tf300. -Download asus radar driver windows 7. Drivers for Windows 10. -Free programs for Windows XP, Windows 7, Windows 10. -Nowadays, in different areas of human life, a large number of different gadgets are used, as well as special devices designed to measure temperature. -Asus SABERTOOTH Z87 Manual Online: Thermal Radar 2. Thermal Radar 2 comes with the following four utilities in one interface: Thermal Tuning, Thermal Status, ... Read moreAsus SABERTOOTH Z87 Manual Online: Thermal Radar 2.Thermal Radar 2 comes with the following four utilities in one interface: Thermal Tuning, Thermal Status, Thermal Monitor, Thermal Radar 2 and Smart Fan 5 Hide -ASUS SaberTooth Z87 Plus review is a good PC for gamers. -Motherboard ASUS SABERTOOTH Z87 PLUS has long taken its place in the niche of top solutions. -Read moreASUS SaberTooth Z87 Plus review is a good PC for gamers. -ASUS SABERTOOTH Z87 PLUS motherboard is already 8a78ff9644
                    -
                    -
                    -

                    diff --git a/spaces/inreVtussa/clothingai/Examples/ArtCAM 2016 Free Download.md b/spaces/inreVtussa/clothingai/Examples/ArtCAM 2016 Free Download.md deleted file mode 100644 index 50c5968e09f903fd6a22312e4c2c8bea90cb08d8..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/ArtCAM 2016 Free Download.md +++ /dev/null @@ -1,9 +0,0 @@ - -

ArtCAM 2018 is a handy application for designers that will let you unleash your imagination. The program provides scalable CAD modeling as well as CNC machining options for a very broad range of businesses. Autodesk ArtCAM 2018 comes in handy for machinists, engravers, and woodworkers who design and manufacture both 2D and 3D models. Get more software from Get Into PC.

                    -

                    ArtCAM 2016 Free Download


Download Zip ✏ ✏ ✏ https://tiurll.com/2uCkYm



                    -

                    autodesk artcam 2018 software actually handy application the designers thatll permit you to unravel your imagination. this program provides scalable cad modeling in addition to cnc machining options for very broad assortment of businesses. autodesk artcam 2018 software is available in handy for the machinists, engravers, and woodworkers for creating and manufacturing 2d in addition to 3d models.




(adsbygoogle = window.adsbygoogle || []).push({});

                    -

                    autodesk artcam 2018 software actually handy application the designers thatll permit you to unravel your imagination. this program provides scalable cad modeling in addition to cnc machining alternatives for very broad assortment of businesses. autodesk artcam 2018 software is available in handy for the machinists, engravers, and woodworkers for creating and manufacturing 2d in addition to 3d models. get more softwares from get into pc

                    -

                    autodesk artcam 2018 software actually handy application the designers thatll permit you to unravel your imagination. this program provides scalable cad modeling in addition to cnc machining options for very broad assortment of businesses.

                    -

                    899543212b
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/At Targheeb Wat Tarheeb Pdf NEW Download.md b/spaces/inreVtussa/clothingai/Examples/At Targheeb Wat Tarheeb Pdf NEW Download.md deleted file mode 100644 index 34b1ebeefb3622968820550d1829a5a20a2f8d53..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/At Targheeb Wat Tarheeb Pdf NEW Download.md +++ /dev/null @@ -1,13 +0,0 @@ -

                    At Targheeb Wat Tarheeb Pdf Download


                    DOWNLOAD ::: https://tiurll.com/2uCiQR



                    - -January 3, 2018 - AlTargheeb WalTarheebAttargheeb Wattarheeb. January 3, 2018 is the day everyone should celebrate the day they will live in peace and freedom! -On the day when they will live in peace and security! -The day they can live in peace and security. -January 3, 2018. -The day when they can live by their own choice. -The day they can choose to live their own way. -The day they can choose life! -On the day they can live freely! 8a78ff9644
                    -
                    -
                    -

                    diff --git a/spaces/inreVtussa/clothingai/Examples/Biblia De Estudo GLOW Setup Free.md b/spaces/inreVtussa/clothingai/Examples/Biblia De Estudo GLOW Setup Free.md deleted file mode 100644 index a1669a88f17d71b6e154db3f988a895520aaa58a..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Biblia De Estudo GLOW Setup Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Biblia De Estudo GLOW Setup Free


Download Zip ✑ ✑ ✑ https://tiurll.com/2uCkgn



                    - -See more ideas about bible quotes, christian quotes, bible verses. ... Best web hosting, free tools, uptime guarantee and FREE SETUP - Best affordable hosting ... bíblia evangélica, bíblia sagrada de estudo, bíblia pentecostal #estudobíblico ... File For Silhouette Cricut Glowforge Laser Created by Michael Scott Hassler This ... 1fdad05405
                    -
                    -
                    -

                    diff --git a/spaces/inreVtussa/clothingai/Examples/Dilwale Movie _HOT_ Download In Kickass Torrent.md b/spaces/inreVtussa/clothingai/Examples/Dilwale Movie _HOT_ Download In Kickass Torrent.md deleted file mode 100644 index 1cf3638f388c683f532307d777f3467abdce1005..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Dilwale Movie _HOT_ Download In Kickass Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Dilwale Movie Download In Kickass Torrent


DOWNLOAD ⇒ https://tiurll.com/2uCimp



                    -
                    -... Dilwale2015 Hindi Movie Online, Dilwale 2015 Watch Online Dailymotion, Youtube, Putlocker, Vodlocker, Dilwale 2015 Movie Download Torrent, Kickass, ... 4d29de3e1b
                    -
                    -
                    -

                    diff --git a/spaces/ivntl/MMS/uroman/lib/NLP/UTF8.pm b/spaces/ivntl/MMS/uroman/lib/NLP/UTF8.pm deleted file mode 100644 index b28cb4dede3b84f45aeade2e24f240e3a39e7cc1..0000000000000000000000000000000000000000 --- a/spaces/ivntl/MMS/uroman/lib/NLP/UTF8.pm +++ /dev/null @@ -1,1404 +0,0 @@ -################################################################ -# # -# UTF8 # -# # -################################################################ - -package NLP::UTF8; - -use NLP::utilities; -$util = NLP::utilities; - -%empty_ht = (); - -sub new { - local($caller) = @_; - - my $object = {}; - my $class = ref( $caller ) || $caller; - bless($object, $class); - return $object; -} - -sub unicode_string2string { -# input: string that might contain unicode sequences such as "U+0627" -# output: string in pure utf-8 - local($caller,$s) = @_; - - my $pre; - my $unicode; - my $post; - my $r1; - my $r2; - my $r3; - - ($pre,$unicode,$post) = ($s =~ /^(.*)(?:U\+|\\u)([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])(.*)$/); - return $s unless defined($post); - $r1 = $caller->unicode_string2string($pre); - $r2 = $caller->unicode_hex_string2string($unicode); - $r3 = $caller->unicode_string2string($post); - $result = $r1 . $r2 . $r3; - return $result; -} - -sub unicode_hex_string2string { -# input: "0627" (interpreted as hex code) -# output: utf-8 string for Arabic letter alef - local($caller,$unicode) = @_; - return "" unless defined($unicode); - my $d = hex($unicode); - return $caller->unicode2string($d); -} - -sub unicode2string { -# input: non-neg integer, e.g. 0x627 -# output: utf-8 string for Arabic letter alef - local($caller,$d) = @_; - return "" unless defined($d) && $d >= 0; - return sprintf("%c",$d) if $d <= 0x7F; - - my $lastbyte1 = ($d & 0x3F) | 0x80; - $d >>= 6; - return sprintf("%c%c",$d | 0xC0, $lastbyte1) if $d <= 0x1F; - - my $lastbyte2 = ($d & 0x3F) | 0x80; - $d >>= 6; - return sprintf("%c%c%c",$d | 0xE0, $lastbyte2, $lastbyte1) if $d <= 0xF; - - my $lastbyte3 = ($d & 0x3F) | 0x80; - $d >>= 6; - return sprintf("%c%c%c%c",$d | 0xF0, $lastbyte3, $lastbyte2, $lastbyte1) if $d <= 0x7; - - my $lastbyte4 = ($d & 0x3F) | 0x80; - $d >>= 6; - return sprintf("%c%c%c%c%c",$d | 0xF8, $lastbyte4, $lastbyte3, $lastbyte2, $lastbyte1) if $d <= 0x3; - - my $lastbyte5 = ($d & 0x3F) | 0x80; - $d >>= 6; - return sprintf("%c%c%c%c%c%c",$d | 0xFC, $lastbyte5, $lastbyte4, $lastbyte3, $lastbyte2, $lastbyte1) if $d <= 0x1; - return ""; # bad input -} - -sub html2utf8 { - local($caller, $string) = @_; - - return $string unless $string =~ /\&\#\d{3,5};/; - - my $prev = ""; - my $s = $string; - while ($s ne $prev) { - $prev = $s; - ($pre,$d,$post) = ($s =~ /^(.*)\&\#(\d+);(.*)$/); - if (defined($d) && ((($d >= 160) && ($d <= 255)) - || (($d >= 1500) && ($d <= 1699)) - || (($d >= 19968) && ($d <= 40879)))) { - $html_code = "\&\#" . $d . 
";"; - $utf8_code = $caller->unicode2string($d); - $s =~ s/$html_code/$utf8_code/; - } - } - return $s; -} - -sub xhtml2utf8 { - local($caller, $string) = @_; - - return $string unless $string =~ /\&\#x[0-9a-fA-F]{2,5};/; - - my $prev = ""; - my $s = $string; - while ($s ne $prev) { - $prev = $s; - if (($pre, $html_code, $x, $post) = ($s =~ /^(.*)(\&\#x([0-9a-fA-F]{2,5});)(.*)$/)) { - $utf8_code = $caller->unicode_hex_string2string($x); - $s =~ s/$html_code/$utf8_code/; - } - } - return $s; -} - -sub utf8_marker { - return sprintf("%c%c%c\n", 0xEF, 0xBB, 0xBF); -} - -sub enforcer { -# input: string that might not conform to utf-8 -# output: string in pure utf-8, with a few "smart replacements" and possibly "?" - local($caller,$s,$no_repair) = @_; - - my $ascii; - my $utf8; - my $rest; - - return $s if $s =~ /^[\x00-\x7F]*$/; - - $no_repair = 0 unless defined($no_repair); - $orig = $s; - $result = ""; - - while ($s ne "") { - ($ascii,$rest) = ($s =~ /^([\x00-\x7F]+)(.*)$/); - if (defined($ascii)) { - $result .= $ascii; - $s = $rest; - next; - } - ($utf8,$rest) = ($s =~ /^([\xC0-\xDF][\x80-\xBF])(.*)$/); - ($utf8,$rest) = ($s =~ /^([\xE0-\xEF][\x80-\xBF][\x80-\xBF])(.*)$/) - unless defined($rest); - ($utf8,$rest) = ($s =~ /^([\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])(.*)$/) - unless defined($rest); - ($utf8,$rest) = ($s =~ /^([\xF8-\xFB][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF])(.*)$/) - unless defined($rest); - if (defined($utf8)) { - $result .= $utf8; - $s = $rest; - next; - } - ($c,$rest) = ($s =~ /^(.)(.*)$/); - if (defined($c)) { - if ($no_repair) { $result .= "?"; } - elsif ($c =~ /\x85/) { $result .= "..."; } - elsif ($c =~ /\x91/) { $result .= "'"; } - elsif ($c =~ /\x92/) { $result .= "'"; } - elsif ($c =~ /\x93/) { $result .= $caller->unicode2string(0x201C); } - elsif ($c =~ /\x94/) { $result .= $caller->unicode2string(0x201D); } - elsif ($c =~ /[\xC0-\xFF]/) { - $c2 = $c; - $c2 =~ tr/[\xC0-\xFF]/[\x80-\xBF]/; - $result .= "\xC3$c2"; - } else { - $result .= "?"; - } - $s = $rest; - next; - } - $s = ""; - } - $result .= "\n" if ($orig =~ /\n$/) && ! 
($result =~ /\n$/); - return $result; -} - -sub split_into_utf8_characters { -# input: utf8 string -# output: list of sub-strings, each representing a utf8 character - local($caller,$string,$group_control, *ht) = @_; - - @characters = (); - $end_of_token_p_string = ""; - $skipped_bytes = ""; - $group_control = "" unless defined($group_control); - $group_ascii_numbers = ($group_control =~ /ASCII numbers/); - $group_ascii_spaces = ($group_control =~ /ASCII spaces/); - $group_ascii_punct = ($group_control =~ /ASCII punct/); - $group_ascii_chars = ($group_control =~ /ASCII chars/); - $group_xml_chars = ($group_control =~ /XML chars/); - $group_xml_tags = ($group_control =~ /XML tags/); - $return_only_chars = ($group_control =~ /return only chars/); - $return_trailing_whitespaces = ($group_control =~ /return trailing whitespaces/); - if ($group_control =~ /ASCII all/) { - $group_ascii_numbers = 1; - $group_ascii_spaces = 1; - $group_ascii_chars = 1; - $group_ascii_punct = 1; - } - if ($group_control =~ /(XML chars and tags|XML tags and chars)/) { - $group_xml_chars = 1; - $group_xml_tags = 1; - } - $orig_string = $string; - $string .= " "; - while ($string =~ /\S/) { - # one-character UTF-8 = ASCII - if ($string =~ /^[\x00-\x7F]/) { - if ($group_xml_chars - && (($dec_unicode, $rest) = ($string =~ /^&#(\d+);(.*)$/s)) - && ($utf8_char = $caller->unicode2string($dec_unicode))) { - push(@characters, $utf8_char); - $string = $rest; - } elsif ($group_xml_chars - && (($hex_unicode, $rest) = ($string =~ /^&#x([0-9a-f]{1,6});(.*)$/is)) - && ($utf8_char = $caller->unicode_hex_string2string($hex_unicode))) { - push(@characters, $utf8_char); - $string = $rest; - } elsif ($group_xml_chars - && (($html_entity_name, $rest) = ($string =~ /^&([a-z]{1,6});(.*)$/is)) - && ($dec_unicode = $ht{HTML_ENTITY_NAME_TO_DECUNICODE}->{$html_entity_name}) - && ($utf8_char = $caller->unicode2string($dec_unicode)) - ) { - push(@characters, $utf8_char); - $string = $rest; - } elsif ($group_xml_tags - && (($tag, $rest) = ($string =~ /^(<\/?[a-zA-Z][-_:a-zA-Z0-9]*(\s+[a-zA-Z][-_:a-zA-Z0-9]*=\"[^"]*\")*\s*\/?>)(.*)$/s))) { - push(@characters, $tag); - $string = $rest; - } elsif ($group_ascii_numbers && ($string =~ /^[12]\d\d\d\.[01]?\d.[0-3]?\d([^0-9].*)?$/)) { - ($date) = ($string =~ /^(\d\d\d\d\.\d?\d.\d?\d)([^0-9].*)?$/); - push(@characters,$date); - $string = substr($string, length($date)); - } elsif ($group_ascii_numbers && ($string =~ /^\d/)) { - ($number) = ($string =~ /^(\d+(,\d\d\d)*(\.\d+)?)/); - push(@characters,$number); - $string = substr($string, length($number)); - } elsif ($group_ascii_spaces && ($string =~ /^(\s+)/)) { - ($space) = ($string =~ /^(\s+)/); - $string = substr($string, length($space)); - } elsif ($group_ascii_punct && (($punct_seq) = ($string =~ /^(-+|\.+|[:,%()"])/))) { - push(@characters,$punct_seq); - $string = substr($string, length($punct_seq)); - } elsif ($group_ascii_chars && (($word) = ($string =~ /^(\$[A-Z]*|[A-Z]{1,3}\$)/))) { - push(@characters,$word); - $string = substr($string, length($word)); - } elsif ($group_ascii_chars && (($abbrev) = ($string =~ /^((?:Jan|Feb|Febr|Mar|Apr|Jun|Jul|Aug|Sep|Sept|Oct|Nov|Dec|Mr|Mrs|Dr|a.m|p.m)\.)/))) { - push(@characters,$abbrev); - $string = substr($string, length($abbrev)); - } elsif ($group_ascii_chars && (($word) = ($string =~ /^(second|minute|hour|day|week|month|year|inch|foot|yard|meter|kilometer|mile)-(?:long|old)/i))) { - push(@characters,$word); - $string = substr($string, length($word)); - } elsif ($group_ascii_chars && (($word) = ($string =~ 
/^(zero|one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|hundred|thousand|million|billion|trillion)-/i))) { - push(@characters,$word); - $string = substr($string, length($word)); - } elsif ($group_ascii_chars && (($word) = ($string =~ /^([a-zA-Z]+)(?:[ ,;%?|()"]|'s |' |\. |\d+[:hms][0-9 ])/))) { - push(@characters,$word); - $string = substr($string, length($word)); - } elsif ($group_ascii_chars && ($string =~ /^([\x21-\x27\x2A-\x7E]+)/)) { # exclude () - ($ascii) = ($string =~ /^([\x21-\x27\x2A-\x7E]+)/); # ASCII black-characters - push(@characters,$ascii); - $string = substr($string, length($ascii)); - } elsif ($group_ascii_chars && ($string =~ /^([\x21-\x7E]+)/)) { - ($ascii) = ($string =~ /^([\x21-\x7E]+)/); # ASCII black-characters - push(@characters,$ascii); - $string = substr($string, length($ascii)); - } elsif ($group_ascii_chars && ($string =~ /^([\x00-\x7F]+)/)) { - ($ascii) = ($string =~ /^([\x00-\x7F]+)/); - push(@characters,$ascii); - $string = substr($string, length($ascii)); - } else { - push(@characters,substr($string, 0, 1)); - $string = substr($string, 1); - } - - # two-character UTF-8 - } elsif ($string =~ /^[\xC0-\xDF][\x80-\xBF]/) { - push(@characters,substr($string, 0, 2)); - $string = substr($string, 2); - - # three-character UTF-8 - } elsif ($string =~ /^[\xE0-\xEF][\x80-\xBF][\x80-\xBF]/) { - push(@characters,substr($string, 0, 3)); - $string = substr($string, 3); - - # four-character UTF-8 - } elsif ($string =~ /^[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]/) { - push(@characters,substr($string, 0, 4)); - $string = substr($string, 4); - - # five-character UTF-8 - } elsif ($string =~ /^[\xF8-\xFB][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF]/) { - push(@characters,substr($string, 0, 5)); - $string = substr($string, 5); - - # six-character UTF-8 - } elsif ($string =~ /^[\xFC-\xFD][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF]/) { - push(@characters,substr($string, 0, 6)); - $string = substr($string, 6); - - # not a UTF-8 character - } else { - $skipped_bytes .= substr($string, 0, 1); - $string = substr($string, 1); - } - - $end_of_token_p_string .= ($string =~ /^\S/) ? "0" : "1" - if $#characters >= length($end_of_token_p_string); - } - $string =~ s/ $//; # remove previously added space, but keep original spaces - if ($return_trailing_whitespaces) { - while ($string =~ /^[ \t]/) { - push(@characters,substr($string, 0, 1)); - $string = substr($string, 1); - } - push(@characters, "\n") if $orig_string =~ /\n$/; - } - return ($return_only_chars) ? @characters : ($skipped_bytes, $end_of_token_p_string, @characters); -} - -sub max_substring_info { - local($caller,$s1,$s2,$info_type) = @_; - - ($skipped_bytes1, $end_of_token_p_string1, @char_list1) = $caller->split_into_utf8_characters($s1, "", *empty_ht); - ($skipped_bytes2, $end_of_token_p_string2, @char_list2) = $caller->split_into_utf8_characters($s2, "", *empty_ht); - return 0 if $skipped_bytes1 || $skipped_bytes2; - - $best_substring_start1 = 0; - $best_substring_start2 = 0; - $best_substring_length = 0; - - foreach $start_pos2 ((0 .. $#char_list2)) { - last if $start_pos2 + $best_substring_length > $#char_list2; - foreach $start_pos1 ((0 .. 
$#char_list1)) { - last if $start_pos1 + $best_substring_length > $#char_list1; - $matching_length = 0; - while (($start_pos1 + $matching_length <= $#char_list1) - && ($start_pos2 + $matching_length <= $#char_list2) - && ($char_list1[$start_pos1+$matching_length] eq $char_list2[$start_pos2+$matching_length])) { - $matching_length++; - } - if ($matching_length > $best_substring_length) { - $best_substring_length = $matching_length; - $best_substring_start1 = $start_pos1; - $best_substring_start2 = $start_pos2; - } - } - } - if ($info_type =~ /^max-ratio1$/) { - $length1 = $#char_list1 + 1; - return ($length1 > 0) ? ($best_substring_length / $length1) : 0; - } elsif ($info_type =~ /^max-ratio2$/) { - $length2 = $#char_list2 + 1; - return ($length2 > 0) ? ($best_substring_length / $length2) : 0; - } elsif ($info_type =~ /^substring$/) { - return join("", @char_list1[$best_substring_start1 .. $best_substring_start1+$best_substring_length-1]); - } else { - $length1 = $#char_list1 + 1; - $length2 = $#char_list2 + 1; - $info = "s1=$s1;s2=$s2"; - $info .= ";best_substring_length=$best_substring_length"; - $info .= ";best_substring_start1=$best_substring_start1"; - $info .= ";best_substring_start2=$best_substring_start2"; - $info .= ";length1=$length1"; - $info .= ";length2=$length2"; - return $info; - } -} - -sub n_shared_chars_at_start { - local($caller,$s1,$s2) = @_; - - my $n = 0; - while (($s1 ne "") && ($s2 ne "")) { - ($c1, $rest1) = ($s1 =~ /^(.[\x80-\xBF]*)(.*)$/); - ($c2, $rest2) = ($s2 =~ /^(.[\x80-\xBF]*)(.*)$/); - if ($c1 eq $c2) { - $n++; - $s1 = $rest1; - $s2 = $rest2; - } else { - last; - } - } - return $n; -} - -sub char_length { - local($caller,$string,$byte_offset) = @_; - - my $char = ($byte_offset) ? substr($string, $byte_offset) : $string; - return 1 if $char =~ /^[\x00-\x7F]/; - return 2 if $char =~ /^[\xC0-\xDF]/; - return 3 if $char =~ /^[\xE0-\xEF]/; - return 4 if $char =~ /^[\xF0-\xF7]/; - return 5 if $char =~ /^[\xF8-\xFB]/; - return 6 if $char =~ /^[\xFC-\xFD]/; - return 0; -} - -sub length_in_utf8_chars { - local($caller,$s) = @_; - - $s =~ s/[\x80-\xBF]//g; - $s =~ s/[\x00-\x7F\xC0-\xFF]/c/g; - return length($s); -} - -sub byte_length_of_n_chars { - local($caller,$char_length,$string,$byte_offset,$undef_return_value) = @_; - - $byte_offset = 0 unless defined($byte_offset); - $undef_return_value = -1 unless defined($undef_return_value); - my $result = 0; - my $len; - foreach $i ((1 .. $char_length)) { - $len = $caller->char_length($string,($byte_offset+$result)); - return $undef_return_value unless $len; - $result += $len; - } - return $result; -} - -sub replace_non_ASCII_bytes { - local($caller,$string,$replacement) = @_; - - $replacement = "HEX" unless defined($replacement); - if ($replacement =~ /^(Unicode|U\+4|\\u|HEX)$/) { - $new_string = ""; - while (($pre,$utf8_char, $post) = ($string =~ /^([\x09\x0A\x20-\x7E]*)([\x00-\x08\x0B-\x1F\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF]|[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]|[\xF8-\xFF][\x80-\xBF]+|[\x80-\xBF])(.*)$/s)) { - if ($replacement =~ /Unicode/) { - $new_string .= $pre . "utf8_to_unicode($utf8_char)) . ">"; - } elsif ($replacement =~ /\\u/) { - $new_string .= $pre . "\\u" . (uc sprintf("%04x", $caller->utf8_to_unicode($utf8_char))); - } elsif ($replacement =~ /U\+4/) { - $new_string .= $pre . "utf8_to_4hex_unicode($utf8_char)) . ">"; - } else { - $new_string .= $pre . "utf8_to_hex($utf8_char) . 
">"; - } - $string = $post; - } - $new_string .= $string; - } else { - $new_string = $string; - $new_string =~ s/[\x80-\xFF]/$replacement/g; - } - return $new_string; -} - -sub valid_utf8_string_p { - local($caller,$string) = @_; - - return $string =~ /^(?:[\x09\x0A\x20-\x7E]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF]|[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])*$/; -} - -sub valid_utf8_string_incl_ascii_control_p { - local($caller,$string) = @_; - - return $string =~ /^(?:[\x00-\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF]|[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF])*$/; -} - -sub utf8_to_hex { - local($caller,$s) = @_; - - $hex = ""; - foreach $i ((0 .. length($s)-1)) { - $hex .= uc sprintf("%2.2x",ord(substr($s, $i, 1))); - } - return $hex; -} - -sub hex_to_utf8 { - local($caller,$s) = @_; - # surface string \xE2\x80\xBA to UTF8 - - my $utf8 = ""; - while (($hex, $rest) = ($s =~ /^(?:\\x)?([0-9A-Fa-f]{2,2})(.*)$/)) { - $utf8 .= sprintf("%c", hex($hex)); - $s = $rest; - } - return $utf8; -} - -sub utf8_to_4hex_unicode { - local($caller,$s) = @_; - - return sprintf("%4.4x", $caller->utf8_to_unicode($s)); -} - -sub utf8_to_unicode { - local($caller,$s) = @_; - - $unicode = 0; - foreach $i ((0 .. length($s)-1)) { - $c = substr($s, $i, 1); - if ($c =~ /^[\x80-\xBF]$/) { - $unicode = $unicode * 64 + (ord($c) & 0x3F); - } elsif ($c =~ /^[\xC0-\xDF]$/) { - $unicode = $unicode * 32 + (ord($c) & 0x1F); - } elsif ($c =~ /^[\xE0-\xEF]$/) { - $unicode = $unicode * 16 + (ord($c) & 0x0F); - } elsif ($c =~ /^[\xF0-\xF7]$/) { - $unicode = $unicode * 8 + (ord($c) & 0x07); - } elsif ($c =~ /^[\xF8-\xFB]$/) { - $unicode = $unicode * 4 + (ord($c) & 0x03); - } elsif ($c =~ /^[\xFC-\xFD]$/) { - $unicode = $unicode * 2 + (ord($c) & 0x01); - } - } - return $unicode; -} - -sub charhex { - local($caller,$string) = @_; - - my $result = ""; - while ($string ne "") { - $char = substr($string, 0, 1); - $string = substr($string, 1); - if ($char =~ /^[ -~]$/) { - $result .= $char; - } else { - $hex = sprintf("%2.2x",ord($char)); - $hex =~ tr/a-f/A-F/; - $result .= ""; - } - } - return $result; -} - -sub windows1252_to_utf8 { - local($caller,$s, $norm_to_ascii_p, $preserve_potential_utf8s_p) = @_; - - return $s if $s =~ /^[\x00-\x7F]*$/; # all ASCII - - $norm_to_ascii_p = 1 unless defined($norm_to_ascii_p); - $preserve_potential_utf8s_p = 1 unless defined($preserve_potential_utf8s_p); - my $result = ""; - my $c = ""; - while ($s ne "") { - $n_bytes = 1; - if ($s =~ /^[\x00-\x7F]/) { - $result .= substr($s, 0, 1); # ASCII - } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xC0-\xDF][\x80-\xBF]/)) { - $result .= substr($s, 0, 2); # valid 2-byte UTF8 - $n_bytes = 2; - } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xE0-\xEF][\x80-\xBF][\x80-\xBF]/)) { - $result .= substr($s, 0, 3); # valid 3-byte UTF8 - $n_bytes = 3; - } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]/)) { - $result .= substr($s, 0, 4); # valid 4-byte UTF8 - $n_bytes = 4; - } elsif ($preserve_potential_utf8s_p && ($s =~ /^[\xF8-\xFB][\x80-\xBF][\x80-\xBF][\x80-\xBF][\x80-\xBF]/)) { - $result .= substr($s, 0, 5); # valid 5-byte UTF8 - $n_bytes = 5; - } elsif ($s =~ /^[\xA0-\xBF]/) { - $c = substr($s, 0, 1); - $result .= "\xC2$c"; - } elsif ($s =~ /^[\xC0-\xFF]/) { - $c = substr($s, 0, 1); - $c =~ tr/[\xC0-\xFF]/[\x80-\xBF]/; - $result .= "\xC3$c"; - } elsif ($s =~ /^\x80/) { - $result .= "\xE2\x82\xAC"; # Euro sign - } elsif ($s =~ /^\x82/) { - $result .= "\xE2\x80\x9A"; # 
single low quotation mark - } elsif ($s =~ /^\x83/) { - $result .= "\xC6\x92"; # Latin small letter f with hook - } elsif ($s =~ /^\x84/) { - $result .= "\xE2\x80\x9E"; # double low quotation mark - } elsif ($s =~ /^\x85/) { - $result .= ($norm_to_ascii_p) ? "..." : "\xE2\x80\xA6"; # horizontal ellipsis (three dots) - } elsif ($s =~ /^\x86/) { - $result .= "\xE2\x80\xA0"; # dagger - } elsif ($s =~ /^\x87/) { - $result .= "\xE2\x80\xA1"; # double dagger - } elsif ($s =~ /^\x88/) { - $result .= "\xCB\x86"; # circumflex - } elsif ($s =~ /^\x89/) { - $result .= "\xE2\x80\xB0"; # per mille sign - } elsif ($s =~ /^\x8A/) { - $result .= "\xC5\xA0"; # Latin capital letter S with caron - } elsif ($s =~ /^\x8B/) { - $result .= "\xE2\x80\xB9"; # single left-pointing angle quotation mark - } elsif ($s =~ /^\x8C/) { - $result .= "\xC5\x92"; # OE ligature - } elsif ($s =~ /^\x8E/) { - $result .= "\xC5\xBD"; # Latin capital letter Z with caron - } elsif ($s =~ /^\x91/) { - $result .= ($norm_to_ascii_p) ? "`" : "\xE2\x80\x98"; # left single quotation mark - } elsif ($s =~ /^\x92/) { - $result .= ($norm_to_ascii_p) ? "'" : "\xE2\x80\x99"; # right single quotation mark - } elsif ($s =~ /^\x93/) { - $result .= "\xE2\x80\x9C"; # left double quotation mark - } elsif ($s =~ /^\x94/) { - $result .= "\xE2\x80\x9D"; # right double quotation mark - } elsif ($s =~ /^\x95/) { - $result .= "\xE2\x80\xA2"; # bullet - } elsif ($s =~ /^\x96/) { - $result .= ($norm_to_ascii_p) ? "-" : "\xE2\x80\x93"; # n dash - } elsif ($s =~ /^\x97/) { - $result .= ($norm_to_ascii_p) ? "-" : "\xE2\x80\x94"; # m dash - } elsif ($s =~ /^\x98/) { - $result .= ($norm_to_ascii_p) ? "~" : "\xCB\x9C"; # small tilde - } elsif ($s =~ /^\x99/) { - $result .= "\xE2\x84\xA2"; # trade mark sign - } elsif ($s =~ /^\x9A/) { - $result .= "\xC5\xA1"; # Latin small letter s with caron - } elsif ($s =~ /^\x9B/) { - $result .= "\xE2\x80\xBA"; # single right-pointing angle quotation mark - } elsif ($s =~ /^\x9C/) { - $result .= "\xC5\x93"; # oe ligature - } elsif ($s =~ /^\x9E/) { - $result .= "\xC5\xBE"; # Latin small letter z with caron - } elsif ($s =~ /^\x9F/) { - $result .= "\xC5\xB8"; # Latin capital letter Y with diaeresis - } else { - $result .= "?"; - } - $s = substr($s, $n_bytes); - } - return $result; -} - -sub delete_weird_stuff { - local($caller, $s) = @_; - - # delete control chacters (except tab and linefeed), zero-width characters, byte order mark, - # directional marks, join marks, variation selectors, Arabic tatweel - $s =~ s/([\x00-\x08\x0B-\x1F\x7F]|\xC2[\x80-\x9F]|\xD9\x80|\xE2\x80[\x8B-\x8F]|\xEF\xB8[\x80-\x8F]|\xEF\xBB\xBF|\xF3\xA0[\x84-\x87][\x80-\xBF])//g; - return $s; -} - -sub number_of_utf8_character { - local($caller, $s) = @_; - - $s2 = $s; - $s2 =~ s/[\x80-\xBF]//g; - return length($s2); -} - -sub cap_letter_reg_exp { - # includes A-Z and other Latin-based capital letters with accents, umlauts and other decorations etc. 
- return "[A-Z]|\xC3[\x80-\x96\x98-\x9E]|\xC4[\x80\x82\x84\x86\x88\x8A\x8C\x8E\x90\x94\x964\x98\x9A\x9C\x9E\xA0\xA2\xA4\xA6\xA8\xAA\xAC\xAE\xB0\xB2\xB4\xB6\xB9\xBB\xBD\xBF]|\xC5[\x81\x83\x85\x87\x8A\x8C\x8E\x90\x92\x96\x98\x9A\x9C\x9E\xA0\xA2\xA4\xA6\xA8\xAA\xAC\xB0\xB2\xB4\xB6\xB8\xB9\xBB\xBD]"; -} - -sub regex_extended_case_expansion { - local($caller, $s) = @_; - - if ($s =~ /\xC3/) { - $s =~ s/\xC3\xA0/\xC3\[\x80\xA0\]/g; - $s =~ s/\xC3\xA1/\xC3\[\x81\xA1\]/g; - $s =~ s/\xC3\xA2/\xC3\[\x82\xA2\]/g; - $s =~ s/\xC3\xA3/\xC3\[\x83\xA3\]/g; - $s =~ s/\xC3\xA4/\xC3\[\x84\xA4\]/g; - $s =~ s/\xC3\xA5/\xC3\[\x85\xA5\]/g; - $s =~ s/\xC3\xA6/\xC3\[\x86\xA6\]/g; - $s =~ s/\xC3\xA7/\xC3\[\x87\xA7\]/g; - $s =~ s/\xC3\xA8/\xC3\[\x88\xA8\]/g; - $s =~ s/\xC3\xA9/\xC3\[\x89\xA9\]/g; - $s =~ s/\xC3\xAA/\xC3\[\x8A\xAA\]/g; - $s =~ s/\xC3\xAB/\xC3\[\x8B\xAB\]/g; - $s =~ s/\xC3\xAC/\xC3\[\x8C\xAC\]/g; - $s =~ s/\xC3\xAD/\xC3\[\x8D\xAD\]/g; - $s =~ s/\xC3\xAE/\xC3\[\x8E\xAE\]/g; - $s =~ s/\xC3\xAF/\xC3\[\x8F\xAF\]/g; - $s =~ s/\xC3\xB0/\xC3\[\x90\xB0\]/g; - $s =~ s/\xC3\xB1/\xC3\[\x91\xB1\]/g; - $s =~ s/\xC3\xB2/\xC3\[\x92\xB2\]/g; - $s =~ s/\xC3\xB3/\xC3\[\x93\xB3\]/g; - $s =~ s/\xC3\xB4/\xC3\[\x94\xB4\]/g; - $s =~ s/\xC3\xB5/\xC3\[\x95\xB5\]/g; - $s =~ s/\xC3\xB6/\xC3\[\x96\xB6\]/g; - $s =~ s/\xC3\xB8/\xC3\[\x98\xB8\]/g; - $s =~ s/\xC3\xB9/\xC3\[\x99\xB9\]/g; - $s =~ s/\xC3\xBA/\xC3\[\x9A\xBA\]/g; - $s =~ s/\xC3\xBB/\xC3\[\x9B\xBB\]/g; - $s =~ s/\xC3\xBC/\xC3\[\x9C\xBC\]/g; - $s =~ s/\xC3\xBD/\xC3\[\x9D\xBD\]/g; - $s =~ s/\xC3\xBE/\xC3\[\x9E\xBE\]/g; - } - if ($s =~ /\xC5/) { - $s =~ s/\xC5\x91/\xC5\[\x90\x91\]/g; - $s =~ s/\xC5\xA1/\xC5\[\xA0\xA1\]/g; - $s =~ s/\xC5\xB1/\xC5\[\xB0\xB1\]/g; - } - - return $s; -} - -sub extended_lower_case { - local($caller, $s) = @_; - - $s =~ tr/A-Z/a-z/; - - # Latin-1 - if ($s =~ /\xC3[\x80-\x9F]/) { - $s =~ s/À/à/g; - $s =~ s/Á/á/g; - $s =~ s/Â/â/g; - $s =~ s/Ã/ã/g; - $s =~ s/Ä/ä/g; - $s =~ s/Å/å/g; - $s =~ s/Æ/æ/g; - $s =~ s/Ç/ç/g; - $s =~ s/È/è/g; - $s =~ s/É/é/g; - $s =~ s/Ê/ê/g; - $s =~ s/Ë/ë/g; - $s =~ s/Ì/ì/g; - $s =~ s/Í/í/g; - $s =~ s/Î/î/g; - $s =~ s/Ï/ï/g; - $s =~ s/Ð/ð/g; - $s =~ s/Ñ/ñ/g; - $s =~ s/Ò/ò/g; - $s =~ s/Ó/ó/g; - $s =~ s/Ô/ô/g; - $s =~ s/Õ/õ/g; - $s =~ s/Ö/ö/g; - $s =~ s/Ø/ø/g; - $s =~ s/Ù/ù/g; - $s =~ s/Ú/ú/g; - $s =~ s/Û/û/g; - $s =~ s/Ü/ü/g; - $s =~ s/Ý/ý/g; - $s =~ s/Þ/þ/g; - } - # Latin Extended-A - if ($s =~ /[\xC4-\xC5][\x80-\xBF]/) { - $s =~ s/Ā/ā/g; - $s =~ s/Ă/ă/g; - $s =~ s/Ą/ą/g; - $s =~ s/Ć/ć/g; - $s =~ s/Ĉ/ĉ/g; - $s =~ s/Ċ/ċ/g; - $s =~ s/Č/č/g; - $s =~ s/Ď/ď/g; - $s =~ s/Đ/đ/g; - $s =~ s/Ē/ē/g; - $s =~ s/Ĕ/ĕ/g; - $s =~ s/Ė/ė/g; - $s =~ s/Ę/ę/g; - $s =~ s/Ě/ě/g; - $s =~ s/Ĝ/ĝ/g; - $s =~ s/Ğ/ğ/g; - $s =~ s/Ġ/ġ/g; - $s =~ s/Ģ/ģ/g; - $s =~ s/Ĥ/ĥ/g; - $s =~ s/Ħ/ħ/g; - $s =~ s/Ĩ/ĩ/g; - $s =~ s/Ī/ī/g; - $s =~ s/Ĭ/ĭ/g; - $s =~ s/Į/į/g; - $s =~ s/İ/ı/g; - $s =~ s/IJ/ij/g; - $s =~ s/Ĵ/ĵ/g; - $s =~ s/Ķ/ķ/g; - $s =~ s/Ĺ/ĺ/g; - $s =~ s/Ļ/ļ/g; - $s =~ s/Ľ/ľ/g; - $s =~ s/Ŀ/ŀ/g; - $s =~ s/Ł/ł/g; - $s =~ s/Ń/ń/g; - $s =~ s/Ņ/ņ/g; - $s =~ s/Ň/ň/g; - $s =~ s/Ŋ/ŋ/g; - $s =~ s/Ō/ō/g; - $s =~ s/Ŏ/ŏ/g; - $s =~ s/Ő/ő/g; - $s =~ s/Œ/œ/g; - $s =~ s/Ŕ/ŕ/g; - $s =~ s/Ŗ/ŗ/g; - $s =~ s/Ř/ř/g; - $s =~ s/Ś/ś/g; - $s =~ s/Ŝ/ŝ/g; - $s =~ s/Ş/ş/g; - $s =~ s/Š/š/g; - $s =~ s/Ţ/ţ/g; - $s =~ s/Ť/ť/g; - $s =~ s/Ŧ/ŧ/g; - $s =~ s/Ũ/ũ/g; - $s =~ s/Ū/ū/g; - $s =~ s/Ŭ/ŭ/g; - $s =~ s/Ů/ů/g; - $s =~ s/Ű/ű/g; - $s =~ s/Ų/ų/g; - $s =~ s/Ŵ/ŵ/g; - $s =~ s/Ŷ/ŷ/g; - $s =~ s/Ź/ź/g; - $s =~ s/Ż/ż/g; - $s =~ s/Ž/ž/g; - } - # Greek letters - if ($s =~ /\xCE[\x86-\xAB]/) { - $s =~ s/Α/α/g; 
- $s =~ s/Β/β/g; - $s =~ s/Γ/γ/g; - $s =~ s/Δ/δ/g; - $s =~ s/Ε/ε/g; - $s =~ s/Ζ/ζ/g; - $s =~ s/Η/η/g; - $s =~ s/Θ/θ/g; - $s =~ s/Ι/ι/g; - $s =~ s/Κ/κ/g; - $s =~ s/Λ/λ/g; - $s =~ s/Μ/μ/g; - $s =~ s/Ν/ν/g; - $s =~ s/Ξ/ξ/g; - $s =~ s/Ο/ο/g; - $s =~ s/Π/π/g; - $s =~ s/Ρ/ρ/g; - $s =~ s/Σ/σ/g; - $s =~ s/Τ/τ/g; - $s =~ s/Υ/υ/g; - $s =~ s/Φ/φ/g; - $s =~ s/Χ/χ/g; - $s =~ s/Ψ/ψ/g; - $s =~ s/Ω/ω/g; - $s =~ s/Ϊ/ϊ/g; - $s =~ s/Ϋ/ϋ/g; - $s =~ s/Ά/ά/g; - $s =~ s/Έ/έ/g; - $s =~ s/Ή/ή/g; - $s =~ s/Ί/ί/g; - $s =~ s/Ό/ό/g; - $s =~ s/Ύ/ύ/g; - $s =~ s/Ώ/ώ/g; - } - # Cyrillic letters - if ($s =~ /\xD0[\x80-\xAF]/) { - $s =~ s/А/а/g; - $s =~ s/Б/б/g; - $s =~ s/В/в/g; - $s =~ s/Г/г/g; - $s =~ s/Д/д/g; - $s =~ s/Е/е/g; - $s =~ s/Ж/ж/g; - $s =~ s/З/з/g; - $s =~ s/И/и/g; - $s =~ s/Й/й/g; - $s =~ s/К/к/g; - $s =~ s/Л/л/g; - $s =~ s/М/м/g; - $s =~ s/Н/н/g; - $s =~ s/О/о/g; - $s =~ s/П/п/g; - $s =~ s/Р/р/g; - $s =~ s/С/с/g; - $s =~ s/Т/т/g; - $s =~ s/У/у/g; - $s =~ s/Ф/ф/g; - $s =~ s/Х/х/g; - $s =~ s/Ц/ц/g; - $s =~ s/Ч/ч/g; - $s =~ s/Ш/ш/g; - $s =~ s/Щ/щ/g; - $s =~ s/Ъ/ъ/g; - $s =~ s/Ы/ы/g; - $s =~ s/Ь/ь/g; - $s =~ s/Э/э/g; - $s =~ s/Ю/ю/g; - $s =~ s/Я/я/g; - $s =~ s/Ѐ/ѐ/g; - $s =~ s/Ё/ё/g; - $s =~ s/Ђ/ђ/g; - $s =~ s/Ѓ/ѓ/g; - $s =~ s/Є/є/g; - $s =~ s/Ѕ/ѕ/g; - $s =~ s/І/і/g; - $s =~ s/Ї/ї/g; - $s =~ s/Ј/ј/g; - $s =~ s/Љ/љ/g; - $s =~ s/Њ/њ/g; - $s =~ s/Ћ/ћ/g; - $s =~ s/Ќ/ќ/g; - $s =~ s/Ѝ/ѝ/g; - $s =~ s/Ў/ў/g; - $s =~ s/Џ/џ/g; - } - # Fullwidth A-Z - if ($s =~ /\xEF\xBC[\xA1-\xBA]/) { - $s =~ s/A/a/g; - $s =~ s/B/b/g; - $s =~ s/C/c/g; - $s =~ s/D/d/g; - $s =~ s/E/e/g; - $s =~ s/F/f/g; - $s =~ s/G/g/g; - $s =~ s/H/h/g; - $s =~ s/I/i/g; - $s =~ s/J/j/g; - $s =~ s/K/k/g; - $s =~ s/L/l/g; - $s =~ s/M/m/g; - $s =~ s/N/n/g; - $s =~ s/O/o/g; - $s =~ s/P/p/g; - $s =~ s/Q/q/g; - $s =~ s/R/r/g; - $s =~ s/S/s/g; - $s =~ s/T/t/g; - $s =~ s/U/u/g; - $s =~ s/V/v/g; - $s =~ s/W/w/g; - $s =~ s/X/x/g; - $s =~ s/Y/y/g; - $s =~ s/Z/z/g; - } - - return $s; -} - -sub extended_upper_case { - local($caller, $s) = @_; - - $s =~ tr/a-z/A-Z/; - return $s unless $s =~ /[\xC3-\xC5][\x80-\xBF]/; - - $s =~ s/\xC3\xA0/\xC3\x80/g; - $s =~ s/\xC3\xA1/\xC3\x81/g; - $s =~ s/\xC3\xA2/\xC3\x82/g; - $s =~ s/\xC3\xA3/\xC3\x83/g; - $s =~ s/\xC3\xA4/\xC3\x84/g; - $s =~ s/\xC3\xA5/\xC3\x85/g; - $s =~ s/\xC3\xA6/\xC3\x86/g; - $s =~ s/\xC3\xA7/\xC3\x87/g; - $s =~ s/\xC3\xA8/\xC3\x88/g; - $s =~ s/\xC3\xA9/\xC3\x89/g; - $s =~ s/\xC3\xAA/\xC3\x8A/g; - $s =~ s/\xC3\xAB/\xC3\x8B/g; - $s =~ s/\xC3\xAC/\xC3\x8C/g; - $s =~ s/\xC3\xAD/\xC3\x8D/g; - $s =~ s/\xC3\xAE/\xC3\x8E/g; - $s =~ s/\xC3\xAF/\xC3\x8F/g; - $s =~ s/\xC3\xB0/\xC3\x90/g; - $s =~ s/\xC3\xB1/\xC3\x91/g; - $s =~ s/\xC3\xB2/\xC3\x92/g; - $s =~ s/\xC3\xB3/\xC3\x93/g; - $s =~ s/\xC3\xB4/\xC3\x94/g; - $s =~ s/\xC3\xB5/\xC3\x95/g; - $s =~ s/\xC3\xB6/\xC3\x96/g; - $s =~ s/\xC3\xB8/\xC3\x98/g; - $s =~ s/\xC3\xB9/\xC3\x99/g; - $s =~ s/\xC3\xBA/\xC3\x9A/g; - $s =~ s/\xC3\xBB/\xC3\x9B/g; - $s =~ s/\xC3\xBC/\xC3\x9C/g; - $s =~ s/\xC3\xBD/\xC3\x9D/g; - $s =~ s/\xC3\xBE/\xC3\x9E/g; - - $s =~ s/\xC5\x91/\xC5\x90/g; - $s =~ s/\xC5\xA1/\xC5\xA0/g; - $s =~ s/\xC5\xB1/\xC5\xB0/g; - return $s unless $s =~ /[\xC3-\xC5][\x80-\xBF]/; - - return $s; -} - -sub extended_first_upper_case { - local($caller, $s) = @_; - - if (($first_char, $rest) = ($s =~ /^([\x00-\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF][\x80-\xBF])(.*)$/)) { - return $caller->extended_upper_case($first_char) . 
$rest; - } else { - return $s; - } -} - -sub repair_doubly_converted_utf8_strings { - local($caller, $s) = @_; - - if ($s =~ /\xC3[\x82-\x85]\xC2[\x80-\xBF]/) { - $s =~ s/\xC3\x82\xC2([\x80-\xBF])/\xC2$1/g; - $s =~ s/\xC3\x83\xC2([\x80-\xBF])/\xC3$1/g; - $s =~ s/\xC3\x84\xC2([\x80-\xBF])/\xC4$1/g; - $s =~ s/\xC3\x85\xC2([\x80-\xBF])/\xC5$1/g; - } - return $s; -} - -sub repair_misconverted_windows_to_utf8_strings { - local($caller, $s) = @_; - - # correcting conversions of UTF8 using Latin1-to-UTF converter - if ($s =~ /\xC3\xA2\xC2\x80\xC2[\x90-\xEF]/) { - my $result = ""; - while (($pre,$last_c,$post) = ($s =~ /^(.*?)\xC3\xA2\xC2\x80\xC2([\x90-\xEF])(.*)$/s)) { - $result .= "$pre\xE2\x80$last_c"; - $s = $post; - } - $result .= $s; - $s = $result; - } - # correcting conversions of Windows1252-to-UTF8 using Latin1-to-UTF converter - if ($s =~ /\xC2[\x80-\x9F]/) { - my $result = ""; - while (($pre,$c_windows,$post) = ($s =~ /^(.*?)\xC2([\x80-\x9F])(.*)$/s)) { - $c_utf8 = $caller->windows1252_to_utf8($c_windows, 0); - $result .= ($c_utf8 eq "?") ? ($pre . "\xC2" . $c_windows) : "$pre$c_utf8"; - $s = $post; - } - $result .= $s; - $s = $result; - } - if ($s =~ /\xC3/) { - $s =~ s/\xC3\xA2\xE2\x80\x9A\xC2\xAC/\xE2\x82\xAC/g; # x80 -> Euro sign - # x81 codepoint undefined in Windows 1252 - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC5\xA1/\xE2\x80\x9A/g; # x82 -> single low-9 quotation mark - $s =~ s/\xC3\x86\xE2\x80\x99/\xC6\x92/g; # x83 -> Latin small letter f with hook - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC5\xBE/\xE2\x80\x9E/g; # x84 -> double low-9 quotation mark - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA6/\xE2\x80\xA6/g; # x85 -> horizontal ellipsis - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA0/\xE2\x80\xA0/g; # x86 -> dagger - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA1/\xE2\x80\xA1/g; # x87 -> double dagger - $s =~ s/\xC3\x8B\xE2\x80\xA0/\xCB\x86/g; # x88 -> modifier letter circumflex accent - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xB0/\xE2\x80\xB0/g; # x89 -> per mille sign - $s =~ s/\xC3\x85\xC2\xA0/\xC5\xA0/g; # x8A -> Latin capital letter S with caron - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xB9/\xE2\x80\xB9/g; # x8B -> single left-pointing angle quotation mark - $s =~ s/\xC3\x85\xE2\x80\x99/\xC5\x92/g; # x8C -> Latin capital ligature OE - # x8D codepoint undefined in Windows 1252 - $s =~ s/\xC3\x85\xC2\xBD/\xC5\xBD/g; # x8E -> Latin capital letter Z with caron - # x8F codepoint undefined in Windows 1252 - # x90 codepoint undefined in Windows 1252 - $s =~ s/\xC3\xA2\xE2\x82\xAC\xCB\x9C/\xE2\x80\x98/g; # x91 a-circumflex+euro+small tilde -> left single quotation mark - $s =~ s/\xC3\xA2\xE2\x82\xAC\xE2\x84\xA2/\xE2\x80\x99/g; # x92 a-circumflex+euro+trademark -> right single quotation mark - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC5\x93/\xE2\x80\x9C/g; # x93 a-circumflex+euro+Latin small ligature oe -> left double quotation mark - # x94 maps through undefined intermediate code point - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xA2/\xE2\x80\xA2/g; # x95 a-circumflex+euro+cent sign -> bullet - $s =~ s/\xC3\xA2\xE2\x82\xAC\xE2\x80\x9C/\xE2\x80\x93/g; # x96 a-circumflex+euro+left double quotation mark -> en dash - $s =~ s/\xC3\xA2\xE2\x82\xAC\xE2\x80\x9D/\xE2\x80\x94/g; # x97 a-circumflex+euro+right double quotation mark -> em dash - $s =~ s/\xC3\x8B\xC5\x93/\xCB\x9C/g; # x98 Latin capital e diaeresis+Latin small ligature oe -> small tilde - $s =~ s/\xC3\xA2\xE2\x80\x9E\xC2\xA2/\xE2\x84\xA2/g; # x99 -> trade mark sign - $s =~ s/\xC3\x85\xC2\xA1/\xC5\xA1/g; # x9A -> Latin small letter s with caron - $s =~ s/\xC3\xA2\xE2\x82\xAC\xC2\xBA/\xE2\x80\xBA/g; # x9B 
-> single right-pointing angle quotation mark - $s =~ s/\xC3\x85\xE2\x80\x9C/\xC5\x93/g; # x9C -> Latin small ligature oe - # x9D codepoint undefined in Windows 1252 - $s =~ s/\xC3\x85\xC2\xBE/\xC5\xBE/g; # x9E -> Latin small letter z with caron - $s =~ s/\xC3\x85\xC2\xB8/\xC5\xB8/g; # x9F -> Latin capital letter Y with diaeresis - $s =~ s/\xC3\xAF\xC2\xBF\xC2\xBD/\xEF\xBF\xBD/g; # replacement character - } - - return $s; -} - -sub latin1_to_utf { - local($caller, $s) = @_; - - my $result = ""; - while (($pre,$c,$post) = ($s =~ /^(.*?)([\x80-\xFF])(.*)$/s)) { - $result .= $pre; - if ($c =~ /^[\x80-\xBF]$/) { - $result .= "\xC2$c"; - } elsif ($c =~ /^[\xC0-\xFF]$/) { - $c =~ tr/[\xC0-\xFF]/[\x80-\xBF]/; - $result .= "\xC3$c"; - } - $s = $post; - } - $result .= $s; - return $result; -} - -sub character_type_is_letter_type { - local($caller, $char_type) = @_; - - return ($char_type =~ /\b((CJK|hiragana|kana|katakana)\s+character|diacritic|letter|syllable)\b/); -} - -sub character_type { - local($caller, $c) = @_; - - if ($c =~ /^[\x00-\x7F]/) { - return "XML tag" if $c =~ /^<.*>$/; - return "ASCII Latin letter" if $c =~ /^[a-z]$/i; - return "ASCII digit" if $c =~ /^[0-9]$/i; - return "ASCII whitespace" if $c =~ /^[\x09-\x0D\x20]$/; - return "ASCII control-character" if $c =~ /^[\x00-\x1F\x7F]$/; - return "ASCII currency" if $c eq "\$"; - return "ASCII punctuation"; - } elsif ($c =~ /^[\xC0-\xDF]/) { - return "non-UTF8 (invalid)" unless $c =~ /^[\xC0-\xDF][\x80-\xBF]$/; - return "non-shortest-UTF8 (invalid)" if $c =~ /[\xC0-\xC1]/; - return "non-ASCII control-character" if $c =~ /\xC2[\x80-\x9F]/; - return "non-ASCII whitespace" if $c =~ /\xC2\xA0/; - return "non-ASCII currency" if $c =~ /\xC2[\xA2-\xA5]/; - return "fraction" if $c =~ /\xC2[\xBC-\xBE]/; # NEW - return "superscript digit" if $c =~ /\xC2[\xB2\xB3\xB9]/; - return "non-ASCII Latin letter" if $c =~ /\xC2\xB5/; # micro sign - return "non-ASCII punctuation" if $c =~ /\xC2[\xA0-\xBF]/; - return "non-ASCII punctuation" if $c =~ /\xC3[\x97\xB7]/; - return "non-ASCII Latin letter" if $c =~ /\xC3[\x80-\xBF]/; - return "Latin ligature letter" if $c =~ /\xC4[\xB2\xB3]/; - return "Latin ligature letter" if $c =~ /\xC5[\x92\x93]/; - return "non-ASCII Latin letter" if $c =~ /[\xC4-\xC8]/; - return "non-ASCII Latin letter" if $c =~ /\xC9[\x80-\x8F]/; - return "IPA" if $c =~ /\xC9[\x90-\xBF]/; - return "IPA" if $c =~ /\xCA[\x80-\xBF]/; - return "IPA" if $c =~ /\xCB[\x80-\xBF]/; - return "combining-diacritic" if $c =~ /\xCC[\x80-\xBF]/; - return "combining-diacritic" if $c =~ /\xCD[\x80-\xAF]/; - return "Greek punctuation" if $c =~ /\xCD[\xBE]/; # Greek question mark - return "Greek punctuation" if $c =~ /\xCE[\x87]/; # Greek semicolon - return "Greek letter" if $c =~ /\xCD[\xB0-\xBF]/; - return "Greek letter" if $c =~ /\xCE/; - return "Greek letter" if $c =~ /\xCF[\x80-\xA1\xB3\xB7\xB8\xBA\xBB]/; - return "Coptic letter" if $c =~ /\xCF[\xA2-\xAF]/; - return "Cyrillic letter" if $c =~ /[\xD0-\xD3]/; - return "Cyrillic letter" if $c =~ /\xD4[\x80-\xAF]/; - return "Armenian punctuation" if $c =~ /\xD5[\x9A-\x9F]/; - return "Armenian punctuation" if $c =~ /\xD6[\x89-\x8F]/; - return "Armenian letter" if $c =~ /\xD4[\xB0-\xBF]/; - return "Armenian letter" if $c =~ /\xD5/; - return "Armenian letter" if $c =~ /\xD6[\x80-\x8F]/; - return "Hebrew accent" if $c =~ /\xD6[\x91-\xAE]/; - return "Hebrew punctuation" if $c =~ /\xD6\xBE/; - return "Hebrew punctuation" if $c =~ /\xD7[\x80\x83\x86\xB3\xB4]/; - return "Hebrew point" if $c =~ /\xD6[\xB0-\xBF]/; - 
return "Hebrew point" if $c =~ /\xD7[\x81\x82\x87]/; - return "Hebrew letter" if $c =~ /\xD7[\x90-\xB2]/; - return "other Hebrew" if $c =~ /\xD6[\x90-\xBF]/; - return "other Hebrew" if $c =~ /\xD7/; - return "Arabic currency" if $c =~ /\xD8\x8B/; # Afghani sign - return "Arabic punctuation" if $c =~ /\xD8[\x89-\x8D\x9B\x9E\x9F]/; - return "Arabic punctuation" if $c =~ /\xD9[\xAA-\xAD]/; - return "Arabic punctuation" if $c =~ /\xDB[\x94]/; - return "Arabic tatweel" if $c =~ /\xD9\x80/; - return "Arabic letter" if $c =~ /\xD8[\xA0-\xBF]/; - return "Arabic letter" if $c =~ /\xD9[\x81-\x9F]/; - return "Arabic letter" if $c =~ /\xD9[\xAE-\xBF]/; - return "Arabic letter" if $c =~ /\xDA[\x80-\xBF]/; - return "Arabic letter" if $c =~ /\xDB[\x80-\x95]/; - return "Arabic Indic digit" if $c =~ /\xD9[\xA0-\xA9]/; - return "Arabic Indic digit" if $c =~ /\xDB[\xB0-\xB9]/; - return "other Arabic" if $c =~ /[\xD8-\xDB]/; - return "Syriac punctuation" if $c =~ /\xDC[\x80-\x8F]/; - return "Syriac letter" if $c =~ /\xDC[\x90-\xAF]/; - return "Syriac diacritic" if $c =~ /\xDC[\xB0-\xBF]/; - return "Syriac diacritic" if $c =~ /\xDD[\x80-\x8A]/; - return "Thaana letter" if $c =~ /\xDE/; - } elsif ($c =~ /^[\xE0-\xEF]/) { - return "non-UTF8 (invalid)" unless $c =~ /^[\xE0-\xEF][\x80-\xBF]{2,2}$/; - return "non-shortest-UTF8 (invalid)" if $c =~ /\xE0[\x80-\x9F]/; - return "Arabic letter" if $c =~ /\xE0\xA2[\xA0-\xBF]/; # extended letters - return "other Arabic" if $c =~ /\xE0\xA3/; # extended characters - return "Devanagari punctuation" if $c =~ /\xE0\xA5[\xA4\xA5]/; # danda, double danda - return "Devanagari digit" if $c =~ /\xE0\xA5[\xA6-\xAF]/; - return "Devanagari letter" if $c =~ /\xE0[\xA4-\xA5]/; - return "Bengali digit" if $c =~ /\xE0\xA7[\xA6-\xAF]/; - return "Bengali currency" if $c =~ /\xE0\xA7[\xB2-\xB9]/; - return "Bengali letter" if $c =~ /\xE0[\xA6-\xA7]/; - return "Gurmukhi digit" if $c =~ /\xE0\xA9[\xA6-\xAF]/; - return "Gurmukhi letter" if $c =~ /\xE0[\xA8-\xA9]/; - return "Gujarati digit" if $c =~ /\xE0\xAB[\xA6-\xAF]/; - return "Gujarati letter" if $c =~ /\xE0[\xAA-\xAB]/; - return "Oriya digit" if $c =~ /\xE0\xAD[\xA6-\xAF]/; - return "Oriya fraction" if $c =~ /\xE0\xAD[\xB2-\xB7]/; - return "Oriya letter" if $c =~ /\xE0[\xAC-\xAD]/; - return "Tamil digit" if $c =~ /\xE0\xAF[\xA6-\xAF]/; - return "Tamil number" if $c =~ /\xE0\xAF[\xB0-\xB2]/; # number (10, 100, 1000) - return "Tamil letter" if $c =~ /\xE0[\xAE-\xAF]/; - return "Telegu digit" if $c =~ /\xE0\xB1[\xA6-\xAF]/; - return "Telegu fraction" if $c =~ /\xE0\xB1[\xB8-\xBE]/; - return "Telegu letter" if $c =~ /\xE0[\xB0-\xB1]/; - return "Kannada digit" if $c =~ /\xE0\xB3[\xA6-\xAF]/; - return "Kannada letter" if $c =~ /\xE0[\xB2-\xB3]/; - return "Malayalam digit" if $c =~ /\xE0\xB5[\x98-\x9E\xA6-\xB8]/; - return "Malayalam punctuation" if $c =~ /\xE0\xB5\xB9/; # date mark - return "Malayalam letter" if $c =~ /\xE0[\xB4-\xB5]/; - return "Sinhala digit" if $c =~ /\xE0\xB7[\xA6-\xAF]/; - return "Sinhala punctuation" if $c =~ /\xE0\xB7\xB4/; - return "Sinhala letter" if $c =~ /\xE0[\xB6-\xB7]/; - return "Thai currency" if $c =~ /\xE0\xB8\xBF/; - return "Thai digit" if $c =~ /\xE0\xB9[\x90-\x99]/; - return "Thai character" if $c =~ /\xE0[\xB8-\xB9]/; - return "Lao punctuation" if $c =~ /\xE0\xBA\xAF/; # Lao ellipsis - return "Lao digit" if $c =~ /\xE0\xBB[\x90-\x99]/; - return "Lao character" if $c =~ /\xE0[\xBA-\xBB]/; - return "Tibetan punctuation" if $c =~ /\xE0\xBC[\x81-\x94]/; - return "Tibetan sign" if $c =~ /\xE0\xBC[\x95-\x9F]/; - 
return "Tibetan digit" if $c =~ /\xE0\xBC[\xA0-\xB3]/; - return "Tibetan punctuation" if $c =~ /\xE0\xBC[\xB4-\xBD]/; - return "Tibetan letter" if $c =~ /\xE0[\xBC-\xBF]/; - return "Myanmar digit" if $c =~ /\xE1\x81[\x80-\x89]/; - return "Myanmar digit" if $c =~ /\xE1\x82[\x90-\x99]/; # Myanmar Shan digits - return "Myanmar punctuation" if $c =~ /\xE1\x81[\x8A-\x8B]/; - return "Myanmar letter" if $c =~ /\xE1[\x80-\x81]/; - return "Myanmar letter" if $c =~ /\xE1\x82[\x80-\x9F]/; - return "Georgian punctuation" if $c =~ /\xE1\x83\xBB/; - return "Georgian letter" if $c =~ /\xE1\x82[\xA0-\xBF]/; - return "Georgian letter" if $c =~ /\xE1\x83/; - return "Georgian letter" if $c =~ /\xE1\xB2[\x90-\xBF]/; # Georgian Mtavruli capital letters - return "Georgian letter" if $c =~ /\xE2\xB4[\x80-\xAF]/; # Georgian small letters (Khutsuri) - return "Korean Hangul letter" if $c =~ /\xE1[\x84-\x87]/; - return "Ethiopic punctuation" if $c =~ /\xE1\x8D[\xA0-\xA8]/; - return "Ethiopic digit" if $c =~ /\xE1\x8D[\xA9-\xB1]/; - return "Ethiopic number" if $c =~ /\xE1\x8D[\xB2-\xBC]/; - return "Ethiopic syllable" if $c =~ /\xE1[\x88-\x8D]/; - return "Cherokee letter" if $c =~ /\xE1\x8E[\xA0-\xBF]/; - return "Cherokee letter" if $c =~ /\xE1\x8F/; - return "Canadian punctuation" if $c =~ /\xE1\x90\x80/; # Canadian Syllabics hyphen - return "Canadian punctuation" if $c =~ /\xE1\x99\xAE/; # Canadian Syllabics full stop - return "Canadian syllable" if $c =~ /\xE1[\x90-\x99]/; - return "Canadian syllable" if $c =~ /\xE1\xA2[\xB0-\xBF]/; - return "Canadian syllable" if $c =~ /\xE1\xA3/; - return "Ogham whitespace" if $c =~ /\xE1\x9A\x80/; - return "Ogham letter" if $c =~ /\xE1\x9A[\x81-\x9A]/; - return "Ogham punctuation" if $c =~ /\xE1\x9A[\x9B-\x9C]/; - return "Runic punctuation" if $c =~ /\xE1\x9B[\xAB-\xAD]/; - return "Runic letter" if $c =~ /\xE1\x9A[\xA0-\xBF]/; - return "Runic letter" if $c =~ /\xE1\x9B/; - return "Khmer currency" if $c =~ /\xE1\x9F\x9B/; - return "Khmer digit" if $c =~ /\xE1\x9F[\xA0-\xA9]/; - return "Khmer letter" if $c =~ /\xE1[\x9E-\x9F]/; - return "Mongolian punctuation" if $c =~ /\xE1\xA0[\x80-\x8A]/; - return "Mongolian digit" if $c =~ /\xE1\xA0[\x90-\x99]/; - return "Mongolian letter" if $c =~ /\xE1[\xA0-\xA1]/; - return "Mongolian letter" if $c =~ /\xE1\xA2[\x80-\xAF]/; - return "Buginese letter" if $c =~ /\xE1\xA8[\x80-\x9B]/; - return "Buginese punctuation" if $c =~ /\xE1\xA8[\x9E-\x9F]/; - return "Balinese letter" if $c =~ /\xE1\xAC/; - return "Balinese letter" if $c =~ /\xE1\xAD[\x80-\x8F]/; - return "Balinese digit" if $c =~ /\xE1\xAD[\x90-\x99]/; - return "Balinese puncutation" if $c =~ /\xE1\xAD[\x9A-\xA0]/; - return "Balinese symbol" if $c =~ /\xE1\xAD[\xA1-\xBF]/; - return "Sundanese digit" if $c =~ /\xE1\xAE[\xB0-\xB9]/; - return "Sundanese letter" if $c =~ /\xE1\xAE/; - return "Cyrillic letter" if $c =~ /\xE1\xB2[\x80-\x8F]/; - return "Sundanese punctuation" if $c =~ /\xE1\xB3[\x80-\x8F]/; - return "IPA" if $c =~ /\xE1[\xB4-\xB6]/; - return "non-ASCII Latin letter" if $c =~ /\xE1[\xB8-\xBB]/; - return "Greek letter" if $c =~ /\xE1[\xBC-\xBF]/; - return "non-ASCII whitespace" if $c =~ /\xE2\x80[\x80-\x8A\xAF]/; - return "zero-width space" if $c =~ /\xE2\x80\x8B/; - return "zero-width non-space" if $c =~ /\xE2\x80\x8C/; - return "zero-width joiner" if $c =~ /\xE2\x80\x8D/; - return "directional mark" if $c =~ /\xE2\x80[\x8E-\x8F\xAA-\xAE]/; - return "non-ASCII punctuation" if $c =~ /\xE2\x80[\x90-\xBF]/; - return "non-ASCII punctuation" if $c =~ /\xE2\x81[\x80-\x9E]/; - return 
"superscript letter" if $c =~ /\xE2\x81[\xB1\xBF]/; - return "superscript digit" if $c =~ /\xE2\x81[\xB0-\xB9]/; - return "superscript punctuation" if $c =~ /\xE2\x81[\xBA-\xBE]/; - return "subscript digit" if $c =~ /\xE2\x82[\x80-\x89]/; - return "subscript punctuation" if $c =~ /\xE2\x82[\x8A-\x8E]/; - return "non-ASCII currency" if $c =~ /\xE2\x82[\xA0-\xBF]/; - return "letterlike symbol" if $c =~ /\xE2\x84/; - return "letterlike symbol" if $c =~ /\xE2\x85[\x80-\x8F]/; - return "fraction" if $c =~ /\xE2\x85[\x90-\x9E]/; # NEW - return "Roman number" if $c =~ /\xE2\x85[\xA0-\xBF]/; # NEW - return "arrow symbol" if $c =~ /\xE2\x86[\x90-\xBF]/; - return "arrow symbol" if $c =~ /\xE2\x87/; - return "mathematical operator" if $c =~ /\xE2[\x88-\x8B]/; - return "technical symbol" if $c =~ /\xE2[\x8C-\x8F]/; - return "enclosed alphanumeric" if $c =~ /\xE2\x91[\xA0-\xBF]/; - return "enclosed alphanumeric" if $c =~ /\xE2[\x92-\x93]/; - return "box drawing" if $c =~ /\xE2[\x94-\x95]/; - return "geometric shape" if $c =~ /\xE2\x96[\xA0-\xBF]/; - return "geometric shape" if $c =~ /\xE2\x97/; - return "pictograph" if $c =~ /\xE2[\x98-\x9E]/; - return "arrow symbol" if $c =~ /\xE2\xAC[\x80-\x91\xB0-\xBF]/; - return "geometric shape" if $c =~ /\xE2\xAC[\x92-\xAF]/; - return "arrow symbol" if $c =~ /\xE2\xAD[\x80-\x8F\x9A-\xBF]/; - return "geometric shape" if $c =~ /\xE2\xAD[\x90-\x99]/; - return "arrow symbol" if $c =~ /\xE2\xAE[\x80-\xB9]/; - return "geometric shape" if $c =~ /\xE2\xAE[\xBA-\xBF]/; - return "geometric shape" if $c =~ /\xE2\xAF[\x80-\x88\x8A-\x8F]/; - return "symbol" if $c =~ /\xE2[\xAC-\xAF]/; - return "Coptic fraction" if $c =~ /\xE2\xB3\xBD/; - return "Coptic punctuation" if $c =~ /\xE2\xB3[\xB9-\xBF]/; - return "Coptic letter" if $c =~ /\xE2[\xB2-\xB3]/; - return "Georgian letter" if $c =~ /\xE2\xB4[\x80-\xAF]/; - return "Tifinagh punctuation" if $c =~ /\xE2\xB5\xB0/; - return "Tifinagh letter" if $c =~ /\xE2\xB4[\xB0-\xBF]/; - return "Tifinagh letter" if $c =~ /\xE2\xB5/; - return "Ethiopic syllable" if $c =~ /\xE2\xB6/; - return "Ethiopic syllable" if $c =~ /\xE2\xB7[\x80-\x9F]/; - return "non-ASCII punctuation" if $c =~ /\xE3\x80[\x80-\x91\x94-\x9F\xB0\xBB-\xBD]/; - return "symbol" if $c =~ /\xE3\x80[\x91\x92\xA0\xB6\xB7]/; - return "Japanese hiragana character" if $c =~ /\xE3\x81/; - return "Japanese hiragana character" if $c =~ /\xE3\x82[\x80-\x9F]/; - return "Japanese katakana character" if $c =~ /\xE3\x82[\xA0-\xBF]/; - return "Japanese katakana character" if $c =~ /\xE3\x83/; - return "Bopomofo letter" if $c =~ /\xE3\x84[\x80-\xAF]/; - return "Korean Hangul letter" if $c =~ /\xE3\x84[\xB0-\xBF]/; - return "Korean Hangul letter" if $c =~ /\xE3\x85/; - return "Korean Hangul letter" if $c =~ /\xE3\x86[\x80-\x8F]/; - return "Bopomofo letter" if $c =~ /\xE3\x86[\xA0-\xBF]/; - return "CJK stroke" if $c =~ /\xE3\x87[\x80-\xAF]/; - return "Japanese kana character" if $c =~ /\xE3\x87[\xB0-\xBF]/; - return "CJK symbol" if $c =~ /\xE3[\x88-\x8B]/; - return "CJK square Latin abbreviation" if $c =~ /\xE3\x8D[\xB1-\xBA]/; - return "CJK square Latin abbreviation" if $c =~ /\xE3\x8E/; - return "CJK square Latin abbreviation" if $c =~ /\xE3\x8F[\x80-\x9F\xBF]/; - return "CJK character" if $c =~ /\xE4[\xB8-\xBF]/; - return "CJK character" if $c =~ /[\xE5-\xE9]/; - return "Yi syllable" if $c =~ /\xEA[\x80-\x92]/; - return "Lisu letter" if $c =~ /\xEA\x93[\x90-\xBD]/; - return "Lisu punctuation" if $c =~ /\xEA\x93[\xBE-\xBF]/; - return "Cyrillic letter" if $c =~ /\xEA\x99/; - return "Cyrillic 
letter" if $c =~ /\xEA\x9A[\x80-\x9F]/; - return "modifier tone" if $c =~ /\xEA\x9C[\x80-\xA1]/; - return "Javanese punctuation" if $c =~ /\xEA\xA7[\x81-\x8D\x9E-\x9F]/; - return "Javanese digit" if $c =~ /\xEA\xA7[\x90-\x99]/; - return "Javanese letter" if $c =~ /\xEA\xA6/; - return "Javanese letter" if $c =~ /\xEA\xA7[\x80-\x9F]/; - return "Ethiopic syllable" if $c =~ /\xEA\xAC[\x80-\xAF]/; - return "Cherokee letter" if $c =~ /\xEA\xAD[\xB0-\xBF]/; - return "Cherokee letter" if $c =~ /\xEA\xAE/; - return "Meetai Mayek digit" if $c =~ /\xEA\xAF[\xB0-\xB9]/; - return "Meetai Mayek letter" if $c =~ /\xEA\xAF/; - return "Korean Hangul syllable" if $c =~ /\xEA[\xB0-\xBF]/; - return "Korean Hangul syllable" if $c =~ /[\xEB-\xEC]/; - return "Korean Hangul syllable" if $c =~ /\xED[\x80-\x9E]/; - return "Klingon letter" if $c =~ /\xEF\xA3[\x90-\xA9]/; - return "Klingon digit" if $c =~ /\xEF\xA3[\xB0-\xB9]/; - return "Klingon punctuation" if $c =~ /\xEF\xA3[\xBD-\xBE]/; - return "Klingon symbol" if $c =~ /\xEF\xA3\xBF/; - return "private use character" if $c =~ /\xEE/; - return "Latin typographic ligature" if $c =~ /\xEF\xAC[\x80-\x86]/; - return "Hebrew presentation letter" if $c =~ /\xEF\xAC[\x9D-\xBF]/; - return "Hebrew presentation letter" if $c =~ /\xEF\xAD[\x80-\x8F]/; - return "Arabic presentation letter" if $c =~ /\xEF\xAD[\x90-\xBF]/; - return "Arabic presentation letter" if $c =~ /\xEF[\xAE-\xB7]/; - return "non-ASCII punctuation" if $c =~ /\xEF\xB8[\x90-\x99]/; - return "non-ASCII punctuation" if $c =~ /\xEF\xB8[\xB0-\xBF]/; - return "non-ASCII punctuation" if $c =~ /\xEF\xB9[\x80-\xAB]/; - return "Arabic presentation letter" if $c =~ /\xEF\xB9[\xB0-\xBF]/; - return "Arabic presentation letter" if $c =~ /\xEF\xBA/; - return "Arabic presentation letter" if $c =~ /\xEF\xBB[\x80-\xBC]/; - return "byte-order mark/zero-width no-break space" if $c eq "\xEF\xBB\xBF"; - return "fullwidth currency" if $c =~ /\xEF\xBC\x84/; - return "fullwidth digit" if $c =~ /\xEF\xBC[\x90-\x99]/; - return "fullwidth Latin letter" if $c =~ /\xEF\xBC[\xA1-\xBA]/; - return "fullwidth Latin letter" if $c =~ /\xEF\xBD[\x81-\x9A]/; - return "fullwidth punctuation" if $c =~ /\xEF\xBC/; - return "fullwidth punctuation" if $c =~ /\xEF\xBD[\x9B-\xA4]/; - return "halfwidth Japanese punctuation" if $c =~ /\xEF\xBD[\xA1-\xA4]/; - return "halfwidth Japanese katakana character" if $c =~ /\xEF\xBD[\xA5-\xBF]/; - return "halfwidth Japanese katakana character" if $c =~ /\xEF\xBE[\x80-\x9F]/; - return "fullwidth currency" if $c =~ /\xEF\xBF[\xA0-\xA6]/; - return "replacement character" if $c eq "\xEF\xBF\xBD"; - } elsif ($c =~ /[\xF0-\xF7]/) { - return "non-UTF8 (invalid)" unless $c =~ /[\xF0-\xF7][\x80-\xBF]{3,3}$/; - return "non-shortest-UTF8 (invalid)" if $c =~ /\xF0[\x80-\x8F]/; - return "Linear B syllable" if $c =~ /\xF0\x90\x80/; - return "Linear B syllable" if $c =~ /\xF0\x90\x81[\x80-\x8F]/; - return "Linear B symbol" if $c =~ /\xF0\x90\x81[\x90-\x9F]/; - return "Linear B ideogram" if $c =~ /\xF0\x90[\x82-\x83]/; - return "Gothic letter" if $c =~ /\xF0\x90\x8C[\xB0-\xBF]/; - return "Gothic letter" if $c =~ /\xF0\x90\x8D[\x80-\x8F]/; - return "Phoenician letter" if $c =~ /\xF0\x90\xA4[\x80-\x95]/; - return "Phoenician number" if $c =~ /\xF0\x90\xA4[\x96-\x9B]/; - return "Phoenician punctuation" if $c =~ /\xF0\x90\xA4\x9F/; # word separator - return "Old Hungarian number" if $c =~ /\xF0\x90\xB3[\xBA-\xBF]/; - return "Old Hungarian letter" if $c =~ /\xF0\x90[\xB2-\xB3]/; - return "Cuneiform digit" if $c =~ /\xF0\x92\x90/; # 
numeric sign - return "Cuneiform digit" if $c =~ /\xF0\x92\x91[\x80-\xAF]/; # numeric sign - return "Cuneiform punctuation" if $c =~ /\xF0\x92\x91[\xB0-\xBF]/; - return "Cuneiform sign" if $c =~ /\xF0\x92[\x80-\x95]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x81\xA8/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x82[\xAD-\xB6]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x86[\x90\xBC-\xBF]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x87[\x80-\x84]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x8D[\xA2-\xAB]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x8E[\x86-\x92]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x8F[\xBA-\xBF]/; - return "Egyptian hieroglyph number" if $c =~ /\xF0\x93\x90[\x80-\x83]/; - return "Egyptian hieroglyph" if $c =~ /\xF0\x93[\x80-\x90]/; - return "enclosed alphanumeric" if $c =~ /\xF0\x9F[\x84-\x87]/; - return "Mahjong symbol" if $c =~ /\xF0\x9F\x80[\x80-\xAF]/; - return "Domino symbol" if $c =~ /\xF0\x9F\x80[\xB0-\xBF]/; - return "Domino symbol" if $c =~ /\xF0\x9F\x81/; - return "Domino symbol" if $c =~ /\xF0\x9F\x82[\x80-\x9F]/; - return "Playing card symbol" if $c =~ /\xF0\x9F\x82[\xA0-\xBF]/; - return "Playing card symbol" if $c =~ /\xF0\x9F\x83/; - return "CJK symbol" if $c =~ /\xF0\x9F[\x88-\x8B]/; - return "pictograph" if $c =~ /\xF0\x9F[\x8C-\x9B]/; - return "geometric shape" if $c =~ /\xF0\x9F[\x9E-\x9F]/; - return "non-ASCII punctuation" if $c =~ /\xF0\x9F[\xA0-\xA3]/; - return "pictograph" if $c =~ /\xF0\x9F[\xA4-\xAB]/; - return "CJK character" if $c =~ /\xF0[\xA0-\xAF]/; - return "tag" if $c =~ /\xF3\xA0[\x80-\x81]/; - return "variation selector" if $c =~ /\xF3\xA0[\x84-\x87]/; - return "private use character" if $c =~ /\xF3[\xB0-\xBF]/; - return "private use character" if $c =~ /\xF4[\x80-\x8F]/; - # ... 
- } elsif ($c =~ /[\xF8-\xFB]/) { - return "non-UTF8 (invalid)" unless $c =~ /[\xF8-\xFB][\x80-\xBF]{4,4}$/; - } elsif ($c =~ /[\xFC-\xFD]/) { - return "non-UTF8 (invalid)" unless $c =~ /[\xFC-\xFD][\x80-\xBF]{5,5}$/; - } elsif ($c =~ /\xFE/) { - return "non-UTF8 (invalid)" unless $c =~ /\xFE][\x80-\xBF]{6,6}$/; - } else { - return "non-UTF8 (invalid)"; - } - return "other character"; -} - -1; - - diff --git a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/rife/model/loss.py b/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/rife/model/loss.py deleted file mode 100644 index 72e5de6af050df7d55c2871a69637077970ddfb9..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/rife/model/loss.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import numpy as np -import torch.nn as nn -import torch.nn.functional as F -import torchvision.models as models - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -class EPE(nn.Module): - def __init__(self): - super(EPE, self).__init__() - - def forward(self, flow, gt, loss_mask): - loss_map = (flow - gt.detach()) ** 2 - loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5 - return (loss_map * loss_mask) - - -class Ternary(nn.Module): - def __init__(self): - super(Ternary, self).__init__() - patch_size = 7 - out_channels = patch_size * patch_size - self.w = np.eye(out_channels).reshape( - (patch_size, patch_size, 1, out_channels)) - self.w = np.transpose(self.w, (3, 2, 0, 1)) - self.w = torch.tensor(self.w).float().to(device) - - def transform(self, img): - patches = F.conv2d(img, self.w, padding=3, bias=None) - transf = patches - img - transf_norm = transf / torch.sqrt(0.81 + transf**2) - return transf_norm - - def rgb2gray(self, rgb): - r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :] - gray = 0.2989 * r + 0.5870 * g + 0.1140 * b - return gray - - def hamming(self, t1, t2): - dist = (t1 - t2) ** 2 - dist_norm = torch.mean(dist / (0.1 + dist), 1, True) - return dist_norm - - def valid_mask(self, t, padding): - n, _, h, w = t.size() - inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) - mask = F.pad(inner, [padding] * 4) - return mask - - def forward(self, img0, img1): - img0 = self.transform(self.rgb2gray(img0)) - img1 = self.transform(self.rgb2gray(img1)) - return self.hamming(img0, img1) * self.valid_mask(img0, 1) - - -class SOBEL(nn.Module): - def __init__(self): - super(SOBEL, self).__init__() - self.kernelX = torch.tensor([ - [1, 0, -1], - [2, 0, -2], - [1, 0, -1], - ]).float() - self.kernelY = self.kernelX.clone().T - self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device) - self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device) - - def forward(self, pred, gt): - N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3] - img_stack = torch.cat( - [pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0) - sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1) - sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1) - pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:] - pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:] - - L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y) - loss = (L1X+L1Y) - return loss - -class MeanShift(nn.Conv2d): - def __init__(self, data_mean, data_std, data_range=1, norm=True): - c = len(data_mean) - super(MeanShift, self).__init__(c, c, kernel_size=1) - 
std = torch.Tensor(data_std) - self.weight.data = torch.eye(c).view(c, c, 1, 1) - if norm: - self.weight.data.div_(std.view(c, 1, 1, 1)) - self.bias.data = -1 * data_range * torch.Tensor(data_mean) - self.bias.data.div_(std) - else: - self.weight.data.mul_(std.view(c, 1, 1, 1)) - self.bias.data = data_range * torch.Tensor(data_mean) - self.requires_grad = False - -class VGGPerceptualLoss(torch.nn.Module): - def __init__(self, rank=0): - super(VGGPerceptualLoss, self).__init__() - blocks = [] - pretrained = True - self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features - self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X, Y, indices=None): - X = self.normalize(X) - Y = self.normalize(Y) - indices = [2, 7, 12, 21, 30] - weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5] - k = 0 - loss = 0 - for i in range(indices[-1]): - X = self.vgg_pretrained_features[i](X) - Y = self.vgg_pretrained_features[i](Y) - if (i+1) in indices: - loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1 - k += 1 - return loss - -if __name__ == '__main__': - img0 = torch.zeros(3, 3, 256, 256).float().to(device) - img1 = torch.tensor(np.random.normal( - 0, 1, (3, 3, 256, 256))).float().to(device) - ternary_loss = Ternary() - print(ternary_loss(img0, img1).shape) diff --git a/spaces/jatinbittu13/selfie-nonselfie/README.md b/spaces/jatinbittu13/selfie-nonselfie/README.md deleted file mode 100644 index be651ab524412c92cc68ed70858893c73e502985..0000000000000000000000000000000000000000 --- a/spaces/jatinbittu13/selfie-nonselfie/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Selfie Nonselfie -emoji: 🏢 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jhwen/bingo/src/lib/bots/bing/tts.ts b/spaces/jhwen/bingo/src/lib/bots/bing/tts.ts deleted file mode 100644 index cd10b7d1d7581bf9cf46ff6755fcca550c558c9b..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/src/lib/bots/bing/tts.ts +++ /dev/null @@ -1,82 +0,0 @@ -import { sleep } from './utils' - -const synth = window.speechSynthesis - -export class TTS { - currentText = '' - speakText = '' - private controller = new AbortController() - speaking = false - get isSpeaking() { - return this.speaking - } - finished = false - constructor() {} - abort = () => { - this.controller.abort() - } - - reset = () => { - this.speaking = false - this.finished = true - this.currentText = '' - this.speakText = '' - this.abort() - } - - speak = (text: string) => { - if (!synth || text?.trim()?.length < 2) { - return - } - this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '') - this.finished = false - this.loop() - } - - private async doSpeek() { - return new Promise((resolve) => { - const endIndex = this.finished ? this.currentText.length : - Math.max( - this.currentText.lastIndexOf('。'), - this.currentText.lastIndexOf(';'), - this.currentText.lastIndexOf('、'), - this.currentText.lastIndexOf('?'), - this.currentText.lastIndexOf('\n') - ) - const startIndex = this.speakText.length ? 
Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0 - - if (startIndex >= endIndex) { - return resolve(true) - } - const text = this.currentText.slice(startIndex, endIndex) - this.speakText = text - const utterThis = new SpeechSynthesisUtterance(text) - this.controller.signal.onabort = () => { - synth.cancel() - this.finished = true - resolve(false) - } - - utterThis.onend = function (event) { - resolve(true) - } - - utterThis.onerror = function (event) { - resolve(false) - } - - const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null - utterThis.voice = voice - synth.speak(utterThis) - }) - } - - private async loop() { - if (this.speaking) return - this.speaking = true - while(!this.finished) { - await Promise.all([sleep(1000), this.doSpeek()]) - } - this.speaking = false - } -} diff --git a/spaces/jiaqingj/ConZIC/gen_utils.py b/spaces/jiaqingj/ConZIC/gen_utils.py deleted file mode 100644 index cc436bd7f48a7f6aa93d6367b327897c265d1c43..0000000000000000000000000000000000000000 --- a/spaces/jiaqingj/ConZIC/gen_utils.py +++ /dev/null @@ -1,324 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -import random -from utils import get_init_text, update_token_mask -import time - - - -def generate_step(out, gen_idx, temperature=None, top_k=0, sample=False, return_list=True): - """ Generate a word from out[gen_idx] - - args: - - out (torch.Tensor): tensor of logits of size batch_size x seq_len x vocab_size - - gen_idx (int): location for which to generate for - - top_k (int): if >0, only sample from the top k most probable words - - sample (Bool): if True, sample from full distribution. Overridden by top_k - """ - logits = out[:, gen_idx] - if temperature is not None: - logits = logits / temperature - if top_k > 0: - kth_vals, kth_idx = logits.topk(top_k, dim=-1) - dist = torch.distributions.categorical.Categorical(logits=kth_vals) - idx = kth_idx.gather(dim=1, index=dist.sample().unsqueeze(-1)).squeeze(-1) - elif sample: - dist = torch.distributions.categorical.Categorical(logits=logits) - idx = dist.sample().squeeze(-1) - else: - idx = torch.argmax(logits, dim=-1) - return idx.tolist() if return_list else idx - -def generate_caption_step(out, gen_idx, mask, temperature=None, top_k=100): - """ Generate a word from out[gen_idx] - args: - - out (torch.Tensor): tensor of logits of size (batch_size, seq_len, vocab_size) - - gen_idx (int): location for which to generate for - - mask (torch.Tensor): (1, vocab_size) - - top_k (int): candidate k - """ - logits = out[:, gen_idx] - if temperature is not None: - logits = logits / temperature - - probs = F.softmax(logits, dim=-1) - probs *= (mask) - top_k_probs, top_k_ids = probs.topk(top_k, dim=-1) - - return top_k_probs, top_k_ids - -def sequential_generation(model, clip, tokenizer, image_instance,token_mask, prompt, logger, - max_len=15, top_k=100,temperature=None, alpha=0.7,beta=1, - max_iters=20,batch_size=1, verbose=True): - """ Generate one word at a time, in L->R order """ - - seed_len = len(prompt.split())+1 - batch = get_init_text(tokenizer, prompt, max_len, batch_size) - image_embeds = clip.compute_image_representation_from_image_instance(image_instance) - clip_score_sequence = [] - best_clip_score = 0 - inp = torch.tensor(batch).to(image_embeds.device) - gen_texts = [] - for iter_num in range(max_iters): - for ii in range(max_len): - token_mask = update_token_mask(tokenizer, token_mask, max_len, ii) - for jj in range(batch_size): - inp[jj][seed_len + ii] 
= tokenizer.mask_token_id - inp_ = inp.clone().detach() - out = model(inp).logits - probs, idxs = generate_caption_step(out, gen_idx=seed_len + ii,mask=token_mask, top_k=top_k, temperature=temperature) - for jj in range(batch_size): - topk_inp = inp_.repeat(top_k, 1) - idxs_ = (idxs[jj] * token_mask[0][idxs[jj]]).long() - topk_inp[:, ii + seed_len] = idxs_ - batch_text_list = tokenizer.batch_decode(topk_inp, skip_special_tokens=True) - - clip_score, clip_ref = clip.compute_image_text_similarity_via_raw_text(image_embeds, batch_text_list) - final_score = alpha * probs + beta * clip_score - best_clip_id = final_score.argmax() - - inp[jj][seed_len + ii] = idxs_[best_clip_id] - current_clip_score = clip_ref[jj][best_clip_id] - clip_score_sequence.append(current_clip_score.cpu().item()) - - if verbose and np.mod(iter_num + 1, 1) == 0: - for_print = tokenizer.decode(inp[0]) - cur_text = tokenizer.decode(inp[0],skip_special_tokens=True) - if best_clip_score < current_clip_score.cpu().item(): - best_clip_score = current_clip_score.cpu().item() - best_caption = cur_text - gen_texts.append(cur_text) - logger.info(f"iter {iter_num + 1}, clip score {current_clip_score:.3f}: "+ for_print) - - gen_texts.append(best_caption) - clip_score_sequence.append(best_clip_score) - - return gen_texts, clip_score_sequence - -def shuffle_generation(model, clip, tokenizer,image_instance,token_mask, prompt, logger, - max_len=15, top_k=0,temperature=None, alpha=0.7,beta=1, - max_iters=20,batch_size=1, - verbose=True): - """ Generate one word at a time, in random generation order """ - seed_len = len(prompt.split())+1 - batch = get_init_text(tokenizer,prompt, max_len, batch_size) - image_embeds = clip.compute_image_representation_from_image_instance(image_instance) - inp = torch.tensor(batch).to(image_embeds.device) - clip_score_sequence = [] - best_clip_score = 0 - random_lst = list(range(max_len)) - random.shuffle(random_lst) - logger.info(f"Order_list:{random_lst}") - gen_texts = [] - for iter_num in range(max_iters): - for ii in random_lst: - token_mask = update_token_mask(tokenizer, token_mask, max_len, ii) - for jj in range(batch_size): - inp[jj][seed_len + ii] = tokenizer.mask_token_id - inp_ = inp.clone().detach() - out = model(inp).logits - probs, idxs = generate_caption_step(out, gen_idx=seed_len + ii,mask=token_mask, top_k=top_k, temperature=temperature) - for jj in range(batch_size): - topk_inp = inp_.repeat(top_k, 1) - topk_inp[:, ii + seed_len] = (idxs[jj] * token_mask[0][idxs[jj]]).long() - batch_text_list = tokenizer.batch_decode(topk_inp, skip_special_tokens=True) - clip_score,clip_ref = clip.compute_image_text_similarity_via_raw_text(image_embeds, batch_text_list) - final_score = alpha * probs + beta * clip_score - best_clip_id = final_score.argmax() - inp[jj][seed_len + ii] = idxs[jj][best_clip_id] - current_clip_score = clip_ref[jj][best_clip_id] - clip_score_sequence.append(current_clip_score.cpu().item()) - if verbose and np.mod(iter_num + 1, 1) == 0: - for_print = tokenizer.decode(inp[0]) - cur_text = tokenizer.decode(inp[0],skip_special_tokens=True) - gen_texts.append(cur_text) - if best_clip_score < current_clip_score.cpu().item(): - best_clip_score = current_clip_score.cpu().item() - best_caption = cur_text - logger.info(f"iter {iter_num + 1}, clip score {current_clip_score:.3f}: "+for_print) - gen_texts.append(best_caption) - clip_score_sequence.append(best_clip_score) - - return gen_texts, clip_score_sequence - -def span_generation(model, clip, tokenizer,image_instance,token_mask, prompt, 
logger, - max_len=15, top_k=0,temperature=None, alpha=0.7,beta=1, - max_iters=20,batch_size=1,verbose=True): - """ Generate multiple words at a time (span generation), in L->R order """ - seed_len = len(prompt.split())+1 - span_len = 2 - batch = get_init_text(tokenizer,prompt, max_len, batch_size) - image_embeds = clip.compute_image_representation_from_image_instance(image_instance) - clip_score_sequence = [] - best_clip_score = 0 - inp = torch.tensor(batch).to(image_embeds.device) - gen_texts = [] - for iter_num in range(max_iters): - for span_start in range(0,max_len,span_len): - span_end = min(span_start+span_len,max_len) - for jj in range(batch_size): - inp[jj][seed_len + span_start: seed_len + span_end] = tokenizer.mask_token_id - out = model(inp).logits - - for ii in range(span_start,span_end): - token_mask = update_token_mask(tokenizer, token_mask, max_len, ii) - inp_ = inp.clone().detach() - probs, idxs = generate_caption_step(out, gen_idx=seed_len + ii, mask=token_mask, top_k=top_k, - temperature=temperature) - for jj in range(batch_size): - topk_inp = inp_.repeat(top_k, 1) - idxs_ = (idxs[jj] * token_mask[0][idxs[jj]]).long() - topk_inp[:, ii + seed_len] = idxs_ - batch_text_list = tokenizer.batch_decode(topk_inp, skip_special_tokens=True) - - clip_score, clip_ref = clip.compute_image_text_similarity_via_raw_text(image_embeds, batch_text_list) - final_score = alpha * probs + beta * clip_score - best_clip_id = final_score.argmax() - - inp[jj][seed_len + ii] = idxs_[best_clip_id] - current_clip_score = clip_ref[jj][best_clip_id] - clip_score_sequence.append(current_clip_score.cpu().item()) - - if verbose and np.mod(iter_num + 1, 1) == 0: - for_print = tokenizer.decode(inp[0]) - cur_text = tokenizer.decode(inp[0],skip_special_tokens=True) - if best_clip_score < current_clip_score.cpu().item(): - best_clip_score = current_clip_score.cpu().item() - best_caption = cur_text - gen_texts.append(cur_text) - logger.info(f"iter {iter_num + 1}, clip score {current_clip_score:.3f}: "+ for_print) - gen_texts.append(best_caption) - clip_score_sequence.append(best_clip_score) - - return gen_texts, clip_score_sequence - -def random_generation(model, clip, tokenizer,image_instance,token_mask, prompt, logger, - max_len=15, top_k=0, temperature=None,alpha=0.7,beta=2, - max_iters=300,print_every=10,batch_size=1, - verbose=True): - """ Generate for one random position at a timestep""" - - seed_len = len(prompt.split())+1 - batch = get_init_text(tokenizer, prompt, max_len, batch_size) - image_embeds = clip.compute_image_representation_from_image_instance(image_instance) - clip_score_sequence = [] - best_clip_score = 0 - inp = torch.tensor(batch).to(image_embeds.device) - gen_texts = [] - for ii in range(max_iters): - kk = np.random.randint(0, max_len) - token_mask = update_token_mask(tokenizer, token_mask, max_len, kk) - for jj in range(batch_size): - inp[jj][seed_len + kk] = tokenizer.mask_token_id - inp_ = inp.clone().detach() - out = model(inp).logits - probs, idxs = generate_caption_step(out,gen_idx=seed_len + kk,mask=token_mask, top_k=top_k, temperature=temperature) - for jj in range(batch_size): - topk_inp = inp_.repeat(top_k, 1) - topk_inp[:, kk + seed_len] = (idxs[jj] * token_mask[0][idxs[jj]]).long() - batch_text_list = tokenizer.batch_decode(topk_inp, skip_special_tokens=True) - - clip_score, clip_ref = clip.compute_image_text_similarity_via_raw_text(image_embeds, batch_text_list) - final_score = alpha * probs + beta * clip_score - best_clip_id = final_score.argmax() - - inp[jj][seed_len + kk] 
= idxs[jj][best_clip_id] - current_clip_score = clip_ref[jj][best_clip_id] - clip_score_sequence.append(current_clip_score.cpu().item()) - if best_clip_score < current_clip_score.cpu().item(): - best_clip_score = current_clip_score.cpu().item() - best_caption = tokenizer.decode(inp[0], skip_special_tokens=True) - - if verbose and np.mod(ii + 1, print_every) == 0: - for_print = tokenizer.decode(inp[0]) - logger.info(f"iter {ii + 1}, clip score {current_clip_score:.3f}: "+for_print) - cur_text = tokenizer.decode(inp[0], skip_special_tokens=True) - gen_texts.append(cur_text) - gen_texts.append(best_caption) - clip_score_sequence.append(best_clip_score) - - return gen_texts, clip_score_sequence - -def parallel_generation(model, clip, tokenizer,image_instance,token_mask, prompt, logger, - max_len=15, top_k=0, temperature=None, alpha=0.1, beta=1, - max_iters=300,batch_size=1,print_every=1, verbose=True): - """ Generate for all positions at a time step """ - seed_len = len(prompt.split())+1 - batch = get_init_text(tokenizer,prompt, max_len, batch_size) - image_embeds = clip.compute_image_representation_from_image_instance(image_instance) - clip_score_sequence = [] - inp = torch.tensor(batch).to(image_embeds.device) - gen_texts = [] - best_clip_score = 0 - - for ii in range(max_iters): - inp_ = inp.clone().detach() - out = model(inp).logits - for kk in range(max_len): - probs, idxs = generate_caption_step(out, gen_idx=seed_len + kk,mask=token_mask, top_k=top_k, temperature=temperature) - for jj in range(batch_size): - topk_inp = inp_.repeat(top_k, 1) - topk_inp[:, ii + seed_len] = (idxs[jj] * token_mask[0][idxs[jj]]).long() - batch_text_list = tokenizer.batch_decode(topk_inp, skip_special_tokens=True) - clip_score,clip_ref = clip.compute_image_text_similarity_via_raw_text(image_embeds, batch_text_list) - final_score = alpha * probs + beta * clip_score - best_clip_id = final_score.argmax() - - inp[jj][seed_len + kk] = idxs[jj][best_clip_id] - current_clip_score = clip_ref[jj][best_clip_id] - clip_score_sequence.append(current_clip_score.cpu().item()) - - if verbose and np.mod(ii, 1) == 0: - logger.info(f"iter{ii + 1}, clip score {current_clip_score:.3f}: " + tokenizer.decode(inp[0])) - cur_text = tokenizer.decode(inp[0], skip_special_tokens=True) - if best_clip_score < current_clip_score.cpu().item(): - best_clip_score = current_clip_score.cpu().item() - best_caption = cur_text - gen_texts.append(cur_text) - gen_texts.append(best_caption) - clip_score_sequence.append(best_clip_score) - - return gen_texts, clip_score_sequence - -def generate_caption(model, clip, tokenizer,image_instance,token_mask,logger, - prompt="", batch_size=1, max_len=15, - top_k=100, temperature=1.0, max_iter=500,alpha=0.7,beta=1, - generate_order="sequential"): - # main generation functions to call - start_time = time.time() - - if generate_order=="sequential": - generate_texts, clip_scores = sequential_generation(model, clip, tokenizer, image_instance, token_mask, prompt, logger, - batch_size=batch_size, max_len=max_len, top_k=top_k, - alpha=alpha,beta=beta,temperature=temperature, - max_iters=max_iter) - - elif generate_order=="shuffle": - # max_iter = 15 - generate_texts, clip_scores = shuffle_generation(model, clip, tokenizer,image_instance,token_mask,prompt, logger, - batch_size=batch_size, max_len=max_len, top_k=top_k, - alpha=alpha,beta=beta,temperature=temperature,max_iters=max_iter) - - elif generate_order=="random": - max_iter *= max_len - print_every = max_len - generate_texts, clip_scores = 
random_generation(model, clip, tokenizer,image_instance,token_mask,prompt,logger, - max_len=max_len, top_k=top_k,alpha=alpha,beta=beta,print_every=print_every, - temperature=temperature, max_iters=max_iter,verbose=True) - - elif generate_order=="span": - max_iter = max_iter - generate_texts, clip_scores = span_generation(model, clip, tokenizer, image_instance, token_mask, prompt, logger, - batch_size=batch_size, max_len=max_len, top_k=top_k, - alpha=alpha,beta=beta,temperature=temperature, max_iters=max_iter) - - elif generate_order=="parallel": - generate_texts, clip_scores = parallel_generation(model, clip, tokenizer,image_instance,token_mask,prompt, logger, - max_len=max_len, temperature=temperature,top_k=top_k,alpha=alpha,beta=beta, - max_iters=max_iter,verbose=True) - - logger.info("Finished in %.3fs" % (time.time() - start_time)) - logger.info(f"final caption: {generate_texts[-2]}") - logger.info(f"best caption: {generate_texts[-1]}") - return generate_texts, clip_scores \ No newline at end of file diff --git a/spaces/jjie/DeepDanbooru_string/README.md b/spaces/jjie/DeepDanbooru_string/README.md deleted file mode 100644 index 4330b6f969246dc764a34ea254d2e807159f1c55..0000000000000000000000000000000000000000 --- a/spaces/jjie/DeepDanbooru_string/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: DeepDanbooru String -emoji: 💬 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -duplicated_from: NoCrypt/DeepDanbooru_string ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/EpsImagePlugin.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/EpsImagePlugin.py deleted file mode 100644 index 6b1b5947ec0654b36ac15334327e412c0743b925..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/EpsImagePlugin.py +++ /dev/null @@ -1,466 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# EPS file handling -# -# History: -# 1995-09-01 fl Created (0.1) -# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2) -# 1996-08-22 fl Don't choke on floating point BoundingBox values -# 1996-08-23 fl Handle files from Macintosh (0.3) -# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4) -# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5) -# 2014-05-07 e Handling of EPS with binary preview and fixed resolution -# resizing -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-2003 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import io -import os -import re -import subprocess -import sys -import tempfile - -from . 
import Image, ImageFile -from ._binary import i32le as i32 -from ._deprecate import deprecate - -# -------------------------------------------------------------------- - - -split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$") -field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$") - -gs_windows_binary = None -if sys.platform.startswith("win"): - import shutil - - for binary in ("gswin32c", "gswin64c", "gs"): - if shutil.which(binary) is not None: - gs_windows_binary = binary - break - else: - gs_windows_binary = False - - -def has_ghostscript(): - if gs_windows_binary: - return True - if not sys.platform.startswith("win"): - try: - subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL) - return True - except OSError: - # No Ghostscript - pass - return False - - -def Ghostscript(tile, size, fp, scale=1, transparency=False): - """Render an image using Ghostscript""" - - # Unpack decoder tile - decoder, tile, offset, data = tile[0] - length, bbox = data - - # Hack to support hi-res rendering - scale = int(scale) or 1 - # orig_size = size - # orig_bbox = bbox - size = (size[0] * scale, size[1] * scale) - # resolution is dependent on bbox and size - res = ( - 72.0 * size[0] / (bbox[2] - bbox[0]), - 72.0 * size[1] / (bbox[3] - bbox[1]), - ) - - out_fd, outfile = tempfile.mkstemp() - os.close(out_fd) - - infile_temp = None - if hasattr(fp, "name") and os.path.exists(fp.name): - infile = fp.name - else: - in_fd, infile_temp = tempfile.mkstemp() - os.close(in_fd) - infile = infile_temp - - # Ignore length and offset! - # Ghostscript can read it - # Copy whole file to read in Ghostscript - with open(infile_temp, "wb") as f: - # fetch length of fp - fp.seek(0, io.SEEK_END) - fsize = fp.tell() - # ensure start position - # go back - fp.seek(0) - lengthfile = fsize - while lengthfile > 0: - s = fp.read(min(lengthfile, 100 * 1024)) - if not s: - break - lengthfile -= len(s) - f.write(s) - - device = "pngalpha" if transparency else "ppmraw" - - # Build Ghostscript command - command = [ - "gs", - "-q", # quiet mode - "-g%dx%d" % size, # set output geometry (pixels) - "-r%fx%f" % res, # set input DPI (dots per inch) - "-dBATCH", # exit after processing - "-dNOPAUSE", # don't pause between pages - "-dSAFER", # safe mode - f"-sDEVICE={device}", - f"-sOutputFile={outfile}", # output file - # adjust for image origin - "-c", - f"{-bbox[0]} {-bbox[1]} translate", - "-f", - infile, # input file - # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272) - "-c", - "showpage", - ] - - if gs_windows_binary is not None: - if not gs_windows_binary: - try: - os.unlink(outfile) - if infile_temp: - os.unlink(infile_temp) - except OSError: - pass - - msg = "Unable to locate Ghostscript on paths" - raise OSError(msg) - command[0] = gs_windows_binary - - # push data through Ghostscript - try: - startupinfo = None - if sys.platform.startswith("win"): - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - subprocess.check_call(command, startupinfo=startupinfo) - out_im = Image.open(outfile) - out_im.load() - finally: - try: - os.unlink(outfile) - if infile_temp: - os.unlink(infile_temp) - except OSError: - pass - - im = out_im.im.copy() - out_im.close() - return im - - -class PSFile: - """ - Wrapper for bytesio object that treats either CR or LF as end of line. - This class is no longer used internally, but kept for backwards compatibility. 
- """ - - def __init__(self, fp): - deprecate( - "PSFile", - 11, - action="If you need the functionality of this class " - "you will need to implement it yourself.", - ) - self.fp = fp - self.char = None - - def seek(self, offset, whence=io.SEEK_SET): - self.char = None - self.fp.seek(offset, whence) - - def readline(self): - s = [self.char or b""] - self.char = None - - c = self.fp.read(1) - while (c not in b"\r\n") and len(c): - s.append(c) - c = self.fp.read(1) - - self.char = self.fp.read(1) - # line endings can be 1 or 2 of \r \n, in either order - if self.char in b"\r\n": - self.char = None - - return b"".join(s).decode("latin-1") - - -def _accept(prefix): - return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5) - - -## -# Image plugin for Encapsulated PostScript. This plugin supports only -# a few variants of this format. - - -class EpsImageFile(ImageFile.ImageFile): - """EPS File Parser for the Python Imaging Library""" - - format = "EPS" - format_description = "Encapsulated Postscript" - - mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"} - - def _open(self): - (length, offset) = self._find_offset(self.fp) - - # go to offset - start of "%!PS" - self.fp.seek(offset) - - self.mode = "RGB" - self._size = None - - byte_arr = bytearray(255) - bytes_mv = memoryview(byte_arr) - bytes_read = 0 - reading_comments = True - - def check_required_header_comments(): - if "PS-Adobe" not in self.info: - msg = 'EPS header missing "%!PS-Adobe" comment' - raise SyntaxError(msg) - if "BoundingBox" not in self.info: - msg = 'EPS header missing "%%BoundingBox" comment' - raise SyntaxError(msg) - - while True: - byte = self.fp.read(1) - if byte == b"": - # if we didn't read a byte we must be at the end of the file - if bytes_read == 0: - break - elif byte in b"\r\n": - # if we read a line ending character, ignore it and parse what - # we have already read. if we haven't read any other characters, - # continue reading - if bytes_read == 0: - continue - else: - # ASCII/hexadecimal lines in an EPS file must not exceed - # 255 characters, not including line ending characters - if bytes_read >= 255: - # only enforce this for lines starting with a "%", - # otherwise assume it's binary data - if byte_arr[0] == ord("%"): - msg = "not an EPS file" - raise SyntaxError(msg) - else: - if reading_comments: - check_required_header_comments() - reading_comments = False - # reset bytes_read so we can keep reading - # data until the end of the line - bytes_read = 0 - byte_arr[bytes_read] = byte[0] - bytes_read += 1 - continue - - if reading_comments: - # Load EPS header - - # if this line doesn't start with a "%", - # or does start with "%%EndComments", - # then we've reached the end of the header/comments - if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments": - check_required_header_comments() - reading_comments = False - continue - - s = str(bytes_mv[:bytes_read], "latin-1") - - try: - m = split.match(s) - except re.error as e: - msg = "not an EPS file" - raise SyntaxError(msg) from e - - if m: - k, v = m.group(1, 2) - self.info[k] = v - if k == "BoundingBox": - try: - # Note: The DSC spec says that BoundingBox - # fields should be integers, but some drivers - # put floating point values there anyway. 
- box = [int(float(i)) for i in v.split()] - self._size = box[2] - box[0], box[3] - box[1] - self.tile = [ - ("eps", (0, 0) + self.size, offset, (length, box)) - ] - except Exception: - pass - else: - m = field.match(s) - if m: - k = m.group(1) - if k[:8] == "PS-Adobe": - self.info["PS-Adobe"] = k[9:] - else: - self.info[k] = "" - elif s[0] == "%": - # handle non-DSC PostScript comments that some - # tools mistakenly put in the Comments section - pass - else: - msg = "bad EPS header" - raise OSError(msg) - elif bytes_mv[:11] == b"%ImageData:": - # Check for an "ImageData" descriptor - # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096 - - # Values: - # columns - # rows - # bit depth (1 or 8) - # mode (1: L, 2: LAB, 3: RGB, 4: CMYK) - # number of padding channels - # block size (number of bytes per row per channel) - # binary/ascii (1: binary, 2: ascii) - # data start identifier (the image data follows after a single line - # consisting only of this quoted value) - image_data_values = byte_arr[11:bytes_read].split(None, 7) - columns, rows, bit_depth, mode_id = [ - int(value) for value in image_data_values[:4] - ] - - if bit_depth == 1: - self.mode = "1" - elif bit_depth == 8: - try: - self.mode = self.mode_map[mode_id] - except ValueError: - break - else: - break - - self._size = columns, rows - return - - bytes_read = 0 - - check_required_header_comments() - - if not self._size: - msg = "cannot determine EPS bounding box" - raise OSError(msg) - - def _find_offset(self, fp): - s = fp.read(4) - - if s == b"%!PS": - # for HEAD without binary preview - fp.seek(0, io.SEEK_END) - length = fp.tell() - offset = 0 - elif i32(s) == 0xC6D3D0C5: - # FIX for: Some EPS file not handled correctly / issue #302 - # EPS can contain binary data - # or start directly with latin coding - # more info see: - # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf - s = fp.read(8) - offset = i32(s) - length = i32(s, 4) - else: - msg = "not an EPS file" - raise SyntaxError(msg) - - return length, offset - - def load(self, scale=1, transparency=False): - # Load EPS via Ghostscript - if self.tile: - self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency) - self.mode = self.im.mode - self._size = self.im.size - self.tile = [] - return Image.Image.load(self) - - def load_seek(self, *args, **kwargs): - # we can't incrementally load, so force ImageFile.parser to - # use our custom load method by defining this method. - pass - - -# -------------------------------------------------------------------- - - -def _save(im, fp, filename, eps=1): - """EPS Writer for the Python Imaging Library.""" - - # make sure image data is available - im.load() - - # determine PostScript image mode - if im.mode == "L": - operator = (8, 1, b"image") - elif im.mode == "RGB": - operator = (8, 3, b"false 3 colorimage") - elif im.mode == "CMYK": - operator = (8, 4, b"false 4 colorimage") - else: - msg = "image mode is not supported" - raise ValueError(msg) - - if eps: - # write EPS header - fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n") - fp.write(b"%%Creator: PIL 0.1 EpsEncode\n") - # fp.write("%%CreationDate: %s"...) 
- fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size) - fp.write(b"%%Pages: 1\n") - fp.write(b"%%EndComments\n") - fp.write(b"%%Page: 1 1\n") - fp.write(b"%%ImageData: %d %d " % im.size) - fp.write(b'%d %d 0 1 1 "%s"\n' % operator) - - # image header - fp.write(b"gsave\n") - fp.write(b"10 dict begin\n") - fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1])) - fp.write(b"%d %d scale\n" % im.size) - fp.write(b"%d %d 8\n" % im.size) # <= bits - fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1])) - fp.write(b"{ currentfile buf readhexstring pop } bind\n") - fp.write(operator[2] + b"\n") - if hasattr(fp, "flush"): - fp.flush() - - ImageFile._save(im, fp, [("eps", (0, 0) + im.size, 0, None)]) - - fp.write(b"\n%%%%EndBinary\n") - fp.write(b"grestore end\n") - if hasattr(fp, "flush"): - fp.flush() - - -# -------------------------------------------------------------------- - - -Image.register_open(EpsImageFile.format, EpsImageFile, _accept) - -Image.register_save(EpsImageFile.format, _save) - -Image.register_extensions(EpsImageFile.format, [".ps", ".eps"]) - -Image.register_mime(EpsImageFile.format, "application/postscript") diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/_version.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/_version.py deleted file mode 100644 index ed9907d764d33da040cf4552bcce6417fc2bbb74..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/_version.py +++ /dev/null @@ -1,21 +0,0 @@ - -# This file was generated by 'versioneer.py' (0.20) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. - -import json - -version_json = ''' -{ - "date": "2023-09-22T14:30:26-0400", - "dirty": false, - "error": null, - "full-revisionid": "3ad74814c8bd8620b3aa434080218eb658a81a6c", - "version": "2023.9.2" -} -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) diff --git a/spaces/johnslegers/bilingual_stable_diffusion/header.html b/spaces/johnslegers/bilingual_stable_diffusion/header.html deleted file mode 100644 index f6523099101ccecb363abfc880eabd6394454f85..0000000000000000000000000000000000000000 --- a/spaces/johnslegers/bilingual_stable_diffusion/header.html +++ /dev/null @@ -1,37 +0,0 @@ -
                    -
                    FlagAI -
                    - -

                    - FlagStudio 项目致力于贡献优秀AI生成艺术作品。此双语文生图模型项目基于 stable diffusion,由BAAI旗下的FlagAI团队提供支持,相关代码和模型权重会在FlagAI中进行开源。 -

                    -

                    - FlagStudio aims to provide high-quality AI-generated artwork. Our current bilingual model is based on the original stable diffusion model and can generate images from both Chinese and English text. FlagStudio is developed and supported by the FlagAI team. Relevant code and model weights will be released soon in FlagAI. (open.platform@baai.ac.cn) -

                    - -
                    \ No newline at end of file diff --git a/spaces/johnslegers/stable-diffusion-gui-test/ui/index.html b/spaces/johnslegers/stable-diffusion-gui-test/ui/index.html deleted file mode 100644 index 2deda1893ce65e70d6b6f70cf57c008bd95fa924..0000000000000000000000000000000000000000 --- a/spaces/johnslegers/stable-diffusion-gui-test/ui/index.html +++ /dev/null @@ -1,242 +0,0 @@ - - - - - - - - - - - - - - -
                    -
                    - -
                      - - -
                    -
                    - -
                    -
                    -
                    -
                    - Stable Diffusion is starting... -
                    -
                    -
                    - - - (or) (one prompt per line) - -
                    - -
                    -
                    - -
                    - - - -
                    - -
                    -
                    -
                    - -
                    - -
                    -
                    - - - -
                    - -
                     
                    - -
                    -

                    Image Settings

                    -
                      -
                    • Image Settings
                    • -
                    • -
                    • (images at once)
                    • -
                    • - -
                    • -
                    • - -
                    • -
                    • - - - - -
                    • -
                    • -
                    • -

                    • -
                    • - -
                    • - -
                      - -
                    • Prompt Settings
                    • -
                    • - -
                      - -
                    • Render Settings
                    • -
                    • -
                    • -
                    • - - -
                    • -
                    • -
                      -
                    • The system-related settings have been moved to the top-right corner.
                    • -
                    -
                    - -
                    -

                    Image Modifiers (art styles, tags etc)

                    -
                    - - -   - - -
                    -
                    -
                    - -
                    -
                    - Type a prompt and press the "Make Image" button.

                    You can set an "Initial Image" if you want to guide the AI.

                    You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation", etc. by browsing through the "Image Modifiers" section and selecting the desired modifiers.

                    Click "Advanced Settings" for additional settings like seed, image size, number of images to generate etc.

                    Enjoy! :) -
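                    <!-- Illustration (assumed behavior, not part of the original markup): if the selected
                         modifiers are appended to the prompt as comma-separated tags, the prompt
                         "a lighthouse on a cliff at sunset" combined with the "Realistic" and "ArtStation"
                         modifiers would be sent to the backend as
                         "a lighthouse on a cliff at sunset, Realistic, ArtStation". -->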
                    -
                    - -
                    -
                    -
                    - -
                     
                    - - -
                    - - - - - diff --git a/spaces/jone/Music_Source_Separation/bytesep/inference_many.py b/spaces/jone/Music_Source_Separation/bytesep/inference_many.py deleted file mode 100644 index 154eb7d64a93dfd670dc7cad56c4e67eb8a63fe3..0000000000000000000000000000000000000000 --- a/spaces/jone/Music_Source_Separation/bytesep/inference_many.py +++ /dev/null @@ -1,163 +0,0 @@ -import argparse -import os -import pathlib -import time -from typing import NoReturn - -import librosa -import numpy as np -import soundfile -import torch - -from bytesep.inference import Separator -from bytesep.models.lightning_modules import get_model_class -from bytesep.utils import read_yaml - - -def inference(args) -> NoReturn: - r"""Separate all audios in a directory. - - Args: - config_yaml: str, the config file of a model being trained - checkpoint_path: str, the path of checkpoint to be loaded - audios_dir: str, the directory of audios to be separated - output_dir: str, the directory to write out separated audios - scale_volume: bool, if True then the volume is scaled to the maximum value of 1. - - Returns: - NoReturn - """ - - # Arguments & parameters - config_yaml = args.config_yaml - checkpoint_path = args.checkpoint_path - audios_dir = args.audios_dir - output_dir = args.output_dir - scale_volume = args.scale_volume - device = ( - torch.device('cuda') - if args.cuda and torch.cuda.is_available() - else torch.device('cpu') - ) - - configs = read_yaml(config_yaml) - sample_rate = configs['train']['sample_rate'] - input_channels = configs['train']['channels'] - target_source_types = configs['train']['target_source_types'] - target_sources_num = len(target_source_types) - model_type = configs['train']['model_type'] - mono = input_channels == 1 - - segment_samples = int(30 * sample_rate) - batch_size = 1 - device = "cuda" - - models_contains_inplaceabn = True - - # Need to use torch.distributed if models contain inplace_abn.abn.InPlaceABNSync. - if models_contains_inplaceabn: - - import torch.distributed as dist - - dist.init_process_group( - 'gloo', init_method='file:///tmp/somefile', rank=0, world_size=1 - ) - - print("Using {} for separating ..".format(device)) - - # paths - os.makedirs(output_dir, exist_ok=True) - - # Get model class. - Model = get_model_class(model_type) - - # Create model. - model = Model(input_channels=input_channels, target_sources_num=target_sources_num) - - # Load checkpoint. - checkpoint = torch.load(checkpoint_path, map_location='cpu') - model.load_state_dict(checkpoint["model"]) - - # Move model to device. - model.to(device) - - # Create separator. - separator = Separator( - model=model, - segment_samples=segment_samples, - batch_size=batch_size, - device=device, - ) - - audio_names = sorted(os.listdir(audios_dir)) - - for audio_name in audio_names: - audio_path = os.path.join(audios_dir, audio_name) - - # Load audio. - audio, _ = librosa.load(audio_path, sr=sample_rate, mono=mono) - - if audio.ndim == 1: - audio = audio[None, :] - - input_dict = {'waveform': audio} - - # Separate - separate_time = time.time() - - sep_wav = separator.separate(input_dict) - # (channels_num, audio_samples) - - print('Separate time: {:.3f} s'.format(time.time() - separate_time)) - - # Write out separated audio. 
- if scale_volume: - sep_wav /= np.max(np.abs(sep_wav)) - - soundfile.write(file='_zz.wav', data=sep_wav.T, samplerate=sample_rate) - - output_path = os.path.join( - output_dir, '{}.mp3'.format(pathlib.Path(audio_name).stem) - ) - os.system('ffmpeg -y -loglevel panic -i _zz.wav "{}"'.format(output_path)) - print('Write out to {}'.format(output_path)) - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="") - parser.add_argument( - "--config_yaml", - type=str, - required=True, - help="The config file of a model being trained.", - ) - parser.add_argument( - "--checkpoint_path", - type=str, - required=True, - help="The path of checkpoint to be loaded.", - ) - parser.add_argument( - "--audios_dir", - type=str, - required=True, - help="The directory of audios to be separated.", - ) - parser.add_argument( - "--output_dir", - type=str, - required=True, - help="The directory to write out separated audios.", - ) - parser.add_argument( - '--scale_volume', - action='store_true', - default=False, - help="set to True if separated audios are scaled to the maximum value of 1.", - ) - parser.add_argument("--cuda", action='store_true', default=True) - - args = parser.parse_args() - - inference(args) diff --git a/spaces/jordonpeter01/MusicGen/tests/modules/test_seanet.py b/spaces/jordonpeter01/MusicGen/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if 
isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/quantization/__init__.py b/spaces/jordonpeter01/MusicGen2/audiocraft/quantization/__init__.py deleted file mode 100644 index 836d6eb518978480c6b95d6f29ce4f84a9428793..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/audiocraft/quantization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -# flake8: noqa -from .vq import ResidualVectorQuantizer -from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/spaces/jtlowell/stable-diffusion-webui/README.md b/spaces/jtlowell/stable-diffusion-webui/README.md deleted file mode 100644 index 8f3f493265ef3e6a3dcf02bd5b8bb793eed0a104..0000000000000000000000000000000000000000 --- a/spaces/jtlowell/stable-diffusion-webui/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Stable Diffusion Webui -emoji: 💻 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: openrail -duplicated_from: withapp/stable-diffusion-webui ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/__init__.py b/spaces/juancopi81/youtube-music-transcribe/t5x/examples/__init__.py deleted file mode 100644 index 2ac5693550488d38623ec8e5b56e3fc3de148d40..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The T5X Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This empty file is needed to be recognized as a package by the setuptools.""" diff --git a/spaces/julien-c/cube/app.py b/spaces/julien-c/cube/app.py deleted file mode 100644 index a58805e60480de3a5055c4cfb48282a3ab2f14a4..0000000000000000000000000000000000000000 --- a/spaces/julien-c/cube/app.py +++ /dev/null @@ -1,35 +0,0 @@ -import tempfile -import gradio as gr -from gradio.inputs import Image, Radio -from gradio.outputs import Image3D -from gltflib import GLTF, FileResource - -avocado = GLTF.load('./Avocado.glb') -cube = GLTF.load("./AnimatedCube/AnimatedCube.gltf") - -def load_mesh(im: str, choose: str): - print(im) - # let's first clone the source model - model = (avocado if choose == "avocado" else cube).clone() - resource = FileResource(im.name) - model.resources.append(resource) - # let's do surgery - if choose != "avocado": - model.model.images[0].uri = im.name - with tempfile.NamedTemporaryFile(suffix=".glb", delete=False) as file: - model.export(file.name) - return file.name - -iface = gr.Interface( - fn=load_mesh, - description="draw on a canvas, get a 3D model", - inputs=[ - Image(source="canvas", type="file", label="canvas"), - Radio(choices=["cube", "avocado"], default="cube"), - ], - outputs=Image3D(), - live=True, -) - -if __name__ == "__main__": - iface.launch() diff --git a/spaces/justest/mdn-chatbot/src/app/globals.css b/spaces/justest/mdn-chatbot/src/app/globals.css deleted file mode 100644 index fd81e885836d815b8019694a910a93d86a43cb66..0000000000000000000000000000000000000000 --- a/spaces/justest/mdn-chatbot/src/app/globals.css +++ /dev/null @@ -1,27 +0,0 @@ -@tailwind base; -@tailwind components; -@tailwind utilities; - -:root { - --foreground-rgb: 0, 0, 0; - --background-start-rgb: 214, 219, 220; - --background-end-rgb: 255, 255, 255; -} - -@media (prefers-color-scheme: dark) 
{ - :root { - --foreground-rgb: 255, 255, 255; - --background-start-rgb: 0, 0, 0; - --background-end-rgb: 0, 0, 0; - } -} - -body { - color: rgb(var(--foreground-rgb)); - background: linear-gradient( - to bottom, - transparent, - rgb(var(--background-end-rgb)) - ) - rgb(var(--background-start-rgb)); -} diff --git a/spaces/jw2yang/unicl-img-recog-demo/README.md b/spaces/jw2yang/unicl-img-recog-demo/README.md deleted file mode 100644 index cac4e2a673186681ea0d45262a511c455b553adc..0000000000000000000000000000000000000000 --- a/spaces/jw2yang/unicl-img-recog-demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Unicl Demo -emoji: 🔥 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.0.11 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/ka1kuk/fastapi/g4f/Provider/Providers/DeepAi.py b/spaces/ka1kuk/fastapi/g4f/Provider/Providers/DeepAi.py deleted file mode 100644 index 955089b480994bf8efe6ae0a67603eadc8a510c0..0000000000000000000000000000000000000000 --- a/spaces/ka1kuk/fastapi/g4f/Provider/Providers/DeepAi.py +++ /dev/null @@ -1,74 +0,0 @@ -import json -import os -import requests -import js2py -from ...typing import sha256, Dict, get_type_hints - - -url = "https://api.deepai.org/" -model = ['gpt-3.5-turbo'] -supports_stream = True -needs_auth = False -working = True - -token_js = """ -var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' -var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y; -h = Math.round(1E11 * Math.random()) + ""; -f = function () { - for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI); - - return function (t) { - var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y], - Z = [], - A = unescape(encodeURI(t)) + "\u0080", - z = A.length; - t = --z / 4 + 2 | 15; - for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--; - for (q = A = 0; q < t; q += 16) { - for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2]; - for (A = 4; A;) ea[--A] += z[A] - } - for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16); - return t.split("").reverse().join("") - } -}(); - -"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x"))); -""" - -uuid4_js = """ -function uuidv4() { - for (var a = [], b = 0; 36 > b; b++) a[b] = "0123456789abcdef".substr(Math.floor(16 * Math.random()), 1); - a[14] = "4"; - a[19] = "0123456789abcdef".substr(a[19] & 3 | 8, 1); - a[8] = a[13] = a[18] = a[23] = "-"; - return a.join("") -} -uuidv4();""" - -def create_session(): - url = "https://api.deepai.org/save_chat_session" - - payload = {'uuid': js2py.eval_js(uuid4_js), "title":"", "chat_style": "chat", "messages": '[]'} - headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"} - - response = requests.request("POST", url, headers=headers, data=payload) - return response - -def _create_completion(model: str, messages:list, stream: bool = True, **kwargs): - create_session() - url = 
"https://api.deepai.org/make_me_a_pizza" - - payload = {'chas_style': "chat", "chatHistory": json.dumps(messages)} - api_key = js2py.eval_js(token_js) - headers = {"api-key": api_key, "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"} - - response = requests.request("POST", url, headers=headers, data=payload, stream=True) - for chunk in response.iter_content(chunk_size=None): - response.raise_for_status() - yield chunk.decode() - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/kadirnar/yolov8/README.md b/spaces/kadirnar/yolov8/README.md deleted file mode 100644 index 7522eaf7f558171e243bad81f25e4e4d321b396c..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/yolov8/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Yolov8 -emoji: 🌅 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: gpl-3.0 -tags: -- making-demos ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/karolmajek/YOLOR/darknet/new_layers.md b/spaces/karolmajek/YOLOR/darknet/new_layers.md deleted file mode 100644 index 9f7a35c02ae6564f35688aab29b3aed0f3ad3a2b..0000000000000000000000000000000000000000 --- a/spaces/karolmajek/YOLOR/darknet/new_layers.md +++ /dev/null @@ -1,329 +0,0 @@ -![Implicit Modeling](https://github.com/WongKinYiu/yolor/blob/main/figure/implicit_modeling.png) - -### 1. silence layer - -Usage: - -``` -[silence] -``` - -PyTorch code: - -``` python -class Silence(nn.Module): - def __init__(self): - super(Silence, self).__init__() - def forward(self, x): - return x -``` - - -### 2. implicit_add layer - -Usage: - -``` -[implicit_add] -filters=128 -``` - -PyTorch code: - -``` python -class ImplicitA(nn.Module): - def __init__(self, channel): - super(ImplicitA, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit -``` - - -### 3. shift_channels layer - -Usage: - -``` -[shift_channels] -from=101 -``` - -PyTorch code: - -``` python -class ShiftChannel(nn.Module): - def __init__(self, layers): - super(ShiftChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return a.expand_as(x) + x -``` - - -### 4. implicit_mul layer - -Usage: - -``` -[implicit_mul] -filters=128 -``` - -PyTorch code: - -``` python -class ImplicitM(nn.Module): - def __init__(self, channel): - super(ImplicitM, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1)) - nn.init.normal_(self.implicit, mean=1., std=.02) - - def forward(self): - return self.implicit -``` - - -### 5. control_channels layer - -Usage: - -``` -[control_channels] -from=101 -``` - -PyTorch code: - -``` python -class ControlChannel(nn.Module): - def __init__(self, layers): - super(ControlChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return a.expand_as(x) * x -``` - - -### 6. 
implicit_cat layer - -Usage: - -``` -[implicit_cat] -filters=128 -``` - -PyTorch code: (same as ImplicitA) - -``` python -class ImplicitC(nn.Module): - def __init__(self, channel): - super(ImplicitC, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit -``` - - -### 7. alternate_channels layer - -Usage: - -``` -[alternate_channels] -from=101 -``` - -PyTorch code: - -``` python -class AlternateChannel(nn.Module): - def __init__(self, layers): - super(AlternateChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return torch.cat([a.expand_as(x), x], dim=1) -``` - - -### 8. implicit_add_2d layer - -Usage: - -``` -[implicit_add_2d] -filters=128 -atoms=128 -``` - -PyTorch code: - -``` python -class Implicit2DA(nn.Module): - def __init__(self, atom, channel): - super(Implicit2DA, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, atom, channel, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit -``` - - -### 9. shift_channels_2d layer - -Usage: - -``` -[shift_channels_2d] -from=101 -``` - -PyTorch code: - -``` python -class ShiftChannel2D(nn.Module): - def __init__(self, layers): - super(ShiftChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1,-1,1,1) - return a.expand_as(x) + x -``` - - -### 10. implicit_mul_2d layer - -Usage: - -``` -[implicit_mul_2d] -filters=128 -atoms=128 -``` - -PyTorch code: - -``` python -class Implicit2DM(nn.Module): - def __init__(self, atom, channel): - super(Implicit2DM, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.ones(1, atom, channel, 1)) - nn.init.normal_(self.implicit, mean=1., std=.02) - - def forward(self): - return self.implicit -``` - - -### 11. control_channels_2d layer - -Usage: - -``` -[control_channels_2d] -from=101 -``` - -PyTorch code: - -``` python -class ControlChannel2D(nn.Module): - def __init__(self, layers): - super(ControlChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1,-1,1,1) - return a.expand_as(x) * x -``` - - -### 12. implicit_cat_2d layer - -Usage: - -``` -[implicit_cat_2d] -filters=128 -atoms=128 -``` - -PyTorch code: (same as Implicit2DA) - -``` python -class Implicit2DC(nn.Module): - def __init__(self, atom, channel): - super(Implicit2DC, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, atom, channel, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit -``` - - -### 13. alternate_channels_2d layer - -Usage: - -``` -[alternate_channels_2d] -from=101 -``` - -PyTorch code: - -``` python -class AlternateChannel2D(nn.Module): - def __init__(self, layers): - super(AlternateChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1,-1,1,1) - return torch.cat([a.expand_as(x), x], dim=1) -``` - - -### 14. 
dwt layer - -Usage: - -``` -[dwt] -``` - -PyTorch code: - -``` python -# https://github.com/fbcotter/pytorch_wavelets -from pytorch_wavelets import DWTForward, DWTInverse -class DWT(nn.Module): - def __init__(self): - super(DWT, self).__init__() - self.xfm = DWTForward(J=1, wave='db1', mode='zero') - - def forward(self, x): - b,c,w,h = x.shape - yl, yh = self.xfm(x) - return torch.cat([yl/2., yh[0].view(b,-1,w//2,h//2)/2.+.5], 1) -``` diff --git a/spaces/karolmajek/YOLOR/utils/general.py b/spaces/karolmajek/YOLOR/utils/general.py deleted file mode 100644 index 9b06c8b8955ab437cb21b5866436f9b228a38439..0000000000000000000000000000000000000000 --- a/spaces/karolmajek/YOLOR/utils/general.py +++ /dev/null @@ -1,449 +0,0 @@ -# General utils - -import glob -import logging -import math -import os -import platform -import random -import re -import subprocess -import time -from pathlib import Path - -import cv2 -import matplotlib -import numpy as np -import torch -import yaml - -from utils.google_utils import gsutil_getsize -from utils.metrics import fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f -from utils.torch_utils import init_torch_seeds - -# Set printoptions -torch.set_printoptions(linewidth=320, precision=5, profile='long') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -matplotlib.rc('font', **{'size': 11}) - -# Prevent OpenCV from multithreading (to use PyTorch DataLoader) -cv2.setNumThreads(0) - - -def set_logging(rank=-1): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if rank in [-1, 0] else logging.WARN) - - -def init_seeds(seed=0): - random.seed(seed) - np.random.seed(seed) - init_torch_seeds(seed) - - -def get_latest_run(search_dir='.'): - # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' - - -def check_git_status(): - # Suggest 'git pull' if repo is out of date - if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'): - s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8') - if 'Your branch is behind' in s: - print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n') - - -def check_img_size(img_size, s=32): - # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple - if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) - return new_size - - -def check_file(file): - # Search for file if not found - if os.path.isfile(file) or file == '': - return file - else: - files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), 'File Not Found: %s' % file # assert file was found - assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique - return files[0] # return file - - -def check_dataset(dict): - # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') - if val and len(val): - val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path - if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s): # download script - print('Downloading %s ...' 
% s) - if s.startswith('http') and s.endswith('.zip'): # URL - f = Path(s).name # filename - torch.hub.download_url_to_file(s, f) - r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip - else: # bash script - r = os.system(s) - print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value - else: - raise Exception('Dataset not found.') - - -def make_divisible(x, divisor): - # Returns x evenly divisible by divisor - return math.ceil(x / divisor) * divisor - - -def labels_to_class_weights(labels, nc=80): - # Get class weights (inverse frequency) from training labels - if labels[0] is None: # no labels loaded - return torch.Tensor() - - labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(np.int) # labels = [class xywh] - weights = np.bincount(classes, minlength=nc) # occurrences per class - - # Prepend gridpoint count (for uCE training) - # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image - # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - - weights[weights == 0] = 1 # replace empty bins with 1 - weights = 1 / weights # number of targets per class - weights /= weights.sum() # normalize - return torch.from_numpy(weights) - - -def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - # Produces image weights based on class mAPs - n = len(labels) - class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)]) - image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) - # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - return image_weights - - -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ - # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') - # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') - # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco - # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] - return x - - -def xyxy2xywh(x): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - -def xywh2xyxy(x): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - return y - - -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - # Rescale coords (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / 
img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords - - -def clip_coords(boxes, img_shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - - -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False, ECIoU=False, eps=1e-9): - # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - if GIoU or DIoU or CIoU or EIoU or ECIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU or EIoU or ECIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) - return iou - (rho2 / c2 + v * alpha) # CIoU - elif EIoU: # Efficient IoU https://arxiv.org/abs/2101.08158 - rho3 = (w1-w2) **2 - c3 = cw ** 2 + eps - rho4 = (h1-h2) **2 - c4 = ch ** 2 + eps - return iou - rho2 / c2 - rho3 / c3 - rho4 / c4 # EIoU - elif ECIoU: - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) - rho3 = (w1-w2) **2 - c3 = cw ** 2 + eps - rho4 = (h1-h2) **2 - c4 = ch ** 2 + eps - return iou - v * alpha - rho2 / c2 - rho3 / c3 - rho4 / c4 # ECIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
- Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) - - -def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False): - """Performs Non-Maximum Suppression (NMS) on inference results - - Returns: - detections with shape: nx6 (x1, y1, x2, y2, conf, cls) - """ - - nc = prediction[0].shape[1] - 5 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height - max_det = 300 # maximum number of detections per image - time_limit = 10.0 # seconds to quit after - redundant = True # require redundant detections - multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) - - t = time.time() - output = [torch.zeros(0, 6)] * prediction.shape[0] - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box (center x, center y, width, height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - - # Filter by class - if classes: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # If none remain process next image - n = x.shape[0] # number of boxes - if not n: - continue - - # Sort by confidence - # x = x[x[:, 4].argsort(descending=True)] - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torch.ops.torchvision.nms(boxes, scores, iou_thres) - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if (time.time() - t) > time_limit: - break # time limit exceeded - - return output - - -def 
strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() - # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - x['optimizer'] = None - x['training_results'] = None - x['epoch'] = -1 - #x['model'].half() # to FP16 - #for p in x['model'].parameters(): - # p.requires_grad = False - torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize - print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb)) - - -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - # Print mutation results to evolve.txt (for use with train.py --evolve) - a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) - - if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local - - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness - - # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') - yaml.dump(hyp, f, sort_keys=False) - - if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload - - -def apply_classifier(x, model, img, im0): - # applies a second stage classifier to yolo outputs - im0 = [im0] if isinstance(im0, np.ndarray) else im0 - for i, d in enumerate(x): # per image - if d is not None and len(d): - d = d.clone() - - # Reshape and pad cutouts - b = xyxy2xywh(d[:, :4]) # boxes - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square - b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad - d[:, :4] = xywh2xyxy(b).long() - - # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) - - # Classes - pred_cls1 = d[:, 5].long() - ims = [] - for j, a in enumerate(d): # per item - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] - im = cv2.resize(cutout, (224, 224)) # BGR - # cv2.imwrite('test%i.jpg' % j, cutout) - - im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 - im /= 255.0 # 0 - 255 to 0.0 - 1.0 - ims.append(im) - - pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction - x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections - - return x - - -def increment_path(path, exist_ok=True, sep=''): - # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
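- # If the path does not exist, or exists with exist_ok=True, it is returned
- # unchanged; otherwise the next free index (max existing index + 1, starting
- # at 2) is appended. E.g. with runs/exp and runs/exp2 already on disk,
- # increment_path('runs/exp', exist_ok=False) returns 'runs/exp3'.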
- path = Path(path) # os-agnostic - if (path.exists() and exist_ok) or (not path.exists()): - return str(path) - else: - dirs = glob.glob(f"{path}{sep}*") # similar paths - matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - i = [int(m.groups()[0]) for m in matches if m] # indices - n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}" # update path diff --git a/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/musicautobot/utils/lamb.py b/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/musicautobot/utils/lamb.py deleted file mode 100644 index 867fe3455f5f47690a6a2de457efd0cae515e110..0000000000000000000000000000000000000000 --- a/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/musicautobot/utils/lamb.py +++ /dev/null @@ -1,106 +0,0 @@ -# SOURCE: https://github.com/cybertronai/pytorch-lamb/ - -import collections -import math - -import torch -from torch.optim import Optimizer - - -class Lamb(Optimizer): - r"""Implements Lamb algorithm. - - It has been proposed in `Reducing BERT Pre-Training Time from 3 Days to 76 Minutes`_. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-3) - betas (Tuple[float, float], optional): coefficients used for computing - running averages of gradient and its square (default: (0.9, 0.999)) - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - adam (bool, optional): always use trust ratio = 1, which turns this into - Adam. Useful for comparison purposes. - - .. _Reducing BERT Pre-Training Time from 3 Days to 76 Minutes: - https://arxiv.org/abs/1904.00962 - """ - - def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-4, - weight_decay=0, adam=False): - if not 0.0 <= lr: - raise ValueError("Invalid learning rate: {}".format(lr)) - if not 0.0 <= eps: - raise ValueError("Invalid epsilon value: {}".format(eps)) - if not 0.0 <= betas[0] < 1.0: - raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) - if not 0.0 <= betas[1] < 1.0: - raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) - defaults = dict(lr=lr, betas=betas, eps=eps, - weight_decay=weight_decay) - self.adam = adam - super(Lamb, self).__init__(params, defaults) - - def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. 
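-
- Each parameter receives an Adam-style step scaled by a layer-wise trust
- ratio min(rms(weight) / rms(adam_step), 10); the ratio falls back to 1
- when either term is zero, and is fixed to 1 when adam=True.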
- """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - if grad.is_sparse: - raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') - - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - # Exponential moving average of gradient values - state['exp_avg'] = torch.zeros_like(p.data) - # Exponential moving average of squared gradient values - state['exp_avg_sq'] = torch.zeros_like(p.data) - - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - state['step'] += 1 - - if group['weight_decay'] != 0: - grad.add_(group['weight_decay'], p.data) - - # Decay the first and second moment running average coefficient - exp_avg.mul_(beta1).add_(1 - beta1, grad) - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - denom = exp_avg_sq.sqrt().add_(group['eps']) - - bias_correction1 = 1 - beta1 ** state['step'] - bias_correction2 = 1 - beta2 ** state['step'] - # Apply bias to lr to avoid broadcast. - step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 - - adam_step = exp_avg / denom - # L2 norm uses sum, but here since we're dividing, use mean to avoid overflow. - r1 = p.data.pow(2).mean().sqrt() - r2 = adam_step.pow(2).mean().sqrt() - r = 1 if r1 == 0 or r2 == 0 else min(r1/r2, 10) - state['r1'] = r1 - state['r2'] = r2 - state['r'] = r - if self.adam: - r = 1 - - p.data.add_(-step_size * r, adam_step) - - return loss diff --git a/spaces/kepl/gpt/g4f/__init__.py b/spaces/kepl/gpt/g4f/__init__.py deleted file mode 100644 index a0b4bac6aa4de9c0449095a3874c2cb9716169d7..0000000000000000000000000000000000000000 --- a/spaces/kepl/gpt/g4f/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -import sys -from . import Provider -from g4f.models import Model, ModelUtils - - -class ChatCompletion: - @staticmethod - def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs): - kwargs['auth'] = auth - - if provider and provider.needs_auth and not auth: - print( - f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." 
param)', file=sys.stderr) - sys.exit(1) - - try: - if isinstance(model, str): - try: - model = ModelUtils.convert[model] - except KeyError: - raise Exception(f'The model: {model} does not exist') - - engine = model.best_provider if not provider else provider - - if not engine.supports_stream and stream == True: - print( - f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr) - sys.exit(1) - - print(f'Using {engine.__name__} provider') - - return (engine._create_completion(model.name, messages, stream, **kwargs) - if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs))) - except TypeError as e: - print(e) - arg: str = str(e).split("'")[1] - print( - f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr) - sys.exit(1) diff --git a/spaces/kevinwang676/Bark-with-Voice-Cloning/cloning/__init__.py b/spaces/kevinwang676/Bark-with-Voice-Cloning/cloning/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/dataset.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/dataset.py deleted file mode 100644 index 96bbb8bb6da99122f350bc8e1a6390245840e32b..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/dataset.py +++ /dev/null @@ -1,124 +0,0 @@ -import numbers -import os -import queue as Queue -import threading - -import mxnet as mx -import numpy as np -import torch -from torch.utils.data import DataLoader, Dataset -from torchvision import transforms - - -class BackgroundGenerator(threading.Thread): - def __init__(self, generator, local_rank, max_prefetch=6): - super(BackgroundGenerator, self).__init__() - self.queue = Queue.Queue(max_prefetch) - self.generator = generator - self.local_rank = local_rank - self.daemon = True - self.start() - - def run(self): - torch.cuda.set_device(self.local_rank) - for item in self.generator: - self.queue.put(item) - self.queue.put(None) - - def next(self): - next_item = self.queue.get() - if next_item is None: - raise StopIteration - return next_item - - def __next__(self): - return self.next() - - def __iter__(self): - return self - - -class DataLoaderX(DataLoader): - - def __init__(self, local_rank, **kwargs): - super(DataLoaderX, self).__init__(**kwargs) - self.stream = torch.cuda.Stream(local_rank) - self.local_rank = local_rank - - def __iter__(self): - self.iter = super(DataLoaderX, self).__iter__() - self.iter = BackgroundGenerator(self.iter, self.local_rank) - self.preload() - return self - - def preload(self): - self.batch = next(self.iter, None) - if self.batch is None: - return None - with torch.cuda.stream(self.stream): - for k in range(len(self.batch)): - self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True) - - def __next__(self): - torch.cuda.current_stream().wait_stream(self.stream) - batch = self.batch - if batch is None: - raise StopIteration - self.preload() - return batch - - -class MXFaceDataset(Dataset): - def __init__(self, root_dir, local_rank): - super(MXFaceDataset, self).__init__() - self.transform = transforms.Compose( - [transforms.ToPILImage(), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ]) - self.root_dir = root_dir - self.local_rank = local_rank - path_imgrec = os.path.join(root_dir, 
'train.rec') - path_imgidx = os.path.join(root_dir, 'train.idx') - self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') - s = self.imgrec.read_idx(0) - header, _ = mx.recordio.unpack(s) - if header.flag > 0: - self.header0 = (int(header.label[0]), int(header.label[1])) - self.imgidx = np.array(range(1, int(header.label[0]))) - else: - self.imgidx = np.array(list(self.imgrec.keys)) - - def __getitem__(self, index): - idx = self.imgidx[index] - s = self.imgrec.read_idx(idx) - header, img = mx.recordio.unpack(s) - label = header.label - if not isinstance(label, numbers.Number): - label = label[0] - label = torch.tensor(label, dtype=torch.long) - sample = mx.image.imdecode(img).asnumpy() - if self.transform is not None: - sample = self.transform(sample) - return sample, label - - def __len__(self): - return len(self.imgidx) - - -class SyntheticDataset(Dataset): - def __init__(self, local_rank): - super(SyntheticDataset, self).__init__() - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32) - img = np.transpose(img, (2, 0, 1)) - img = torch.from_numpy(img).squeeze(0).float() - img = ((img / 255) - 0.5) / 0.5 - self.img = img - self.label = 1 - - def __getitem__(self, index): - return self.img, self.label - - def __len__(self): - return 1000000 diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/skin_mask.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/skin_mask.py deleted file mode 100644 index a8a74e4c3b40d13b0258b83a12f56321a85bb179..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/skin_mask.py +++ /dev/null @@ -1,125 +0,0 @@ -"""This script is to generate skin attention mask for Deep3DFaceRecon_pytorch -""" - -import math -import numpy as np -import os -import cv2 - -class GMM: - def __init__(self, dim, num, w, mu, cov, cov_det, cov_inv): - self.dim = dim # feature dimension - self.num = num # number of Gaussian components - self.w = w # weights of Gaussian components (a list of scalars) - self.mu= mu # mean of Gaussian components (a list of 1xdim vectors) - self.cov = cov # covariance matrix of Gaussian components (a list of dimxdim matrices) - self.cov_det = cov_det # pre-computed determinet of covariance matrices (a list of scalars) - self.cov_inv = cov_inv # pre-computed inverse covariance matrices (a list of dimxdim matrices) - - self.factor = [0]*num - for i in range(self.num): - self.factor[i] = (2*math.pi)**(self.dim/2) * self.cov_det[i]**0.5 - - def likelihood(self, data): - assert(data.shape[1] == self.dim) - N = data.shape[0] - lh = np.zeros(N) - - for i in range(self.num): - data_ = data - self.mu[i] - - tmp = np.matmul(data_,self.cov_inv[i]) * data_ - tmp = np.sum(tmp,axis=1) - power = -0.5 * tmp - - p = np.array([math.exp(power[j]) for j in range(N)]) - p = p/self.factor[i] - lh += p*self.w[i] - - return lh - - -def _rgb2ycbcr(rgb): - m = np.array([[65.481, 128.553, 24.966], - [-37.797, -74.203, 112], - [112, -93.786, -18.214]]) - shape = rgb.shape - rgb = rgb.reshape((shape[0] * shape[1], 3)) - ycbcr = np.dot(rgb, m.transpose() / 255.) - ycbcr[:, 0] += 16. - ycbcr[:, 1:] += 128. 
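- # The +16 luma and +128 chroma offsets complete the standard ITU-R BT.601
- # RGB -> YCbCr conversion; the skin/non-skin GMMs below operate in this
- # colour space.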
- return ycbcr.reshape(shape) - - -def _bgr2ycbcr(bgr): - rgb = bgr[..., ::-1] - return _rgb2ycbcr(rgb) - - -gmm_skin_w = [0.24063933, 0.16365987, 0.26034665, 0.33535415] -gmm_skin_mu = [np.array([113.71862, 103.39613, 164.08226]), - np.array([150.19858, 105.18467, 155.51428]), - np.array([183.92976, 107.62468, 152.71820]), - np.array([114.90524, 113.59782, 151.38217])] -gmm_skin_cov_det = [5692842.5, 5851930.5, 2329131., 1585971.] -gmm_skin_cov_inv = [np.array([[0.0019472069, 0.0020450759, -0.00060243998],[0.0020450759, 0.017700525, 0.0051420014],[-0.00060243998, 0.0051420014, 0.0081308950]]), - np.array([[0.0027110141, 0.0011036990, 0.0023122299],[0.0011036990, 0.010707724, 0.010742856],[0.0023122299, 0.010742856, 0.017481629]]), - np.array([[0.0048026871, 0.00022935172, 0.0077668377],[0.00022935172, 0.011729696, 0.0081661865],[0.0077668377, 0.0081661865, 0.025374353]]), - np.array([[0.0011989699, 0.0022453172, -0.0010748957],[0.0022453172, 0.047758564, 0.020332102],[-0.0010748957, 0.020332102, 0.024502251]])] - -gmm_skin = GMM(3, 4, gmm_skin_w, gmm_skin_mu, [], gmm_skin_cov_det, gmm_skin_cov_inv) - -gmm_nonskin_w = [0.12791070, 0.31130761, 0.34245777, 0.21832393] -gmm_nonskin_mu = [np.array([99.200851, 112.07533, 140.20602]), - np.array([110.91392, 125.52969, 130.19237]), - np.array([129.75864, 129.96107, 126.96808]), - np.array([112.29587, 128.85121, 129.05431])] -gmm_nonskin_cov_det = [458703648., 6466488., 90611376., 133097.63] -gmm_nonskin_cov_inv = [np.array([[0.00085371657, 0.00071197288, 0.00023958916],[0.00071197288, 0.0025935620, 0.00076557708],[0.00023958916, 0.00076557708, 0.0015042332]]), - np.array([[0.00024650150, 0.00045542428, 0.00015019422],[0.00045542428, 0.026412144, 0.018419769],[0.00015019422, 0.018419769, 0.037497383]]), - np.array([[0.00037054974, 0.00038146760, 0.00040408765],[0.00038146760, 0.0085505722, 0.0079136286],[0.00040408765, 0.0079136286, 0.010982352]]), - np.array([[0.00013709733, 0.00051228428, 0.00012777430],[0.00051228428, 0.28237113, 0.10528370],[0.00012777430, 0.10528370, 0.23468947]])] - -gmm_nonskin = GMM(3, 4, gmm_nonskin_w, gmm_nonskin_mu, [], gmm_nonskin_cov_det, gmm_nonskin_cov_inv) - -prior_skin = 0.8 -prior_nonskin = 1 - prior_skin - - -# calculate skin attention mask -def skinmask(imbgr): - im = _bgr2ycbcr(imbgr) - - data = im.reshape((-1,3)) - - lh_skin = gmm_skin.likelihood(data) - lh_nonskin = gmm_nonskin.likelihood(data) - - tmp1 = prior_skin * lh_skin - tmp2 = prior_nonskin * lh_nonskin - post_skin = tmp1 / (tmp1+tmp2) # posterior probability - - post_skin = post_skin.reshape((im.shape[0],im.shape[1])) - - post_skin = np.round(post_skin*255) - post_skin = post_skin.astype(np.uint8) - post_skin = np.tile(np.expand_dims(post_skin,2),[1,1,3]) # reshape to H*W*3 - - return post_skin - - -def get_skin_mask(img_path): - print('generating skin masks......') - names = [i for i in sorted(os.listdir( - img_path)) if 'jpg' in i or 'png' in i or 'jpeg' in i or 'PNG' in i] - save_path = os.path.join(img_path, 'mask') - if not os.path.isdir(save_path): - os.makedirs(save_path) - - for i in range(0, len(names)): - name = names[i] - print('%05d' % (i), ' ', name) - full_image_name = os.path.join(img_path, name) - img = cv2.imread(full_image_name).astype(np.float32) - skin_img = skinmask(img) - cv2.imwrite(os.path.join(save_path, name), skin_img.astype(np.uint8)) diff --git a/spaces/kevinwang676/M4Singer/tasks/tts/tts.py b/spaces/kevinwang676/M4Singer/tasks/tts/tts.py deleted file mode 100644 index 
f803c1e738137cb1eca19a1943196abd2884c0a5..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/M4Singer/tasks/tts/tts.py +++ /dev/null @@ -1,131 +0,0 @@ -from multiprocessing.pool import Pool - -import matplotlib - -from utils.pl_utils import data_loader -from utils.training_utils import RSQRTSchedule -from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder -from modules.fastspeech.pe import PitchExtractor - -matplotlib.use('Agg') -import os -import numpy as np -from tqdm import tqdm -import torch.distributed as dist - -from tasks.base_task import BaseTask -from utils.hparams import hparams -from utils.text_encoder import TokenTextEncoder -import json - -import torch -import torch.optim -import torch.utils.data -import utils - - - -class TtsTask(BaseTask): - def __init__(self, *args, **kwargs): - self.vocoder = None - self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir']) - self.padding_idx = self.phone_encoder.pad() - self.eos_idx = self.phone_encoder.eos() - self.seg_idx = self.phone_encoder.seg() - self.saving_result_pool = None - self.saving_results_futures = None - self.stats = {} - super().__init__(*args, **kwargs) - - def build_scheduler(self, optimizer): - return RSQRTSchedule(optimizer) - - def build_optimizer(self, model): - self.optimizer = optimizer = torch.optim.AdamW( - model.parameters(), - lr=hparams['lr']) - return optimizer - - def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None, - required_batch_size_multiple=-1, endless=False, batch_by_size=True): - devices_cnt = torch.cuda.device_count() - if devices_cnt == 0: - devices_cnt = 1 - if required_batch_size_multiple == -1: - required_batch_size_multiple = devices_cnt - - def shuffle_batches(batches): - np.random.shuffle(batches) - return batches - - if max_tokens is not None: - max_tokens *= devices_cnt - if max_sentences is not None: - max_sentences *= devices_cnt - indices = dataset.ordered_indices() - if batch_by_size: - batch_sampler = utils.batch_by_size( - indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, - required_batch_size_multiple=required_batch_size_multiple, - ) - else: - batch_sampler = [] - for i in range(0, len(indices), max_sentences): - batch_sampler.append(indices[i:i + max_sentences]) - - if shuffle: - batches = shuffle_batches(list(batch_sampler)) - if endless: - batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))] - else: - batches = batch_sampler - if endless: - batches = [b for _ in range(1000) for b in batches] - num_workers = dataset.num_workers - if self.trainer.use_ddp: - num_replicas = dist.get_world_size() - rank = dist.get_rank() - batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0] - return torch.utils.data.DataLoader(dataset, - collate_fn=dataset.collater, - batch_sampler=batches, - num_workers=num_workers, - pin_memory=False) - - def build_phone_encoder(self, data_dir): - phone_list_file = os.path.join(data_dir, 'phone_set.json') - - phone_list = json.load(open(phone_list_file)) - return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') - - def build_optimizer(self, model): - self.optimizer = optimizer = torch.optim.AdamW( - model.parameters(), - lr=hparams['lr']) - return optimizer - - def test_start(self): - self.saving_result_pool = Pool(8) - self.saving_results_futures = [] - self.vocoder: BaseVocoder = get_vocoder_cls(hparams)() - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - self.pe = 
PitchExtractor().cuda() - utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) - self.pe.eval() - def test_end(self, outputs): - self.saving_result_pool.close() - [f.get() for f in tqdm(self.saving_results_futures)] - self.saving_result_pool.join() - return {} - - ########## - # utils - ########## - def weights_nonzero_speech(self, target): - # target : B x T x mel - # Assign weight 1.0 to all labels except for padding (id=0). - dim = target.size(-1) - return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim) - -if __name__ == '__main__': - TtsTask.start() diff --git a/spaces/kevinwang676/VoiceChangers/src/facerender/modules/discriminator.py b/spaces/kevinwang676/VoiceChangers/src/facerender/modules/discriminator.py deleted file mode 100644 index d4459b07cb075c9f9d345f9b3dffc02cd859313b..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChangers/src/facerender/modules/discriminator.py +++ /dev/null @@ -1,90 +0,0 @@ -from torch import nn -import torch.nn.functional as F -from facerender.modules.util import kp2gaussian -import torch - - -class DownBlock2d(nn.Module): - """ - Simple block for processing video (encoder). - """ - - def __init__(self, in_features, out_features, norm=False, kernel_size=4, pool=False, sn=False): - super(DownBlock2d, self).__init__() - self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size) - - if sn: - self.conv = nn.utils.spectral_norm(self.conv) - - if norm: - self.norm = nn.InstanceNorm2d(out_features, affine=True) - else: - self.norm = None - self.pool = pool - - def forward(self, x): - out = x - out = self.conv(out) - if self.norm: - out = self.norm(out) - out = F.leaky_relu(out, 0.2) - if self.pool: - out = F.avg_pool2d(out, (2, 2)) - return out - - -class Discriminator(nn.Module): - """ - Discriminator similar to Pix2Pix - """ - - def __init__(self, num_channels=3, block_expansion=64, num_blocks=4, max_features=512, - sn=False, **kwargs): - super(Discriminator, self).__init__() - - down_blocks = [] - for i in range(num_blocks): - down_blocks.append( - DownBlock2d(num_channels if i == 0 else min(max_features, block_expansion * (2 ** i)), - min(max_features, block_expansion * (2 ** (i + 1))), - norm=(i != 0), kernel_size=4, pool=(i != num_blocks - 1), sn=sn)) - - self.down_blocks = nn.ModuleList(down_blocks) - self.conv = nn.Conv2d(self.down_blocks[-1].conv.out_channels, out_channels=1, kernel_size=1) - if sn: - self.conv = nn.utils.spectral_norm(self.conv) - - def forward(self, x): - feature_maps = [] - out = x - - for down_block in self.down_blocks: - feature_maps.append(down_block(out)) - out = feature_maps[-1] - prediction_map = self.conv(out) - - return feature_maps, prediction_map - - -class MultiScaleDiscriminator(nn.Module): - """ - Multi-scale (scale) discriminator - """ - - def __init__(self, scales=(), **kwargs): - super(MultiScaleDiscriminator, self).__init__() - self.scales = scales - discs = {} - for scale in scales: - discs[str(scale).replace('.', '-')] = Discriminator(**kwargs) - self.discs = nn.ModuleDict(discs) - - def forward(self, x): - out_dict = {} - for scale, disc in self.discs.items(): - scale = str(scale).replace('-', '.') - key = 'prediction_' + scale - feature_maps, prediction_map = disc(x[key]) - out_dict['feature_maps_' + scale] = feature_maps - out_dict['prediction_map_' + scale] = prediction_map - return out_dict diff --git a/spaces/kingfisher/smart-search/README.md b/spaces/kingfisher/smart-search/README.md deleted file mode 100644 
index 5e5de6e111acb6ec045edcf4abd995b523a004f6..0000000000000000000000000000000000000000 --- a/spaces/kingfisher/smart-search/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Smart Search -emoji: 🐠 -colorFrom: yellow -colorTo: purple -sdk: streamlit -app_file: app.py -pinned: false -license: cc-by-nc-sa-4.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/encoder.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/encoder.py deleted file mode 100644 index 6b92c0116ef1e207f7f7c94e9162cf0f5b86db7b..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/encoder.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2019 Shigeki Karita -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Encoder definition.""" - -import logging -import torch - -from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule -from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer -from espnet.nets.pytorch_backend.nets_utils import get_activation -from espnet.nets.pytorch_backend.transducer.vgg import VGG2L -from espnet.nets.pytorch_backend.transformer.attention import ( - MultiHeadedAttention, # noqa: H301 - RelPositionMultiHeadedAttention, # noqa: H301 -) -from espnet.nets.pytorch_backend.transformer.embedding import ( - PositionalEncoding, # noqa: H301 - ScaledPositionalEncoding, # noqa: H301 - RelPositionalEncoding, # noqa: H301 -) -from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm -from espnet.nets.pytorch_backend.transformer.multi_layer_conv import Conv1dLinear -from espnet.nets.pytorch_backend.transformer.multi_layer_conv import MultiLayeredConv1d -from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import ( - PositionwiseFeedForward, # noqa: H301 -) -from espnet.nets.pytorch_backend.transformer.repeat import repeat -from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling - - -class Encoder(torch.nn.Module): - """Conformer encoder module. 
- - :param int idim: input dim - :param int attention_dim: dimention of attention - :param int attention_heads: the number of heads of multi head attention - :param int linear_units: the number of units of position-wise feed forward - :param int num_blocks: the number of decoder blocks - :param float dropout_rate: dropout rate - :param float attention_dropout_rate: dropout rate in attention - :param float positional_dropout_rate: dropout rate after adding positional encoding - :param str or torch.nn.Module input_layer: input layer type - :param bool normalize_before: whether to use layer_norm before the first block - :param bool concat_after: whether to concat attention layer's input and output - if True, additional linear will be applied. - i.e. x -> x + linear(concat(x, att(x))) - if False, no additional linear will be applied. i.e. x -> x + att(x) - :param str positionwise_layer_type: linear of conv1d - :param int positionwise_conv_kernel_size: kernel size of positionwise conv1d layer - :param str encoder_pos_enc_layer_type: encoder positional encoding layer type - :param str encoder_attn_layer_type: encoder attention layer type - :param str activation_type: encoder activation function type - :param bool macaron_style: whether to use macaron style for positionwise layer - :param bool use_cnn_module: whether to use convolution module - :param int cnn_module_kernel: kernerl size of convolution module - :param int padding_idx: padding_idx for input_layer=embed - """ - - def __init__( - self, - idim, - attention_dim=256, - attention_heads=4, - linear_units=2048, - num_blocks=6, - dropout_rate=0.1, - positional_dropout_rate=0.1, - attention_dropout_rate=0.0, - input_layer="conv2d", - normalize_before=True, - concat_after=False, - positionwise_layer_type="linear", - positionwise_conv_kernel_size=1, - macaron_style=False, - pos_enc_layer_type="abs_pos", - selfattention_layer_type="selfattn", - activation_type="swish", - use_cnn_module=False, - cnn_module_kernel=31, - padding_idx=-1, - ): - """Construct an Encoder object.""" - super(Encoder, self).__init__() - - activation = get_activation(activation_type) - if pos_enc_layer_type == "abs_pos": - pos_enc_class = PositionalEncoding - elif pos_enc_layer_type == "scaled_abs_pos": - pos_enc_class = ScaledPositionalEncoding - elif pos_enc_layer_type == "rel_pos": - assert selfattention_layer_type == "rel_selfattn" - pos_enc_class = RelPositionalEncoding - else: - raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) - - if input_layer == "linear": - self.embed = torch.nn.Sequential( - torch.nn.Linear(idim, attention_dim), - torch.nn.LayerNorm(attention_dim), - torch.nn.Dropout(dropout_rate), - pos_enc_class(attention_dim, positional_dropout_rate), - ) - elif input_layer == "conv2d": - self.embed = Conv2dSubsampling( - idim, - attention_dim, - dropout_rate, - pos_enc_class(attention_dim, positional_dropout_rate), - ) - elif input_layer == "vgg2l": - self.embed = VGG2L(idim, attention_dim) - elif input_layer == "embed": - self.embed = torch.nn.Sequential( - torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), - pos_enc_class(attention_dim, positional_dropout_rate), - ) - elif isinstance(input_layer, torch.nn.Module): - self.embed = torch.nn.Sequential( - input_layer, - pos_enc_class(attention_dim, positional_dropout_rate), - ) - elif input_layer is None: - self.embed = torch.nn.Sequential( - pos_enc_class(attention_dim, positional_dropout_rate) - ) - else: - raise ValueError("unknown input_layer: " + input_layer) - 
self.normalize_before = normalize_before - if positionwise_layer_type == "linear": - positionwise_layer = PositionwiseFeedForward - positionwise_layer_args = ( - attention_dim, - linear_units, - dropout_rate, - activation, - ) - elif positionwise_layer_type == "conv1d": - positionwise_layer = MultiLayeredConv1d - positionwise_layer_args = ( - attention_dim, - linear_units, - positionwise_conv_kernel_size, - dropout_rate, - ) - elif positionwise_layer_type == "conv1d-linear": - positionwise_layer = Conv1dLinear - positionwise_layer_args = ( - attention_dim, - linear_units, - positionwise_conv_kernel_size, - dropout_rate, - ) - else: - raise NotImplementedError("Support only linear or conv1d.") - - if selfattention_layer_type == "selfattn": - logging.info("encoder self-attention layer type = self-attention") - encoder_selfattn_layer = MultiHeadedAttention - encoder_selfattn_layer_args = ( - attention_heads, - attention_dim, - attention_dropout_rate, - ) - elif selfattention_layer_type == "rel_selfattn": - assert pos_enc_layer_type == "rel_pos" - encoder_selfattn_layer = RelPositionMultiHeadedAttention - encoder_selfattn_layer_args = ( - attention_heads, - attention_dim, - attention_dropout_rate, - ) - else: - raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) - - convolution_layer = ConvolutionModule - convolution_layer_args = (attention_dim, cnn_module_kernel, activation) - - self.encoders = repeat( - num_blocks, - lambda lnum: EncoderLayer( - attention_dim, - encoder_selfattn_layer(*encoder_selfattn_layer_args), - positionwise_layer(*positionwise_layer_args), - positionwise_layer(*positionwise_layer_args) if macaron_style else None, - convolution_layer(*convolution_layer_args) if use_cnn_module else None, - dropout_rate, - normalize_before, - concat_after, - ), - ) - if self.normalize_before: - self.after_norm = LayerNorm(attention_dim) - - def forward(self, xs, masks): - """Encode input sequence. 
- - :param torch.Tensor xs: input tensor - :param torch.Tensor masks: input mask - :return: position embedded tensor and mask - :rtype Tuple[torch.Tensor, torch.Tensor]: - """ - if isinstance(self.embed, (Conv2dSubsampling, VGG2L)): - xs, masks = self.embed(xs, masks) - else: - xs = self.embed(xs) - - xs, masks = self.encoders(xs, masks) - if isinstance(xs, tuple): - xs = xs[0] - - if self.normalize_before: - xs = self.after_norm(xs) - return xs, masks diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py deleted file mode 100644 index c60f62a7cdf3f5c5096a7a7e725e8268fddcb057..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py +++ /dev/null @@ -1,68 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='CascadeEncoderDecoder', - num_stages=2, - pretrained='open-mmlab://msra/hrnetv2_w18', - backbone=dict( - type='HRNet', - norm_cfg=norm_cfg, - norm_eval=False, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - ], - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/pointrend_r50.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/pointrend_r50.py deleted file mode 100644 index 9d323dbf9466d41e0800aa57ef84045f3d874bdf..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/pointrend_r50.py +++ /dev/null @@ -1,56 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='CascadeEncoderDecoder', - num_stages=2, - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 1, 1), - strides=(1, 2, 2, 2), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=4), - decode_head=[ - dict( - type='FPNHead', - in_channels=[256, 256, 256, 256], - in_index=[0, 1, 2, 3], - feature_strides=[4, 8, 16, 32], - channels=128, - dropout_ratio=-1, - 
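- # num_classes=19 below matches the Cityscapes label set; override it for
- # other datasets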
num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - dict( - type='PointHead', - in_channels=[256], - in_index=[0], - channels=256, - num_fcs=3, - coarse_pred_each_layer=True, - dropout_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ], - # model training and testing settings - train_cfg=dict( - num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75), - test_cfg=dict( - mode='whole', - subdivision_steps=2, - subdivision_num_points=8196, - scale_factor=2)) diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/point_head.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/point_head.py deleted file mode 100644 index 3342aa28bb8d264b2c3d01cbf5098d145943c193..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/point_head.py +++ /dev/null @@ -1,349 +0,0 @@ -# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa - -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule, normal_init -from annotator.uniformer.mmcv.ops import point_sample - -from annotator.uniformer.mmseg.models.builder import HEADS -from annotator.uniformer.mmseg.ops import resize -from ..losses import accuracy -from .cascade_decode_head import BaseCascadeDecodeHead - - -def calculate_uncertainty(seg_logits): - """Estimate uncertainty based on seg logits. - - For each location of the prediction ``seg_logits`` we estimate - uncertainty as the difference between top first and top second - predicted logits. - - Args: - seg_logits (Tensor): Semantic segmentation logits, - shape (batch_size, num_classes, height, width). - - Returns: - scores (Tensor): T uncertainty scores with the most uncertain - locations having the highest uncertainty score, shape ( - batch_size, 1, height, width) - """ - top2_scores = torch.topk(seg_logits, k=2, dim=1)[0] - return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) - - -@HEADS.register_module() -class PointHead(BaseCascadeDecodeHead): - """A mask point head use in PointRend. - - ``PointHead`` use shared multi-layer perceptron (equivalent to - nn.Conv1d) to predict the logit of input points. The fine-grained feature - and coarse feature will be concatenate together for predication. - - Args: - num_fcs (int): Number of fc layers in the head. Default: 3. - in_channels (int): Number of input channels. Default: 256. - fc_channels (int): Number of fc channels. Default: 256. - num_classes (int): Number of classes for logits. Default: 80. - class_agnostic (bool): Whether use class agnostic classification. - If so, the output channels of logits will be 1. Default: False. - coarse_pred_each_layer (bool): Whether concatenate coarse feature with - the output of each fc layer. Default: True. - conv_cfg (dict|None): Dictionary to construct and config conv layer. - Default: dict(type='Conv1d')) - norm_cfg (dict|None): Dictionary to construct and config norm layer. - Default: None. - loss_point (dict): Dictionary to construct and config loss layer of - point head. Default: dict(type='CrossEntropyLoss', use_mask=True, - loss_weight=1.0). 
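-
- At inference time the coarse prediction is upsampled for
- test_cfg.subdivision_steps rounds and only the subdivision_num_points most
- uncertain locations are re-classified by this head (see forward_test).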
- """ - - def __init__(self, - num_fcs=3, - coarse_pred_each_layer=True, - conv_cfg=dict(type='Conv1d'), - norm_cfg=None, - act_cfg=dict(type='ReLU', inplace=False), - **kwargs): - super(PointHead, self).__init__( - input_transform='multiple_select', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - **kwargs) - - self.num_fcs = num_fcs - self.coarse_pred_each_layer = coarse_pred_each_layer - - fc_in_channels = sum(self.in_channels) + self.num_classes - fc_channels = self.channels - self.fcs = nn.ModuleList() - for k in range(num_fcs): - fc = ConvModule( - fc_in_channels, - fc_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.fcs.append(fc) - fc_in_channels = fc_channels - fc_in_channels += self.num_classes if self.coarse_pred_each_layer \ - else 0 - self.fc_seg = nn.Conv1d( - fc_in_channels, - self.num_classes, - kernel_size=1, - stride=1, - padding=0) - if self.dropout_ratio > 0: - self.dropout = nn.Dropout(self.dropout_ratio) - delattr(self, 'conv_seg') - - def init_weights(self): - """Initialize weights of classification layer.""" - normal_init(self.fc_seg, std=0.001) - - def cls_seg(self, feat): - """Classify each pixel with fc.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.fc_seg(feat) - return output - - def forward(self, fine_grained_point_feats, coarse_point_feats): - x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1) - for fc in self.fcs: - x = fc(x) - if self.coarse_pred_each_layer: - x = torch.cat((x, coarse_point_feats), dim=1) - return self.cls_seg(x) - - def _get_fine_grained_point_feats(self, x, points): - """Sample from fine grained features. - - Args: - x (list[Tensor]): Feature pyramid from by neck or backbone. - points (Tensor): Point coordinates, shape (batch_size, - num_points, 2). - - Returns: - fine_grained_feats (Tensor): Sampled fine grained feature, - shape (batch_size, sum(channels of x), num_points). - """ - - fine_grained_feats_list = [ - point_sample(_, points, align_corners=self.align_corners) - for _ in x - ] - if len(fine_grained_feats_list) > 1: - fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1) - else: - fine_grained_feats = fine_grained_feats_list[0] - - return fine_grained_feats - - def _get_coarse_point_feats(self, prev_output, points): - """Sample from fine grained features. - - Args: - prev_output (list[Tensor]): Prediction of previous decode head. - points (Tensor): Point coordinates, shape (batch_size, - num_points, 2). - - Returns: - coarse_feats (Tensor): Sampled coarse feature, shape (batch_size, - num_classes, num_points). - """ - - coarse_feats = point_sample( - prev_output, points, align_corners=self.align_corners) - - return coarse_feats - - def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, - train_cfg): - """Forward function for training. - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - train_cfg (dict): The training config. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - x = self._transform_inputs(inputs) - with torch.no_grad(): - points = self.get_points_train( - prev_output, calculate_uncertainty, cfg=train_cfg) - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, points) - coarse_point_feats = self._get_coarse_point_feats(prev_output, points) - point_logits = self.forward(fine_grained_point_feats, - coarse_point_feats) - point_label = point_sample( - gt_semantic_seg.float(), - points, - mode='nearest', - align_corners=self.align_corners) - point_label = point_label.squeeze(1).long() - - losses = self.losses(point_logits, point_label) - - return losses - - def forward_test(self, inputs, prev_output, img_metas, test_cfg): - """Forward function for testing. - - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - test_cfg (dict): The testing config. - - Returns: - Tensor: Output segmentation map. - """ - - x = self._transform_inputs(inputs) - refined_seg_logits = prev_output.clone() - for _ in range(test_cfg.subdivision_steps): - refined_seg_logits = resize( - refined_seg_logits, - scale_factor=test_cfg.scale_factor, - mode='bilinear', - align_corners=self.align_corners) - batch_size, channels, height, width = refined_seg_logits.shape - point_indices, points = self.get_points_test( - refined_seg_logits, calculate_uncertainty, cfg=test_cfg) - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, points) - coarse_point_feats = self._get_coarse_point_feats( - prev_output, points) - point_logits = self.forward(fine_grained_point_feats, - coarse_point_feats) - - point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) - refined_seg_logits = refined_seg_logits.reshape( - batch_size, channels, height * width) - refined_seg_logits = refined_seg_logits.scatter_( - 2, point_indices, point_logits) - refined_seg_logits = refined_seg_logits.view( - batch_size, channels, height, width) - - return refined_seg_logits - - def losses(self, point_logits, point_label): - """Compute segmentation loss.""" - loss = dict() - loss['loss_point'] = self.loss_decode( - point_logits, point_label, ignore_index=self.ignore_index) - loss['acc_point'] = accuracy(point_logits, point_label) - return loss - - def get_points_train(self, seg_logits, uncertainty_func, cfg): - """Sample points for training. - - Sample points in [0, 1] x [0, 1] coordinate space based on their - uncertainty. The uncertainties are calculated for each point using - 'uncertainty_func' function that takes point's logit prediction as - input. - - Args: - seg_logits (Tensor): Semantic segmentation logits, shape ( - batch_size, num_classes, height, width). - uncertainty_func (func): uncertainty calculation function. - cfg (dict): Training config of point head. - - Returns: - point_coords (Tensor): A tensor of shape (batch_size, num_points, - 2) that contains the coordinates of ``num_points`` sampled - points. 
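-
- In short: num_points * oversample_ratio candidate points are sampled
- uniformly, the importance_sample_ratio * num_points candidates with the
- highest uncertainty are kept, and the remaining points are drawn uniformly
- at random.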
- """ - num_points = cfg.num_points - oversample_ratio = cfg.oversample_ratio - importance_sample_ratio = cfg.importance_sample_ratio - assert oversample_ratio >= 1 - assert 0 <= importance_sample_ratio <= 1 - batch_size = seg_logits.shape[0] - num_sampled = int(num_points * oversample_ratio) - point_coords = torch.rand( - batch_size, num_sampled, 2, device=seg_logits.device) - point_logits = point_sample(seg_logits, point_coords) - # It is crucial to calculate uncertainty based on the sampled - # prediction value for the points. Calculating uncertainties of the - # coarse predictions first and sampling them for points leads to - # incorrect results. To illustrate this: assume uncertainty func( - # logits)=-abs(logits), a sampled point between two coarse - # predictions with -1 and 1 logits has 0 logits, and therefore 0 - # uncertainty value. However, if we calculate uncertainties for the - # coarse predictions first, both will have -1 uncertainty, - # and sampled point will get -1 uncertainty. - point_uncertainties = uncertainty_func(point_logits) - num_uncertain_points = int(importance_sample_ratio * num_points) - num_random_points = num_points - num_uncertain_points - idx = torch.topk( - point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] - shift = num_sampled * torch.arange( - batch_size, dtype=torch.long, device=seg_logits.device) - idx += shift[:, None] - point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( - batch_size, num_uncertain_points, 2) - if num_random_points > 0: - rand_point_coords = torch.rand( - batch_size, num_random_points, 2, device=seg_logits.device) - point_coords = torch.cat((point_coords, rand_point_coords), dim=1) - return point_coords - - def get_points_test(self, seg_logits, uncertainty_func, cfg): - """Sample points for testing. - - Find ``num_points`` most uncertain points from ``uncertainty_map``. - - Args: - seg_logits (Tensor): A tensor of shape (batch_size, num_classes, - height, width) for class-specific or class-agnostic prediction. - uncertainty_func (func): uncertainty calculation function. - cfg (dict): Testing config of point head. - - Returns: - point_indices (Tensor): A tensor of shape (batch_size, num_points) - that contains indices from [0, height x width) of the most - uncertain points. - point_coords (Tensor): A tensor of shape (batch_size, num_points, - 2) that contains [0, 1] x [0, 1] normalized coordinates of the - most uncertain points from the ``height x width`` grid . 
- """ - - num_points = cfg.subdivision_num_points - uncertainty_map = uncertainty_func(seg_logits) - batch_size, _, height, width = uncertainty_map.shape - h_step = 1.0 / height - w_step = 1.0 / width - - uncertainty_map = uncertainty_map.view(batch_size, height * width) - num_points = min(height * width, num_points) - point_indices = uncertainty_map.topk(num_points, dim=1)[1] - point_coords = torch.zeros( - batch_size, - num_points, - 2, - dtype=torch.float, - device=seg_logits.device) - point_coords[:, :, 0] = w_step / 2.0 + (point_indices % - width).float() * w_step - point_coords[:, :, 1] = h_step / 2.0 + (point_indices // - width).float() * h_step - return point_indices, point_coords diff --git a/spaces/kukuhtw/AutoGPT/tests/unit/test_browse_scrape_links.py b/spaces/kukuhtw/AutoGPT/tests/unit/test_browse_scrape_links.py deleted file mode 100644 index 0a3340e7397a997da96b8ab9828954230e1a3c20..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/AutoGPT/tests/unit/test_browse_scrape_links.py +++ /dev/null @@ -1,118 +0,0 @@ -# Generated by CodiumAI - -# Dependencies: -# pip install pytest-mock -import pytest - -from autogpt.commands.web_requests import scrape_links - -""" -Code Analysis - -Objective: -The objective of the 'scrape_links' function is to scrape hyperlinks from a -given URL and return them in a formatted way. - -Inputs: -- url: a string representing the URL to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. Check if the response contains an HTTP error. If it does, return "error". -3. Parse the HTML content of the response using the BeautifulSoup library. -4. Remove any script and style tags from the parsed HTML. -5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function. -6. Format the extracted hyperlinks using the 'format_hyperlinks' function. -7. Return the formatted hyperlinks. - -Outputs: -- A list of formatted hyperlinks. - -Additional aspects: -- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP -requests and parse HTML content, respectively. -- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML. -- The 'format_hyperlinks' function is called to format the extracted hyperlinks. -- The function checks for HTTP errors and returns "error" if any are found. -""" - - -class TestScrapeLinks: - # Tests that the function returns a list of formatted hyperlinks when - # provided with a valid url that returns a webpage with hyperlinks. - def test_valid_url_with_hyperlinks(self): - url = "https://www.google.com" - result = scrape_links(url) - assert len(result) > 0 - assert isinstance(result, list) - assert isinstance(result[0], str) - - # Tests that the function returns correctly formatted hyperlinks when given a valid url. - def test_valid_url(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = ( - "Google" - ) - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL - result = scrape_links("https://www.example.com") - - # Assert that the function returns correctly formatted hyperlinks - assert result == ["Google (https://www.google.com)"] - - # Tests that the function returns "error" when given an invalid url. 
- def test_invalid_url(self, mocker): - # Mock the requests.get() function to return an HTTP error response - mock_response = mocker.Mock() - mock_response.status_code = 404 - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with an invalid URL - result = scrape_links("https://www.invalidurl.com") - - # Assert that the function returns "error" - assert "Error:" in result - - # Tests that the function returns an empty list when the html contains no hyperlinks. - def test_no_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "

<html><body><p>No hyperlinks here</p></body></html>
                    " - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL containing no hyperlinks - result = scrape_links("https://www.example.com") - - # Assert that the function returns an empty list - assert result == [] - - # Tests that scrape_links() correctly extracts and formats hyperlinks from - # a sample HTML containing a few hyperlinks. - def test_scrape_links_with_few_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = """ - - - - - - - - """ - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function being tested - result = scrape_links("https://www.example.com") - - # Assert that the function returns a list of formatted hyperlinks - assert isinstance(result, list) - assert len(result) == 3 - assert result[0] == "Google (https://www.google.com)" - assert result[1] == "GitHub (https://github.com)" - assert result[2] == "CodiumAI (https://www.codium.ai)" diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/F__e_a_t.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/F__e_a_t.py deleted file mode 100644 index fbcd6ca6e7bc0640263ddab74e1e1c89ea61bbfb..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/F__e_a_t.py +++ /dev/null @@ -1,144 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import floatToFixedToStr -from fontTools.misc.textTools import safeEval -from . import DefaultTable -from . import grUtils -import struct - -Feat_hdr_format = """ - > - version: 16.16F -""" - - -class table_F__e_a_t(DefaultTable.DefaultTable): - """The ``Feat`` table is used exclusively by the Graphite shaping engine - to store features and possible settings specified in GDL. Graphite features - determine what rules are applied to transform a glyph stream. 
- - Not to be confused with ``feat``, or the OpenType Layout tables - ``GSUB``/``GPOS``.""" - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.features = {} - - def decompile(self, data, ttFont): - (_, data) = sstruct.unpack2(Feat_hdr_format, data, self) - self.version = float(floatToFixedToStr(self.version, precisionBits=16)) - (numFeats,) = struct.unpack(">H", data[:2]) - data = data[8:] - allfeats = [] - maxsetting = 0 - for i in range(numFeats): - if self.version >= 2.0: - (fid, nums, _, offset, flags, lid) = struct.unpack( - ">LHHLHH", data[16 * i : 16 * (i + 1)] - ) - offset = int((offset - 12 - 16 * numFeats) / 4) - else: - (fid, nums, offset, flags, lid) = struct.unpack( - ">HHLHH", data[12 * i : 12 * (i + 1)] - ) - offset = int((offset - 12 - 12 * numFeats) / 4) - allfeats.append((fid, nums, offset, flags, lid)) - maxsetting = max(maxsetting, offset + nums) - data = data[16 * numFeats :] - allsettings = [] - for i in range(maxsetting): - if len(data) >= 4 * (i + 1): - (val, lid) = struct.unpack(">HH", data[4 * i : 4 * (i + 1)]) - allsettings.append((val, lid)) - for i, f in enumerate(allfeats): - (fid, nums, offset, flags, lid) = f - fobj = Feature() - fobj.flags = flags - fobj.label = lid - self.features[grUtils.num2tag(fid)] = fobj - fobj.settings = {} - fobj.default = None - fobj.index = i - for i in range(offset, offset + nums): - if i >= len(allsettings): - continue - (vid, vlid) = allsettings[i] - fobj.settings[vid] = vlid - if fobj.default is None: - fobj.default = vid - - def compile(self, ttFont): - fdat = b"" - vdat = b"" - offset = 0 - for f, v in sorted(self.features.items(), key=lambda x: x[1].index): - fnum = grUtils.tag2num(f) - if self.version >= 2.0: - fdat += struct.pack( - ">LHHLHH", - grUtils.tag2num(f), - len(v.settings), - 0, - offset * 4 + 12 + 16 * len(self.features), - v.flags, - v.label, - ) - elif fnum > 65535: # self healing for alphabetic ids - self.version = 2.0 - return self.compile(ttFont) - else: - fdat += struct.pack( - ">HHLHH", - grUtils.tag2num(f), - len(v.settings), - offset * 4 + 12 + 12 * len(self.features), - v.flags, - v.label, - ) - for s, l in sorted( - v.settings.items(), key=lambda x: (-1, x[1]) if x[0] == v.default else x - ): - vdat += struct.pack(">HH", s, l) - offset += len(v.settings) - hdr = sstruct.pack(Feat_hdr_format, self) - return hdr + struct.pack(">HHL", len(self.features), 0, 0) + fdat + vdat - - def toXML(self, writer, ttFont): - writer.simpletag("version", version=self.version) - writer.newline() - for f, v in sorted(self.features.items(), key=lambda x: x[1].index): - writer.begintag( - "feature", - fid=f, - label=v.label, - flags=v.flags, - default=(v.default if v.default else 0), - ) - writer.newline() - for s, l in sorted(v.settings.items()): - writer.simpletag("setting", value=s, label=l) - writer.newline() - writer.endtag("feature") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.version = float(safeEval(attrs["version"])) - elif name == "feature": - fid = attrs["fid"] - fobj = Feature() - fobj.flags = int(safeEval(attrs["flags"])) - fobj.label = int(safeEval(attrs["label"])) - fobj.default = int(safeEval(attrs.get("default", "0"))) - fobj.index = len(self.features) - self.features[fid] = fobj - fobj.settings = {} - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag == "setting": - fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"])) - - -class 
Feature(object): - pass diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-329f8260.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-329f8260.css deleted file mode 100644 index 3b53ee465e192f512a964e9050e9aab81384add8..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-329f8260.css +++ /dev/null @@ -1 +0,0 @@ -.min.svelte-1ybaih5{min-height:var(--size-24)}.hide.svelte-1ybaih5{display:none}div.svelte-1ed2p3z{transition:.15s}.pending.svelte-1ed2p3z{opacity:.2} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/test_data/blocks_configs.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/test_data/blocks_configs.py deleted file mode 100644 index cdcad71479b229ee80503a3d6421f5f4b616467a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/test_data/blocks_configs.py +++ /dev/null @@ -1,814 +0,0 @@ -XRAY_CONFIG = { - "version": "3.26.0\n", - "mode": "blocks", - "dev_mode": True, - "analytics_enabled": False, - "components": [ - { - "id": 6, - "type": "markdown", - "props": { - "value": "

<h1>Detect Disease From Scan</h1>\n<p>With this model you can lorem ipsum</p>\n<ul>\n<li>ipsum 1</li>\n<li>ipsum 2</li>\n</ul>
                    \n", - "name": "markdown", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 7, - "type": "checkboxgroup", - "props": { - "choices": ["Covid", "Malaria", "Lung Cancer"], - "value": [], - "label": "Disease to Scan For", - "show_label": True, - "name": "checkboxgroup", - "visible": True, - "style": {}, - }, - "serializer": "ListStringSerializable", - "api_info": { - "info": {"type": "array", "items": {"type": "string"}}, - "serialized_info": False, - }, - "example_inputs": {"raw": "Covid", "serialized": "Covid"}, - }, - {"id": 8, "type": "tabs", "props": {"visible": True, "style": {}}}, - { - "id": 9, - "type": "tabitem", - "props": {"label": "X-ray", "visible": True, "style": {}}, - }, - { - "id": 10, - "type": "row", - "props": { - "type": "row", - "variant": "default", - "visible": True, - "style": {}, - }, - }, - { - "id": 11, - "type": "image", - "props": { - "image_mode": "RGB", - "source": "upload", - "tool": "editor", - "streaming": False, - "mirror_webcam": True, - "selectable": False, - "show_label": True, - "name": "image", - "visible": True, - "style": {}, - }, - "serializer": "ImgSerializable", - "api_info": { - "info": { - "type": "string", - "description": "base64 representation of an image", - }, - "serialized_info": True, - }, - "example_inputs": { - "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06
ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==", - "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - }, - }, - { - "id": 12, - "type": "json", - "props": {"show_label": True, "name": "json", "visible": True, "style": {}}, - "serializer": "JSONSerializable", - "api_info": { - "info": {"type": {}, "description": "any valid json"}, - "serialized_info": True, - }, - "example_inputs": {"raw": {"a": 1, "b": 2}, "serialized": None}, - }, - { - "id": 13, - "type": "button", - "props": { - "value": "Run", - "variant": "secondary", - "interactive": True, - "name": "button", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 14, - "type": "tabitem", - "props": {"label": "CT Scan", "visible": True, "style": {}}, - }, - { - "id": 15, - "type": "row", - "props": { - "type": "row", - "variant": "default", - "visible": True, - "style": {}, - }, - }, - { - "id": 16, - "type": "image", - "props": { - "image_mode": "RGB", - "source": "upload", - "tool": "editor", - "streaming": False, - "mirror_webcam": True, - "selectable": False, - "show_label": True, - "name": "image", - "visible": True, - "style": {}, - }, - "serializer": "ImgSerializable", - "api_info": { - "info": { - "type": "string", - "description": "base64 representation of an image", - }, - "serialized_info": True, - }, - "example_inputs": { - "raw": 
"data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==", - "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - }, - }, - { - "id": 17, - "type": "json", - "props": {"show_label": True, "name": "json", "visible": True, "style": {}}, - "serializer": "JSONSerializable", - "api_info": { - "info": {"type": {}, "description": "any valid json"}, - "serialized_info": True, - }, - "example_inputs": {"raw": {"a": 1, "b": 2}, "serialized": None}, - }, - { - "id": 18, - "type": "button", - "props": { - "value": "Run", - "variant": "secondary", - "interactive": True, - "name": "button", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 19, - "type": "textbox", - "props": { - "lines": 1, - "max_lines": 20, - "value": "", - "type": "text", - "show_label": True, 
- "name": "textbox", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 20, - "type": "form", - "props": {"type": "form", "visible": True, "style": {}}, - }, - { - "id": 21, - "type": "form", - "props": {"type": "form", "visible": True, "style": {}}, - }, - ], - "css": None, - "title": "Gradio", - "is_space": False, - "enable_queue": None, - "show_error": True, - "show_api": True, - "is_colab": False, - "stylesheets": [ - "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap", - "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap", - ], - "theme": "default", - "layout": { - "id": 5, - "children": [ - {"id": 6}, - {"id": 20, "children": [{"id": 7}]}, - { - "id": 8, - "children": [ - { - "id": 9, - "children": [ - {"id": 10, "children": [{"id": 11}, {"id": 12}]}, - {"id": 13}, - ], - }, - { - "id": 14, - "children": [ - {"id": 15, "children": [{"id": 16}, {"id": 17}]}, - {"id": 18}, - ], - }, - ], - }, - {"id": 21, "children": [{"id": 19}]}, - ], - }, - "dependencies": [ - { - "targets": [13], - "trigger": "click", - "inputs": [7, 11], - "outputs": [12], - "backend_fn": True, - "js": None, - "queue": None, - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "every": None, - "batch": False, - "max_batch_size": 4, - "cancels": [], - "types": {"continuous": False, "generator": False}, - "collects_event_data": False, - "trigger_after": None, - "trigger_only_on_success": False, - }, - { - "targets": [18], - "trigger": "click", - "inputs": [7, 16], - "outputs": [17], - "backend_fn": True, - "js": None, - "queue": None, - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "every": None, - "batch": False, - "max_batch_size": 4, - "cancels": [], - "types": {"continuous": False, "generator": False}, - "collects_event_data": False, - "trigger_after": None, - "trigger_only_on_success": False, - }, - { - "targets": [], - "trigger": "load", - "inputs": [], - "outputs": [19], - "backend_fn": True, - "js": None, - "queue": None, - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "every": None, - "batch": False, - "max_batch_size": 4, - "cancels": [], - "types": {"continuous": False, "generator": False}, - "collects_event_data": False, - "trigger_after": None, - "trigger_only_on_success": False, - }, - ], -} - - -XRAY_CONFIG_DIFF_IDS = { - "version": "3.26.0\n", - "mode": "blocks", - "dev_mode": True, - "analytics_enabled": False, - "components": [ - { - "id": 6, - "type": "markdown", - "props": { - "value": "

<h1>Detect Disease From Scan</h1>\n<p>With this model you can lorem ipsum</p>\n<ul>\n<li>ipsum 1</li>\n<li>ipsum 2</li>\n</ul>
                    \n", - "name": "markdown", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 7, - "type": "checkboxgroup", - "props": { - "choices": ["Covid", "Malaria", "Lung Cancer"], - "value": [], - "label": "Disease to Scan For", - "show_label": True, - "name": "checkboxgroup", - "visible": True, - "style": {}, - }, - "serializer": "ListStringSerializable", - "api_info": { - "info": {"type": "array", "items": {"type": "string"}}, - "serialized_info": False, - }, - "example_inputs": {"raw": "Covid", "serialized": "Covid"}, - }, - {"id": 8, "type": "tabs", "props": {"visible": True, "style": {}}}, - { - "id": 9, - "type": "tabitem", - "props": {"label": "X-ray", "visible": True, "style": {}}, - }, - { - "id": 10, - "type": "row", - "props": { - "type": "row", - "variant": "default", - "visible": True, - "style": {}, - }, - }, - { - "id": 11, - "type": "image", - "props": { - "image_mode": "RGB", - "source": "upload", - "tool": "editor", - "streaming": False, - "mirror_webcam": True, - "selectable": False, - "show_label": True, - "name": "image", - "visible": True, - "style": {}, - }, - "serializer": "ImgSerializable", - "api_info": { - "info": { - "type": "string", - "description": "base64 representation of an image", - }, - "serialized_info": True, - }, - "example_inputs": { - "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06
ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==", - "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - }, - }, - { - "id": 2111, - "type": "json", - "props": {"show_label": True, "name": "json", "visible": True, "style": {}}, - "serializer": "JSONSerializable", - "api_info": { - "info": {"type": {}, "description": "any valid json"}, - "serialized_info": True, - }, - "example_inputs": {"raw": {"a": 1, "b": 2}, "serialized": None}, - }, - { - "id": 13, - "type": "button", - "props": { - "value": "Run", - "variant": "secondary", - "interactive": True, - "name": "button", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 14, - "type": "tabitem", - "props": {"label": "CT Scan", "visible": True, "style": {}}, - }, - { - "id": 15, - "type": "row", - "props": { - "type": "row", - "variant": "default", - "visible": True, - "style": {}, - }, - }, - { - "id": 16, - "type": "image", - "props": { - "image_mode": "RGB", - "source": "upload", - "tool": "editor", - "streaming": False, - "mirror_webcam": True, - "selectable": False, - "show_label": True, - "name": "image", - "visible": True, - "style": {}, - }, - "serializer": "ImgSerializable", - "api_info": { - "info": { - "type": "string", - "description": "base64 representation of an image", - }, - "serialized_info": True, - }, - "example_inputs": { - "raw": 
"data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==", - "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - }, - }, - { - "id": 17, - "type": "json", - "props": {"show_label": True, "name": "json", "visible": True, "style": {}}, - "serializer": "JSONSerializable", - "api_info": { - "info": {"type": {}, "description": "any valid json"}, - "serialized_info": True, - }, - "example_inputs": {"raw": {"a": 1, "b": 2}, "serialized": None}, - }, - { - "id": 18, - "type": "button", - "props": { - "value": "Run", - "variant": "secondary", - "interactive": True, - "name": "button", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 19, - "type": "textbox", - "props": { - "lines": 1, - "max_lines": 20, - "value": "", - "type": "text", - "show_label": True, 
- "name": "textbox", - "visible": True, - "style": {}, - }, - "serializer": "StringSerializable", - "api_info": {"info": {"type": "string"}, "serialized_info": False}, - "example_inputs": {"raw": "Howdy!", "serialized": "Howdy!"}, - }, - { - "id": 20, - "type": "form", - "props": {"type": "form", "visible": True, "style": {}}, - }, - { - "id": 21, - "type": "form", - "props": {"type": "form", "visible": True, "style": {}}, - }, - ], - "css": None, - "title": "Gradio", - "is_space": False, - "enable_queue": None, - "show_error": True, - "show_api": True, - "is_colab": False, - "stylesheets": [ - "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap", - "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap", - ], - "theme": "default", - "layout": { - "id": 5, - "children": [ - {"id": 6}, - {"id": 20, "children": [{"id": 7}]}, - { - "id": 8, - "children": [ - { - "id": 9, - "children": [ - {"id": 10, "children": [{"id": 11}, {"id": 2111}]}, - {"id": 13}, - ], - }, - { - "id": 14, - "children": [ - {"id": 15, "children": [{"id": 16}, {"id": 17}]}, - {"id": 18}, - ], - }, - ], - }, - {"id": 21, "children": [{"id": 19}]}, - ], - }, - "dependencies": [ - { - "targets": [13], - "trigger": "click", - "inputs": [7, 11], - "outputs": [2111], - "backend_fn": True, - "js": None, - "queue": None, - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "every": None, - "batch": False, - "max_batch_size": 4, - "cancels": [], - "types": {"continuous": False, "generator": False}, - "collects_event_data": False, - "trigger_after": None, - "trigger_only_on_success": False, - }, - { - "targets": [18], - "trigger": "click", - "inputs": [7, 16], - "outputs": [17], - "backend_fn": True, - "js": None, - "queue": None, - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "every": None, - "batch": False, - "max_batch_size": 4, - "cancels": [], - "types": {"continuous": False, "generator": False}, - "collects_event_data": False, - "trigger_after": None, - "trigger_only_on_success": False, - }, - { - "targets": [], - "trigger": "load", - "inputs": [], - "outputs": [19], - "backend_fn": True, - "js": None, - "queue": None, - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "every": None, - "batch": False, - "max_batch_size": 4, - "cancels": [], - "types": {"continuous": False, "generator": False}, - "collects_event_data": False, - "trigger_after": None, - "trigger_only_on_success": False, - }, - ], -} - - -XRAY_CONFIG_WITH_MISTAKE = { - "mode": "blocks", - "dev_mode": True, - "analytics_enabled": False, - "theme": "default", - "components": [ - { - "id": 1, - "type": "markdown", - "props": { - "value": "

<h1>Detect Disease From Scan</h1>\n<p>With this model you can lorem ipsum</p>\n<ul>\n<li>ipsum 1</li>\n<li>ipsum 2</li>\n</ul>
                    \n", - "name": "markdown", - "style": {}, - }, - }, - { - "id": 2, - "type": "checkboxgroup", - "props": { - "choices": ["Covid", "Malaria", "Lung Cancer"], - "value": [], - "name": "checkboxgroup", - "show_label": True, - "label": "Disease to Scan For", - "style": {}, - }, - }, - { - "id": 3, - "type": "tabs", - "props": { - "style": {}, - "value": True, - }, - }, - { - "id": 4, - "type": "tabitem", - "props": { - "label": "X-ray", - "style": {}, - "value": True, - }, - }, - { - "id": 5, - "type": "row", - "props": {"type": "row", "variant": "default", "style": {}, "value": True}, - }, - { - "id": 6, - "type": "image", - "props": { - "image_mode": "RGB", - "source": "upload", - "streaming": False, - "mirror_webcam": True, - "tool": "editor", - "name": "image", - "style": {}, - "selectable": False, - }, - }, - { - "id": 7, - "type": "json", - "props": { - "name": "json", - "style": {}, - }, - }, - { - "id": 8, - "type": "button", - "props": { - "value": "Run", - "name": "button", - "interactive": True, - "css": {"background-color": "red", "--hover-color": "orange"}, - "variant": "secondary", - }, - }, - { - "id": 9, - "type": "tabitem", - "props": { - "show_label": True, - "label": "CT Scan", - "style": {}, - "value": True, - }, - }, - { - "id": 10, - "type": "row", - "props": {"type": "row", "variant": "default", "style": {}, "value": True}, - }, - { - "id": 11, - "type": "image", - "props": { - "image_mode": "RGB", - "source": "upload", - "tool": "editor", - "streaming": False, - "mirror_webcam": True, - "name": "image", - "style": {}, - "selectable": False, - }, - }, - { - "id": 12, - "type": "json", - "props": { - "name": "json", - "style": {}, - }, - }, - { - "id": 13, - "type": "button", - "props": { - "value": "Run", - "interactive": True, - "name": "button", - "style": {}, - "variant": "secondary", - }, - }, - { - "id": 14, - "type": "textbox", - "props": { - "lines": 1, - "value": "", - "name": "textbox", - "type": "text", - "style": {}, - }, - }, - ], - "layout": { - "id": 0, - "children": [ - {"id": 1}, - {"id": 2}, - { - "id": 3, - "children": [ - { - "id": 4, - "children": [ - {"id": 5, "children": [{"id": 6}, {"id": 7}]}, - {"id": 8}, - ], - }, - { - "id": 9, - "children": [ - {"id": 10, "children": [{"id": 12}, {"id": 11}]}, - {"id": 13}, - ], - }, - ], - }, - {"id": 14}, - ], - }, - "dependencies": [ - { - "targets": [8], - "trigger": "click", - "inputs": [2, 6], - "outputs": [7], - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "cancels": [], - "trigger_after": None, - "trigger_only_on_success": False, - }, - { - "targets": [13], - "trigger": "click", - "inputs": [2, 11], - "outputs": [12], - "api_name": None, - "scroll_to_output": False, - "show_progress": True, - "cancels": [], - "trigger_after": None, - "trigger_only_on_success": False, - }, - ], -} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/h11/tests/test_state.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/h11/tests/test_state.py deleted file mode 100644 index bc974e636e9f3e9b66022d2095cd670a9acbdcd9..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/h11/tests/test_state.py +++ /dev/null @@ -1,271 +0,0 @@ -import pytest - -from .._events import ( - ConnectionClosed, - Data, - EndOfMessage, - Event, - InformationalResponse, - Request, - Response, -) -from .._state import ( - _SWITCH_CONNECT, - _SWITCH_UPGRADE, - CLIENT, - CLOSED, - 
ConnectionState, - DONE, - IDLE, - MIGHT_SWITCH_PROTOCOL, - MUST_CLOSE, - SEND_BODY, - SEND_RESPONSE, - SERVER, - SWITCHED_PROTOCOL, -) -from .._util import LocalProtocolError - - -def test_ConnectionState() -> None: - cs = ConnectionState() - - # Basic event-triggered transitions - - assert cs.states == {CLIENT: IDLE, SERVER: IDLE} - - cs.process_event(CLIENT, Request) - # The SERVER-Request special case: - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} - - # Illegal transitions raise an error and nothing happens - with pytest.raises(LocalProtocolError): - cs.process_event(CLIENT, Request) - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} - - cs.process_event(SERVER, InformationalResponse) - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} - - cs.process_event(SERVER, Response) - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY} - - cs.process_event(CLIENT, EndOfMessage) - cs.process_event(SERVER, EndOfMessage) - assert cs.states == {CLIENT: DONE, SERVER: DONE} - - # State-triggered transition - - cs.process_event(SERVER, ConnectionClosed) - assert cs.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED} - - -def test_ConnectionState_keep_alive() -> None: - # keep_alive = False - cs = ConnectionState() - cs.process_event(CLIENT, Request) - cs.process_keep_alive_disabled() - cs.process_event(CLIENT, EndOfMessage) - assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_RESPONSE} - - cs.process_event(SERVER, Response) - cs.process_event(SERVER, EndOfMessage) - assert cs.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE} - - -def test_ConnectionState_keep_alive_in_DONE() -> None: - # Check that if keep_alive is disabled when the CLIENT is already in DONE, - # then this is sufficient to immediately trigger the DONE -> MUST_CLOSE - # transition - cs = ConnectionState() - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, EndOfMessage) - assert cs.states[CLIENT] is DONE - cs.process_keep_alive_disabled() - assert cs.states[CLIENT] is MUST_CLOSE - - -def test_ConnectionState_switch_denied() -> None: - for switch_type in (_SWITCH_CONNECT, _SWITCH_UPGRADE): - for deny_early in (True, False): - cs = ConnectionState() - cs.process_client_switch_proposal(switch_type) - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, Data) - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} - - assert switch_type in cs.pending_switch_proposals - - if deny_early: - # before client reaches DONE - cs.process_event(SERVER, Response) - assert not cs.pending_switch_proposals - - cs.process_event(CLIENT, EndOfMessage) - - if deny_early: - assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} - else: - assert cs.states == { - CLIENT: MIGHT_SWITCH_PROTOCOL, - SERVER: SEND_RESPONSE, - } - - cs.process_event(SERVER, InformationalResponse) - assert cs.states == { - CLIENT: MIGHT_SWITCH_PROTOCOL, - SERVER: SEND_RESPONSE, - } - - cs.process_event(SERVER, Response) - assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} - assert not cs.pending_switch_proposals - - -_response_type_for_switch = { - _SWITCH_UPGRADE: InformationalResponse, - _SWITCH_CONNECT: Response, - None: Response, -} - - -def test_ConnectionState_protocol_switch_accepted() -> None: - for switch_event in [_SWITCH_UPGRADE, _SWITCH_CONNECT]: - cs = ConnectionState() - cs.process_client_switch_proposal(switch_event) - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, Data) - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} - - cs.process_event(CLIENT, 
EndOfMessage) - assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} - - cs.process_event(SERVER, InformationalResponse) - assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} - - cs.process_event(SERVER, _response_type_for_switch[switch_event], switch_event) - assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} - - -def test_ConnectionState_double_protocol_switch() -> None: - # CONNECT + Upgrade is legal! Very silly, but legal. So we support - # it. Because sometimes doing the silly thing is easier than not. - for server_switch in [None, _SWITCH_UPGRADE, _SWITCH_CONNECT]: - cs = ConnectionState() - cs.process_client_switch_proposal(_SWITCH_UPGRADE) - cs.process_client_switch_proposal(_SWITCH_CONNECT) - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, EndOfMessage) - assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} - cs.process_event( - SERVER, _response_type_for_switch[server_switch], server_switch - ) - if server_switch is None: - assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} - else: - assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} - - -def test_ConnectionState_inconsistent_protocol_switch() -> None: - for client_switches, server_switch in [ - ([], _SWITCH_CONNECT), - ([], _SWITCH_UPGRADE), - ([_SWITCH_UPGRADE], _SWITCH_CONNECT), - ([_SWITCH_CONNECT], _SWITCH_UPGRADE), - ]: - cs = ConnectionState() - for client_switch in client_switches: # type: ignore[attr-defined] - cs.process_client_switch_proposal(client_switch) - cs.process_event(CLIENT, Request) - with pytest.raises(LocalProtocolError): - cs.process_event(SERVER, Response, server_switch) - - -def test_ConnectionState_keepalive_protocol_switch_interaction() -> None: - # keep_alive=False + pending_switch_proposals - cs = ConnectionState() - cs.process_client_switch_proposal(_SWITCH_UPGRADE) - cs.process_event(CLIENT, Request) - cs.process_keep_alive_disabled() - cs.process_event(CLIENT, Data) - assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} - - # the protocol switch "wins" - cs.process_event(CLIENT, EndOfMessage) - assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} - - # but when the server denies the request, keep_alive comes back into play - cs.process_event(SERVER, Response) - assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY} - - -def test_ConnectionState_reuse() -> None: - cs = ConnectionState() - - with pytest.raises(LocalProtocolError): - cs.start_next_cycle() - - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, EndOfMessage) - - with pytest.raises(LocalProtocolError): - cs.start_next_cycle() - - cs.process_event(SERVER, Response) - cs.process_event(SERVER, EndOfMessage) - - cs.start_next_cycle() - assert cs.states == {CLIENT: IDLE, SERVER: IDLE} - - # No keepalive - - cs.process_event(CLIENT, Request) - cs.process_keep_alive_disabled() - cs.process_event(CLIENT, EndOfMessage) - cs.process_event(SERVER, Response) - cs.process_event(SERVER, EndOfMessage) - - with pytest.raises(LocalProtocolError): - cs.start_next_cycle() - - # One side closed - - cs = ConnectionState() - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, EndOfMessage) - cs.process_event(CLIENT, ConnectionClosed) - cs.process_event(SERVER, Response) - cs.process_event(SERVER, EndOfMessage) - - with pytest.raises(LocalProtocolError): - cs.start_next_cycle() - - # Succesful protocol switch - - cs = ConnectionState() - 
cs.process_client_switch_proposal(_SWITCH_UPGRADE) - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, EndOfMessage) - cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE) - - with pytest.raises(LocalProtocolError): - cs.start_next_cycle() - - # Failed protocol switch - - cs = ConnectionState() - cs.process_client_switch_proposal(_SWITCH_UPGRADE) - cs.process_event(CLIENT, Request) - cs.process_event(CLIENT, EndOfMessage) - cs.process_event(SERVER, Response) - cs.process_event(SERVER, EndOfMessage) - - cs.start_next_cycle() - assert cs.states == {CLIENT: IDLE, SERVER: IDLE} - - -def test_server_request_is_illegal() -> None: - # There used to be a bug in how we handled the Request special case that - # made this allowed... - cs = ConnectionState() - with pytest.raises(LocalProtocolError): - cs.process_event(SERVER, Request) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_backend_pdf.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_backend_pdf.py deleted file mode 100644 index 8ffca8295ea5ae2a1ba49a81e25dfb7f14ec2308..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_backend_pdf.py +++ /dev/null @@ -1,413 +0,0 @@ -import datetime -import decimal -import io -import os -from pathlib import Path -from tempfile import NamedTemporaryFile - -import numpy as np -import pytest - -import matplotlib as mpl -from matplotlib import ( - pyplot as plt, rcParams, font_manager as fm -) -from matplotlib.cbook import _get_data_path -from matplotlib.ft2font import FT2Font -from matplotlib.font_manager import findfont, FontProperties -from matplotlib.backends._backend_pdf_ps import get_glyphs_subset -from matplotlib.backends.backend_pdf import PdfPages -from matplotlib.patches import Rectangle -from matplotlib.testing.decorators import check_figures_equal, image_comparison -from matplotlib.testing._markers import needs_usetex - - -@image_comparison(['pdf_use14corefonts.pdf']) -def test_use14corefonts(): - rcParams['pdf.use14corefonts'] = True - rcParams['font.family'] = 'sans-serif' - rcParams['font.size'] = 8 - rcParams['font.sans-serif'] = ['Helvetica'] - rcParams['pdf.compression'] = 0 - - text = '''A three-line text positioned just above a blue line -and containing some French characters and the euro symbol: -"Merci pépé pour les 10 €"''' - - fig, ax = plt.subplots() - ax.set_title('Test PDF backend with option use14corefonts=True') - ax.text(0.5, 0.5, text, horizontalalignment='center', - verticalalignment='bottom', - fontsize=14) - ax.axhline(0.5, linewidth=0.5) - - -@pytest.mark.parametrize('fontname, fontfile', [ - ('DejaVu Sans', 'DejaVuSans.ttf'), - ('WenQuanYi Zen Hei', 'wqy-zenhei.ttc'), -]) -@pytest.mark.parametrize('fonttype', [3, 42]) -def test_embed_fonts(fontname, fontfile, fonttype): - if Path(findfont(FontProperties(family=[fontname]))).name != fontfile: - pytest.skip(f'Font {fontname!r} may be missing') - - rcParams['pdf.fonttype'] = fonttype - fig, ax = plt.subplots() - ax.plot([1, 2, 3]) - ax.set_title('Axes Title', font=fontname) - fig.savefig(io.BytesIO(), format='pdf') - - -def test_multipage_pagecount(): - with PdfPages(io.BytesIO()) as pdf: - assert pdf.get_pagecount() == 0 - fig, ax = plt.subplots() - ax.plot([1, 2, 3]) - fig.savefig(pdf, format="pdf") - assert pdf.get_pagecount() == 1 - pdf.savefig() - assert pdf.get_pagecount() == 2 - - -def 
test_multipage_properfinalize(): - pdfio = io.BytesIO() - with PdfPages(pdfio) as pdf: - for i in range(10): - fig, ax = plt.subplots() - ax.set_title('This is a long title') - fig.savefig(pdf, format="pdf") - s = pdfio.getvalue() - assert s.count(b'startxref') == 1 - assert len(s) < 40000 - - -def test_multipage_keep_empty(): - # test empty pdf files - # test that an empty pdf is left behind with keep_empty=True (default) - with NamedTemporaryFile(delete=False) as tmp: - with PdfPages(tmp) as pdf: - filename = pdf._file.fh.name - assert os.path.exists(filename) - os.remove(filename) - # test if an empty pdf is deleting itself afterwards with keep_empty=False - with PdfPages(filename, keep_empty=False) as pdf: - pass - assert not os.path.exists(filename) - # test pdf files with content, they should never be deleted - fig, ax = plt.subplots() - ax.plot([1, 2, 3]) - # test that a non-empty pdf is left behind with keep_empty=True (default) - with NamedTemporaryFile(delete=False) as tmp: - with PdfPages(tmp) as pdf: - filename = pdf._file.fh.name - pdf.savefig() - assert os.path.exists(filename) - os.remove(filename) - # test that a non-empty pdf is left behind with keep_empty=False - with NamedTemporaryFile(delete=False) as tmp: - with PdfPages(tmp, keep_empty=False) as pdf: - filename = pdf._file.fh.name - pdf.savefig() - assert os.path.exists(filename) - os.remove(filename) - - -def test_composite_image(): - # Test that figures can be saved with and without combining multiple images - # (on a single set of axes) into a single composite image. - X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1)) - Z = np.sin(Y ** 2) - fig, ax = plt.subplots() - ax.set_xlim(0, 3) - ax.imshow(Z, extent=[0, 1, 0, 1]) - ax.imshow(Z[::-1], extent=[2, 3, 0, 1]) - plt.rcParams['image.composite_image'] = True - with PdfPages(io.BytesIO()) as pdf: - fig.savefig(pdf, format="pdf") - assert len(pdf._file._images) == 1 - plt.rcParams['image.composite_image'] = False - with PdfPages(io.BytesIO()) as pdf: - fig.savefig(pdf, format="pdf") - assert len(pdf._file._images) == 2 - - -def test_savefig_metadata(monkeypatch): - pikepdf = pytest.importorskip('pikepdf') - monkeypatch.setenv('SOURCE_DATE_EPOCH', '0') - - fig, ax = plt.subplots() - ax.plot(range(5)) - - md = { - 'Author': 'me', - 'Title': 'Multipage PDF', - 'Subject': 'Test page', - 'Keywords': 'test,pdf,multipage', - 'ModDate': datetime.datetime( - 1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))), - 'Trapped': 'True' - } - buf = io.BytesIO() - fig.savefig(buf, metadata=md, format='pdf') - - with pikepdf.Pdf.open(buf) as pdf: - info = {k: str(v) for k, v in pdf.docinfo.items()} - - assert info == { - '/Author': 'me', - '/CreationDate': 'D:19700101000000Z', - '/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org', - '/Keywords': 'test,pdf,multipage', - '/ModDate': 'D:19680801000000Z', - '/Producer': f'Matplotlib pdf backend v{mpl.__version__}', - '/Subject': 'Test page', - '/Title': 'Multipage PDF', - '/Trapped': '/True', - } - - -def test_invalid_metadata(): - fig, ax = plt.subplots() - - with pytest.warns(UserWarning, - match="Unknown infodict keyword: 'foobar'."): - fig.savefig(io.BytesIO(), format='pdf', metadata={'foobar': 'invalid'}) - - with pytest.warns(UserWarning, - match='not an instance of datetime.datetime.'): - fig.savefig(io.BytesIO(), format='pdf', - metadata={'ModDate': '1968-08-01'}) - - with pytest.warns(UserWarning, - match='not one of {"True", "False", "Unknown"}'): - fig.savefig(io.BytesIO(), format='pdf', 
metadata={'Trapped': 'foo'}) - - with pytest.warns(UserWarning, match='not an instance of str.'): - fig.savefig(io.BytesIO(), format='pdf', metadata={'Title': 1234}) - - -def test_multipage_metadata(monkeypatch): - pikepdf = pytest.importorskip('pikepdf') - monkeypatch.setenv('SOURCE_DATE_EPOCH', '0') - - fig, ax = plt.subplots() - ax.plot(range(5)) - - md = { - 'Author': 'me', - 'Title': 'Multipage PDF', - 'Subject': 'Test page', - 'Keywords': 'test,pdf,multipage', - 'ModDate': datetime.datetime( - 1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))), - 'Trapped': 'True' - } - buf = io.BytesIO() - with PdfPages(buf, metadata=md) as pdf: - pdf.savefig(fig) - pdf.savefig(fig) - - with pikepdf.Pdf.open(buf) as pdf: - info = {k: str(v) for k, v in pdf.docinfo.items()} - - assert info == { - '/Author': 'me', - '/CreationDate': 'D:19700101000000Z', - '/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org', - '/Keywords': 'test,pdf,multipage', - '/ModDate': 'D:19680801000000Z', - '/Producer': f'Matplotlib pdf backend v{mpl.__version__}', - '/Subject': 'Test page', - '/Title': 'Multipage PDF', - '/Trapped': '/True', - } - - -def test_text_urls(): - pikepdf = pytest.importorskip('pikepdf') - - test_url = 'https://test_text_urls.matplotlib.org/' - - fig = plt.figure(figsize=(2, 1)) - fig.text(0.1, 0.1, 'test plain 123', url=f'{test_url}plain') - fig.text(0.1, 0.4, 'test mathtext $123$', url=f'{test_url}mathtext') - - with io.BytesIO() as fd: - fig.savefig(fd, format='pdf') - - with pikepdf.Pdf.open(fd) as pdf: - annots = pdf.pages[0].Annots - - # Iteration over Annots must occur within the context manager, - # otherwise it may fail depending on the pdf structure. - for y, fragment in [('0.1', 'plain'), ('0.4', 'mathtext')]: - annot = next( - (a for a in annots if a.A.URI == f'{test_url}{fragment}'), - None) - assert annot is not None - assert getattr(annot, 'QuadPoints', None) is None - # Positions in points (72 per inch.) - assert annot.Rect[1] == decimal.Decimal(y) * 72 - - -def test_text_rotated_urls(): - pikepdf = pytest.importorskip('pikepdf') - - test_url = 'https://test_text_urls.matplotlib.org/' - - fig = plt.figure(figsize=(1, 1)) - fig.text(0.1, 0.1, 'N', rotation=45, url=f'{test_url}') - - with io.BytesIO() as fd: - fig.savefig(fd, format='pdf') - - with pikepdf.Pdf.open(fd) as pdf: - annots = pdf.pages[0].Annots - - # Iteration over Annots must occur within the context manager, - # otherwise it may fail depending on the pdf structure. - annot = next( - (a for a in annots if a.A.URI == f'{test_url}'), - None) - assert annot is not None - assert getattr(annot, 'QuadPoints', None) is not None - # Positions in points (72 per inch) - assert annot.Rect[0] == \ - annot.QuadPoints[6] - decimal.Decimal('0.00001') - - -@needs_usetex -def test_text_urls_tex(): - pikepdf = pytest.importorskip('pikepdf') - - test_url = 'https://test_text_urls.matplotlib.org/' - - fig = plt.figure(figsize=(2, 1)) - fig.text(0.1, 0.7, 'test tex $123$', usetex=True, url=f'{test_url}tex') - - with io.BytesIO() as fd: - fig.savefig(fd, format='pdf') - - with pikepdf.Pdf.open(fd) as pdf: - annots = pdf.pages[0].Annots - - # Iteration over Annots must occur within the context manager, - # otherwise it may fail depending on the pdf structure. - annot = next( - (a for a in annots if a.A.URI == f'{test_url}tex'), - None) - assert annot is not None - # Positions in points (72 per inch.) 
- assert annot.Rect[1] == decimal.Decimal('0.7') * 72 - - -def test_pdfpages_fspath(): - with PdfPages(Path(os.devnull)) as pdf: - pdf.savefig(plt.figure()) - - -@image_comparison(['hatching_legend.pdf']) -def test_hatching_legend(): - """Test for correct hatching on patches in legend""" - fig = plt.figure(figsize=(1, 2)) - - a = Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX") - b = Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX") - - fig.legend([a, b, a, b], ["", "", "", ""]) - - -@image_comparison(['grayscale_alpha.pdf']) -def test_grayscale_alpha(): - """Masking images with NaN did not work for grayscale images""" - x, y = np.ogrid[-2:2:.1, -2:2:.1] - dd = np.exp(-(x**2 + y**2)) - dd[dd < .1] = np.nan - fig, ax = plt.subplots() - ax.imshow(dd, interpolation='none', cmap='gray_r') - ax.set_xticks([]) - ax.set_yticks([]) - - -@mpl.style.context('default') -@check_figures_equal(extensions=["pdf", "eps"]) -def test_pdf_eps_savefig_when_color_is_none(fig_test, fig_ref): - ax_test = fig_test.add_subplot() - ax_test.set_axis_off() - ax_test.plot(np.sin(np.linspace(-5, 5, 100)), "v", c="none") - ax_ref = fig_ref.add_subplot() - ax_ref.set_axis_off() - - -@needs_usetex -def test_failing_latex(): - """Test failing latex subprocess call""" - plt.xlabel("$22_2_2$", usetex=True) # This fails with "Double subscript" - with pytest.raises(RuntimeError): - plt.savefig(io.BytesIO(), format="pdf") - - -def test_empty_rasterized(): - # Check that empty figures that are rasterised save to pdf files fine - fig, ax = plt.subplots() - ax.plot([], [], rasterized=True) - fig.savefig(io.BytesIO(), format="pdf") - - -@image_comparison(['kerning.pdf']) -def test_kerning(): - fig = plt.figure() - s = "AVAVAVAVAVAVAVAV€AAVV" - fig.text(0, .25, s, size=5) - fig.text(0, .75, s, size=20) - - -def test_glyphs_subset(): - fpath = str(_get_data_path("fonts/ttf/DejaVuSerif.ttf")) - chars = "these should be subsetted! 
1234567890" - - # non-subsetted FT2Font - nosubfont = FT2Font(fpath) - nosubfont.set_text(chars) - - # subsetted FT2Font - subfont = FT2Font(get_glyphs_subset(fpath, chars)) - subfont.set_text(chars) - - nosubcmap = nosubfont.get_charmap() - subcmap = subfont.get_charmap() - - # all unique chars must be available in subsetted font - assert set(chars) == set(chr(key) for key in subcmap.keys()) - - # subsetted font's charmap should have less entries - assert len(subcmap) < len(nosubcmap) - - # since both objects are assigned same characters - assert subfont.get_num_glyphs() == nosubfont.get_num_glyphs() - - -@image_comparison(["multi_font_type3.pdf"], tol=4.6) -def test_multi_font_type3(): - fp = fm.FontProperties(family=["WenQuanYi Zen Hei"]) - if Path(fm.findfont(fp)).name != "wqy-zenhei.ttc": - pytest.skip("Font may be missing") - - plt.rc('font', family=['DejaVu Sans', 'WenQuanYi Zen Hei'], size=27) - plt.rc('pdf', fonttype=3) - - fig = plt.figure() - fig.text(0.15, 0.475, "There are 几个汉字 in between!") - - -@image_comparison(["multi_font_type42.pdf"], tol=2.2) -def test_multi_font_type42(): - fp = fm.FontProperties(family=["WenQuanYi Zen Hei"]) - if Path(fm.findfont(fp)).name != "wqy-zenhei.ttc": - pytest.skip("Font may be missing") - - plt.rc('font', family=['DejaVu Sans', 'WenQuanYi Zen Hei'], size=27) - plt.rc('pdf', fonttype=42) - - fig = plt.figure() - fig.text(0.15, 0.475, "There are 几个汉字 in between!") diff --git a/spaces/leezhongjun/chatgpt-free/app.py b/spaces/leezhongjun/chatgpt-free/app.py deleted file mode 100644 index 7a0aa0fc6bd5920976bb277c5f1b63cc61f94c8b..0000000000000000000000000000000000000000 --- a/spaces/leezhongjun/chatgpt-free/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import gradio as gr -import chatgpt - -block = gr.Blocks() - -chatgpt_obj = chatgpt.ChatGPT() - -def update(name): - return f"Welcome to Gradio, {name}!" - -with block: - gr.Markdown("""

                    🤖 ChatGPT-free 🐍

                    -

                    ChatGPT-free has web access and uses the gpt-3.5-turbo model

                    -

                    Press Clear before starting a new conversation

                    -

                    View source on GitHub

                    """) - if chatgpt_obj.show_commands: - show_commands_txt = gr.Markdown(f"""

                    Commands and command responses are shown

                    """) - btn_text = "Hide commands and command responses" - else: - show_commands_txt = gr.Markdown(f"""

                    Commands and command responses are hidden

                    """) - btn_text = "Show commands and command responses" - - chatbot = gr.Chatbot() - message = gr.Textbox(label="Message", placeholder="Hi, how are things?") - submit = gr.Button("Send") - message.submit(chatgpt_obj.update, [message, chatbot], [message, chatbot]) - - submit.click(chatgpt_obj.update, [message, chatbot], [message, chatbot]) - - clear = gr.Button("Clear") - clear.click(chatgpt_obj.clear, None, chatbot, queue=False) - - toggle_commands = gr.Button(btn_text) - toggle_commands.click(chatgpt_obj.toggle_commands, chatbot, [chatbot, show_commands_txt, toggle_commands], queue=False) - - gr.Examples( - examples=["What is the Apple stock price now?", - "Who is the CEO of Apple now?", - "Write a poem about artificial intelligence", - "What could the future be like?", - "If x+1=20, what is the value of x?", - "Write a story that gives a message", - "What programming language is the most used in the industry?", - "How can I be more productive?", - "Create me a training schedule to train from home", - "Sums up everything we've talked about", - ], - inputs=message - ) - -block.launch(debug=True) \ No newline at end of file diff --git a/spaces/leo-bourrel/test-streamlit/model.py b/spaces/leo-bourrel/test-streamlit/model.py deleted file mode 100644 index f74ea92913ea805c7939cafbd630531756a83e08..0000000000000000000000000000000000000000 --- a/spaces/leo-bourrel/test-streamlit/model.py +++ /dev/null @@ -1,17 +0,0 @@ -import sqlalchemy -from pgvector.sqlalchemy import Vector -from sqlalchemy.orm import declarative_base - -Base = declarative_base() # type: Any - - -class Article(Base): - """Embedding store.""" - - __tablename__ = "article" - - id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, nullable=False) - title = sqlalchemy.Column(sqlalchemy.String, nullable=True) - abstract = sqlalchemy.Column(sqlalchemy.String, nullable=True) - embedding: Vector = sqlalchemy.Column("abstract_embedding", Vector(None)) - doi = sqlalchemy.Column(sqlalchemy.String, nullable=True) diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/openai/script.py b/spaces/leogabraneth/text-generation-webui-main/extensions/openai/script.py deleted file mode 100644 index 72fd16108453c7232d713b8053e2048198e98d96..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/openai/script.py +++ /dev/null @@ -1,352 +0,0 @@ -import json -import os -import ssl -import traceback -from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from threading import Thread - -import extensions.openai.completions as OAIcompletions -import extensions.openai.edits as OAIedits -import extensions.openai.embeddings as OAIembeddings -import extensions.openai.images as OAIimages -import extensions.openai.models as OAImodels -import extensions.openai.moderations as OAImoderations -from extensions.openai.defaults import clamp, default, get_default_req_params -from extensions.openai.errors import ( - InvalidRequestError, - OpenAIError, - ServiceUnavailableError -) -from extensions.openai.tokens import token_count, token_decode, token_encode -from extensions.openai.utils import debug_msg -from modules import shared - -import cgi -import speech_recognition as sr -from pydub import AudioSegment - -params = { - # default params - 'port': 5001, - 'embedding_device': 'cpu', - 'embedding_model': 'all-mpnet-base-v2', - - # optional params - 'sd_webui_url': '', - 'debug': 0 -} - -class Handler(BaseHTTPRequestHandler): - def 
send_access_control_headers(self): - self.send_header("Access-Control-Allow-Origin", "*") - self.send_header("Access-Control-Allow-Credentials", "true") - self.send_header( - "Access-Control-Allow-Methods", - "GET,HEAD,OPTIONS,POST,PUT" - ) - self.send_header( - "Access-Control-Allow-Headers", - "Origin, Accept, X-Requested-With, Content-Type, " - "Access-Control-Request-Method, Access-Control-Request-Headers, " - "Authorization" - ) - - def do_OPTIONS(self): - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - self.wfile.write("OK".encode('utf-8')) - - def start_sse(self): - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'text/event-stream') - self.send_header('Cache-Control', 'no-cache') - # self.send_header('Connection', 'keep-alive') - self.end_headers() - - def send_sse(self, chunk: dict): - response = 'data: ' + json.dumps(chunk) + '\r\n\r\n' - debug_msg(response[:-4]) - self.wfile.write(response.encode('utf-8')) - - def end_sse(self): - response = 'data: [DONE]\r\n\r\n' - debug_msg(response[:-4]) - self.wfile.write(response.encode('utf-8')) - - def return_json(self, ret: dict, code: int = 200, no_debug=False): - self.send_response(code) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - - response = json.dumps(ret) - r_utf8 = response.encode('utf-8') - - self.send_header('Content-Length', str(len(r_utf8))) - self.end_headers() - - self.wfile.write(r_utf8) - if not no_debug: - debug_msg(r_utf8) - - def openai_error(self, message, code=500, error_type='APIError', param='', internal_message=''): - - error_resp = { - 'error': { - 'message': message, - 'code': code, - 'type': error_type, - 'param': param, - } - } - if internal_message: - print(error_type, message) - print(internal_message) - # error_resp['internal_message'] = internal_message - - self.return_json(error_resp, code) - - def openai_error_handler(func): - def wrapper(self): - try: - func(self) - except InvalidRequestError as e: - self.openai_error(e.message, e.code, e.__class__.__name__, e.param, internal_message=e.internal_message) - except OpenAIError as e: - self.openai_error(e.message, e.code, e.__class__.__name__, internal_message=e.internal_message) - except Exception as e: - self.openai_error(repr(e), 500, 'OpenAIError', internal_message=traceback.format_exc()) - - return wrapper - - @openai_error_handler - def do_GET(self): - debug_msg(self.requestline) - debug_msg(self.headers) - - if self.path.startswith('/v1/engines') or self.path.startswith('/v1/models'): - is_legacy = 'engines' in self.path - is_list = self.path.split('?')[0].split('#')[0] in ['/v1/engines', '/v1/models'] - if is_legacy and not is_list: - model_name = self.path[self.path.find('/v1/engines/') + len('/v1/engines/'):] - resp = OAImodels.load_model(model_name) - elif is_list: - resp = OAImodels.list_models(is_legacy) - else: - model_name = self.path[len('/v1/models/'):] - resp = OAImodels.model_info(model_name) - - self.return_json(resp) - - elif '/billing/usage' in self.path: - # Ex. 
/v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31 - self.return_json({"total_usage": 0}, no_debug=True) - - else: - self.send_error(404) - - @openai_error_handler - def do_POST(self): - - if '/v1/audio/transcriptions' in self.path: - r = sr.Recognizer() - - # Parse the form data - form = cgi.FieldStorage( - fp=self.rfile, - headers=self.headers, - environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']} - ) - - audio_file = form['file'].file - audio_data = AudioSegment.from_file(audio_file) - - # Convert AudioSegment to raw data - raw_data = audio_data.raw_data - - # Create AudioData object - audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width) - whipser_language = form.getvalue('language', None) - whipser_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny - - transcription = {"text": ""} - - try: - transcription["text"] = r.recognize_whisper(audio_data, language=whipser_language, model=whipser_model) - except sr.UnknownValueError: - print("Whisper could not understand audio") - transcription["text"] = "Whisper could not understand audio UnknownValueError" - except sr.RequestError as e: - print("Could not request results from Whisper", e) - transcription["text"] = "Whisper could not understand audio RequestError" - - self.return_json(transcription, no_debug=True) - return - - debug_msg(self.requestline) - debug_msg(self.headers) - - content_length = self.headers.get('Content-Length') - transfer_encoding = self.headers.get('Transfer-Encoding') - - if content_length: - body = json.loads(self.rfile.read(int(content_length)).decode('utf-8')) - elif transfer_encoding == 'chunked': - chunks = [] - while True: - chunk_size = int(self.rfile.readline(), 16) # Read the chunk size - if chunk_size == 0: - break # End of chunks - chunks.append(self.rfile.read(chunk_size)) - self.rfile.readline() # Consume the trailing newline after each chunk - body = json.loads(b''.join(chunks).decode('utf-8')) - else: - self.send_response(400, "Bad Request: Either Content-Length or Transfer-Encoding header expected.") - self.end_headers() - return - - debug_msg(body) - - if '/completions' in self.path or '/generate' in self.path: - - if not shared.model: - raise ServiceUnavailableError("No model loaded.") - - is_legacy = '/generate' in self.path - is_streaming = body.get('stream', False) - - if is_streaming: - self.start_sse() - - response = [] - if 'chat' in self.path: - response = OAIcompletions.stream_chat_completions(body, is_legacy=is_legacy) - else: - response = OAIcompletions.stream_completions(body, is_legacy=is_legacy) - - for resp in response: - self.send_sse(resp) - - self.end_sse() - - else: - response = '' - if 'chat' in self.path: - response = OAIcompletions.chat_completions(body, is_legacy=is_legacy) - else: - response = OAIcompletions.completions(body, is_legacy=is_legacy) - - self.return_json(response) - - elif '/edits' in self.path: - # deprecated - - if not shared.model: - raise ServiceUnavailableError("No model loaded.") - - req_params = get_default_req_params() - - instruction = body['instruction'] - input = body.get('input', '') - temperature = clamp(default(body, 'temperature', req_params['temperature']), 0.001, 1.999) # fixup absolute 0.0 - top_p = clamp(default(body, 'top_p', req_params['top_p']), 0.001, 1.0) - - response = OAIedits.edits(instruction, input, temperature, top_p) - - self.return_json(response) - - elif '/images/generations' in self.path: - if 
not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')): - raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.") - - prompt = body['prompt'] - size = default(body, 'size', '1024x1024') - response_format = default(body, 'response_format', 'url') # or b64_json - n = default(body, 'n', 1) # ignore the batch limits of max 10 - - response = OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n) - - self.return_json(response, no_debug=True) - - elif '/embeddings' in self.path: - encoding_format = body.get('encoding_format', '') - - input = body.get('input', body.get('text', '')) - if not input: - raise InvalidRequestError("Missing required argument input", params='input') - - if type(input) is str: - input = [input] - - response = OAIembeddings.embeddings(input, encoding_format) - - self.return_json(response, no_debug=True) - - elif '/moderations' in self.path: - input = body['input'] - if not input: - raise InvalidRequestError("Missing required argument input", params='input') - - response = OAImoderations.moderations(input) - - self.return_json(response, no_debug=True) - - elif self.path == '/api/v1/token-count': - # NOT STANDARD. lifted from the api extension, but it's still very useful to calculate tokenized length client side. - response = token_count(body['prompt']) - - self.return_json(response, no_debug=True) - - elif self.path == '/api/v1/token/encode': - # NOT STANDARD. needed to support logit_bias, logprobs and token arrays for native models - encoding_format = body.get('encoding_format', '') - - response = token_encode(body['input'], encoding_format) - - self.return_json(response, no_debug=True) - - elif self.path == '/api/v1/token/decode': - # NOT STANDARD. needed to support logit_bias, logprobs and token arrays for native models - encoding_format = body.get('encoding_format', '') - - response = token_decode(body['input'], encoding_format) - - self.return_json(response, no_debug=True) - - else: - self.send_error(404) - - -def run_server(): - port = int(os.environ.get('OPENEDAI_PORT', params.get('port', 5001))) - server_addr = ('0.0.0.0' if shared.args.listen else '127.0.0.1', port) - server = ThreadingHTTPServer(server_addr, Handler) - - ssl_certfile=os.environ.get('OPENEDAI_CERT_PATH', shared.args.ssl_certfile) - ssl_keyfile=os.environ.get('OPENEDAI_KEY_PATH', shared.args.ssl_keyfile) - ssl_verify=True if (ssl_keyfile and ssl_certfile) else False - if ssl_verify: - context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) - context.load_cert_chain(ssl_certfile, ssl_keyfile) - server.socket = context.wrap_socket(server.socket, server_side=True) - - if shared.args.share: - try: - from flask_cloudflared import _run_cloudflared - public_url = _run_cloudflared(port, port + 1) - print(f'OpenAI compatible API ready at: OPENAI_API_BASE={public_url}/v1') - except ImportError: - print('You should install flask_cloudflared manually') - else: - if ssl_verify: - print(f'OpenAI compatible API ready at: OPENAI_API_BASE=https://{server_addr[0]}:{server_addr[1]}/v1') - else: - print(f'OpenAI compatible API ready at: OPENAI_API_BASE=http://{server_addr[0]}:{server_addr[1]}/v1') - - server.serve_forever() - - -def setup(): - Thread(target=run_server, daemon=True).start() diff --git a/spaces/leonardoboulitreau/aitmospheric/README.md b/spaces/leonardoboulitreau/aitmospheric/README.md deleted file mode 100644 index e1db22abcfaf6d2bd7bc9008c322362451f4c268..0000000000000000000000000000000000000000 --- 
a/spaces/leonardoboulitreau/aitmospheric/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Aitmospheric -emoji: 🐨 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Chery Spms V1 1 1 Setup.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Chery Spms V1 1 1 Setup.md deleted file mode 100644 index 14966289fc663cfc64dcaa0f2c43fec4395be1ac..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Chery Spms V1 1 1 Setup.md +++ /dev/null @@ -1,39 +0,0 @@ - -

What is Chery SPMS v1.1.1 setup and how to use it?

Chery SPMS v1.1.1 setup is a software program developed by Chery, a Chinese automobile manufacturer. It is used to diagnose and repair Chery vehicles, as well as to update their software and firmware. Chery SPMS v1.1.1 setup is compatible with Windows operating systems and requires a USB cable to connect to the vehicle's OBD port.

In this article, we explain what Chery SPMS v1.1.1 setup can do, how to download and install it, and how to use it to perform various tasks on your Chery vehicle.

Chery SPMS v1.1.1 setup

Download Zip: https://bytlly.com/2uGvNL

What can Chery SPMS v1.1.1 setup do?

Chery SPMS v1.1.1 setup is a comprehensive tool that can help you diagnose and fix problems with your Chery vehicle, as well as update its software and firmware. Its main features are listed below (a generic OBD-II code sketch after the list illustrates the fault-code reading part):

• Read and clear fault codes from various systems, such as engine, transmission, ABS, airbag, etc.
• View live data and freeze frame data from sensors and actuators.
• Perform actuator tests and adjustments.
• Reset service intervals and maintenance reminders.
• Update the software and firmware of the vehicle's ECU, TCU, BCM, etc.
• Calibrate the steering angle sensor, throttle position sensor, etc.
• Program keys and the immobilizer.
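The Chery SPMS application itself is a closed Windows GUI, so its internal API is not documented here. As a rough illustration of what "reading fault codes over the OBD port" involves, the minimal Python sketch below uses the third-party python-OBD library with a generic ELM327-style USB adapter; the library, the adapter, and the example port name are assumptions made for illustration and are not part of the Chery tool.

```python
import obd  # third-party "python-OBD" package (pip install obd) -- not part of Chery SPMS

# Connect to a generic ELM327-style USB adapter. python-OBD auto-scans serial ports;
# a port can also be given explicitly, e.g. obd.OBD("COM3") (hypothetical port name).
connection = obd.OBD()

if connection.is_connected():
    # Mode 03 request: stored diagnostic trouble codes (DTCs)
    response = connection.query(obd.commands.GET_DTC)
    for code, description in (response.value or []):
        print(f"{code}: {description}")

    # Live data example: current engine RPM (Mode 01, PID 0x0C)
    rpm = connection.query(obd.commands.RPM)
    print("Engine RPM:", rpm.value)
else:
    print("No OBD-II adapter found")
```

A manufacturer tool such as Chery SPMS layers much more on top of these generic OBD-II services (firmware updates, key programming, calibrations), which typically go through manufacturer-specific protocols rather than the standard modes shown here.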

How to download and install Chery SPMS v1.1.1 setup?

To download and install Chery SPMS v1.1.1 setup, follow these steps:

1. Go to the official website of Chery (https://www.cheryinternational.com/) and register an account.
2. Log in to your account and go to the download section.
3. Find the link for Chery SPMS v1.1.1 setup and click on it. You will be redirected to a third-party website (urluso.com) where you can download the file.
4. Save the file (Chery-SPMS(V1.1.1)setup.exe) on your computer and run it as administrator.
5. Follow the instructions on the screen to complete the installation. You may need to restart your computer afterwards.

How to use Chery SPMS v1.1.1 setup?

To use Chery SPMS v1.1.1 setup, follow these steps:

1. Connect your Chery vehicle to your computer using a USB cable. Make sure the ignition is on but the engine is off.
2. Launch Chery SPMS v1.1.1 setup on your computer and select your vehicle model and year.
3. Select the system you want to diagnose or update from the menu.
4. Follow the instructions on the screen to perform the desired task. You may need to enter some information or confirm some actions during the process.
5. When you are done, disconnect your vehicle from your computer and turn off the ignition.

Note: Before using Chery SPMS v1.1.1 setup, make sure you have a stable internet connection and a fully charged battery in your vehicle. Do not interrupt the process or turn off your computer or vehicle while the tool is running; doing so may damage your vehicle or its systems.

                    -

                    \ No newline at end of file diff --git a/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/nn_represent.py b/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/nn_represent.py deleted file mode 100644 index 4e1cf12a5872393be60d371fd0051c6066890076..0000000000000000000000000000000000000000 --- a/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/nn_represent.py +++ /dev/null @@ -1,289 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Fri Jun 12 10:02:20 2020 - -@author: luol2 -""" -import time -import os, sys -import numpy as np -from tensorflow.keras.preprocessing.sequence import pad_sequences -# from keras_bert import Tokenizer -from transformers import AutoTokenizer - - -class CNN_RepresentationLayer(object): - - - def __init__(self, wordvec_file, vocab_file=[],\ - vec_size=50, word_size=10000, frequency=10000): - - ''' - wordvec_file : the file path of word embedding - vec_size : the dimension size of word vector - learned by word2vec tool - - word_size : the size of word vocabulary - - frequency : the threshold for the words left according to - their frequency appeared in the text - for example, when frequency is 10000, the most - frequent appeared 10000 words are considered - - ''' - #load word embedding - file = open(wordvec_file) - first_line = file.readline().strip() - file.close() - self.word_size = int(first_line.split()[0]) - self.vec_size = int(first_line.split()[1]) - self.frequency = frequency - - if self.frequency>self.word_size: - self.vec_table = np.zeros((self.word_size + 2, self.vec_size)) - else: - self.vec_table = np.zeros((self.frequency + 2, self.vec_size)) - self.word_2_index = {} - self.load_wordvecs(wordvec_file) - - #other fea - self.char_2_index={} - self.char_table_size=0 - if 'char' in vocab_file.keys(): - self.load_fea_vocab(vocab_file['char'],self.char_2_index) - self.char_table_size=len(self.char_2_index) - #print(self.char_table_size) - #print(self.char_2_index) - - self.label_2_index={} - self.label_table_size=0 - if 'label' in vocab_file.keys(): - self.load_label_vocab(vocab_file['label'],self.label_2_index) - self.label_table_size=len(self.label_2_index) - #print(self.label_table_size) - #print(self.char_2_index) - - self.pos_2_index={} - self.pos_table_size=0 - if 'pos' in vocab_file.keys(): - self.load_fea_vocab(vocab_file['pos'],self.pos_2_index) - self.pos_table_size=len(self.pos_2_index) - #print(self.pos_table_size) - - - - def load_wordvecs(self, wordvec_file): - - file = open(wordvec_file,'r',encoding='utf-8') - file.readline() - #print(self.word_size) - #print(self.vec_size) - row = 0 - self.word_2_index['padding_0'] = row #oov-zero vector - row+=1 - for line in file: - if row <= self.word_size and row <= self.frequency: - line_split = line.strip().split(' ') - self.word_2_index[line_split[0]] = row - for col in range(self.vec_size): - self.vec_table[row][col] = float(line_split[col + 1]) - row += 1 - else: - break - - self.word_2_index['sparse_vectors'] = row #oov-zero vector - file.close() - - def load_fea_vocab(self,fea_file,fea_index): - fin=open(fea_file,'r',encoding='utf-8') - i=0 - fea_index['padding_0']=i - i+=1 - fea_index['oov_padding']=i - i+=1 - for line in fin: - fea_index[line.strip()]=i - i+=1 - fin.close() - - def load_label_vocab(self,fea_file,fea_index): - fin=open(fea_file,'r',encoding='utf-8') - i=0 - for line in fin: - fea_index[line.strip()]=i - i+=1 - fin.close() - - ''' - def generate_label_list(self,labels): - label_list=[] - - for label in labels: - temp_label=[0]*self.label_table_size - 
temp_label[self.label_2_index[label]]=1 - label_list.append(temp_label) - return label_list - ''' - def generate_label_list(self,labels): - sparse_labels=[] - for ele in labels: - sparse_labels.append(self.label_2_index[ele]) - return(sparse_labels) - - def represent_instances_all_feas(self, instances, labels, word_max_len=100, char_max_len=50, training=False): - - x_text_list=[] - x_word_list=[] - x_char_list=[] - x_lemma_list=[] - x_pos_list=[] - - y_list=[] - - for sentence in instances: - sentence_list=[] - sentence_word_list=[] - sentence_lemma_list=[] - sentence_pos_list=[] - sentence_text=[] - for j in range(0,len(sentence)): - word=sentence[j] - #char fea - char_list=[0]*char_max_len - for i in range(len(word[0])): - if i "$@" - -sinclude $(DEPFILES) - -$(LIB_TARGET): $(OBJS) - @echo "[link] $(LIB_TARGET) ..." - @$(CXX) $(OBJS) -o $@ $(CXXFLAGS) $(LDFLAGS) - -clean: - rm -rf $(OBJ_DIR) $(LIB_TARGET) - -rebuild: - +@make clean - +@make - -# vim:ft=make -# diff --git a/spaces/lusea/rvc-Qinggan/README.md b/spaces/lusea/rvc-Qinggan/README.md deleted file mode 100644 index af72a9ccbf12353b281211aa0907f2a58082705f..0000000000000000000000000000000000000000 --- a/spaces/lusea/rvc-Qinggan/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: RVC TalkTalkAI -emoji: 🌊 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: mit -duplicated_from: kevinwang676/rvc-mlbb-v2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_eigen.cpp b/spaces/ma-xu/LIVE/pybind11/tests/test_eigen.cpp deleted file mode 100644 index 56aa1a4a6fe6b60a1d85c54cd40ee70ddde3528f..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_eigen.cpp +++ /dev/null @@ -1,327 +0,0 @@ -/* - tests/eigen.cpp -- automatic conversion of Eigen types - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#include "pybind11_tests.h" -#include "constructor_stats.h" -#include -#include - -#if defined(_MSC_VER) -# pragma warning(disable: 4996) // C4996: std::unary_negation is deprecated -#endif - -#include - -using MatrixXdR = Eigen::Matrix; - - - -// Sets/resets a testing reference matrix to have values of 10*r + c, where r and c are the -// (1-based) row/column number. -template void reset_ref(M &x) { - for (int i = 0; i < x.rows(); i++) for (int j = 0; j < x.cols(); j++) - x(i, j) = 11 + 10*i + j; -} - -// Returns a static, column-major matrix -Eigen::MatrixXd &get_cm() { - static Eigen::MatrixXd *x; - if (!x) { - x = new Eigen::MatrixXd(3, 3); - reset_ref(*x); - } - return *x; -} -// Likewise, but row-major -MatrixXdR &get_rm() { - static MatrixXdR *x; - if (!x) { - x = new MatrixXdR(3, 3); - reset_ref(*x); - } - return *x; -} -// Resets the values of the static matrices returned by get_cm()/get_rm() -void reset_refs() { - reset_ref(get_cm()); - reset_ref(get_rm()); -} - -// Returns element 2,1 from a matrix (used to test copy/nocopy) -double get_elem(Eigen::Ref m) { return m(2, 1); }; - - -// Returns a matrix with 10*r + 100*c added to each matrix element (to help test that the matrix -// reference is referencing rows/columns correctly). 
-template Eigen::MatrixXd adjust_matrix(MatrixArgType m) { - Eigen::MatrixXd ret(m); - for (int c = 0; c < m.cols(); c++) for (int r = 0; r < m.rows(); r++) - ret(r, c) += 10*r + 100*c; - return ret; -} - -struct CustomOperatorNew { - CustomOperatorNew() = default; - - Eigen::Matrix4d a = Eigen::Matrix4d::Zero(); - Eigen::Matrix4d b = Eigen::Matrix4d::Identity(); - - EIGEN_MAKE_ALIGNED_OPERATOR_NEW; -}; - -TEST_SUBMODULE(eigen, m) { - using FixedMatrixR = Eigen::Matrix; - using FixedMatrixC = Eigen::Matrix; - using DenseMatrixR = Eigen::Matrix; - using DenseMatrixC = Eigen::Matrix; - using FourRowMatrixC = Eigen::Matrix; - using FourColMatrixC = Eigen::Matrix; - using FourRowMatrixR = Eigen::Matrix; - using FourColMatrixR = Eigen::Matrix; - using SparseMatrixR = Eigen::SparseMatrix; - using SparseMatrixC = Eigen::SparseMatrix; - - // various tests - m.def("double_col", [](const Eigen::VectorXf &x) -> Eigen::VectorXf { return 2.0f * x; }); - m.def("double_row", [](const Eigen::RowVectorXf &x) -> Eigen::RowVectorXf { return 2.0f * x; }); - m.def("double_complex", [](const Eigen::VectorXcf &x) -> Eigen::VectorXcf { return 2.0f * x; }); - m.def("double_threec", [](py::EigenDRef x) { x *= 2; }); - m.def("double_threer", [](py::EigenDRef x) { x *= 2; }); - m.def("double_mat_cm", [](Eigen::MatrixXf x) -> Eigen::MatrixXf { return 2.0f * x; }); - m.def("double_mat_rm", [](DenseMatrixR x) -> DenseMatrixR { return 2.0f * x; }); - - // test_eigen_ref_to_python - // Different ways of passing via Eigen::Ref; the first and second are the Eigen-recommended - m.def("cholesky1", [](Eigen::Ref x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); - m.def("cholesky2", [](const Eigen::Ref &x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); - m.def("cholesky3", [](const Eigen::Ref &x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); - m.def("cholesky4", [](Eigen::Ref x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); - - // test_eigen_ref_mutators - // Mutators: these add some value to the given element using Eigen, but Eigen should be mapping into - // the numpy array data and so the result should show up there. There are three versions: one that - // works on a contiguous-row matrix (numpy's default), one for a contiguous-column matrix, and one - // for any matrix. 
- auto add_rm = [](Eigen::Ref x, int r, int c, double v) { x(r,c) += v; }; - auto add_cm = [](Eigen::Ref x, int r, int c, double v) { x(r,c) += v; }; - - // Mutators (Eigen maps into numpy variables): - m.def("add_rm", add_rm); // Only takes row-contiguous - m.def("add_cm", add_cm); // Only takes column-contiguous - // Overloaded versions that will accept either row or column contiguous: - m.def("add1", add_rm); - m.def("add1", add_cm); - m.def("add2", add_cm); - m.def("add2", add_rm); - // This one accepts a matrix of any stride: - m.def("add_any", [](py::EigenDRef x, int r, int c, double v) { x(r,c) += v; }); - - // Return mutable references (numpy maps into eigen variables) - m.def("get_cm_ref", []() { return Eigen::Ref(get_cm()); }); - m.def("get_rm_ref", []() { return Eigen::Ref(get_rm()); }); - // The same references, but non-mutable (numpy maps into eigen variables, but is !writeable) - m.def("get_cm_const_ref", []() { return Eigen::Ref(get_cm()); }); - m.def("get_rm_const_ref", []() { return Eigen::Ref(get_rm()); }); - - m.def("reset_refs", reset_refs); // Restores get_{cm,rm}_ref to original values - - // Increments and returns ref to (same) matrix - m.def("incr_matrix", [](Eigen::Ref m, double v) { - m += Eigen::MatrixXd::Constant(m.rows(), m.cols(), v); - return m; - }, py::return_value_policy::reference); - - // Same, but accepts a matrix of any strides - m.def("incr_matrix_any", [](py::EigenDRef m, double v) { - m += Eigen::MatrixXd::Constant(m.rows(), m.cols(), v); - return m; - }, py::return_value_policy::reference); - - // Returns an eigen slice of even rows - m.def("even_rows", [](py::EigenDRef m) { - return py::EigenDMap( - m.data(), (m.rows() + 1) / 2, m.cols(), - py::EigenDStride(m.outerStride(), 2 * m.innerStride())); - }, py::return_value_policy::reference); - - // Returns an eigen slice of even columns - m.def("even_cols", [](py::EigenDRef m) { - return py::EigenDMap( - m.data(), m.rows(), (m.cols() + 1) / 2, - py::EigenDStride(2 * m.outerStride(), m.innerStride())); - }, py::return_value_policy::reference); - - // Returns diagonals: a vector-like object with an inner stride != 1 - m.def("diagonal", [](const Eigen::Ref &x) { return x.diagonal(); }); - m.def("diagonal_1", [](const Eigen::Ref &x) { return x.diagonal<1>(); }); - m.def("diagonal_n", [](const Eigen::Ref &x, int index) { return x.diagonal(index); }); - - // Return a block of a matrix (gives non-standard strides) - m.def("block", [](const Eigen::Ref &x, int start_row, int start_col, int block_rows, int block_cols) { - return x.block(start_row, start_col, block_rows, block_cols); - }); - - // test_eigen_return_references, test_eigen_keepalive - // return value referencing/copying tests: - class ReturnTester { - Eigen::MatrixXd mat = create(); - public: - ReturnTester() { print_created(this); } - ~ReturnTester() { print_destroyed(this); } - static Eigen::MatrixXd create() { return Eigen::MatrixXd::Ones(10, 10); } - static const Eigen::MatrixXd createConst() { return Eigen::MatrixXd::Ones(10, 10); } - Eigen::MatrixXd &get() { return mat; } - Eigen::MatrixXd *getPtr() { return &mat; } - const Eigen::MatrixXd &view() { return mat; } - const Eigen::MatrixXd *viewPtr() { return &mat; } - Eigen::Ref ref() { return mat; } - Eigen::Ref refConst() { return mat; } - Eigen::Block block(int r, int c, int nrow, int ncol) { return mat.block(r, c, nrow, ncol); } - Eigen::Block blockConst(int r, int c, int nrow, int ncol) const { return mat.block(r, c, nrow, ncol); } - py::EigenDMap corners() { return 
py::EigenDMap(mat.data(), - py::EigenDStride(mat.outerStride() * (mat.outerSize()-1), mat.innerStride() * (mat.innerSize()-1))); } - py::EigenDMap cornersConst() const { return py::EigenDMap(mat.data(), - py::EigenDStride(mat.outerStride() * (mat.outerSize()-1), mat.innerStride() * (mat.innerSize()-1))); } - }; - using rvp = py::return_value_policy; - py::class_(m, "ReturnTester") - .def(py::init<>()) - .def_static("create", &ReturnTester::create) - .def_static("create_const", &ReturnTester::createConst) - .def("get", &ReturnTester::get, rvp::reference_internal) - .def("get_ptr", &ReturnTester::getPtr, rvp::reference_internal) - .def("view", &ReturnTester::view, rvp::reference_internal) - .def("view_ptr", &ReturnTester::view, rvp::reference_internal) - .def("copy_get", &ReturnTester::get) // Default rvp: copy - .def("copy_view", &ReturnTester::view) // " - .def("ref", &ReturnTester::ref) // Default for Ref is to reference - .def("ref_const", &ReturnTester::refConst) // Likewise, but const - .def("ref_safe", &ReturnTester::ref, rvp::reference_internal) - .def("ref_const_safe", &ReturnTester::refConst, rvp::reference_internal) - .def("copy_ref", &ReturnTester::ref, rvp::copy) - .def("copy_ref_const", &ReturnTester::refConst, rvp::copy) - .def("block", &ReturnTester::block) - .def("block_safe", &ReturnTester::block, rvp::reference_internal) - .def("block_const", &ReturnTester::blockConst, rvp::reference_internal) - .def("copy_block", &ReturnTester::block, rvp::copy) - .def("corners", &ReturnTester::corners, rvp::reference_internal) - .def("corners_const", &ReturnTester::cornersConst, rvp::reference_internal) - ; - - // test_special_matrix_objects - // Returns a DiagonalMatrix with diagonal (1,2,3,...) - m.def("incr_diag", [](int k) { - Eigen::DiagonalMatrix m(k); - for (int i = 0; i < k; i++) m.diagonal()[i] = i+1; - return m; - }); - - // Returns a SelfAdjointView referencing the lower triangle of m - m.def("symmetric_lower", [](const Eigen::MatrixXi &m) { - return m.selfadjointView(); - }); - // Returns a SelfAdjointView referencing the lower triangle of m - m.def("symmetric_upper", [](const Eigen::MatrixXi &m) { - return m.selfadjointView(); - }); - - // Test matrix for various functions below. 
- Eigen::MatrixXf mat(5, 6); - mat << 0, 3, 0, 0, 0, 11, - 22, 0, 0, 0, 17, 11, - 7, 5, 0, 1, 0, 11, - 0, 0, 0, 0, 0, 11, - 0, 0, 14, 0, 8, 11; - - // test_fixed, and various other tests - m.def("fixed_r", [mat]() -> FixedMatrixR { return FixedMatrixR(mat); }); - m.def("fixed_r_const", [mat]() -> const FixedMatrixR { return FixedMatrixR(mat); }); - m.def("fixed_c", [mat]() -> FixedMatrixC { return FixedMatrixC(mat); }); - m.def("fixed_copy_r", [](const FixedMatrixR &m) -> FixedMatrixR { return m; }); - m.def("fixed_copy_c", [](const FixedMatrixC &m) -> FixedMatrixC { return m; }); - // test_mutator_descriptors - m.def("fixed_mutator_r", [](Eigen::Ref) {}); - m.def("fixed_mutator_c", [](Eigen::Ref) {}); - m.def("fixed_mutator_a", [](py::EigenDRef) {}); - // test_dense - m.def("dense_r", [mat]() -> DenseMatrixR { return DenseMatrixR(mat); }); - m.def("dense_c", [mat]() -> DenseMatrixC { return DenseMatrixC(mat); }); - m.def("dense_copy_r", [](const DenseMatrixR &m) -> DenseMatrixR { return m; }); - m.def("dense_copy_c", [](const DenseMatrixC &m) -> DenseMatrixC { return m; }); - // test_sparse, test_sparse_signature - m.def("sparse_r", [mat]() -> SparseMatrixR { return Eigen::SparseView(mat); }); - m.def("sparse_c", [mat]() -> SparseMatrixC { return Eigen::SparseView(mat); }); - m.def("sparse_copy_r", [](const SparseMatrixR &m) -> SparseMatrixR { return m; }); - m.def("sparse_copy_c", [](const SparseMatrixC &m) -> SparseMatrixC { return m; }); - // test_partially_fixed - m.def("partial_copy_four_rm_r", [](const FourRowMatrixR &m) -> FourRowMatrixR { return m; }); - m.def("partial_copy_four_rm_c", [](const FourColMatrixR &m) -> FourColMatrixR { return m; }); - m.def("partial_copy_four_cm_r", [](const FourRowMatrixC &m) -> FourRowMatrixC { return m; }); - m.def("partial_copy_four_cm_c", [](const FourColMatrixC &m) -> FourColMatrixC { return m; }); - - // test_cpp_casting - // Test that we can cast a numpy object to a Eigen::MatrixXd explicitly - m.def("cpp_copy", [](py::handle m) { return m.cast()(1, 0); }); - m.def("cpp_ref_c", [](py::handle m) { return m.cast>()(1, 0); }); - m.def("cpp_ref_r", [](py::handle m) { return m.cast>()(1, 0); }); - m.def("cpp_ref_any", [](py::handle m) { return m.cast>()(1, 0); }); - - - // test_nocopy_wrapper - // Test that we can prevent copying into an argument that would normally copy: First a version - // that would allow copying (if types or strides don't match) for comparison: - m.def("get_elem", &get_elem); - // Now this alternative that calls the tells pybind to fail rather than copy: - m.def("get_elem_nocopy", [](Eigen::Ref m) -> double { return get_elem(m); }, - py::arg().noconvert()); - // Also test a row-major-only no-copy const ref: - m.def("get_elem_rm_nocopy", [](Eigen::Ref> &m) -> long { return m(2, 1); }, - py::arg().noconvert()); - - // test_issue738 - // Issue #738: 1xN or Nx1 2D matrices were neither accepted nor properly copied with an - // incompatible stride value on the length-1 dimension--but that should be allowed (without - // requiring a copy!) because the stride value can be safely ignored on a size-1 dimension. - m.def("iss738_f1", &adjust_matrix &>, py::arg().noconvert()); - m.def("iss738_f2", &adjust_matrix> &>, py::arg().noconvert()); - - // test_issue1105 - // Issue #1105: when converting from a numpy two-dimensional (Nx1) or (1xN) value into a dense - // eigen Vector or RowVector, the argument would fail to load because the numpy copy would fail: - // numpy won't broadcast a Nx1 into a 1-dimensional vector. 
- m.def("iss1105_col", [](Eigen::VectorXd) { return true; }); - m.def("iss1105_row", [](Eigen::RowVectorXd) { return true; }); - - // test_named_arguments - // Make sure named arguments are working properly: - m.def("matrix_multiply", [](const py::EigenDRef A, const py::EigenDRef B) - -> Eigen::MatrixXd { - if (A.cols() != B.rows()) throw std::domain_error("Nonconformable matrices!"); - return A * B; - }, py::arg("A"), py::arg("B")); - - // test_custom_operator_new - py::class_(m, "CustomOperatorNew") - .def(py::init<>()) - .def_readonly("a", &CustomOperatorNew::a) - .def_readonly("b", &CustomOperatorNew::b); - - // test_eigen_ref_life_support - // In case of a failure (the caster's temp array does not live long enough), creating - // a new array (np.ones(10)) increases the chances that the temp array will be garbage - // collected and/or that its memory will be overridden with different values. - m.def("get_elem_direct", [](Eigen::Ref v) { - py::module::import("numpy").attr("ones")(10); - return v(5); - }); - m.def("get_elem_indirect", [](std::vector> v) { - py::module::import("numpy").attr("ones")(10); - return v[0](5); - }); -} diff --git a/spaces/ma-xu/LIVE/pydiffvg_tensorflow/render_tensorflow.py b/spaces/ma-xu/LIVE/pydiffvg_tensorflow/render_tensorflow.py deleted file mode 100644 index 3a7efaa3fddef32fc2619c3fcaa88881354a7e9f..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pydiffvg_tensorflow/render_tensorflow.py +++ /dev/null @@ -1,664 +0,0 @@ -import os -import tensorflow as tf -import diffvg -import pydiffvg_tensorflow as pydiffvg -import time -from enum import IntEnum -import warnings - -print_timing = False -__EMPTY_TENSOR = tf.constant([]) - -def is_empty_tensor(tensor): - return tf.equal(tf.size(tensor), 0) - -def set_print_timing(val): - global print_timing - print_timing=val - -class OutputType(IntEnum): - color = 1 - sdf = 2 - -class ShapeType: - __shapetypes = [ - diffvg.ShapeType.circle, - diffvg.ShapeType.ellipse, - diffvg.ShapeType.path, - diffvg.ShapeType.rect - ] - - @staticmethod - def asTensor(type): - for i in range(len(ShapeType.__shapetypes)): - if ShapeType.__shapetypes[i] == type: - return tf.constant(i) - - @staticmethod - def asShapeType(index: tf.Tensor): - if is_empty_tensor(index): - return None - try: - type = ShapeType.__shapetypes[index] - except IndexError: - print(f'{index} is out of range: [0, {len(ShapeType.__shapetypes)})') - import sys - sys.exit() - else: - return type - -class ColorType: - __colortypes = [ - diffvg.ColorType.constant, - diffvg.ColorType.linear_gradient, - diffvg.ColorType.radial_gradient - ] - - @staticmethod - def asTensor(type): - for i in range(len(ColorType.__colortypes)): - if ColorType.__colortypes[i] == type: - return tf.constant(i) - - @staticmethod - def asColorType(index: tf.Tensor): - if is_empty_tensor(index): - return None - try: - type = ColorType.__colortypes[index] - except IndexError: - print(f'{index} is out of range: [0, {len(ColorType.__colortypes)})') - import sys - sys.exit() - else: - return type - -class FilterType: - __filtertypes = [ - diffvg.FilterType.box, - diffvg.FilterType.tent, - diffvg.FilterType.hann - ] - - @staticmethod - def asTensor(type): - for i in range(len(FilterType.__filtertypes)): - if FilterType.__filtertypes[i] == type: - return tf.constant(i) - - @staticmethod - def asFilterType(index: tf.Tensor): - if is_empty_tensor(index): - return None - try: - type = FilterType.__filtertypes[index] - except IndexError: - print(f'{index} is out of range: [0, 
{len(FilterType.__filtertypes)})') - import sys - sys.exit() - else: - return type - -def serialize_scene(canvas_width, - canvas_height, - shapes, - shape_groups, - filter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, - radius = tf.constant(0.5)), - output_type = OutputType.color, - use_prefiltering = False): - """ - Given a list of shapes, convert them to a linear list of argument, - so that we can use it in TF. - """ - with tf.device('/device:cpu:' + str(pydiffvg.get_cpu_device_id())): - num_shapes = len(shapes) - num_shape_groups = len(shape_groups) - args = [] - args.append(tf.constant(canvas_width)) - args.append(tf.constant(canvas_height)) - args.append(tf.constant(num_shapes)) - args.append(tf.constant(num_shape_groups)) - args.append(tf.constant(output_type)) - args.append(tf.constant(use_prefiltering)) - for shape in shapes: - if isinstance(shape, pydiffvg.Circle): - args.append(ShapeType.asTensor(diffvg.ShapeType.circle)) - args.append(tf.identity(shape.radius)) - args.append(tf.identity(shape.center)) - elif isinstance(shape, pydiffvg.Ellipse): - args.append(ShapeType.asTensor(diffvg.ShapeType.ellipse)) - args.append(tf.identity(shape.radius)) - args.append(tf.identity(shape.center)) - elif isinstance(shape, pydiffvg.Path): - assert(shape.points.shape[1] == 2) - args.append(ShapeType.asTensor(diffvg.ShapeType.path)) - args.append(tf.identity(shape.num_control_points)) - args.append(tf.identity(shape.points)) - args.append(tf.constant(shape.is_closed)) - args.append(tf.constant(shape.use_distance_approx)) - elif isinstance(shape, pydiffvg.Polygon): - assert(shape.points.shape[1] == 2) - args.append(ShapeType.asTensor(diffvg.ShapeType.path)) - if shape.is_closed: - args.append(tf.zeros(shape.points.shape[0], dtype = tf.int32)) - else: - args.append(tf.zeros(shape.points.shape[0] - 1, dtype = tf.int32)) - args.append(tf.identity(shape.points)) - args.append(tf.constant(shape.is_closed)) - elif isinstance(shape, pydiffvg.Rect): - args.append(ShapeType.asTensor(diffvg.ShapeType.rect)) - args.append(tf.identity(shape.p_min)) - args.append(tf.identity(shape.p_max)) - else: - assert(False) - args.append(tf.identity(shape.stroke_width)) - - for shape_group in shape_groups: - args.append(tf.identity(shape_group.shape_ids)) - # Fill color - if shape_group.fill_color is None: - args.append(__EMPTY_TENSOR) - elif tf.is_tensor(shape_group.fill_color): - args.append(ColorType.asTensor(diffvg.ColorType.constant)) - args.append(tf.identity(shape_group.fill_color)) - elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): - args.append(ColorType.asTensor(diffvg.ColorType.linear_gradient)) - args.append(tf.identity(shape_group.fill_color.begin)) - args.append(tf.identity(shape_group.fill_color.end)) - args.append(tf.identity(shape_group.fill_color.offsets)) - args.append(tf.identity(shape_group.fill_color.stop_colors)) - elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): - args.append(ColorType.asTensor(diffvg.ColorType.radial_gradient)) - args.append(tf.identity(shape_group.fill_color.center)) - args.append(tf.identity(shape_group.fill_color.radius)) - args.append(tf.identity(shape_group.fill_color.offsets)) - args.append(tf.identity(shape_group.fill_color.stop_colors)) - - if shape_group.fill_color is not None: - # go through the underlying shapes and check if they are all closed - for shape_id in shape_group.shape_ids: - if isinstance(shapes[shape_id], pydiffvg.Path): - if not shapes[shape_id].is_closed: - warnings.warn("Detected non-closed paths with fill color. 
This might causes unexpected results.", Warning) - - # Stroke color - if shape_group.stroke_color is None: - args.append(__EMPTY_TENSOR) - elif tf.is_tensor(shape_group.stroke_color): - args.append(tf.constant(0)) - args.append(tf.identity(shape_group.stroke_color)) - elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): - args.append(ColorType.asTensor(diffvg.ColorType.linear_gradient)) - args.append(tf.identity(shape_group.stroke_color.begin)) - args.append(tf.identity(shape_group.stroke_color.end)) - args.append(tf.identity(shape_group.stroke_color.offsets)) - args.append(tf.identity(shape_group.stroke_color.stop_colors)) - elif isinstance(shape_group.stroke_color, pydiffvg.RadialGradient): - args.append(ColorType.asTensor(diffvg.ColorType.radial_gradient)) - args.append(tf.identity(shape_group.stroke_color.center)) - args.append(tf.identity(shape_group.stroke_color.radius)) - args.append(tf.identity(shape_group.stroke_color.offsets)) - args.append(tf.identity(shape_group.stroke_color.stop_colors)) - args.append(tf.constant(shape_group.use_even_odd_rule)) - # Transformation - args.append(tf.identity(shape_group.shape_to_canvas)) - args.append(FilterType.asTensor(filter.type)) - args.append(tf.constant(filter.radius)) - return args - -class Context: pass - -def forward(width, - height, - num_samples_x, - num_samples_y, - seed, - *args): - """ - Forward rendering pass: given a serialized scene and output an image. - """ - # Unpack arguments - with tf.device('/device:cpu:' + str(pydiffvg.get_cpu_device_id())): - current_index = 0 - canvas_width = int(args[current_index]) - current_index += 1 - canvas_height = int(args[current_index]) - current_index += 1 - num_shapes = int(args[current_index]) - current_index += 1 - num_shape_groups = int(args[current_index]) - current_index += 1 - output_type = OutputType(int(args[current_index])) - current_index += 1 - use_prefiltering = bool(args[current_index]) - current_index += 1 - shapes = [] - shape_groups = [] - shape_contents = [] # Important to avoid GC deleting the shapes - color_contents = [] # Same as above - for shape_id in range(num_shapes): - shape_type = ShapeType.asShapeType(args[current_index]) - current_index += 1 - if shape_type == diffvg.ShapeType.circle: - radius = args[current_index] - current_index += 1 - center = args[current_index] - current_index += 1 - shape = diffvg.Circle(float(radius), - diffvg.Vector2f(float(center[0]), float(center[1]))) - elif shape_type == diffvg.ShapeType.ellipse: - radius = args[current_index] - current_index += 1 - center = args[current_index] - current_index += 1 - shape = diffvg.Ellipse(diffvg.Vector2f(float(radius[0]), float(radius[1])), - diffvg.Vector2f(float(center[0]), float(center[1]))) - elif shape_type == diffvg.ShapeType.path: - num_control_points = args[current_index] - current_index += 1 - points = args[current_index] - current_index += 1 - is_closed = args[current_index] - current_index += 1 - use_distance_approx = args[current_index] - current_index += 1 - shape = diffvg.Path(diffvg.int_ptr(pydiffvg.data_ptr(num_control_points)), - diffvg.float_ptr(pydiffvg.data_ptr(points)), - diffvg.float_ptr(0), # thickness - num_control_points.shape[0], - points.shape[0], - is_closed, - use_distance_approx) - elif shape_type == diffvg.ShapeType.rect: - p_min = args[current_index] - current_index += 1 - p_max = args[current_index] - current_index += 1 - shape = diffvg.Rect(diffvg.Vector2f(float(p_min[0]), float(p_min[1])), - diffvg.Vector2f(float(p_max[0]), float(p_max[1]))) - else: - 
assert(False) - stroke_width = args[current_index] - current_index += 1 - shapes.append(diffvg.Shape(\ - shape_type, shape.get_ptr(), float(stroke_width))) - shape_contents.append(shape) - - for shape_group_id in range(num_shape_groups): - shape_ids = args[current_index] - current_index += 1 - fill_color_type = ColorType.asColorType(args[current_index]) - current_index += 1 - if fill_color_type == diffvg.ColorType.constant: - color = args[current_index] - current_index += 1 - fill_color = diffvg.Constant(\ - diffvg.Vector4f(color[0], color[1], color[2], color[3])) - elif fill_color_type == diffvg.ColorType.linear_gradient: - beg = args[current_index] - current_index += 1 - end = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - fill_color = diffvg.LinearGradient(diffvg.Vector2f(float(beg[0]), float(beg[1])), - diffvg.Vector2f(float(end[0]), float(end[1])), - offsets.shape[0], - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - elif fill_color_type == diffvg.ColorType.radial_gradient: - center = args[current_index] - current_index += 1 - radius = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - fill_color = diffvg.RadialGradient(diffvg.Vector2f(float(center[0]), float(center[1])), - diffvg.Vector2f(float(radius[0]), float(radius[1])), - offsets.shape[0], - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - elif fill_color_type is None: - fill_color = None - else: - assert(False) - - stroke_color_type = ColorType.asColorType(args[current_index]) - current_index += 1 - if stroke_color_type == diffvg.ColorType.constant: - color = args[current_index] - current_index += 1 - stroke_color = diffvg.Constant(\ - diffvg.Vector4f(float(color[0]), - float(color[1]), - float(color[2]), - float(color[3]))) - elif stroke_color_type == diffvg.ColorType.linear_gradient: - beg = args[current_index] - current_index += 1 - end = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - stroke_color = diffvg.LinearGradient(\ - diffvg.Vector2f(float(beg[0]), float(beg[1])), - diffvg.Vector2f(float(end[0]), float(end[1])), - offsets.shape[0], - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(stop_colors.data_ptr())) - elif stroke_color_type == diffvg.ColorType.radial_gradient: - center = args[current_index] - current_index += 1 - radius = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - stroke_color = diffvg.RadialGradient(\ - diffvg.Vector2f(float(center[0]), float(center[1])), - diffvg.Vector2f(float(radius[0]), float(radius[1])), - offsets.shape[0], - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - elif stroke_color_type is None: - stroke_color = None - else: - assert(False) - use_even_odd_rule = bool(args[current_index]) - current_index += 1 - shape_to_canvas = args[current_index] - current_index += 1 - - if fill_color is not None: - 
color_contents.append(fill_color) - if stroke_color is not None: - color_contents.append(stroke_color) - shape_groups.append(diffvg.ShapeGroup(\ - diffvg.int_ptr(pydiffvg.data_ptr(shape_ids)), - shape_ids.shape[0], - diffvg.ColorType.constant if fill_color_type is None else fill_color_type, - diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), - diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, - diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), - use_even_odd_rule, - diffvg.float_ptr(pydiffvg.data_ptr(shape_to_canvas)))) - - filter_type = FilterType.asFilterType(args[current_index]) - current_index += 1 - filter_radius = args[current_index] - current_index += 1 - filt = diffvg.Filter(filter_type, filter_radius) - - device_name = pydiffvg.get_device_name() - device_spec = tf.DeviceSpec.from_string(device_name) - use_gpu = device_spec.device_type == 'GPU' - gpu_index = device_spec.device_index if device_spec.device_index is not None else 0 - - start = time.time() - scene = diffvg.Scene(canvas_width, - canvas_height, - shapes, - shape_groups, - filt, - use_gpu, - gpu_index) - time_elapsed = time.time() - start - global print_timing - if print_timing: - print('Scene construction, time: %.5f s' % time_elapsed) - - with tf.device(device_name): - if output_type == OutputType.color: - rendered_image = tf.zeros((int(height), int(width), 4), dtype = tf.float32) - else: - assert(output_type == OutputType.sdf) - rendered_image = tf.zeros((int(height), int(width), 1), dtype = tf.float32) - - start = time.time() - diffvg.render(scene, - diffvg.float_ptr(0), # background image - diffvg.float_ptr(pydiffvg.data_ptr(rendered_image) if output_type == OutputType.color else 0), - diffvg.float_ptr(pydiffvg.data_ptr(rendered_image) if output_type == OutputType.sdf else 0), - width, - height, - int(num_samples_x), - int(num_samples_y), - seed, - diffvg.float_ptr(0), # d_background_image - diffvg.float_ptr(0), # d_render_image - diffvg.float_ptr(0), # d_render_sdf - diffvg.float_ptr(0), # d_translation - use_prefiltering, - diffvg.float_ptr(0), # eval_positions - 0 ) # num_eval_positions (automatically set to entire raster) - time_elapsed = time.time() - start - if print_timing: - print('Forward pass, time: %.5f s' % time_elapsed) - - ctx = Context() - ctx.scene = scene - ctx.shape_contents = shape_contents - ctx.color_contents = color_contents - ctx.filter = filt - ctx.width = width - ctx.height = height - ctx.num_samples_x = num_samples_x - ctx.num_samples_y = num_samples_y - ctx.seed = seed - ctx.output_type = output_type - ctx.use_prefiltering = use_prefiltering - return rendered_image, ctx - -@tf.custom_gradient -def render(*x): - """ - The main TensorFlow interface of C++ diffvg. 
- """ - assert(tf.executing_eagerly()) - if pydiffvg.get_use_gpu() and os.environ.get('TF_FORCE_GPU_ALLOW_GROWTH') != 'true': - print('******************** WARNING ********************') - print('Tensorflow by default allocates all GPU memory,') - print('causing huge amount of page faults when rendering.') - print('Please set the environment variable TF_FORCE_GPU_ALLOW_GROWTH to true,') - print('so that Tensorflow allocates memory on demand.') - print('*************************************************') - - width = x[0] - height = x[1] - num_samples_x = x[2] - num_samples_y = x[3] - seed = x[4] - args = x[5:] - img, ctx = forward(width, height, num_samples_x, num_samples_y, seed, *args) - - def backward(grad_img): - scene = ctx.scene - width = ctx.width - height = ctx.height - num_samples_x = ctx.num_samples_x - num_samples_y = ctx.num_samples_y - seed = ctx.seed - output_type = ctx.output_type - use_prefiltering = ctx.use_prefiltering - - start = time.time() - with tf.device(pydiffvg.get_device_name()): - diffvg.render(scene, - diffvg.float_ptr(0), # background_image - diffvg.float_ptr(0), # render_image - diffvg.float_ptr(0), # render_sdf - width, - height, - num_samples_x, - num_samples_y, - seed, - diffvg.float_ptr(0), # d_background_image - diffvg.float_ptr(pydiffvg.data_ptr(grad_img) if output_type == OutputType.color else 0), - diffvg.float_ptr(pydiffvg.data_ptr(grad_img) if output_type == OutputType.sdf else 0), - diffvg.float_ptr(0), # d_translation - use_prefiltering, - diffvg.float_ptr(0), # eval_positions - 0 ) # num_eval_positions (automatically set to entire raster)) - time_elapsed = time.time() - start - global print_timing - if print_timing: - print('Backward pass, time: %.5f s' % time_elapsed) - - with tf.device('/device:cpu:' + str(pydiffvg.get_cpu_device_id())): - d_args = [] - d_args.append(None) # width - d_args.append(None) # height - d_args.append(None) # num_samples_x - d_args.append(None) # num_samples_y - d_args.append(None) # seed - d_args.append(None) # canvas_width - d_args.append(None) # canvas_height - d_args.append(None) # num_shapes - d_args.append(None) # num_shape_groups - d_args.append(None) # output_type - d_args.append(None) # use_prefiltering - for shape_id in range(scene.num_shapes): - d_args.append(None) # type - d_shape = scene.get_d_shape(shape_id) - if d_shape.type == diffvg.ShapeType.circle: - d_circle = d_shape.as_circle() - radius = tf.constant(d_circle.radius) - d_args.append(radius) - c = d_circle.center - c = tf.constant((c.x, c.y)) - d_args.append(c) - elif d_shape.type == diffvg.ShapeType.ellipse: - d_ellipse = d_shape.as_ellipse() - r = d_ellipse.radius - r = tf.constant((d_ellipse.radius.x, d_ellipse.radius.y)) - d_args.append(r) - c = d_ellipse.center - c = tf.constant((c.x, c.y)) - d_args.append(c) - elif d_shape.type == diffvg.ShapeType.path: - d_path = d_shape.as_path() - points = tf.zeros((d_path.num_points, 2), dtype=tf.float32) - d_path.copy_to(diffvg.float_ptr(pydiffvg.data_ptr(points)),diffvg.float_ptr(0)) - d_args.append(None) # num_control_points - d_args.append(points) - d_args.append(None) # is_closed - d_args.append(None) # use_distance_approx - elif d_shape.type == diffvg.ShapeType.rect: - d_rect = d_shape.as_rect() - p_min = tf.constant((d_rect.p_min.x, d_rect.p_min.y)) - p_max = tf.constant((d_rect.p_max.x, d_rect.p_max.y)) - d_args.append(p_min) - d_args.append(p_max) - else: - assert(False) - w = tf.constant((d_shape.stroke_width)) - d_args.append(w) - - for group_id in range(scene.num_shape_groups): - d_shape_group = 
scene.get_d_shape_group(group_id) - d_args.append(None) # shape_ids - d_args.append(None) # fill_color_type - if d_shape_group.has_fill_color(): - if d_shape_group.fill_color_type == diffvg.ColorType.constant: - d_constant = d_shape_group.fill_color_as_constant() - c = d_constant.color - d_args.append(tf.constant((c.x, c.y, c.z, c.w))) - elif d_shape_group.fill_color_type == diffvg.ColorType.linear_gradient: - d_linear_gradient = d_shape_group.fill_color_as_linear_gradient() - beg = d_linear_gradient.begin - d_args.append(tf.constant((beg.x, beg.y))) - end = d_linear_gradient.end - d_args.append(tf.constant((end.x, end.y))) - offsets = tf.zeros((d_linear_gradient.num_stops), dtype=tf.float32) - stop_colors = tf.zeros((d_linear_gradient.num_stops, 4), dtype=tf.float32) - # HACK: tensorflow's eager mode uses a cache to store scalar - # constants to avoid memory copy. If we pass scalar tensors - # into the C++ code and modify them, we would corrupt the - # cache, causing incorrect result in future scalar constant - # creations. Thus we force tensorflow to copy by plusing a zero. - # (also see https://github.com/tensorflow/tensorflow/issues/11186 - # for more discussion regarding copying tensors) - if offsets.shape.num_elements() == 1: - offsets = offsets + 0 - d_linear_gradient.copy_to(\ - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - d_args.append(offsets) - d_args.append(stop_colors) - elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: - d_radial_gradient = d_shape_group.fill_color_as_radial_gradient() - center = d_radial_gradient.center - d_args.append(tf.constant((center.x, center.y))) - radius = d_radial_gradient.radius - d_args.append(tf.constant((radius.x, radius.y))) - offsets = tf.zeros((d_radial_gradient.num_stops)) - if offsets.shape.num_elements() == 1: - offsets = offsets + 0 - stop_colors = tf.zeros((d_radial_gradient.num_stops, 4)) - d_radial_gradient.copy_to(\ - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - d_args.append(offsets) - d_args.append(stop_colors) - else: - assert(False) - d_args.append(None) # stroke_color_type - if d_shape_group.has_stroke_color(): - if d_shape_group.stroke_color_type == diffvg.ColorType.constant: - d_constant = d_shape_group.stroke_color_as_constant() - c = d_constant.color - d_args.append(tf.constant((c.x, c.y, c.z, c.w))) - elif d_shape_group.stroke_color_type == diffvg.ColorType.linear_gradient: - d_linear_gradient = d_shape_group.stroke_color_as_linear_gradient() - beg = d_linear_gradient.begin - d_args.append(tf.constant((beg.x, beg.y))) - end = d_linear_gradient.end - d_args.append(tf.constant((end.x, end.y))) - offsets = tf.zeros((d_linear_gradient.num_stops)) - stop_colors = tf.zeros((d_linear_gradient.num_stops, 4)) - if offsets.shape.num_elements() == 1: - offsets = offsets + 0 - d_linear_gradient.copy_to(\ - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - d_args.append(offsets) - d_args.append(stop_colors) - elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: - d_radial_gradient = d_shape_group.stroke_color_as_radial_gradient() - center = d_radial_gradient.center - d_args.append(tf.constant((center.x, center.y))) - radius = d_radial_gradient.radius - d_args.append(tf.constant((radius.x, radius.y))) - offsets = tf.zeros((d_radial_gradient.num_stops)) - stop_colors = tf.zeros((d_radial_gradient.num_stops, 4)) - if 
offsets.shape.num_elements() == 1: - offsets = offsets + 0 - d_radial_gradient.copy_to(\ - diffvg.float_ptr(pydiffvg.data_ptr(offsets)), - diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) - d_args.append(offsets) - d_args.append(stop_colors) - else: - assert(False) - d_args.append(None) # use_even_odd_rule - d_shape_to_canvas = tf.zeros((3, 3), dtype = tf.float32) - d_shape_group.copy_to(diffvg.float_ptr(pydiffvg.data_ptr(d_shape_to_canvas))) - d_args.append(d_shape_to_canvas) - d_args.append(None) # filter_type - d_args.append(tf.constant(scene.get_d_filter_radius())) - - return d_args - - return img, backward diff --git a/spaces/ma-xu/LIVE/thrust/thrust/iterator/detail/minimum_system.h b/spaces/ma-xu/LIVE/thrust/thrust/iterator/detail/minimum_system.h deleted file mode 100644 index 45b5a592fc5796892c143bf677ca988d788ec20d..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/iterator/detail/minimum_system.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include - -namespace thrust -{ -namespace detail -{ - - -template - struct unrelated_systems {}; - - -// if a minimum system exists for these arguments, return it -// otherwise, collect the arguments and report them as unrelated -template - struct minimum_system - : thrust::detail::eval_if< - is_metafunction_defined< - minimum_type - >::value, - minimum_type, - thrust::detail::identity_< - unrelated_systems - > - > -{}; // end minimum_system - - -} // end detail -} // end thrust - diff --git a/spaces/marcusj83/MusicGenbruh/tests/modules/test_codebooks_patterns.py b/spaces/marcusj83/MusicGenbruh/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/marcusj83/MusicGenbruh/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = 
provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # 
expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/matthoffner/chatbot-mini/types/settings.ts b/spaces/matthoffner/chatbot-mini/types/settings.ts deleted file mode 100644 index d4b6335e6c99ff6432aebbc046214e3ca2c424a8..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot-mini/types/settings.ts +++ /dev/null @@ -1,3 +0,0 @@ -export interface Settings { - theme: 'light' | 'dark'; -} diff --git a/spaces/merve/fill-in-the-blank/public/private-and-fair/index.html b/spaces/merve/fill-in-the-blank/public/private-and-fair/index.html deleted file mode 100644 index e85df1babd2619b3dd0c8c989634bf5ba7f6d937..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/private-and-fair/index.html +++ /dev/null @@ -1,199 +0,0 @@ - - - - - - - - - - - - - - - - - - Can a Model Be Differentially Private and Fair? - - - - - - - - - - - - - - - -
                    - -
                    - -

                    Can a Model Be Differentially Private and Fair?

                    -
                    Training models with differential privacy stops models from inadvertently leaking sensitive data, but there's an unexpected side-effect: reduced accuracy on underrepresented subgroups.
                    -

                    Imagine you want to use machine learning to suggest new bands to listen to. You could do this by having lots of people list their favorite bands and using them to train a model. The trained model might be quite useful and fun, but if someone pokes and prods at the model in just the right way, they could extract the music preferences of someone whose data was used to train the model. Other kinds of models are potentially vulnerable; credit card numbers have been pulled out of language models and actual faces reconstructed from image models.

                    -

                    Training with differential privacy limits the information about any one data point that is extractable but in some cases there’s an unexpected side-effect: reduced accuracy with underrepresented subgroups disparately impacted.

                    -
                    - -

                    Recall that machine learning models are typically trained with gradient descent, a series of small steps taken to minimize an error function. To show how a model can leak its training data, we’ve trained two simple models to separate red and blue dots using two simple datasets that differ in one way: a single isolated data point in the upper left has been switched from red to blue.

                    -
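                    (The training code behind these diagrams isn't shown in this page; as a rough, hypothetical sketch only, one plain gradient-descent step for a logistic-regression boundary line might look like the following, with the loss, learning rate, and data all stand-ins.)

```python
import numpy as np

def gradient_descent_step(w, X, y, lr=0.1):
    """One ordinary (non-private) gradient descent step for a logistic-regression boundary.

    w: (d,) weights defining the boundary line
    X: (n, d) training points (the red and blue dots)
    y: (n,) labels, 0 or 1
    """
    preds = 1.0 / (1.0 + np.exp(-X @ w))   # model predictions
    grad = X.T @ (preds - y) / len(y)      # gradient of the average log loss
    return w - lr * grad                   # a small step that lowers the error
```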
                    - -

                    Notice that the two models have very different boundary lines near the isolated point by the end of the training. Someone with access to the trained model might be able to infer if the point in the upper left is red or blue — if the color represented sensitive information, like someone’s voting record, that could be quite bad!

                    -

                    Protecting the Privacy of Training Points

                    -

                    We can prevent a single data point from drastically altering the model by adding two operations to each training step:²

                    -
                      -
                    • ⚬ Clipping the gradient (here, limiting how much the boundary line can move with each step) to bound the maximum impact a single data point can have on the final model.
                    • -
                    • ⚬ Adding random noise to the gradient.
                    • -
                    -

                    Try increasing the random noise below. We’re now training lots of differentially private models; the more the potential models for the red and blue outlier points overlap, the more plausible deniability the person in the upper left has.

                    -
                    - -

                    You can also try dragging the other points around and adjusting the gradient clipping. Are points in the center or outliers more likely to modify the boundary lines? In two dimensions there’s a limited number of outliers, but in higher dimensions more points are outliers and much more information can be extracted from a trained model.

                    -

                    Correctly combined, adding gradient clipping and random noise to gradient descent makes it possible to train a model with differential privacy – we can guarantee that a model trained on a given dataset is essentially indistinguishable from a model trained on the same dataset with a single point changed.

                    -
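                    (As a hedged illustration of those two operations rather than the exact code behind the diagrams, a single clipped-and-noised training step might look like this; the clip norm and noise scale are placeholder values.)

```python
import numpy as np

def dp_sgd_step(w, X, y, lr=0.1, clip_norm=1.0, noise_scale=0.5, rng=None):
    """One differentially private step: clip each point's gradient, then add noise."""
    rng = rng or np.random.default_rng(0)
    preds = 1.0 / (1.0 + np.exp(-X @ w))
    per_example_grads = (preds - y)[:, None] * X              # one gradient per training point
    norms = np.linalg.norm(per_example_grads, axis=1, keepdims=True)
    clipped = per_example_grads / np.maximum(1.0, norms / clip_norm)  # bound any single point's influence
    noised_sum = clipped.sum(axis=0) + rng.normal(0.0, noise_scale * clip_norm, size=w.shape)
    return w - lr * noised_sum / len(y)                        # noise gives the outlier plausible deniability
```

                    Because each point's gradient is clipped before the noisy sum, no single red or blue dot can move the boundary by more than a bounded, noise-obscured amount.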

                    Predictions on Outliers Change the Most

                    -

                    What does this look like in practice? In Distribution Density, Tails, and Outliers in Machine Learning, a series of increasingly differentially private models were trained on MNIST digits. Every digit in the training set was ranked according to the highest level of privacy that correctly classified it.

                    -
                    - -

                    On the lower left, you can see digits labeled as “3” in the training data that look more like a “2” and a “9”. They’re very different from the other “3”s in the training data so adding just a bit of privacy protection causes the model to no longer classify them as “3”. Under some specific circumstances, differential privacy can actually improve how well the model generalizes to data it wasn’t trained on by limiting the influence of spurious examples.

                    -

                    The right side shows more canonical digits which are classified correctly even with high levels of privacy because they’re quite similar to other digits in the training data.

                    -

                    The Accuracy Tradeoff

                    -

                    Limiting how much a model can learn from a single example does have a downside: it can also decrease the model’s accuracy. With 7,500 training points, 90% accuracy on MNIST digits is only achievable with an extremely low level of privacy protection; increasing privacy quickly lowers the model’s accuracy.

                    -

                    Collecting more training data offers a way out of this accuracy/privacy tradeoff. With 60,000 training points, 90% accuracy can be reached with a higher privacy level than almost all real-world deployments of differential privacy.

                    -
                    - -

                    Looking at the differences between predictions by digit class shows another potential complication: some classes are harder to identify than others. Detecting an “8” with high confidence requires more training data and/or lower privacy than detecting a “0” with high confidence.

                    -
                    - -

                    This problem is exacerbated if the training data has fewer examples of one class than the others. Trying to predict an uncommon event with a differentially private model can require an enormous amount of data.

                    -

                    Implications for Fairness

                    -

                    Outliers also aren’t evenly distributed within a class. Below, MNIST digits are colored by their sensitivity to higher privacy levels and projected with UMAP, forming several clusters of privacy-sensitive yellow digits. It’s possible to inadvertently train a model with good overall accuracy on a class but very low accuracy on a smaller group within the class.

                    -
                    - -

                    There’s nothing that makes a “1” slanted to the left intrinsically harder to classify, but because there are only a few slanted “1”s in the training data it’s difficult to make a model that classifies them accurately without leaking information.

                    -

                    This disparate impact doesn’t just happen in datasets of differently drawn digits: increased levels of differential privacy in a range of image and language models disproportionately decreased accuracy on underrepresented subgroups. And adding differential privacy to a medical model reduced the influence of Black patients’ data on the model while increasing the influence of white patients’ data.

                    -

                    Lowering the privacy level might not help non-majoritarian data points either – they’re the ones most susceptible to having their information exposed. Again, escaping the accuracy/privacy tradeoff requires collecting more data – this time from underrepresented subgroups.

                    -

                    More Reading

                    -

                    There are deep connections between generalization, memorization and privacy that are still not well understood. Slightly changing the privacy constraints, for example, can create new options. If public, unlabeled data exists, a “Private Aggregation of Teacher Ensembles“ could be used instead of gradient clipping and random noise to train a differentially private model with a smaller disparate impact on accuracy.

                    -
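                    (For intuition only: in PATE, many "teacher" models are trained on disjoint slices of the private data, and a public "student" model is labeled by their noisy majority vote. A minimal sketch of that aggregation step, with the noise scale as a placeholder, might look like this.)

```python
import numpy as np

def noisy_vote(teacher_predictions, num_classes, noise_scale=1.0, rng=None):
    """Label one example by a noisy majority vote over the teachers (PATE's noisy-max)."""
    rng = rng or np.random.default_rng(0)
    counts = np.bincount(teacher_predictions, minlength=num_classes).astype(float)
    counts += rng.laplace(0.0, noise_scale, size=num_classes)  # noise hides any single teacher's vote
    return int(np.argmax(counts))
```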

                    Finding ways to increase privacy with a smaller impact on accuracy is an active area of research – model architectures designed with privacy in mind and better dataset cleaning look like promising avenues.

                    -

                    There are also additional accuracy/privacy/fairness tradeoffs beyond what’s discussed in this post. Even if a differentially private model doesn’t have large accuracy gaps between subgroups, enforcing fairness metrics can reduce privacy or accuracy.

                    -

                    This post focuses on protecting the privacy of individual data points. In practice more work might be necessary to ensure that the privacy of users – who could contribute much more than a single data point each – is also protected.

                    -

                    These questions are also significant outside of machine learning. Allocating resources based on a differentially private dataset – with no machine learning model involved – can also disproportionately affect different groups. The 2020 Census is the first to use differential privacy and this could have a wide range of impacts, including how congressional districts are drawn.

                    -

                    Credits

                    -

                    Adam Pearce // January 2022

                    -

                    Thanks to Abhradeep Thakurta, Andreas Terzis, Andy Coenen, Asma Ghandeharioun, Brendan McMahan, Ellen Jiang, Emily Reif, Fernanda Viégas, James Wexler, Kevin Robinson, Matthew Jagielski, Martin Wattenberg, Meredith Morris, Miguel Guevara, Nicolas Papernot and Nithum Thain for their help with this piece.

                    -

                    Footnotes

                    -

                    To speed up training at the cost of looser privacy bounds, gradients, clipping and noise can be calculated on a group of data points instead of individual data points.

                    -

                    The “ε” in ε-differential privacy essentially measures the overlap in two distributions after changing a single data point.

                    -
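                    (For reference, the standard definition behind that footnote: a randomized mechanism M is (ε, δ)-differentially private if, for every pair of datasets D and D′ differing in a single data point and every set of outputs S,)

```latex
\Pr[\,M(D) \in S\,] \;\le\; e^{\varepsilon}\,\Pr[\,M(D') \in S\,] + \delta
```

                    (Smaller ε means the two output distributions overlap more; δ = 0 gives pure ε-differential privacy.)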

                    Clipping and noising are also used outside of differential privacy as regularization techniques to improve accuracy.

                    In addition to accidentally mislabeled examples, differential privacy can also provide some protection against data poisoning attacks.

                    -

                    While visually similar digits aren’t necessarily interpreted in similar ways by the model, the clustering of visually similar digits in the UMAP diagram at the bottom of the page (which projects embeddings from the penultimate layer of the digit classifier) suggests there is a close connection here.

                    -

                    Rebalancing the dataset without collecting more data doesn’t avoid this privacy/accuracy tradeoff – upsampling the smaller class reduces privacy and downsampling the larger class reduces data and lowers accuracy.

                    -

                    See the appendix on Subgroup Size and Accuracy for more detail.

                    -

                    Appendix: Subgroup Size and Accuracy

                    -

                    How, exactly, does the amount of training data, the privacy level and the percentage of data from a subgroup impact accuracy? Using MNIST digits rotated 90° as a stand-in for a smaller subgroup, we can see how the accuracy of a series of simple models that classify “1”s and “7”s changes based on these attributes.

                    -
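                    (The exact preprocessing isn't shown here; a hypothetical way to build such a training set, rotating a chosen fraction of the digit images by 90°, is sketched below, with the array shapes and sampling scheme as assumptions.)

```python
import numpy as np

def make_rotated_subgroup(images, labels, fraction=0.05, rng=None):
    """Rotate a random `fraction` of the digit images 90° to act as a smaller subgroup."""
    rng = rng or np.random.default_rng(0)
    images = images.copy()
    idx = rng.choice(len(images), size=int(fraction * len(images)), replace=False)
    images[idx] = np.rot90(images[idx], k=1, axes=(1, 2))  # rotate the selected 28x28 images
    is_rotated = np.zeros(len(images), dtype=bool)
    is_rotated[idx] = True
    return images, labels, is_rotated
```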

                    On the far left, models without any rotated digits in the training data never classify those digits more accurately than random guessing. By rotating 5% of the training digits, a small slice of models with lots of training data and low privacy can accurately classify rotated digits.

                    -
                    - -

                    Increasing the proportion of rotated digits to 10% or 20% or even more makes it possible to train a higher privacy model that performs well on both types of digits with the same amount of training data.

                    -

                    Click on one of the models above and you can see how the accuracy gap shifts as the number of training points, privacy level and percentage of rotated digits are independently changed.

                    -
                    - -

                    Intuitively, adding more training data has diminishing marginal increases to accuracy. Accuracy on the smaller group of rotated digits, which may just be on the cusp of being learned, falls off faster as the effective amount of training data is decreased — a disparate reduction in accuracy.

                    -

                    More Explorables

                    -

                    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/fill-in-the-blank/source/third_party/d3-scale-chromatic.v1.min.js b/spaces/merve/fill-in-the-blank/source/third_party/d3-scale-chromatic.v1.min.js deleted file mode 100644 index 90b8e6953cea11cade766bc4f143ecce4bd9edf1..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/third_party/d3-scale-chromatic.v1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://d3js.org/d3-scale-chromatic/ v1.5.0 Copyright 2019 Mike Bostock -!function(f,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("d3-interpolate"),require("d3-color")):"function"==typeof define&&define.amd?define(["exports","d3-interpolate","d3-color"],e):e((f=f||self).d3=f.d3||{},f.d3,f.d3)}(this,function(f,e,d){"use strict";function a(f){for(var e=f.length/6|0,d=new Array(e),a=0;a1)&&(f-=Math.floor(f));var e=Math.abs(f-.5);return wf.h=360*f-100,wf.s=1.5-1.5*e,wf.l=.8-.9*e,wf+""},f.interpolateRdBu=x,f.interpolateRdGy=g,f.interpolateRdPu=N,f.interpolateRdYlBu=v,f.interpolateRdYlGn=C,f.interpolateReds=hf,f.interpolateSinebow=function(f){var e;return f=(.5-f)*Math.PI,Af.r=255*(e=Math.sin(f))*e,Af.g=255*(e=Math.sin(f+Pf))*e,Af.b=255*(e=Math.sin(f+Bf))*e,Af+""},f.interpolateSpectral=I,f.interpolateTurbo=function(f){return f=Math.max(0,Math.min(1,f)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+f*(1172.33-f*(10793.56-f*(33300.12-f*(38394.49-14825.05*f)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+f*(557.33+f*(1225.33-f*(3574.96-f*(1073.77+707.56*f)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+f*(3211.1-f*(15327.97-f*(27814-f*(22569.18-6838.66*f)))))))+")"},f.interpolateViridis=xf,f.interpolateWarm=yf,f.interpolateYlGn=Z,f.interpolateYlGnBu=U,f.interpolateYlOrBr=ff,f.interpolateYlOrRd=df,f.schemeAccent=b,f.schemeBlues=af,f.schemeBrBG=u,f.schemeBuGn=L,f.schemeBuPu=q,f.schemeCategory10=c,f.schemeDark2=t,f.schemeGnBu=T,f.schemeGreens=bf,f.schemeGreys=nf,f.schemeOrRd=k,f.schemeOranges=pf,f.schemePRGn=y,f.schemePaired=n,f.schemePastel1=r,f.schemePastel2=o,f.schemePiYG=w,f.schemePuBu=E,f.schemePuBuGn=W,f.schemePuOr=P,f.schemePuRd=H,f.schemePurples=of,f.schemeRdBu=G,f.schemeRdGy=R,f.schemeRdPu=K,f.schemeRdYlBu=Y,f.schemeRdYlGn=O,f.schemeReds=mf,f.schemeSet1=i,f.schemeSet2=l,f.schemeSet3=m,f.schemeSpectral=S,f.schemeTableau10=h,f.schemeYlGn=X,f.schemeYlGnBu=Q,f.schemeYlOrBr=$,f.schemeYlOrRd=ef,Object.defineProperty(f,"__esModule",{value:!0})}); \ No newline at end of file diff --git a/spaces/merve/hidden-bias/public/third_party/d3-scale-chromatic.v1.min.js b/spaces/merve/hidden-bias/public/third_party/d3-scale-chromatic.v1.min.js deleted file mode 100644 index 90b8e6953cea11cade766bc4f143ecce4bd9edf1..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/third_party/d3-scale-chromatic.v1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://d3js.org/d3-scale-chromatic/ v1.5.0 Copyright 2019 Mike Bostock -!function(f,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("d3-interpolate"),require("d3-color")):"function"==typeof define&&define.amd?define(["exports","d3-interpolate","d3-color"],e):e((f=f||self).d3=f.d3||{},f.d3,f.d3)}(this,function(f,e,d){"use strict";function a(f){for(var e=f.length/6|0,d=new Array(e),a=0;a1)&&(f-=Math.floor(f));var e=Math.abs(f-.5);return 
wf.h=360*f-100,wf.s=1.5-1.5*e,wf.l=.8-.9*e,wf+""},f.interpolateRdBu=x,f.interpolateRdGy=g,f.interpolateRdPu=N,f.interpolateRdYlBu=v,f.interpolateRdYlGn=C,f.interpolateReds=hf,f.interpolateSinebow=function(f){var e;return f=(.5-f)*Math.PI,Af.r=255*(e=Math.sin(f))*e,Af.g=255*(e=Math.sin(f+Pf))*e,Af.b=255*(e=Math.sin(f+Bf))*e,Af+""},f.interpolateSpectral=I,f.interpolateTurbo=function(f){return f=Math.max(0,Math.min(1,f)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+f*(1172.33-f*(10793.56-f*(33300.12-f*(38394.49-14825.05*f)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+f*(557.33+f*(1225.33-f*(3574.96-f*(1073.77+707.56*f)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+f*(3211.1-f*(15327.97-f*(27814-f*(22569.18-6838.66*f)))))))+")"},f.interpolateViridis=xf,f.interpolateWarm=yf,f.interpolateYlGn=Z,f.interpolateYlGnBu=U,f.interpolateYlOrBr=ff,f.interpolateYlOrRd=df,f.schemeAccent=b,f.schemeBlues=af,f.schemeBrBG=u,f.schemeBuGn=L,f.schemeBuPu=q,f.schemeCategory10=c,f.schemeDark2=t,f.schemeGnBu=T,f.schemeGreens=bf,f.schemeGreys=nf,f.schemeOrRd=k,f.schemeOranges=pf,f.schemePRGn=y,f.schemePaired=n,f.schemePastel1=r,f.schemePastel2=o,f.schemePiYG=w,f.schemePuBu=E,f.schemePuBuGn=W,f.schemePuOr=P,f.schemePuRd=H,f.schemePurples=of,f.schemeRdBu=G,f.schemeRdGy=R,f.schemeRdPu=K,f.schemeRdYlBu=Y,f.schemeRdYlGn=O,f.schemeReds=mf,f.schemeSet1=i,f.schemeSet2=l,f.schemeSet3=m,f.schemeSpectral=S,f.schemeTableau10=h,f.schemeYlGn=X,f.schemeYlGnBu=Q,f.schemeYlOrBr=$,f.schemeYlOrRd=ef,Object.defineProperty(f,"__esModule",{value:!0})}); \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/public/measuring-fairness/init.js b/spaces/merve/measuring-fairness/public/measuring-fairness/init.js deleted file mode 100644 index 5a8df63793d90464eb148443787eb91e2b34180b..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/measuring-fairness/init.js +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - - - - -d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -nCols = 12 - -window.colors = { - well: d3.color('#669399') + '', - sick: d3.color('#EE2A2A') + '', - - // well: d3.color('green') + '', - // sick: d3.color('purple'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#865327') + '', - // sick: d3.color('#012394'), - - // well: d3.color('#012394') + '', - // sick: d3.color('#FBC20F') + '', - - // well: d3.color('#012394') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#012394') + '', - - // well: d3.color('orange') + '', - // sick: d3.color('#012394') + '', - - -} - -window.colors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.2), -} - -window.lcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.35) -} -window.llcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(1) -} -window.dcolors = { - well: d3.interpolate(colors.well, '#000')(.65), - sick: d3.interpolate(colors.sick, '#000')(.65) -} - -// window.colors = { -// well: d3.color('#BEF5FF') + '', -// sick: d3.color('#FCC5C3') + '', -// } - -// window.colors = { -// well: d3.color('#669399') + '', -// sick: d3.color('#EE2A2A') + '', -// } - -// window.lcolors = { -// well: d3.interpolate(colors.well, '#fff')(.3), -// sick: d3.interpolate(colors.sick, '#fff')(.3) -// } -// window.llcolors = { -// well: d3.interpolate(colors.well, '#fff')(.2), -// sick: d3.interpolate(colors.sick, '#fff')(.2) -// } - -// window.lcolors = { -// well: '#CFFCF6', -// sick: '#FFBD96' -// } - -// copy(logColors()) -function logColors(){ - return ` - body{ - --colors-well: ${d3.rgb(colors.well)}; - --colors-sick: ${d3.rgb(colors.sick)}; - --lcolors-well: ${d3.rgb(lcolors.well)}; - --lcolors-sick: ${d3.rgb(lcolors.sick)}; - --dcolors-well: ${d3.rgb(dcolors.well)}; - --dcolors-sick: ${d3.rgb(dcolors.sick)}; - } - ` -} - - - -window.init = function(){ - console.clear() - - graphSel = d3.select('#graph').html('').append('div') - totalWidth = graphSel.node().offsetWidth - totalWidth = 400 - - c = d3.conventions({ - sel: graphSel.st({marginTop: 40}), - margin: {top: 20}, - totalWidth, - totalHeight: totalWidth, - }) - - students = makeStudents() - sel = makeSel() - mini = makeMini() - slider = makeSlider() - slides = makeSlides() - gs = makeGS() - - function sizeGraphSel(){ - var scale = (totalWidth + 35)/(innerWidth - 10) // off by one, s is 35 - scale = d3.clamp(1, scale, 2) - - graphSel.st({ - transform: `scale(${1/scale})`, - transformOrigin: '0px 0px', - - }) - } - sizeGraphSel() - d3.select(window).on('resize', sizeGraphSel) - -} -init() - - - - - -!(function(){ - var footnums = '¹²³' - - d3.selectAll('.footstart').each(function(d, i){ - d3.select(this) - .at({ - href: '#footend-' + i, - }) - .text(footnums[i]) - .parent().at({id: 'footstart-' + i}) - }) - - d3.selectAll('.footend').each(function(d, i){ - d3.select(this) - .at({ - href: '#footstart-' + i, - id: 'footend-' + i, - }) - .text(footnums[i]) - }) - - - d3.selectAll('#sections wee, #graph .weepeople').attr('aria-hidden', true) - -})() - 
- - - - - - - - - - - - - - - - diff --git a/spaces/mfkeles/Track-Anything/tracker/model/memory_util.py b/spaces/mfkeles/Track-Anything/tracker/model/memory_util.py deleted file mode 100644 index faf6197b8c4ea990317476e2e3aeb8952a78aedf..0000000000000000000000000000000000000000 --- a/spaces/mfkeles/Track-Anything/tracker/model/memory_util.py +++ /dev/null @@ -1,80 +0,0 @@ -import math -import numpy as np -import torch -from typing import Optional - - -def get_similarity(mk, ms, qk, qe): - # used for training/inference and memory reading/memory potentiation - # mk: B x CK x [N] - Memory keys - # ms: B x 1 x [N] - Memory shrinkage - # qk: B x CK x [HW/P] - Query keys - # qe: B x CK x [HW/P] - Query selection - # Dimensions in [] are flattened - CK = mk.shape[1] - mk = mk.flatten(start_dim=2) - ms = ms.flatten(start_dim=1).unsqueeze(2) if ms is not None else None - qk = qk.flatten(start_dim=2) - qe = qe.flatten(start_dim=2) if qe is not None else None - - if qe is not None: - # See appendix for derivation - # or you can just trust me ヽ(ー_ー )ノ - mk = mk.transpose(1, 2) - a_sq = (mk.pow(2) @ qe) - two_ab = 2 * (mk @ (qk * qe)) - b_sq = (qe * qk.pow(2)).sum(1, keepdim=True) - similarity = (-a_sq+two_ab-b_sq) - else: - # similar to STCN if we don't have the selection term - a_sq = mk.pow(2).sum(1).unsqueeze(2) - two_ab = 2 * (mk.transpose(1, 2) @ qk) - similarity = (-a_sq+two_ab) - - if ms is not None: - similarity = similarity * ms / math.sqrt(CK) # B*N*HW - else: - similarity = similarity / math.sqrt(CK) # B*N*HW - - return similarity - -def do_softmax(similarity, top_k: Optional[int]=None, inplace=False, return_usage=False): - # normalize similarity with top-k softmax - # similarity: B x N x [HW/P] - # use inplace with care - if top_k is not None: - values, indices = torch.topk(similarity, k=top_k, dim=1) - - x_exp = values.exp_() - x_exp /= torch.sum(x_exp, dim=1, keepdim=True) - if inplace: - similarity.zero_().scatter_(1, indices, x_exp) # B*N*HW - affinity = similarity - else: - affinity = torch.zeros_like(similarity).scatter_(1, indices, x_exp) # B*N*HW - else: - maxes = torch.max(similarity, dim=1, keepdim=True)[0] - x_exp = torch.exp(similarity - maxes) - x_exp_sum = torch.sum(x_exp, dim=1, keepdim=True) - affinity = x_exp / x_exp_sum - indices = None - - if return_usage: - return affinity, affinity.sum(dim=2) - - return affinity - -def get_affinity(mk, ms, qk, qe): - # shorthand used in training with no top-k - similarity = get_similarity(mk, ms, qk, qe) - affinity = do_softmax(similarity) - return affinity - -def readout(affinity, mv): - B, CV, T, H, W = mv.shape - - mo = mv.view(B, CV, T*H*W) - mem = torch.bmm(mo, affinity) - mem = mem.view(B, CV, H, W) - - return mem diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/op/setup.py b/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/op/setup.py deleted file mode 100644 index 5b926d450579990c8f09b93cbc5ae4c06128ef8d..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/op/setup.py +++ /dev/null @@ -1,33 +0,0 @@ -from setuptools import setup -from torch.utils.cpp_extension import CUDAExtension, BuildExtension -from pathlib import Path - -# Usage: -# python setup.py install (or python setup.py bdist_wheel) -# NB: Windows: run from VS2017 x64 Native Tool Command Prompt - -rootdir = (Path(__file__).parent / '..' 
/ 'op').resolve() - -setup( - name='upfirdn2d', - ext_modules=[ - CUDAExtension('upfirdn2d_op', - [str(rootdir / 'upfirdn2d.cpp'), str(rootdir / 'upfirdn2d_kernel.cu')], - ) - ], - cmdclass={ - 'build_ext': BuildExtension - } -) - -setup( - name='fused', - ext_modules=[ - CUDAExtension('fused', - [str(rootdir / 'fused_bias_act.cpp'), str(rootdir / 'fused_bias_act_kernel.cu')], - ) - ], - cmdclass={ - 'build_ext': BuildExtension - } -) \ No newline at end of file diff --git a/spaces/mishtert/tracer/pharmap_url.py b/spaces/mishtert/tracer/pharmap_url.py deleted file mode 100644 index 57a6ecae7ddbc6836b19ffec2655ffc60f813d74..0000000000000000000000000000000000000000 --- a/spaces/mishtert/tracer/pharmap_url.py +++ /dev/null @@ -1,154 +0,0 @@ -import streamlit as st -import pandas as pd -# import utils.pharmap_utils.layout as lt -from batutils import * -# import stanza - -import requests -# import os.path -import io -# import PyPDF2 -from pypdf.pdf import PdfFileReader -from urllib.request import Request, urlopen -from bs4 import BeautifulSoup -from bs4.element import Comment - -# from utils.pharmap_utils.dtxutils import * -# from utils.pharmap_utils.dictutils import * - -from stanzautils import * - - -# @st.cache(show_spinner=True) -def get_ner(contents): - print('inside get ner') - content_list = [] - st.write('Reading the page...') - nlp = call_nlp_pipeline() - doc = nlp(contents.strip()) - st.write('Getting disease names...') - for ent in doc.entities: - if ent.type == 'DISEASE': - content_list.append(ent.text.replace('\n', '')) - content_list = list(set(content_list)) - print('got the disease names', content_list) - st.write('Got the disease names...') - return content_list - - -def get_ta_mapped_url(content_list): - print('inside get_ta_mapped') - st.write(content_list) - # content_list = content_list - st.write('Trying to get Mesh Name..') - print('Trying to get Mesh Name..') - ta_list = [] - ta = [] - for condition_text in content_list: - # print("printing inside the for loop",condition_text) - ta = non_url_flow(condition_text) - # print(ta) - ta_list.append(ta) - # print(ta_list) - flat_list = [item for sublist in ta_list for item in sublist] - ta = list(set(flat_list)) - print("Outside the loop", ta) - return ta - - -def check_pdf_html(url): - r = requests.get(url) - content_type = r.headers.get('content-type') - print(content_type) - if 'application/pdf' in content_type: - ext = 'pdf' - elif 'text/html' in content_type: - ext = 'html' - else: - ext = '' - print('Unknown type: {}'.format(content_type)) - print(ext) - return ext - - -# @st.cache -def get_disease_html(u): - print('inside get disease html') - # u="https://www.exelixis.com/pipeline/" - # "https://www.roche.com/dam/jcr:22160102-e04d-4484-ae3b-0f474105647e/en/diaq321.pdf" - url = Request(u, headers={'User-Agent': 'Mozilla/5.0'}) - html = urlopen(url).read() - soup = BeautifulSoup(html, features="html.parser") - for script in soup(["script", "style"]): - script.extract() - for footer in soup.findAll('header'): - footer.decompose() - for footer in soup.findAll('footer'): - footer.decompose() - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = '\n'.join(chunk for chunk in chunks if chunk) - # st.write(text) - result = get_ner(text) - return result - - -# @st.cache(persist=True,show_spinner=True) -def get_disease_pdf(url): - st.write('get pdf disease') - r = requests.get(url) - f = io.BytesIO(r.content) - reader = 
PdfFileReader(f) - # pnum = reader.getNumPages() - # p_num = [] - data = [] - df = pd.DataFrame() - content_list = [] - pnum = 2 - for p in range(pnum): - contents = reader.getPage(p).extractText() - content_list = get_ner(contents) - # doc = nlp(contents.strip()) - # for ent in doc.entities: - # if ent.type=='DISEASE': - # content_list.append(ent.text.replace('\n','')) - # content_list = list(set(content_list)) - # print(content_list) - # p_num = [p+1] - # print('pagenum',p_num) - # print('values',content_list) - a_dictionary = {'pno:': [p + 1], - 'conditions': content_list - } - content_list = [] - # print('a_dictionary',a_dictionary) - data.append(a_dictionary) - f.close() - df = df.append(data, True) - return df - - -def get_link_mapped(url): - # st.write(url) - # url = 'https://www.gene.com/medical-professionals/pipeline' - try: - get = check_pdf_html(url) - # st.write(get) - except: - get = 'invalid URL' - if get == 'pdf': - # st.write('inside pdf') - pdf_mapped_df = get_disease_pdf(url) - st.dataframe(pdf_mapped_df) - elif get == 'html': - # st.write('inside html') - # st.write(url) - # print('html') - content_list = get_disease_html(url) - ta = get_ta_mapped_url(content_list) - st.write(ta) - - elif get == 'invalid URL': - print('invalid') - diff --git a/spaces/mohdelgaar/Clinical_Decisions/app.py b/spaces/mohdelgaar/Clinical_Decisions/app.py deleted file mode 100644 index 1f07236c32140937b74f8ce8f9e2d22455e4f3b4..0000000000000000000000000000000000000000 --- a/spaces/mohdelgaar/Clinical_Decisions/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import argparse -import torch -from data import load_tokenizer -from model import load_model -from demo import run_gradio - -parser = argparse.ArgumentParser() -parser.add_argument('--task', default='token', choices=['seq', 'token']) -parser.add_argument('--ckpt', default='Bio_ClinicalBERT-roberta-base.pt') -parser.add_argument('--max_len', type=int, default=512) -parser.add_argument('--num_layers', type=int, default=3) -parser.add_argument('--kernels', nargs=3, type=int, default=[1,2,3]) -parser.add_argument('--model', default='roberta-base',) -parser.add_argument('--model_name', default='emilyalsentzer/Bio_ClinicalBERT',) -parser.add_argument('--gpu', default='0') -parser.add_argument('--grad_accumulation', default=1, type=int) -parser.add_argument('--pheno_id', type=int) -parser.add_argument('--text_subset') -parser.add_argument('--pheno_n', type=int, default=500) -parser.add_argument('--hidden_size', type=int, default=100) -parser.add_argument('--emb_size', type=int, default=400) -parser.add_argument('--total_steps', type=int, default=3000) -parser.add_argument('--seed', default = '0') -parser.add_argument('--num_phenos', type=int, default=10) -parser.add_argument('--num_decs', type=int, default=9) -parser.add_argument('--batch_size', type=int, default=20) -parser.add_argument('--pos_weight', type=float, default=1.25) -parser.add_argument('--include_nolabel', action='store_true') -parser.add_argument('--truncate_train', action='store_true') -parser.add_argument('--truncate_eval', action='store_true') -parser.add_argument('--gradio', action='store_true') -parser.add_argument('--optuna', action='store_true') -parser.add_argument('--lr', type=float, default=4e-5) -parser.add_argument('--resample', default='') -parser.add_argument('--verbose', type=bool, default=True) -args = parser.parse_args() - -args.num_labels = args.num_decs -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -tokenizer = load_tokenizer(args.model_name) 
-model = load_model(args, device)[0] -model.eval() -torch.set_grad_enabled(False) -run_gradio(model, tokenizer) diff --git a/spaces/monra/freegpt-webui-chimera/run.py b/spaces/monra/freegpt-webui-chimera/run.py deleted file mode 100644 index 65f603f88f4a30ce02fb4f5554d2c5fc6259575d..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui-chimera/run.py +++ /dev/null @@ -1,48 +0,0 @@ -import secrets - -from server.bp import bp -from server.website import Website -from server.backend import Backend_Api -from server.babel import create_babel -from json import load -from flask import Flask - -if __name__ == '__main__': - - # Load configuration from config.json - config = load(open('config.json', 'r')) - site_config = config['site_config'] - url_prefix = config.pop('url_prefix') - - # Create the app - app = Flask(__name__) - app.secret_key = secrets.token_hex(16) - - # Set up Babel - create_babel(app) - - # Set up the website routes - site = Website(bp, url_prefix) - for route in site.routes: - bp.add_url_rule( - route, - view_func=site.routes[route]['function'], - methods=site.routes[route]['methods'], - ) - - # Set up the backend API routes - backend_api = Backend_Api(bp, config) - for route in backend_api.routes: - bp.add_url_rule( - route, - view_func=backend_api.routes[route]['function'], - methods=backend_api.routes[route]['methods'], - ) - - # Register the blueprint - app.register_blueprint(bp, url_prefix=url_prefix) - - # Run the Flask server - print(f"Running on {site_config['port']}{url_prefix}") - app.run(**site_config) - print(f"Closing port {site_config['port']}") \ No newline at end of file diff --git a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/callbacks.py b/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/callbacks.py deleted file mode 100644 index c51c268f20d63581014d569671cc5473f112eadc..0000000000000000000000000000000000000000 --- a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/callbacks.py +++ /dev/null @@ -1,78 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Callback utils -""" - - -class Callbacks: - """" - Handles all registered callbacks for YOLOv5 Hooks - """ - - def __init__(self): - # Define the available callbacks - self._callbacks = { - 'on_pretrain_routine_start': [], - 'on_pretrain_routine_end': [], - - 'on_train_start': [], - 'on_train_epoch_start': [], - 'on_train_batch_start': [], - 'optimizer_step': [], - 'on_before_zero_grad': [], - 'on_train_batch_end': [], - 'on_train_epoch_end': [], - - 'on_val_start': [], - 'on_val_batch_start': [], - 'on_val_image_end': [], - 'on_val_batch_end': [], - 'on_val_end': [], - - 'on_fit_epoch_end': [], # fit = train + val - 'on_model_save': [], - 'on_train_end': [], - 'on_params_update': [], - 'teardown': [], - } - self.stop_training = False # set True to interrupt training - - def register_action(self, hook, name='', callback=None): - """ - Register a new action to a callback hook - - Args: - hook The callback hook name to register the action to - name The name of the action for later reference - callback The callback to fire - """ - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - assert callable(callback), f"callback '{callback}' is not callable" - self._callbacks[hook].append({'name': name, 'callback': callback}) - - def get_registered_actions(self, hook=None): - """" - Returns all the registered actions by callback hook - - Args: - hook The name of the hook to check, defaults to all - """ - if hook: - return 
self._callbacks[hook] - else: - return self._callbacks - - def run(self, hook, *args, **kwargs): - """ - Loop through the registered actions and fire all callbacks - - Args: - hook The name of the hook to check, defaults to all - args Arguments to receive from YOLOv5 - kwargs Keyword Arguments to receive from YOLOv5 - """ - - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - - for logger in self._callbacks[hook]: - logger['callback'](*args, **kwargs) diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Advanced Computer Architecture Kai Hwang Second Edition Pdf Free [Extra Quality] Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Advanced Computer Architecture Kai Hwang Second Edition Pdf Free [Extra Quality] Download.md deleted file mode 100644 index 70af70dca42ca20adc9e6462ac39b3521261b565..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Advanced Computer Architecture Kai Hwang Second Edition Pdf Free [Extra Quality] Download.md +++ /dev/null @@ -1,98 +0,0 @@ -## Advanced Computer Architecture Kai Hwang Second Edition Pdf Free Download - - - - - - - - - -**CLICK HERE >> [https://maudaracte.blogspot.com/?file=2tyUBA](https://maudaracte.blogspot.com/?file=2tyUBA)** - - - - - - - - - - - - ```html - -# Download Advanced Computer Architecture by Kai Hwang for Free - - - -If you are looking for a comprehensive and up-to-date textbook on advanced computer architecture, you might want to check out *Advanced Computer Architecture* by Kai Hwang and Naresh Jotwani. This book covers the latest developments and trends in parallel and distributed computing, including multicore processors, cloud computing, big data analytics, and GPU architectures. - - - -The second edition of this book has been revised and updated to reflect the changes and challenges in the field of computer architecture. It includes new chapters on memory systems, interconnection networks, and performance evaluation. It also features more examples, exercises, and case studies to help students master the concepts and techniques of parallel and distributed computing. - - - -The best part is that you can download this book for free from Google Drive. Just click on the link below and enjoy learning about advanced computer architecture from one of the leading experts in the field. - - - -[Advanced Computer Architecture Kai Hwang 2nd edition.pdf](https://drive.google.com/file/d/0B2ocTDj7zqFKbHNNbUI0eDVzTUU/view) - - ``` ```html - -What is advanced computer architecture? It is the study of the design and organization of computer systems that can perform multiple tasks simultaneously and efficiently. It involves the analysis and optimization of various aspects of computer hardware and software, such as instruction sets, processors, memory, caches, buses, interconnects, operating systems, compilers, and applications. - - - -Why is advanced computer architecture important? It is important because it enables the development of faster, cheaper, and more powerful computer systems that can handle the increasing demands of modern computing applications. It also helps to solve some of the major challenges in computing, such as scalability, reliability, security, energy efficiency, and fault tolerance. - - - -How can you learn advanced computer architecture? You can learn advanced computer architecture by reading books, taking courses, doing projects, and following research papers and blogs. 
One of the best books to start with is *Advanced Computer Architecture* by Kai Hwang and Naresh Jotwani. This book provides a comprehensive and accessible introduction to the field of parallel and distributed computing. It covers both the theoretical foundations and the practical implementations of various parallel and distributed architectures. - - ``` ```html - -What are some of the topics covered in *Advanced Computer Architecture* by Kai Hwang and Naresh Jotwani? Some of the topics covered in this book are: - - - -- Parallel computer models and program paradigms - -- Pipelining and superscalar techniques - -- Instruction-level and thread-level parallelism - -- Multicore and multiprocessor architectures - -- Shared-memory and message-passing systems - -- Cache coherence and memory consistency - -- Distributed shared memory and directory-based protocols - -- Cluster, grid, and cloud computing - -- Vector, symbolic, and neural processors - -- Dataflow and systolic architectures - -- Scalable, multithreaded, and dataflow architectures - -- GPU and heterogeneous computing - -- Big data processing and analytics - -- Performance evaluation and optimization - - - -Who can benefit from reading *Advanced Computer Architecture* by Kai Hwang and Naresh Jotwani? This book is suitable for anyone who wants to learn about the principles and practices of parallel and distributed computing. It can be used as a textbook for undergraduate and graduate courses in computer science and engineering. It can also be used as a reference book for researchers, practitioners, and educators in the field of computer architecture. - - ``` 145887f19f - - - - - diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Al Ameen Accounting Software Cra !!TOP!!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Al Ameen Accounting Software Cra !!TOP!!.md deleted file mode 100644 index e8dd4968ff0bc47473fadfd5a4251ebf92f2045e..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Al Ameen Accounting Software Cra !!TOP!!.md +++ /dev/null @@ -1,34 +0,0 @@ -
                    -

                    How Al Ameen Accounting Software Can Help You Manage Your Business

                    -

                    Al Ameen Accounting Software is a comprehensive and user-friendly solution that offers various features and modules to help you manage your business operations and finances. Whether you are a small, medium or large enterprise, Al Ameen Accounting Software can cater to your needs and provide you with the tools you need to succeed.

                    -

                    Some of the features and modules that Al Ameen Accounting Software offers are:

                    -

                    Al Ameen Accounting Software Cra


Download Zip: https://urlcod.com/2uIaGi



                    -
                      -
                    • Smart Reporting: You can generate various reports and analyses based on your data, such as financial statements, inventory reports, sales reports, cost center reports, audit reports, financial ratio analysis, income statement per brand or profit center, and more. You can also customize your reports and export them to different formats.
                    • -
                    • Rich Printing: You can print your documents and invoices with high quality and professional design, using different templates, fonts, colors, logos, and signatures. You can also print barcodes, QR codes, labels, and receipts.
                    • -
                    • Archiving System: You can save and manage your documents and files within Al Ameen Accounting Software, without the need for another program. You can attach any document to any record or transaction in the software, and easily search and review them later.
                    • -
                    • SMS and Email Integration: You can automatically send SMS and emails to your customers and employees when an event occurs in Al Ameen Accounting Software, such as the maturity of an invoice, a payment reminder, a birthday greeting, or a promotional offer. You can also send bulk SMS and emails to your contacts.
                    • -
                    • Job Order Costing: You can track and control the costs and revenues of each job or project that you undertake, and compare them with the budget and the actual results. You can also allocate the costs and revenues to different cost centers or profit centers.
                    • -
                    • Other Modules: Al Ameen Accounting Software also offers other modules that cover various aspects of your business, such as inventory management, sales management, purchase management, production management, human resources management, payroll management, fixed assets management, point of sale management, and more.
                    • -
                    -

                    Al Ameen Accounting Software is a reliable and trusted solution that has been serving thousands of customers in the region for over 25 years. It is constantly updated and enhanced to meet the changing needs and expectations of the market. It is also compatible with Windows, Web, Mobile, and Tablets platforms.

                    -

                    If you are looking for a powerful and flexible accounting software that can help you manage your business effectively and efficiently, look no further than Al Ameen Accounting Software. To learn more about Al Ameen Accounting Software or to request a free demo, visit their website at https://alameensoft.com/en/.

                    - -

                    But don't just take our word for it. Listen to what some of our satisfied customers have to say about Al Ameen Accounting Software:

                    -
                    -

                    "Al Ameen Accounting Software has been a great asset for our company. It has helped us streamline our accounting processes, improve our financial reporting, and manage our inventory and sales efficiently. We are very happy with the software and the support we receive from Al Ameen team."

                    -

                    -- Rawaa, a manufacturing company -
                    -
                    -

                    "We have been using Al Ameen Accounting Software for over 10 years, and we can say that it is the best accounting software in the market. It is easy to use, flexible, and reliable. It has all the features and modules we need to run our business smoothly. We highly recommend Al Ameen Accounting Software to anyone looking for a quality accounting solution."

                    -- Ahmad, a trading company -
                    -
                    -

                    "Al Ameen Accounting Software is a powerful and comprehensive solution that meets all our accounting and business needs. It has helped us improve our productivity, accuracy, and profitability. It also integrates well with other systems and platforms, such as SMS, email, web, and mobile. We are very satisfied with Al Ameen Accounting Software and its customer service."

                    -- Fatima, a service company -
                    -

                    As you can see, Al Ameen Accounting Software is a trusted and proven solution that has helped thousands of customers achieve their business goals and objectives. Whether you are a new or existing customer, you can benefit from Al Ameen Accounting Software and its features and modules.

                    -

                    So what are you waiting for? Contact us today and get started with Al Ameen Accounting Software. You will not regret it!

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Alonso Serai-Securing The White Fillies.pdf.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Alonso Serai-Securing The White Fillies.pdf.md deleted file mode 100644 index a7edff1abc6c6a448c9c1639b15e347ab4281aa3..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Alonso Serai-Securing The White Fillies.pdf.md +++ /dev/null @@ -1,17 +0,0 @@ -
                    -

                    Alonso Serai-Securing The White Fillies: A Thrilling Romance Novel

                    -

                    If you are looking for a captivating romance novel that will keep you on the edge of your seat, you should check out Alonso Serai-Securing The White Fillies by Lila Rose. This book tells the story of Alonso Serai, a handsome and mysterious billionaire who owns a horse racing empire. He is determined to win the prestigious White Fillies Cup, but he needs the help of a talented and beautiful jockey named Mia Carter.

                    -

                    Mia Carter is a young and ambitious woman who loves horses and racing. She has a troubled past that haunts her, but she is determined to overcome her fears and prove herself in the male-dominated sport. When she meets Alonso Serai, she feels an instant attraction that she can't deny. But she also senses that he is hiding something from her, something that could put them both in danger.

                    -

                    Alonso Serai-Securing The White Fillies.pdf


Download Zip: https://urlcod.com/2uIacT



                    -

                    As they work together to train for the White Fillies Cup, they discover that they have more in common than they thought. They also face many challenges and enemies that threaten to tear them apart. Will they be able to trust each other and win the race of their lives? Or will they lose everything they have worked for?

                    -

                    Alonso Serai-Securing The White Fillies is a thrilling romance novel that will take you on a roller coaster of emotions. It is full of suspense, drama, passion, and twists that will keep you hooked until the end. If you love romance novels with strong characters, exotic settings, and exciting plots, you will love this book.

                    -

                    You can download Alonso Serai-Securing The White Fillies.pdf from the link below and start reading it today. You won't regret it!

                    -Download Alonso Serai-Securing The White Fillies.pdf - -

                    Alonso Serai is not your typical billionaire. He has a dark and mysterious past that he keeps hidden from everyone. He was born in Spain, but he moved to the United States when he was a teenager. He started his horse racing empire from scratch, and he became one of the most powerful and influential men in the industry. He is ruthless, cunning, and charismatic, and he always gets what he wants.

                    -

                    Mia Carter is not your typical jockey. She has a passion and talent for horse racing that is unmatched by anyone. She grew up in a poor and abusive family, but she escaped from them when she was 18. She worked hard to make a name for herself in the sport, and she earned the respect and admiration of many people. She is brave, loyal, and independent, and she never gives up.

                    -

                    -

                    When Alonso Serai and Mia Carter meet, sparks fly between them. They feel a connection that is deeper than anything they have ever experienced before. But they also face many obstacles that stand in their way. Alonso Serai has a secret that could destroy everything he has built, and Mia Carter has a past that could ruin everything she has dreamed of. They also have to deal with the competition, the media, the fans, and the enemies that want to see them fail.

                    -

                    Will they be able to overcome all the challenges and find their happily ever after? Or will they lose each other in the process? Find out in Alonso Serai-Securing The White Fillies, a thrilling romance novel that will make you fall in love with the characters and their story.

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/neuralmagic/nlp-ner/app.py b/spaces/neuralmagic/nlp-ner/app.py deleted file mode 100644 index 0ae3db81ef21ab602dbb6ac1dde83e850e7ab092..0000000000000000000000000000000000000000 --- a/spaces/neuralmagic/nlp-ner/app.py +++ /dev/null @@ -1,103 +0,0 @@ -from deepsparse import Pipeline -import time -import gradio as gr - -markdownn = ''' -# Named Entity Recognition Pipeline with DeepSparse -Named Entity Recognition is the task of extracting and locating named entities in a sentence. The entities include, people's names, location, organizations, etc. -![Named Entity Recognition Pipeline with DeepSparse](https://huggingface.co/spaces/neuralmagic/nlp-ner/resolve/main/named.png) - -## What is DeepSparse? -DeepSparse is an inference runtime offering GPU-class performance on CPUs and APIs to integrate ML into your application. Sparsification is a powerful technique for optimizing models for inference, reducing the compute needed with a limited accuracy tradeoff. DeepSparse is designed to take advantage of model sparsity, enabling you to deploy models with the flexibility and scalability of software on commodity CPUs with the best-in-class performance of hardware accelerators, enabling you to standardize operations and reduce infrastructure costs. -Similar to Hugging Face, DeepSparse provides off-the-shelf pipelines for computer vision and NLP that wrap the model with proper pre- and post-processing to run performantly on CPUs by using sparse models. - -SparseML Named Entity Recognition Pipelines integrate with Hugging Face’s Transformers library to enable the sparsification of a large set of transformers models. -### Inference API Example -Here is sample code for a token classification pipeline: -```python -from deepsparse import Pipeline -pipeline = Pipeline.create(task="ner", model_path="zoo:nlp/token_classification/distilbert-none/pytorch/huggingface/conll2003/pruned80_quant-none-vnni") -text = "Mary is flying from Nairobi to New York" -inference = pipeline(text) -print(inference) -``` -## Use Case Description -The Named Entity Recognition Pipeline can process text before storing the information in a database. -For example, you may want to process text and store the entities in different columns depending on the entity type. - -[Want to train a sparse model on your data? 
Checkout the documentation on sparse transfer learning](https://docs.neuralmagic.com/use-cases/natural-language-processing/question-answering) -''' -task = "ner" -sparse_qa_pipeline = Pipeline.create( - task=task, - model_path="zoo:nlp/token_classification/bert-base/pytorch/huggingface/conll2003/12layer_pruned80_quant-none-vnni", - ) - -def map_ner(inference): - entities = [] - for item in dict(inference)['predictions'][0]: - dictionary = dict(item) - entity = dictionary['entity'] - if entity == "LABEL_0": - value = "O" - elif entity == "LABEL_1": - value = "B-PER" - elif entity == "LABEL_2": - value = "I-PER" - elif entity == "LABEL_3": - value = "-ORG" - elif entity == "LABEL_4": - value = "I-ORG" - elif entity == "LABEL_5": - value = "B-LOC" - elif entity == "LABEL_6": - value = "I-LOC" - elif entity == "LABEL_7": - value = "B-MISC" - else: - value = "I-MISC" - dictionary['entity'] = value - entities.append(dictionary) - return entities - -def run_pipeline(text): - sparse_start = time.perf_counter() - sparse_output = sparse_qa_pipeline(text) - sparse_entities = map_ner(sparse_output) - sparse_output = {"text": text, "entities": sparse_entities} - sparse_result = dict(sparse_output) - sparse_end = time.perf_counter() - sparse_duration = (sparse_end - sparse_start) * 1000.0 - - return sparse_output, sparse_duration - - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - gr.Markdown(markdownn) - - with gr.Column(): - gr.Markdown(""" - ### Named Entity Recognition Demo - Using [token_classification/distilbert](https://sparsezoo.neuralmagic.com/models/nlp%2Ftoken_classification%2Fdistilbert-none%2Fpytorch%2Fhuggingface%2Fconll2003%2Fpruned80_quant-none-vnni) - - """) - text = gr.Text(label="Text") - btn = gr.Button("Submit") - - sparse_answers = gr.HighlightedText(label="Sparse model answers") - sparse_duration = gr.Number(label="Sparse Latency (ms):") - gr.Examples( [["We are flying from Texas to California"],["Mary is flying from Nairobi to New York"],["Norway is beautiful and has great hotels"] ],inputs=[text],) - - - - - btn.click( - run_pipeline, - inputs=[text], - outputs=[sparse_answers,sparse_duration], - ) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/samplers/__init__.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/samplers/__init__.py deleted file mode 100644 index 7dba87ea1c6f37ab56071d2f5d715bd78fe8816f..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/samplers/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -from .densepose_uniform import DensePoseUniformSampler -from .densepose_confidence_based import DensePoseConfidenceBasedSampler -from .densepose_cse_uniform import DensePoseCSEUniformSampler -from .densepose_cse_confidence_based import DensePoseCSEConfidenceBasedSampler -from .mask_from_densepose import MaskFromDensePoseSampler -from .prediction_to_gt import PredictionToGroundTruthSampler diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/bad_import.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/bad_import.py deleted file mode 100644 index d7452c4dfc211223c946f22df7a2eb6bdc2cd829..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/bad_import.py +++ /dev/null @@ -1,2 +0,0 @@ -# import from directory is not allowed -from . import dir1a diff --git a/spaces/nugrahatheo/Prediction-of-Credit-Card-Default/prediction.py b/spaces/nugrahatheo/Prediction-of-Credit-Card-Default/prediction.py deleted file mode 100644 index 2da0cfd3085fcf4338de22e201bbe8bc6205a617..0000000000000000000000000000000000000000 --- a/spaces/nugrahatheo/Prediction-of-Credit-Card-Default/prediction.py +++ /dev/null @@ -1,114 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -import pickle -import json - -# Load all files -with open('list_cat_cols.txt', 'r') as file_1: - list_cat_cols = json.load(file_1) - -with open('list_num_cols.txt', 'r') as file_2: - list_num_cols = json.load(file_2) - -with open('model_scaler.pkl', 'rb') as file_3: - scaler = pickle.load(file_3) - -with open('model_encoder.pkl', 'rb') as file_4: - encoder = pickle.load(file_4) - -with open('model_svc.pkl', 'rb') as file_5: - model_svc = pickle.load(file_5) - -with open('model_grid_rf.pkl', 'rb') as file_6: - model_grid_rf = pickle.load(file_6) - -def run(): - st.write('##### Form Prediction Credit Card Default') - - # Making Form - with st.form(key='Form Prediction Credit Card Default'): - Limit_Balance = st.number_input('limit_balance', min_value=10000, max_value=800000, value=10000, step=1, help='Limit Balance') - Sex = st.selectbox('sex', (1,2), index=1, help='1=Male, 2=Female') - Education_Level = st.selectbox('education_level', (1,2,3,4), index=1, help='1=Graduate School, 2=University, 3=High School, 4=Others') - Marital_Status = st.selectbox('marital_status', (1,2,3), index=1, help='1=married, 2=single, 3=others') - Age = st.number_input('age', min_value=20, max_value=70, value=22) - st.markdown('---') - Pay_0 = st.number_input('pay_0', min_value=-1, max_value=12, value=0, help='-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above') - Pay_2 = st.number_input('pay_2', min_value=-1, max_value=12, value=0, help='-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above') - Pay_3 = st.number_input('pay_3', min_value=-1, max_value=12, value=0, help='-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above') - Pay_4 = st.number_input('pay_4', min_value=-1, max_value=12, value=0, help='-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 
8=payment delay for eight months, 9=payment delay for nine months and above') - Pay_5 = st.number_input('pay_5', min_value=-1, max_value=12, value=0, help='-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above') - Pay_6 = st.number_input('pay_6', min_value=-1, max_value=12, value=0, help='-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above') - st.markdown('---') - Bill_Amount_1 = st.number_input('bill_amt_1', min_value=-80000, max_value=650000, value=0) - Bill_Amount_2 = st.number_input('bill_amt_2', min_value=-80000, max_value=650000, value=0) - Bill_Amount_3 = st.number_input('bill_amt_3', min_value=-80000, max_value=650000, value=0) - Bill_Amount_4 = st.number_input('bill_amt_4', min_value=-80000, max_value=650000, value=0) - Bill_Amount_5 = st.number_input('bill_amt_5', min_value=-80000, max_value=650000, value=0) - Bill_Amount_6 = st.number_input('bill_amt_6', min_value=-80000, max_value=650000, value=0) - st.markdown('---') - Pay_Amount_1 = st.number_input('pay_amt_1', min_value=-0, max_value=650000, value=0) - Pay_Amount_2 = st.number_input('pay_amt_2', min_value=-0, max_value=650000, value=0) - Pay_Amount_3 = st.number_input('pay_amt_3', min_value=-0, max_value=650000, value=0) - Pay_Amount_4 = st.number_input('pay_amt_4', min_value=-0, max_value=650000, value=0) - Pay_Amount_5 = st.number_input('pay_amt_5', min_value=-0, max_value=650000, value=0) - Pay_Amount_6 = st.number_input('pay_amt_6', min_value=-0, max_value=650000, value=0) - - submited_1 = st.form_submit_button('Predict using SVC') - submited_2 = st.form_submit_button('Predict using RFC') - - data_inf = { - 'limit_balance' : Limit_Balance, - 'sex' : Sex, - 'education_level' : Education_Level, - 'marital_status' : Marital_Status, - 'age' : Age, - 'pay_0' : Pay_0, - 'pay_2' : Pay_2, - 'pay_3' : Pay_3, - 'pay_4' : Pay_4, - 'pay_5' : Pay_5, - 'pay_6' : Pay_6, - 'bill_amt_1' : Bill_Amount_1, - 'bill_amt_2' : Bill_Amount_2, - 'bill_amt_3' : Bill_Amount_3, - 'bill_amt_4' : Bill_Amount_4, - 'bill_amt_5' : Bill_Amount_5, - 'bill_amt_6' : Bill_Amount_6, - 'pay_amt_1' : Pay_Amount_1, - 'pay_amt_2' : Pay_Amount_2, - 'pay_amt_3' : Pay_Amount_3, - 'pay_amt_4' : Pay_Amount_4, - 'pay_amt_5' : Pay_Amount_5, - 'pay_amt_6' : Pay_Amount_6 - } - - data_inf = pd.DataFrame([data_inf]) - st.dataframe(data_inf) - - if submited_1: - #Split between numerical columns and categorical columns - data_inf_num = data_inf[list_num_cols] - data_inf_cat = data_inf[list_cat_cols] - #Feature scaling and feature encoding - data_inf_num_scaled = scaler.transform(data_inf_num) - data_inf_cat_encoded = encoder.transform(data_inf_cat) - data_inf_final = np.concatenate([data_inf_num_scaled, data_inf_cat_encoded], axis = 1) - #Predict using SVC - y_pred_inf = model_svc.predict(data_inf_final) - st.write('# Result : ', str(int(y_pred_inf))) - else: - #Split between numerical columns and categorical columns - data_inf_num = data_inf[list_num_cols] - data_inf_cat = data_inf[list_cat_cols] - #Feature scaling and feature encoding - data_inf_num_scaled = scaler.transform(data_inf_num) - data_inf_cat_encoded = encoder.transform(data_inf_cat) - data_inf_final = np.concatenate([data_inf_num_scaled, data_inf_cat_encoded], axis = 1) - #Predict using RFC - y_pred_inf = model_grid_rf.predict(data_inf_final) - st.write('# Result : ', str(int(y_pred_inf))) - -if __name__ == 
'__main__': - run() \ No newline at end of file diff --git a/spaces/osbm/token_merger_demo/README.md b/spaces/osbm/token_merger_demo/README.md deleted file mode 100644 index a5a724d6a95fbe478e959f3c4d0a1d0d287a89a1..0000000000000000000000000000000000000000 --- a/spaces/osbm/token_merger_demo/README.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Token Merger Demo -emoji: 🐺 -colorFrom: red -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -# Token Merging: Your ViT but Faster - -github: https://github.com/facebookresearch/tome -paper: https://arxiv.org/abs/2210.09461 - -# Citation -```bibtex -@inproceedings{bolya2022tome, - title={Token Merging: Your {ViT} but Faster}, - author={Bolya, Daniel and Fu, Cheng-Yang and Dai, Xiaoliang and Zhang, Peizhao and Feichtenhofer, Christoph and Hoffman, Judy}, - booktitle={International Conference on Learning Representations}, - year={2023} -} -``` \ No newline at end of file diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/tool/allunitsample.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/tool/allunitsample.py deleted file mode 100644 index 9f86e196ce63ebfcad1fcee8bd2b7358463ff3d1..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/tool/allunitsample.py +++ /dev/null @@ -1,199 +0,0 @@ -''' -A simple tool to generate sample of output of a GAN, -subject to filtering, sorting, or intervention. -''' - -import torch, numpy, os, argparse, sys, shutil, errno, numbers -from PIL import Image -from torch.utils.data import TensorDataset -from netdissect.zdataset import standard_z_sample -from netdissect.progress import default_progress, verbose_progress -from netdissect.autoeval import autoimport_eval -from netdissect.workerpool import WorkerBase, WorkerPool -from netdissect.nethook import retain_layers -from netdissect.runningstats import RunningTopK - -def main(): - parser = argparse.ArgumentParser(description='GAN sample making utility') - parser.add_argument('--model', type=str, default=None, - help='constructor for the model to test') - parser.add_argument('--pthfile', type=str, default=None, - help='filename of .pth file for the model') - parser.add_argument('--outdir', type=str, default='images', - help='directory for image output') - parser.add_argument('--size', type=int, default=100, - help='number of images to output') - parser.add_argument('--test_size', type=int, default=None, - help='number of images to test') - parser.add_argument('--layer', type=str, default=None, - help='layer to inspect') - parser.add_argument('--seed', type=int, default=1, - help='seed') - parser.add_argument('--quiet', action='store_true', default=False, - help='silences console output') - if len(sys.argv) == 1: - parser.print_usage(sys.stderr) - sys.exit(1) - args = parser.parse_args() - verbose_progress(not args.quiet) - - # Instantiate the model - model = autoimport_eval(args.model) - if args.pthfile is not None: - data = torch.load(args.pthfile) - if 'state_dict' in data: - meta = {} - for key in data: - if isinstance(data[key], numbers.Number): - meta[key] = data[key] - data = data['state_dict'] - model.load_state_dict(data) - # Unwrap any DataParallel-wrapped model - if isinstance(model, torch.nn.DataParallel): - model = next(model.children()) - # Examine first conv in model to determine input feature size. 
- first_layer = [c for c in model.modules() - if isinstance(c, (torch.nn.Conv2d, torch.nn.ConvTranspose2d, - torch.nn.Linear))][0] - # 4d input if convolutional, 2d input if first layer is linear. - if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): - z_channels = first_layer.in_channels - spatialdims = (1, 1) - else: - z_channels = first_layer.in_features - spatialdims = () - # Instrument the model - retain_layers(model, [args.layer]) - model.cuda() - - if args.test_size is None: - args.test_size = args.size * 20 - z_universe = standard_z_sample(args.test_size, z_channels, - seed=args.seed) - z_universe = z_universe.view(tuple(z_universe.shape) + spatialdims) - indexes = get_all_highest_znums( - model, z_universe, args.size, seed=args.seed) - save_chosen_unit_images(args.outdir, model, z_universe, indexes, - lightbox=True) - - -def get_all_highest_znums(model, z_universe, size, - batch_size=10, seed=1): - # The model should have been instrumented already - retained_items = list(model.retained.items()) - assert len(retained_items) == 1 - layer = retained_items[0][0] - # By default, a 10% sample - progress = default_progress() - num_units = None - with torch.no_grad(): - # Pass 1: collect max activation stats - z_loader = torch.utils.data.DataLoader(TensorDataset(z_universe), - batch_size=batch_size, num_workers=2, - pin_memory=True) - rtk = RunningTopK(k=size) - for [z] in progress(z_loader, desc='Finding max activations'): - z = z.cuda() - model(z) - feature = model.retained[layer] - num_units = feature.shape[1] - max_feature = feature.view( - feature.shape[0], num_units, -1).max(2)[0] - rtk.add(max_feature) - td, ti = rtk.result() - highest = ti.sort(1)[0] - return highest - -def save_chosen_unit_images(dirname, model, z_universe, indices, - shared_dir="shared_images", - unitdir_template="unit_{}", - name_template="image_{}.jpg", - lightbox=False, batch_size=50, seed=1): - all_indices = torch.unique(indices.view(-1), sorted=True) - z_sample = z_universe[all_indices] - progress = default_progress() - sdir = os.path.join(dirname, shared_dir) - created_hashdirs = set() - for index in range(len(z_universe)): - hd = hashdir(index) - if hd not in created_hashdirs: - created_hashdirs.add(hd) - os.makedirs(os.path.join(sdir, hd), exist_ok=True) - with torch.no_grad(): - # Pass 2: now generate images - z_loader = torch.utils.data.DataLoader(TensorDataset(z_sample), - batch_size=batch_size, num_workers=2, - pin_memory=True) - saver = WorkerPool(SaveImageWorker) - for batch_num, [z] in enumerate(progress(z_loader, - desc='Saving images')): - z = z.cuda() - start_index = batch_num * batch_size - im = ((model(z) + 1) / 2 * 255).clamp(0, 255).byte().permute( - 0, 2, 3, 1).cpu() - for i in range(len(im)): - index = all_indices[i + start_index].item() - filename = os.path.join(sdir, hashdir(index), - name_template.format(index)) - saver.add(im[i].numpy(), filename) - saver.join() - linker = WorkerPool(MakeLinkWorker) - for u in progress(range(len(indices)), desc='Making links'): - udir = os.path.join(dirname, unitdir_template.format(u)) - os.makedirs(udir, exist_ok=True) - for r in range(indices.shape[1]): - index = indices[u,r].item() - fn = name_template.format(index) - # sourcename = os.path.join('..', shared_dir, fn) - sourcename = os.path.join(sdir, hashdir(index), fn) - targname = os.path.join(udir, fn) - linker.add(sourcename, targname) - if lightbox: - copy_lightbox_to(udir) - linker.join() - -def copy_lightbox_to(dirname): - srcdir = os.path.realpath( - 
os.path.join(os.getcwd(), os.path.dirname(__file__))) - shutil.copy(os.path.join(srcdir, 'lightbox.html'), - os.path.join(dirname, '+lightbox.html')) - -def hashdir(index): - # To keep the number of files the shared directory lower, split it - # into 100 subdirectories named as follows. - return '%02d' % (index % 100) - -class SaveImageWorker(WorkerBase): - # Saving images can be sped up by sending jpeg encoding and - # file-writing work to a pool. - def work(self, data, filename): - Image.fromarray(data).save(filename, optimize=True, quality=100) - -class MakeLinkWorker(WorkerBase): - # Creating symbolic links is a bit slow and can be done faster - # in parallel rather than waiting for each to be created. - def work(self, sourcename, targname): - try: - os.link(sourcename, targname) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(targname) - os.link(sourcename, targname) - else: - raise - -class MakeSyminkWorker(WorkerBase): - # Creating symbolic links is a bit slow and can be done faster - # in parallel rather than waiting for each to be created. - def work(self, sourcename, targname): - try: - os.symlink(sourcename, targname) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(targname) - os.symlink(sourcename, targname) - else: - raise - -if __name__ == '__main__': - main() diff --git a/spaces/pcuenq/paella/Paella/utils/modules.py b/spaces/pcuenq/paella/Paella/utils/modules.py deleted file mode 100644 index e358ad2fa4c1a57a4230a0dbea1f09d094f4f67a..0000000000000000000000000000000000000000 --- a/spaces/pcuenq/paella/Paella/utils/modules.py +++ /dev/null @@ -1,291 +0,0 @@ -import torch -from torch import nn -import numpy as np -import math - - -class Attention2D(nn.Module): - def __init__(self, c, nhead, dropout=0.0): - super().__init__() - self.attn = torch.nn.MultiheadAttention(c, nhead, dropout=dropout, bias=True, batch_first=True) - - def forward(self, x, kv, self_attn=False, **kwargs): - orig_shape = x.shape - x = x.view(x.size(0), x.size(1), -1).permute(0, 2, 1) # Bx4xHxW -> Bx(HxW)x4 - if self_attn: - kv = torch.cat([x, kv], dim=1) - x = self.attn(x, kv, kv, need_weights=False, **kwargs)[0] - x = x.permute(0, 2, 1).view(*orig_shape) - return x - - -class LayerNorm2d(nn.LayerNorm): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def forward(self, x): - return super().forward(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) - - -class GlobalResponseNorm(nn.Module): - "Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105" - def __init__(self, dim): - super().__init__() - self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) - self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) - - def forward(self, x): - Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) - Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6) - return self.gamma * (x * Nx) + self.beta + x - - -class ResBlock(nn.Module): - def __init__(self, c, c_skip=None, kernel_size=3, dropout=0.0): - super().__init__() - self.depthwise = nn.Conv2d(c + c_skip, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) - self.norm = LayerNorm2d(c, elementwise_affine=False, eps=1e-6) - self.channelwise = nn.Sequential( - nn.Linear(c, c * 4), - nn.GELU(), - GlobalResponseNorm(c * 4), - nn.Dropout(dropout), - nn.Linear(c * 4, c) - ) - - def forward(self, x, x_skip=None): - x_res = x - if x_skip is not None: - x = torch.cat([x, x_skip], dim=1) - x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) - x = 
self.channelwise(x).permute(0, 3, 1, 2) - return x + x_res - - -class AttnBlock(nn.Module): - def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): - super().__init__() - self.self_attn = self_attn - self.norm = LayerNorm2d(c, elementwise_affine=False, eps=1e-6) - self.attention = Attention2D(c, nhead, dropout) - self.kv_mapper = nn.Sequential( - nn.SiLU(), - nn.Linear(c_cond, c) - ) - - def forward(self, x, kv, **kwargs): - kv = self.kv_mapper(kv) - x = x + self.attention(self.norm(x), kv, self_attn=self.self_attn, **kwargs) - return x - - -class FeedForwardBlock(nn.Module): - def __init__(self, c, dropout=0.0): - super().__init__() - self.norm = LayerNorm2d(c, elementwise_affine=False, eps=1e-6) - self.channelwise = nn.Sequential( - nn.Linear(c, c * 4), - nn.GELU(), - GlobalResponseNorm(c * 4), - nn.Dropout(dropout), - nn.Linear(c * 4, c) - ) - - def forward(self, x): - x = x + self.channelwise(self.norm(x).permute(0, 2, 3, 1)).permute(0, 3, 1, 2) - return x - - -class TimestepBlock(nn.Module): - def __init__(self, c, c_timestep): - super().__init__() - self.mapper = nn.Linear(c_timestep, c * 2) - - def forward(self, x, t): - a, b = self.mapper(t)[:, :, None, None].chunk(2, dim=1) - return x * (1 + a) + b - - -class Paella(nn.Module): - def __init__(self, c_in=256, c_out=256, num_labels=8192, c_r=64, patch_size=2, c_cond=1024, - c_hidden=[640, 1280, 1280], nhead=[-1, 16, 16], blocks=[6, 16, 6], level_config=['CT', 'CTA', 'CTA'], - clip_embd=1024, byt5_embd=1536, clip_seq_len=4, kernel_size=3, dropout=0.1, self_attn=True): - super().__init__() - self.c_r = c_r - self.c_cond = c_cond - self.num_labels = num_labels - if not isinstance(dropout, list): - dropout = [dropout] * len(c_hidden) - - # CONDITIONING - self.byt5_mapper = nn.Linear(byt5_embd, c_cond) - self.clip_mapper = nn.Linear(clip_embd, c_cond * clip_seq_len) - self.clip_image_mapper = nn.Linear(clip_embd, c_cond * clip_seq_len) - self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) - - self.in_mapper = nn.Sequential( - nn.Embedding(num_labels, c_in), - nn.LayerNorm(c_in, elementwise_affine=False, eps=1e-6) - ) - self.embedding = nn.Sequential( - nn.PixelUnshuffle(patch_size), - nn.Conv2d(c_in * (patch_size ** 2), c_hidden[0], kernel_size=1), - LayerNorm2d(c_hidden[0], elementwise_affine=False, eps=1e-6) - ) - - def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0): - if block_type == 'C': - return ResBlock(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout) - elif block_type == 'A': - return AttnBlock(c_hidden, c_cond, nhead, self_attn=self_attn, dropout=dropout) - elif block_type == 'F': - return FeedForwardBlock(c_hidden, dropout=dropout) - elif block_type == 'T': - return TimestepBlock(c_hidden, c_r) - else: - raise Exception(f'Block type {block_type} not supported') - - # DOWN BLOCK - self.down_blocks = nn.ModuleList() - for i in range(len(c_hidden)): - down_block = nn.ModuleList() - if i > 0: - down_block.append(nn.Sequential( - LayerNorm2d(c_hidden[i - 1], elementwise_affine=False, eps=1e-6), - nn.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2), - )) - for _ in range(blocks[i]): - for block_type in level_config[i]: - down_block.append(get_block(block_type, c_hidden[i], nhead[i], dropout=dropout[i])) - self.down_blocks.append(down_block) - - # UP BLOCKS - self.up_blocks = nn.ModuleList() - for i in reversed(range(len(c_hidden))): - up_block = nn.ModuleList() - for j in range(blocks[i]): - for k, block_type in enumerate(level_config[i]): - 
up_block.append(get_block(block_type, c_hidden[i], nhead[i], - c_skip=c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0, - dropout=dropout[i])) - if i > 0: - up_block.append(nn.Sequential( - LayerNorm2d(c_hidden[i], elementwise_affine=False, eps=1e-6), - nn.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2), - )) - self.up_blocks.append(up_block) - - # OUTPUT - self.clf = nn.Sequential( - LayerNorm2d(c_hidden[0], elementwise_affine=False, eps=1e-6), - nn.Conv2d(c_hidden[0], c_out * (patch_size ** 2), kernel_size=1), - nn.PixelShuffle(patch_size), - ) - self.out_mapper = nn.Sequential( - LayerNorm2d(c_out, elementwise_affine=False, eps=1e-6), - nn.Conv2d(c_out, num_labels, kernel_size=1, bias=False) - ) - - # --- WEIGHT INIT --- - self.apply(self._init_weights) - nn.init.normal_(self.byt5_mapper.weight, std=0.02) - nn.init.normal_(self.clip_mapper.weight, std=0.02) - nn.init.normal_(self.clip_image_mapper.weight, std=0.02) - torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs - nn.init.constant_(self.clf[1].weight, 0) # outputs - nn.init.normal_(self.in_mapper[0].weight, std=np.sqrt(1 / num_labels)) # out mapper - self.out_mapper[-1].weight.data = self.in_mapper[0].weight.data[:, :, None, None].clone() - - for level_block in self.down_blocks + self.up_blocks: - for block in level_block: - if isinstance(block, ResBlock) or isinstance(block, FeedForwardBlock): - block.channelwise[-1].weight.data *= np.sqrt(1 / sum(blocks)) - elif isinstance(block, TimestepBlock): - nn.init.constant_(block.mapper.weight, 0) - - def _init_weights(self, m): - if isinstance(m, (nn.Conv2d, nn.Linear)): - torch.nn.init.xavier_uniform_(m.weight) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def gen_r_embedding(self, r, max_positions=10000): - r = r * max_positions - half_dim = self.c_r // 2 - emb = math.log(max_positions) / (half_dim - 1) - emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() - emb = r[:, None] * emb[None, :] - emb = torch.cat([emb.sin(), emb.cos()], dim=1) - if self.c_r % 2 == 1: # zero pad - emb = nn.functional.pad(emb, (0, 1), mode='constant') - return emb - - def gen_c_embeddings(self, byt5, clip, clip_image): - seq = self.byt5_mapper(byt5) - if clip is not None: - clip = self.clip_mapper(clip).view(clip.size(0), -1, self.c_cond) - seq = torch.cat([seq, clip], dim=1) - if clip_image is not None: - if isinstance(clip_image, list): - for ci in clip_image: - ci = self.clip_image_mapper(ci).view(ci.size(0), -1, self.c_cond) - seq = torch.cat([seq, ci], dim=1) - else: - clip_image = self.clip_image_mapper(clip_image).view(clip_image.size(0), -1, self.c_cond) - seq = torch.cat([seq, clip_image], dim=1) - seq = self.seq_norm(seq) - return seq - - def _down_encode(self, x, r_embed, c_embed, **kwargs): - level_outputs = [] - for down_block in self.down_blocks: - for block in down_block: - if isinstance(block, ResBlock): - x = block(x) - elif isinstance(block, AttnBlock): - x = block(x, c_embed, **kwargs) - elif isinstance(block, TimestepBlock): - x = block(x, r_embed) - else: - x = block(x) - level_outputs.insert(0, x) - return level_outputs - - def _up_decode(self, level_outputs, r_embed, c_embed, **kwargs): - x = level_outputs[0] - for i, up_block in enumerate(self.up_blocks): - for j, block in enumerate(up_block): - if isinstance(block, ResBlock): - x = block(x, level_outputs[i] if j == 0 and i > 0 else None) - elif isinstance(block, AttnBlock): - x = block(x, c_embed, **kwargs) - elif isinstance(block, TimestepBlock): - x 
= block(x, r_embed) - else: - x = block(x) - return x - - def forward(self, x, r, byt5, clip=None, clip_image=None, x_cat=None, **kwargs): - if x_cat is not None: - x = torch.cat([x, x_cat], dim=1) - # Process the conditioning embeddings - r_embed = self.gen_r_embedding(r) - c_embed = self.gen_c_embeddings(byt5, clip, clip_image) - - # Model Blocks - x = self.embedding(self.in_mapper(x).permute(0, 3, 1, 2)) - level_outputs = self._down_encode(x, r_embed, c_embed, **kwargs) - x = self._up_decode(level_outputs, r_embed, c_embed, **kwargs) - x = self.out_mapper(self.clf(x)) - return x - - def add_noise(self, x, t, mask=None, random_x=None): - if mask is None: - mask = (torch.rand_like(x.float()) <= t[:, None, None]).long() - if random_x is None: - random_x = torch.randint_like(x, 0, self.num_labels) - x = x * (1 - mask) + random_x * mask - return x, mask - - def get_loss_weight(self, t, mask, min_val=0.3): - return 1 - (1 - mask) * ((1 - t) * (1 - min_val))[:, None, None] diff --git a/spaces/perilli/tortoise-tts-v2/tortoise/models/arch_util.py b/spaces/perilli/tortoise-tts-v2/tortoise/models/arch_util.py deleted file mode 100644 index 5d8c36e9d4dfeabb82c46cdbc083bbf12fb8f757..0000000000000000000000000000000000000000 --- a/spaces/perilli/tortoise-tts-v2/tortoise/models/arch_util.py +++ /dev/null @@ -1,367 +0,0 @@ -import functools -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchaudio -from tortoise.models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - - -def normalization(channels): - """ - Make a standard normalization layer. - - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - groups = 32 - if channels <= 16: - groups = 8 - elif channels <= 64: - groups = 16 - while channels % groups != 0: - groups = int(groups / 2) - assert groups > 2 - return GroupNorm32(groups, channels) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv, mask=None, rel_pos=None): - """ - Apply QKV attention. - - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = torch.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - if rel_pos is not None: - weight = rel_pos(weight.reshape(bs, self.n_heads, weight.shape[-2], weight.shape[-1])).reshape(bs * self.n_heads, weight.shape[-2], weight.shape[-1]) - weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) - if mask is not None: - # The proper way to do this is to mask before the softmax using -inf, but that doesn't work properly on CPUs. 
- mask = mask.repeat(self.n_heads, 1).unsqueeze(1) - weight = weight * mask - a = torch.einsum("bts,bcs->bct", weight, v) - - return a.reshape(bs, -1, length) - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. - """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - do_checkpoint=True, - relative_pos_embeddings=False, - ): - super().__init__() - self.channels = channels - self.do_checkpoint = do_checkpoint - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.norm = normalization(channels) - self.qkv = nn.Conv1d(channels, channels * 3, 1) - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(nn.Conv1d(channels, channels, 1)) - if relative_pos_embeddings: - self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64) - else: - self.relative_pos_embeddings = None - - def forward(self, x, mask=None): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv, mask, self.relative_pos_embeddings) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - """ - - def __init__(self, channels, use_conv, out_channels=None, factor=4): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.factor = factor - if use_conv: - ksize = 5 - pad = 2 - self.conv = nn.Conv1d(self.channels, self.out_channels, ksize, padding=pad) - - def forward(self, x): - assert x.shape[1] == self.channels - x = F.interpolate(x, scale_factor=self.factor, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. 
- """ - - def __init__(self, channels, use_conv, out_channels=None, factor=4, ksize=5, pad=2): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - - stride = factor - if use_conv: - self.op = nn.Conv1d( - self.channels, self.out_channels, ksize, stride=stride, padding=pad - ) - else: - assert self.channels == self.out_channels - self.op = nn.AvgPool1d(kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(nn.Module): - def __init__( - self, - channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - up=False, - down=False, - kernel_size=3, - ): - super().__init__() - self.channels = channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_scale_shift_norm = use_scale_shift_norm - padding = 1 if kernel_size == 3 else 2 - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False) - self.x_upd = Upsample(channels, False) - elif down: - self.h_upd = Downsample(channels, False) - self.x_upd = Downsample(channels, False) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = nn.Conv1d( - channels, self.out_channels, kernel_size, padding=padding - ) - else: - self.skip_connection = nn.Conv1d(channels, self.out_channels, 1) - - def forward(self, x): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AudioMiniEncoder(nn.Module): - def __init__(self, - spec_dim, - embedding_dim, - base_channels=128, - depth=2, - resnet_blocks=2, - attn_blocks=4, - num_attn_heads=4, - dropout=0, - downsample_factor=2, - kernel_size=3): - super().__init__() - self.init = nn.Sequential( - nn.Conv1d(spec_dim, base_channels, 3, padding=1) - ) - ch = base_channels - res = [] - for l in range(depth): - for r in range(resnet_blocks): - res.append(ResBlock(ch, dropout, kernel_size=kernel_size)) - res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor)) - ch *= 2 - self.res = nn.Sequential(*res) - self.final = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.Conv1d(ch, embedding_dim, 1) - ) - attn = [] - for a in range(attn_blocks): - attn.append(AttentionBlock(embedding_dim, num_attn_heads,)) - self.attn = nn.Sequential(*attn) - self.dim = embedding_dim - - def forward(self, x): - h = self.init(x) - h = self.res(h) - h = self.final(h) - h = self.attn(h) - return h[:, :, 0] - - -class TorchMelSpectrogram(nn.Module): - def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, mel_fmin=0, mel_fmax=8000, - sampling_rate=22050, normalize=False, mel_norm_file='tortoise/data/mel_norms.pth'): - super().__init__() - # These are the default tacotron values for the MEL spectrogram. 
- self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.n_mel_channels = n_mel_channels - self.mel_fmin = mel_fmin - self.mel_fmax = mel_fmax - self.sampling_rate = sampling_rate - self.mel_stft = torchaudio.transforms.MelSpectrogram(n_fft=self.filter_length, hop_length=self.hop_length, - win_length=self.win_length, power=2, normalized=normalize, - sample_rate=self.sampling_rate, f_min=self.mel_fmin, - f_max=self.mel_fmax, n_mels=self.n_mel_channels, - norm="slaney") - self.mel_norm_file = mel_norm_file - if self.mel_norm_file is not None: - self.mel_norms = torch.load(self.mel_norm_file) - else: - self.mel_norms = None - - def forward(self, inp): - if len(inp.shape) == 3: # Automatically squeeze out the channels dimension if it is present (assuming mono-audio) - inp = inp.squeeze(1) - assert len(inp.shape) == 2 - self.mel_stft = self.mel_stft.to(inp.device) - mel = self.mel_stft(inp) - # Perform dynamic range compression - mel = torch.log(torch.clamp(mel, min=1e-5)) - if self.mel_norms is not None: - self.mel_norms = self.mel_norms.to(mel.device) - mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1) - return mel - - -class CheckpointedLayer(nn.Module): - """ - Wraps a module. When forward() is called, passes kwargs that require_grad through torch.checkpoint() and bypasses - checkpoint for all other args. - """ - def __init__(self, wrap): - super().__init__() - self.wrap = wrap - - def forward(self, x, *args, **kwargs): - for k, v in kwargs.items(): - assert not (isinstance(v, torch.Tensor) and v.requires_grad) # This would screw up checkpointing. - partial = functools.partial(self.wrap, **kwargs) - return torch.utils.checkpoint.checkpoint(partial, x, *args) - - -class CheckpointedXTransformerEncoder(nn.Module): - """ - Wraps a ContinuousTransformerWrapper and applies CheckpointedLayer to each layer and permutes from channels-mid - to channels-last that XTransformer expects. - """ - def __init__(self, needs_permute=True, exit_permute=True, checkpoint=True, **xtransformer_kwargs): - super().__init__() - self.transformer = ContinuousTransformerWrapper(**xtransformer_kwargs) - self.needs_permute = needs_permute - self.exit_permute = exit_permute - - if not checkpoint: - return - for i in range(len(self.transformer.attn_layers.layers)): - n, b, r = self.transformer.attn_layers.layers[i] - self.transformer.attn_layers.layers[i] = nn.ModuleList([n, CheckpointedLayer(b), r]) - - def forward(self, x, **kwargs): - if self.needs_permute: - x = x.permute(0,2,1) - h = self.transformer(x, **kwargs) - if self.exit_permute: - h = h.permute(0,2,1) - return h \ No newline at end of file diff --git a/spaces/pix2pix-zero-library/pix2pix-zero-demo/submodules/pix2pix-zero/README.md b/spaces/pix2pix-zero-library/pix2pix-zero-demo/submodules/pix2pix-zero/README.md deleted file mode 100644 index 754661045993b6f44a7d993e00e02e975c1eb3e1..0000000000000000000000000000000000000000 --- a/spaces/pix2pix-zero-library/pix2pix-zero-demo/submodules/pix2pix-zero/README.md +++ /dev/null @@ -1,154 +0,0 @@ -# pix2pix-zero - -## [**[website]**](https://pix2pixzero.github.io/) - - -This is author's reimplementation of "Zero-shot Image-to-Image Translation" using the diffusers library.
                    -The results in the paper are based on the [CompVis](https://github.com/CompVis/stable-diffusion) library, which will be released later. - -**[New!]** Code for editing real and synthetic images released! - - - -
- - -We propose pix2pix-zero, a diffusion-based image-to-image approach that allows users to specify the edit direction on-the-fly (e.g., cat to dog). Our method can directly use pre-trained [Stable Diffusion](https://github.com/CompVis/stable-diffusion) for editing real and synthetic images while preserving the input image's structure. Our method is training-free and prompt-free, as it requires neither manual text prompting for each input image nor costly fine-tuning for each task. - -**TL;DR**: no finetuning required, no text input needed, input structure preserved. - -## Results -All our results are based on the [stable-diffusion-v1-4](https://github.com/CompVis/stable-diffusion) model. Please see the website for more results. - -
- -The top row for each of the results below shows editing of real images, and the bottom row shows synthetic image editing. -
                    - -## Real Image Editing -
                    - -## Synthetic Image Editing -
- -## Method Details - -Given an input image, we first generate text captions using [BLIP](https://github.com/salesforce/LAVIS) and apply regularized DDIM inversion to obtain our inverted noise map. -Then, we obtain reference cross-attention maps that correspond to the structure of the input image by denoising, guided with the CLIP embeddings -of our generated text (c). Next, we denoise with edited text embeddings, while enforcing a loss to match current cross-attention maps with the -reference cross-attention maps. - -
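The snippet below is a schematic sketch of this editing loop in plain PyTorch, included only to make the description above concrete; it is not the repository's implementation. `unet`, `get_cross_attention_maps`, `edit_direction`, and `timesteps` are hypothetical placeholders, and the last update line stands in for the scheduler's actual DDIM step.

```python
# Schematic sketch of cross-attention-guided editing (not the repo's code).
# Placeholders: `unet` (noise predictor), `get_cross_attention_maps` (reads the
# maps recorded by attention hooks), `edit_direction` (precomputed text
# direction, e.g. cat -> dog), `timesteps` (DDIM schedule).
import torch

def edit_with_xa_guidance(x_inv, src_text_emb, edit_direction, timesteps,
                          unet, get_cross_attention_maps, xa_guidance=0.1):
    edited_emb = src_text_emb + edit_direction   # shift the caption embedding
    x = x_inv.clone()                            # start from the inverted noise map
    for t in timesteps:
        # Reference pass: denoise with the original caption and record the
        # cross-attention maps, which encode the input image's structure.
        with torch.no_grad():
            unet(x, t, encoder_hidden_states=src_text_emb)
            ref_maps = get_cross_attention_maps(unet)

        # Edited pass: denoise with the shifted embedding, then nudge the latent
        # so its cross-attention maps stay close to the reference maps.
        x = x.detach().requires_grad_(True)
        noise_pred = unet(x, t, encoder_hidden_states=edited_emb)
        cur_maps = get_cross_attention_maps(unet)
        xa_loss = (cur_maps - ref_maps).pow(2).mean()
        grad = torch.autograd.grad(xa_loss, x)[0]
        x = x - xa_guidance * grad

        # Stand-in for the scheduler's DDIM update using the edited prediction.
        x = (x - noise_pred).detach()
    return x
```

In the actual scripts, the strength of this correction is what the `--xa_guidance` flag (see Tips and Debugging below) controls.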
                    -

                    - -

                    -
                    - - -## Getting Started - -**Environment Setup** -- We provide a [conda env file](environment.yml) that contains all the required dependencies - ``` - conda env create -f environment.yml - ``` -- Following this, you can activate the conda environment with the command below. - ``` - conda activate pix2pix-zero - ``` - -**Real Image Translation** -- First, run the inversion command below to obtain the input noise that reconstructs the image. - The command below will save the inversion in the results folder as `output/test_cat/inversion/cat_1.pt` - and the BLIP-generated prompt as `output/test_cat/prompt/cat_1.txt` - ``` - python src/inversion.py \ - --input_image "assets/test_images/cats/cat_1.png" \ - --results_folder "output/test_cat" - ``` -- Next, we can perform image editing with the editing direction as shown below. - The command below will save the edited image as `output/test_cat/edit/cat_1.png` - ``` - python src/edit_real.py \ - --inversion "output/test_cat/inversion/cat_1.pt" \ - --prompt "output/test_cat/prompt/cat_1.txt" \ - --task_name "cat2dog" \ - --results_folder "output/test_cat/" - ``` - -**Editing Synthetic Images** -- Similarly, we can edit the synthetic images generated by Stable Diffusion with the following command. - ``` - python src/edit_synthetic.py \ - --results_folder "output/synth_editing" \ - --prompt_str "a high resolution painting of a cat in the style of van gough" \ - --task "cat2dog" - ``` - -### **Tips and Debugging** - - **Controlling the Image Structure:**
                    - The `--xa_guidance` flag controls the amount of cross-attention guidance to be applied when performing the edit. If the output edited image does not retain the structure from the input, increasing the value will typically address the issue. We recommend changing the value in increments of 0.05. - - - **Improving Image Quality:**
                    - If the output image quality is low or has some artifacts, using more steps for both the inversion and editing would be helpful. - This can be controlled with the `--num_ddim_steps` flag. - - - **Reducing the VRAM Requirements:**
                    - We can reduce the VRAM requirements using lower precision and setting the flag `--use_float_16`. - -
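As a rough sketch of how these tips combine (the script name and flags are taken from the commands above; whether they can all be passed in a single invocation, and the specific values 0.15 and 100, are assumptions rather than author-recommended defaults):
  ```
  python src/edit_real.py \
      --inversion "output/test_cat/inversion/cat_1.pt" \
      --prompt "output/test_cat/prompt/cat_1.txt" \
      --task_name "cat2dog" \
      --results_folder "output/test_cat/" \
      --xa_guidance 0.15 \
      --num_ddim_steps 100 \
      --use_float_16
  ```
  Raising `--xa_guidance` trades edit strength for structure preservation, increasing `--num_ddim_steps` trades runtime for quality, and `--use_float_16` lowers VRAM usage at a small cost in precision.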
                    - -**Finding Custom Edit Directions**
                    - - We provide some pre-computed directions in the assets [folder](assets/embeddings_sd_1.4). - To generate new edit directions, users can first generate two files containing a large number of sentences (~1000) and then run the command as shown below. - ``` - python src/make_edit_direction.py \ - --file_source_sentences sentences/apple.txt \ - --file_target_sentences sentences/orange.txt \ - --output_folder assets/embeddings_sd_1.4 - ``` -- After running the above command, you can set the flag `--task apple2orange` for the new edit. - - - -## Comparison -Comparisons with different baselines, including, SDEdit + word swap, DDIM + word swap, and prompt-to-propmt. Our method successfully applies the edit, while preserving the structure of the input image. -
                    -

                    - -

                    -
                    diff --git a/spaces/pn23/HackGT2023/scraping.py b/spaces/pn23/HackGT2023/scraping.py deleted file mode 100644 index 0db9b0398268a402e736d069ba951304e09e420c..0000000000000000000000000000000000000000 --- a/spaces/pn23/HackGT2023/scraping.py +++ /dev/null @@ -1,37 +0,0 @@ -import requests -from bs4 import BeautifulSoup -def scraper(dining_hall,meal, date): - - y = "" - if dining_hall == "brittain": - y += "This is what the Brittain Dining Hall is serving for "+ meal + ": " - elif dining_hall == "north-ave-dining-hall": - y += "This is what the North Avenue Dining Hall is serving for "+ meal + ": " - elif dining_hall == "west-village": - y += "This is what the West Village Dining Hall is serving for "+ meal + ": " - - url = "https://techdining.api.nutrislice.com/menu/carbcounts/"+ dining_hall +"/" + meal +"/2023-"+ date - html = requests.get(url).content - - soup = BeautifulSoup(html) - table = soup.select_one("table.carbs-table") - - temp = set() - a = table.find_all("tr")[1:] - for i in range(len(a)): - if (i == len(a) - 1): - y += ((a[i].find_all("td")[0].find_all("p")[0].text.strip()) + ". ") - else: - y += ((a[i].find_all("td")[0].find_all("p")[0].text.strip()) + " ") - - - return y - -def prompty(date, userinput): - ans = "" - for hall in ["brittain","north-ave-dining-hall","west-village"]: - for ml in ["breakfast","lunch","dinner"]: - ans += scraper(hall,ml,date) - ans += "\n " - - return "The user wants to know where to eat today for breakfast, lunch, and dinner. Here are Georgia Tech's menus for today, separated by the specific dining halls Brittain, North Avenue, and West Village: " + ans + " Based on the user's following dietary restrcitions: " + userinput + ". Which dining halls should the user go to for breakfast, lunch, and dinner today. You may provide a different dining hall for each meal to best fit the user's diet. Provide why the user should go to your suggested dining halls for each meal and how your recommendations fit the user's diet. Please also provide 2-3 items from each of the recommended dining halls as examples. Please provide a pargraph description for each meal." \ No newline at end of file diff --git a/spaces/pngwn/nextjs/out/_next/static/chunks/framework-6e4ba497ae0c8a3f.js b/spaces/pngwn/nextjs/out/_next/static/chunks/framework-6e4ba497ae0c8a3f.js deleted file mode 100644 index f6334f9bc1354c2e496e107fed242c09f26dc1cb..0000000000000000000000000000000000000000 --- a/spaces/pngwn/nextjs/out/_next/static/chunks/framework-6e4ba497ae0c8a3f.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[774],{4448:function(e,t,n){var r=n(7294),l=n(6086),a=n(3840);function o(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n
                    - - - - -
| Name | Price | Servers | Countries |
| --- | --- | --- | --- |
| ExpressVPN | $6.67/month (with 49% off + 3 months free) | 3000+ | 94+ |
| NordVPN | $3.71/month (with 68% off + 3 months free) | 5400+ | 59+ |
| Surfshark | $2.49/month (with 81% off + 3 months free) | 3200+ | 65+ |
                    -

                    How to choose a VPN service for Bojezafm8free?

                    -

To choose a VPN service to use with Bojezafm8free, you need to consider several factors, such as:

                    -
                      -
• Your budget: How much are you willing to pay for a VPN service? Do you prefer a monthly or yearly plan? Do they offer any discounts or free trials?
• Your location: Where are you located? Do they have servers near your location or in the countries where the music platforms are available?
• Your platforms: Which music platforms do you want to access? Do they support them or have any compatibility issues?
• Your expectation: What kind of streaming experience do you want? Do they offer fast and reliable servers, high-quality sound, or advanced features?
                    -

                    To help you choose a VPN service for Bojezafm8free, you can read reviews, compare features, or test them out yourself. You can also use our VPN comparison tool to find the best VPN service for your needs.

                    -

                    What are the alternatives to Bojezafm8free?

                    -

                    If you are not comfortable with using Bojezafm8free or a VPN service, there are other ways to stream music online for free. There are many free and legal music streaming services that you can use without violating any laws or risking your security. However, these services may have some limitations or drawbacks compared to Bojezafm8free.

                    -

                    What are the best free and legal music streaming services?

                    -

                    Some of the best free and legal music streaming services that you can use are:

                    - - - - - -
| Name | Description | Pros | Cons |
| --- | --- | --- | --- |
| Spotify | The most popular music streaming service in the world with over 70 million songs and podcasts. | Large and diverse catalogue; personalized recommendations; social features; offline mode (with premium) | Ads (with free); limited skips (with free); lower quality (with free); not available in some countries |
| YouTube Music | The music streaming service from YouTube with over 60 million songs and videos. | Access to YouTube videos; smart playlists; background play (with premium); offline mode (with premium) | Ads (with free); limited skips (with free); lower quality (with free); not available in some countries |
| SoundCloud | The largest online audio platform with over 200 million tracks from independent artists and creators. | Discover new and emerging music; upload your own tracks; interact with other users; offline mode (with premium) | Ads (with free); limited skips (with free); lower quality (with free); not available in some countries |
                    -

                    How to compare free and legal music streaming services?

                    -

                    To compare free and legal music streaming services, you need to consider several factors such as:

                    -
                      -
• Catalogue: How many songs and genres do they offer? Do they have the artists and songs that you like?
• Features: What kind of features do they offer? Do they have personalized recommendations, playlists, radio stations, podcasts, etc.?
• Quality: What is the sound quality of their streams? Do they offer high-quality or lossless audio?
• Ads: Do they have ads or interruptions while streaming? How often and how long are they?
• Offline mode: Do they allow you to download songs or playlists for offline listening? How many songs or playlists can you download?
                    -

                    To help you compare free and legal music streaming services, you can read reviews, compare features, or test them out yourself. You can also use our music streaming comparison tool to find the best music streaming service for your needs.

                    -

                    Conclusion

                    -

                    In conclusion, Bojezafm8free is a service that claims to offer unlimited music streaming for free from any music platform. However, it has some serious drawbacks that make it illegal, unsafe, and unreliable. If you want to use Bojezafm8free safely and legally, you need to use a VPN service that can protect your privacy and security while bypassing geo-restrictions and censorship. Alternatively, you can use free and legal music streaming services that offer a better and more ethical way to enjoy music online.

                    -

                    FAQs

                    -

                    Here are some frequently asked questions about Bojezafm8free:

                    -
                      -
                    1. Is Bojezafm8free a virus?
                      No, Bojezafm8free is not a virus itself. However, it may contain malware or viruses that can infect your device or steal your data. Therefore, you should be careful when downloading or using Bojezafm8free.
                    2. -
                    3. Can I get sued for using Bojezafm8free?
                      Possibly. By using Bojezafm8free, you are breaking the law and infringing the rights of the music platforms and the artists. You may face legal consequences if you get caught or reported by them.
                    4. -
                    5. Does Bojezafm8free work with Netflix?
                      No, Bojezafm8free does not work with Netflix or any other video streaming service. It only works with music streaming platforms such as Spotify, Apple Music, Deezer, etc.
                    6. -
                    7. How do I uninstall Bojezafm8free?
                      To uninstall Bojezafm8free from your device, you need to follow these steps:

                      -
                        -
                      • Delete the software or the app from your device.
                      • -
                      • Delete any files or folders related to Bojezafm8free from your device.
                      • -
                      • Scan your device with an antivirus software to remove any malware or viruses that may be left behind by Bojezafm8free.
                      • -
                      • Clear your browser history and cache to remove any traces of Bojezafm8free from your online activity.
                      • -
                    8. -
                    9. How do I contact Bojezafm8free?
                      You can't. Bojezafm8free does not have any official website or contact information. It is an anonymous and illegal service that does not provide any customer support or feedback.
                    10. -
                    -

                    0a6ba089eb
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/data_objects/speaker.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/data_objects/speaker.py deleted file mode 100644 index 07379847a854d85623db02ce5e5409c1566eb80c..0000000000000000000000000000000000000000 --- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/data_objects/speaker.py +++ /dev/null @@ -1,40 +0,0 @@ -from speaker_encoder.data_objects.random_cycler import RandomCycler -from speaker_encoder.data_objects.utterance import Utterance -from pathlib import Path - -# Contains the set of utterances of a single speaker -class Speaker: - def __init__(self, root: Path): - self.root = root - self.name = root.name - self.utterances = None - self.utterance_cycler = None - - def _load_utterances(self): - with self.root.joinpath("_sources.txt").open("r") as sources_file: - sources = [l.split(",") for l in sources_file] - sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources} - self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()] - self.utterance_cycler = RandomCycler(self.utterances) - - def random_partial(self, count, n_frames): - """ - Samples a batch of unique partial utterances from the disk in a way that all - utterances come up at least once every two cycles and in a random order every time. - - :param count: The number of partial utterances to sample from the set of utterances from - that speaker. Utterances are guaranteed not to be repeated if is not larger than - the number of utterances available. - :param n_frames: The number of frames in the partial utterance. - :return: A list of tuples (utterance, frames, range) where utterance is an Utterance, - frames are the frames of the partial utterances and range is the range of the partial - utterance with regard to the complete utterance. - """ - if self.utterances is None: - self._load_utterances() - - utterances = self.utterance_cycler.sample(count) - - a = [(u,) + u.random_partial(n_frames) for u in utterances] - - return a diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adapt Pt 2010 With Crack LINK.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adapt Pt 2010 With Crack LINK.md deleted file mode 100644 index 9bf766470de96b1637e420e4d78d075a5d1a3c0f..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adapt Pt 2010 With Crack LINK.md +++ /dev/null @@ -1,76 +0,0 @@ -

                    adapt pt 2010 with crack


                    Downloadhttps://urlgoal.com/2uCMJZ



                    - -Designing for or utilizing ESD protection at the integrated circuit level is a complex undertaking. Write to us: If the DSO load matches the one in the spec, then the IC will.( - -a - -) - - - -0 - -. - -4 - -( - -b - -- - -1 - -/ - -5 - -c - -d - -2 - -7 - -e - -8 - -W - -h - -i - -s - -t - -r - -g - -v - -l - -u - -? - -3 - -n - -, - -6 - -f - -o 4fefd39f24
                    -
                    -
                    -

                    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Photoshop CC 2014 (preactivated) RePack By D!akov 64 Bit [BETTER].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Photoshop CC 2014 (preactivated) RePack By D!akov 64 Bit [BETTER].md deleted file mode 100644 index cf85400e4c6ed35614d13392812dabcc033ee96d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Photoshop CC 2014 (preactivated) RePack By D!akov 64 Bit [BETTER].md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Adobe Photoshop CC 2014 (preactivated) RePack by D!akov 64 bit


                    Download Zip ✸✸✸ https://urlgoal.com/2uCKTt



                    - -Adobe InDesign CC 2014.10.0.0.70 RePack by D!akov 15 torrent download locations ... Spyware ... Adobe Photoshop CC 2014 (64 bit) . 4d29de3e1b
                    -
                    -
                    -

                    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Best Service Era Medieval Legendsl.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Best Service Era Medieval Legendsl.md deleted file mode 100644 index 3589df80e36a6db6495eb55147a5305ba067dc2b..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Best Service Era Medieval Legendsl.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Best Service Era Medieval Legendsl


                    DOWNLOAD –––––>>> https://urlgoal.com/2uCLss



                    -
                    - 3cee63e6c2
                    -
                    -
                    -

                    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Epson Wic Reset Utility Keygen BETTER.zip.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Epson Wic Reset Utility Keygen BETTER.zip.md deleted file mode 100644 index 43a09fd8f58f324b512d605e0866286f0d3cb2ff..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Epson Wic Reset Utility Keygen BETTER.zip.md +++ /dev/null @@ -1,8 +0,0 @@ - -

Download the appropriate package for your device and the proper firmware package for your Epson printer. You can download the Epson WIC Reset utility from several websites for free. Get the Epson wicreset.exe program.

                    -

Q. When I attempt to reset, my printer drops to the console screen or displays a “Power Management” message, and I cannot access the printer through anything but a physical console.
A. Check that the power cord and plug are not at fault. If the cord is damaged, get a new one; if the plug is damaged, get a new one. Then restart the printer.

                    -

                    Epson Wic Reset Utility Keygen.zip


                    Download --->>> https://urlgoal.com/2uCKkA



                    -

Q. Reset operation failed with error code D91C.
A. This is a common error code returned from the printer, meaning that the attempt to delete the EEPROM failed.
Q. Is this a problem with the printer or my PC?
A. This is a common error returned by some printers and it's usually not a problem with the printer or the PC. It may be a firmware issue or it could be due to, for example, an inefficient printer driver. This issue is usually resolved by simply restarting the printer and retrying the Reset operation. Try downloading, installing and using the 2manuals version of WICReset.

                    -

                    Q. Sometimes after using my Epson ESP-DT printer with Autodoc, it does not print the fields in the document properly. Can this be fixed?
A. Autodoc is a convenient utility for those using a thermal printer, but it's not a very good solution for best-quality printing. The regular utility will work just fine, but Autodoc seems to be associated with the printer. If you have to use Autodoc, be sure to use the regular utility to print.

                    899543212b
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/riccorl/relik-entity-linking/relik/reader/utils/special_symbols.py b/spaces/riccorl/relik-entity-linking/relik/reader/utils/special_symbols.py deleted file mode 100644 index 170909ad6cb2b69e1d6a8384af34cba441e60ce4..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/reader/utils/special_symbols.py +++ /dev/null @@ -1,11 +0,0 @@ -from typing import List - -NME_SYMBOL = "--NME--" - - -def get_special_symbols(num_entities: int) -> List[str]: - return [NME_SYMBOL] + [f"[E-{i}]" for i in range(num_entities)] - - -def get_special_symbols_re(num_entities: int) -> List[str]: - return [NME_SYMBOL] + [f"[R-{i}]" for i in range(num_entities)] diff --git a/spaces/riccorl/relik-entity-linking/relik/retriever/callbacks/utils_callbacks.py b/spaces/riccorl/relik-entity-linking/relik/retriever/callbacks/utils_callbacks.py deleted file mode 100644 index ba73e0d9ee02d9e1424611551befc002bdaaecf3..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/retriever/callbacks/utils_callbacks.py +++ /dev/null @@ -1,287 +0,0 @@ -import json -import logging -import os -from pathlib import Path -from typing import Any, Dict, Optional, Union - -import lightning as pl -import torch -from lightning.pytorch.trainer.states import RunningStage - -from relik.common.log import get_console_logger, get_logger -from relik.retriever.callbacks.base import NLPTemplateCallback, PredictionCallback -from relik.retriever.pytorch_modules.hf import GoldenRetrieverModel - -console_logger = get_console_logger() -logger = get_logger(__name__, level=logging.INFO) - - -class SavePredictionsCallback(NLPTemplateCallback): - def __init__( - self, - saving_dir: Optional[Union[str, os.PathLike]] = None, - verbose: bool = False, - *args, - **kwargs, - ): - super().__init__() - self.saving_dir = saving_dir - self.verbose = verbose - - @torch.no_grad() - def __call__( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - predictions: Dict, - callback: PredictionCallback, - *args, - **kwargs, - ) -> dict: - # write the predictions to a file inside the experiment folder - if self.saving_dir is None and trainer.logger is None: - logger.info( - "You need to specify an output directory (`saving_dir`) or a logger to save the predictions.\n" - "Skipping saving predictions." - ) - return - datasets = callback.datasets - for dataloader_idx, dataloader_predictions in predictions.items(): - # save to file - if self.saving_dir is not None: - prediction_folder = Path(self.saving_dir) - else: - try: - prediction_folder = ( - Path(trainer.logger.experiment.dir) / "predictions" - ) - except Exception: - logger.info( - "You need to specify an output directory (`saving_dir`) or a logger to save the predictions.\n" - "Skipping saving predictions." 
- ) - return - prediction_folder.mkdir(exist_ok=True) - predictions_path = ( - prediction_folder - / f"{datasets[dataloader_idx].name}_{dataloader_idx}.json" - ) - if self.verbose: - logger.info(f"Saving predictions to {predictions_path}") - with open(predictions_path, "w") as f: - for prediction in dataloader_predictions: - for k, v in prediction.items(): - if isinstance(v, set): - prediction[k] = list(v) - f.write(json.dumps(prediction) + "\n") - - -class ResetModelCallback(pl.Callback): - def __init__( - self, - question_encoder: str, - passage_encoder: Optional[str] = None, - verbose: bool = True, - ) -> None: - super().__init__() - self.question_encoder = question_encoder - self.passage_encoder = passage_encoder - self.verbose = verbose - - def on_train_epoch_start( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, *args, **kwargs - ) -> None: - if trainer.current_epoch == 0: - if self.verbose: - logger.info("Current epoch is 0, skipping resetting model") - return - - if self.verbose: - logger.info("Resetting model, optimizer and lr scheduler") - # reload model from scratch - previous_device = pl_module.device - trainer.model.model.question_encoder = GoldenRetrieverModel.from_pretrained( - self.question_encoder - ) - trainer.model.model.question_encoder.to(previous_device) - if self.passage_encoder is not None: - trainer.model.model.passage_encoder = GoldenRetrieverModel.from_pretrained( - self.passage_encoder - ) - trainer.model.model.passage_encoder.to(previous_device) - - trainer.strategy.setup_optimizers(trainer) - - -class FreeUpIndexerVRAMCallback(pl.Callback): - def __call__( - self, - pl_module: pl.LightningModule, - *args, - **kwargs, - ) -> Any: - logger.info("Freeing up GPU memory") - - # remove the index from the GPU memory - # remove the embeddings from the GPU memory first - if pl_module.model.document_index is not None: - if pl_module.model.document_index.embeddings is not None: - pl_module.model.document_index.embeddings.cpu() - pl_module.model.document_index.embeddings = None - - import gc - - gc.collect() - torch.cuda.empty_cache() - - def on_train_epoch_start( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, *args, **kwargs - ) -> None: - return self(pl_module) - - def on_test_epoch_start( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, *args, **kwargs - ) -> None: - return self(pl_module) - - -class ShuffleTrainDatasetCallback(pl.Callback): - def __init__(self, seed: int = 42, verbose: bool = True) -> None: - super().__init__() - self.seed = seed - self.verbose = verbose - self.previous_epoch = -1 - - def on_validation_epoch_end(self, trainer: pl.Trainer, *args, **kwargs): - if self.verbose: - if trainer.current_epoch != self.previous_epoch: - logger.info(f"Shuffling train dataset at epoch {trainer.current_epoch}") - - # logger.info(f"Shuffling train dataset at epoch {trainer.current_epoch}") - if trainer.current_epoch != self.previous_epoch: - trainer.datamodule.train_dataset.shuffle_data( - seed=self.seed + trainer.current_epoch + 1 - ) - self.previous_epoch = trainer.current_epoch - - -class PrefetchTrainDatasetCallback(pl.Callback): - def __init__(self, verbose: bool = True) -> None: - super().__init__() - self.verbose = verbose - # self.previous_epoch = -1 - - def on_validation_epoch_end(self, trainer: pl.Trainer, *args, **kwargs): - if trainer.datamodule.train_dataset.prefetch_batches: - if self.verbose: - # if trainer.current_epoch != self.previous_epoch: - logger.info( - f"Prefetching train dataset at epoch 
{trainer.current_epoch}" - ) - # if trainer.current_epoch != self.previous_epoch: - trainer.datamodule.train_dataset.prefetch() - self.previous_epoch = trainer.current_epoch - - -class SubsampleTrainDatasetCallback(pl.Callback): - def __init__(self, seed: int = 43, verbose: bool = True) -> None: - super().__init__() - self.seed = seed - self.verbose = verbose - - def on_validation_epoch_end(self, trainer: pl.Trainer, *args, **kwargs): - if self.verbose: - logger.info(f"Subsampling train dataset at epoch {trainer.current_epoch}") - trainer.datamodule.train_dataset.random_subsample( - seed=self.seed + trainer.current_epoch + 1 - ) - - -class SaveRetrieverCallback(pl.Callback): - def __init__( - self, - saving_dir: Optional[Union[str, os.PathLike]] = None, - verbose: bool = True, - *args, - **kwargs, - ): - super().__init__() - self.saving_dir = saving_dir - self.verbose = verbose - self.free_up_indexer_callback = FreeUpIndexerVRAMCallback() - - @torch.no_grad() - def __call__( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - *args, - **kwargs, - ): - if self.saving_dir is None and trainer.logger is None: - logger.info( - "You need to specify an output directory (`saving_dir`) or a logger to save the retriever.\n" - "Skipping saving retriever." - ) - return - if self.saving_dir is not None: - retriever_folder = Path(self.saving_dir) - else: - try: - retriever_folder = Path(trainer.logger.experiment.dir) / "retriever" - except Exception: - logger.info( - "You need to specify an output directory (`saving_dir`) or a logger to save the retriever.\n" - "Skipping saving retriever." - ) - return - retriever_folder.mkdir(exist_ok=True, parents=True) - if self.verbose: - logger.info(f"Saving retriever to {retriever_folder}") - pl_module.model.save_pretrained(retriever_folder) - - def on_save_checkpoint( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - checkpoint: Dict[str, Any], - ): - self(trainer, pl_module) - # self.free_up_indexer_callback(pl_module) - - -class SampleNegativesDatasetCallback(pl.Callback): - def __init__(self, seed: int = 42, verbose: bool = True) -> None: - super().__init__() - self.seed = seed - self.verbose = verbose - - def on_validation_epoch_end(self, trainer: pl.Trainer, *args, **kwargs): - if self.verbose: - f"Sampling negatives for train dataset at epoch {trainer.current_epoch}" - trainer.datamodule.train_dataset.sample_dataset_negatives( - seed=self.seed + trainer.current_epoch - ) - - -class SubsampleDataCallback(pl.Callback): - def __init__(self, seed: int = 42, verbose: bool = True) -> None: - super().__init__() - self.seed = seed - self.verbose = verbose - - def on_validation_epoch_start(self, trainer: pl.Trainer, *args, **kwargs): - if self.verbose: - f"Subsampling data for train dataset at epoch {trainer.current_epoch}" - if trainer.state.stage == RunningStage.SANITY_CHECKING: - return - trainer.datamodule.train_dataset.subsample_data( - seed=self.seed + trainer.current_epoch - ) - - def on_fit_start(self, trainer: pl.Trainer, *args, **kwargs): - if self.verbose: - f"Subsampling data for train dataset at epoch {trainer.current_epoch}" - trainer.datamodule.train_dataset.subsample_data( - seed=self.seed + trainer.current_epoch - ) diff --git a/spaces/robin0307/MMOCR/configs/textdet/panet/panet_r18_fpem_ffm_600e_ctw1500.py b/spaces/robin0307/MMOCR/configs/textdet/panet/panet_r18_fpem_ffm_600e_ctw1500.py deleted file mode 100644 index 91d23af68417b0c589964f0908d4de60dfcfc4e4..0000000000000000000000000000000000000000 --- 
a/spaces/robin0307/MMOCR/configs/textdet/panet/panet_r18_fpem_ffm_600e_ctw1500.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = [ - '../../_base_/default_runtime.py', - '../../_base_/schedules/schedule_adam_600e.py', - '../../_base_/det_models/panet_r18_fpem_ffm.py', - '../../_base_/det_datasets/ctw1500.py', - '../../_base_/det_pipelines/panet_pipeline.py' -] - -model = {{_base_.model_poly}} - -train_list = {{_base_.train_list}} -test_list = {{_base_.test_list}} - -train_pipeline_ctw1500 = {{_base_.train_pipeline_ctw1500}} -test_pipeline_ctw1500 = {{_base_.test_pipeline_ctw1500}} - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - val_dataloader=dict(samples_per_gpu=1), - test_dataloader=dict(samples_per_gpu=1), - train=dict( - type='UniformConcatDataset', - datasets=train_list, - pipeline=train_pipeline_ctw1500), - val=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline_ctw1500), - test=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline_ctw1500)) - -evaluation = dict(interval=10, metric='hmean-iou') diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/optimizers/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/optimizers/__init__.py deleted file mode 100644 index e867d0761cb54a6f228a0fb3e0560dea67b67881..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/optimizers/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import OPTIMIZER_BUILDERS, build_optimizer -from .layer_decay_optimizer_constructor import \ - LearningRateDecayOptimizerConstructor - -__all__ = [ - 'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS', - 'build_optimizer' -] diff --git a/spaces/ronvolutional/http-server/app.py b/spaces/ronvolutional/http-server/app.py deleted file mode 100644 index d64fe01479a04a7589744cbdfc059cbd8d596ca1..0000000000000000000000000000000000000000 --- a/spaces/ronvolutional/http-server/app.py +++ /dev/null @@ -1,79 +0,0 @@ -import os -import json -import requests -from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer -from urllib.parse import parse_qs, urlparse - -from inference import infer_t5 -from dataset import query_emotion - -# https://huggingface.co/settings/tokens -# https://huggingface.co/spaces/{username}/{space}/settings -API_TOKEN = os.getenv("BIG_GAN_TOKEN") - - -class RequestHandler(SimpleHTTPRequestHandler): - def do_GET(self): - if self.path == "/": - self.path = "index.html" - - return SimpleHTTPRequestHandler.do_GET(self) - - if self.path.startswith("/infer_biggan"): - url = urlparse(self.path) - query = parse_qs(url.query) - input = query.get("input", None)[0] - - output = requests.request( - "POST", - "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128", - headers={"Authorization": f"Bearer {API_TOKEN}"}, - data=json.dumps(input), - ) - - self.send_response(200) - self.send_header("Content-Type", "application/json") - self.end_headers() - - self.wfile.write(output.content) - - return SimpleHTTPRequestHandler - - elif self.path.startswith("/infer_t5"): - url = urlparse(self.path) - query = parse_qs(url.query) - input = query.get("input", None)[0] - - output = infer_t5(input) - - self.send_response(200) - self.send_header("Content-Type", "application/json") - self.end_headers() - - self.wfile.write(json.dumps({"output": output}).encode("utf-8")) - - return SimpleHTTPRequestHandler - - elif 
self.path.startswith("/query_emotion"): - url = urlparse(self.path) - query = parse_qs(url.query) - start = int(query.get("start", None)[0]) - end = int(query.get("end", None)[0]) - - output = query_emotion(start, end) - - self.send_response(200) - self.send_header("Content-Type", "application/json") - self.end_headers() - - self.wfile.write(json.dumps({"output": output}).encode("utf-8")) - - return SimpleHTTPRequestHandler - - else: - return SimpleHTTPRequestHandler.do_GET(self) - - -server = ThreadingHTTPServer(("", 7860), RequestHandler) - -server.serve_forever() diff --git a/spaces/rorallitri/biomedical-language-models/logs/Christopher Sommer Handstand Chronicles PDF Learn from the Expert Coach of Gymnastics Bodies.md b/spaces/rorallitri/biomedical-language-models/logs/Christopher Sommer Handstand Chronicles PDF Learn from the Expert Coach of Gymnastics Bodies.md deleted file mode 100644 index c75045abba434d655ac1e7d789eb1b506b78ad09..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Christopher Sommer Handstand Chronicles PDF Learn from the Expert Coach of Gymnastics Bodies.md +++ /dev/null @@ -1,9 +0,0 @@ -
                    -

                    This person is located in springfield il. Age 44, of twinsburg. Beloved husband of samantha ( nee gold) ; loving father of anthony j. , book christopher sommer eleanor coletta and evelyn lucille; cherished son of john f. ( nee flynn) macko; dearest brother of michael christopher ( sommer) macko and kathleen book christopher sommer mary ( christopher) phillips; dear uncle of many. Christopher sommers, los angeles, california. Projects, acting related things, set photos, ruminations and film stuff. Hartshorne notes/ solutions christopher eur. This book christopher sommer document was created to help the author study the book, and as a result is likely lled with abundance of inelegance if not inaccuracies. Please use with caution. The solutions written up here are: exercises that are really propositions, exercises that serve as good examples, exercises.

                    -

                    christopher sommer handstand chronicles pdf


                    DOWNLOAD 🌟 https://tinurll.com/2uzm7E



                    -

                    Building the gymnastic book christopher sommer body: the science of gymnastics strength training di christopher sommer e una grande book christopher sommer selezione di libri, arte e articoli da collezione disponibile su abebooks. By christopher sommer building the gymnastic body: the science of gymnastics strength training by christopher sommer this is an example product description. Building the book christopher sommer gymnastic body: the science of gymnastics strength training by christopher sommer bibliography sales rank: # 972847 book christopher sommer in books brand: example product brand published on:. Christopher sommer - building book christopher sommer the gimnastic body pliki uytkownika tantor5 przechowywane w serwisie chomikuj. Pl mastering gst fundamentals rings one. All the pretty book christopher sommer horses book online free - > > book christopher sommer - > > - > >. Handstand one by christopher sommer gymnasticbodies. Com published by olympic bodies, llc. 28- day- handstand- challenge.

                    -

                    12 of the best podcast episodes for christopher sommer. A collection of podcasts episodes with or about christopher book christopher sommer sommer, often where they are interviewed. Updated daily with the latest book christopher sommer episodes. Publicrecordsnow. Com has been visited by 10k+ users book christopher sommer in the past month.

                    -

                    This book was far more effective for me than the heavily marketed convict conditioning. The author is the real deal, has succesfully trained high level gymnasts, book christopher sommer and his protocols work. I took excerpts from the tim ferriss interview with gymnastics strength training coach christopher sommer and illustrated the movements they talk about with. Book summary: the title of this book is building the gymnastic body and it was written by christopher sommer. This particular edition is book christopher sommer in a paperback format. This books publish date is unknown and it book christopher sommer has a suggested retail price of $ 368. It was published by olympic bodies llc and has book christopher sommer a total of 195 pages in the book. Christopher sommer book christopher sommer we have all book christopher sommer seen them on television during book christopher sommer the olympics; these powerful book christopher sommer men performing amazing skills with ease and grace. Watching them perform the question inevitably arises - are they as powerful as they look? And the book christopher sommer answer is - yes.

                    -

                    aaccfb2cb3
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/DJSoft RadioBOSS Advanced V5.6.0.6 Download Pc.md b/spaces/rorallitri/biomedical-language-models/logs/DJSoft RadioBOSS Advanced V5.6.0.6 Download Pc.md deleted file mode 100644 index de89e1e253da344416be284b37567b8f2ef52ffa..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/DJSoft RadioBOSS Advanced V5.6.0.6 Download Pc.md +++ /dev/null @@ -1,37 +0,0 @@ - -

                    How to Download and Install DJSoft RadioBOSS Advanced V5.6.0.6 on Your PC

                    -

                    If you are looking for a simple, affordable and reliable solution to automate your broadcasting needs, you may want to try DJSoft RadioBOSS Advanced V5.6.0.6. This software allows you to create and manage playlists, add jingles, commercials, announcements and more, and stream your audio to your local or Internet radio station.

                    -

                    DJSoft RadioBOSS Advanced V5.6.0.6 Download Pc


                    DOWNLOAD 🆗 https://tinurll.com/2uzoCB



                    -

                    In this article, we will show you how to download and install DJSoft RadioBOSS Advanced V5.6.0.6 on your PC in a few easy steps.

                    -

                    Step 1: Download the software

                    -

                    The first thing you need to do is to download the software from the official website of DJSoft.net. You can find the link at the end of this article. The file size is about 31.6 MB and it is compatible with Windows XP, Vista, 7, 8 and 10.

                    -

                    Once you have downloaded the file, double-click on it to start the installation process.

                    -

                    Step 2: Install the software

                    -

                    The installation wizard will guide you through the steps to install the software on your PC. You can choose the language, the destination folder and the components you want to install. You can also create a desktop shortcut and a start menu entry for easy access.

                    -

                    -

                    After you have completed the installation, you can launch the software by clicking on the shortcut or the start menu entry.

                    -

                    Step 3: Activate the software

                    -

                    If you have purchased a license for DJSoft RadioBOSS Advanced V5.6.0.6, you can activate the software by entering your registration name and key in the activation window that appears when you run the software for the first time.

                    -

                    If you have not purchased a license yet, you can use the software for free for 150 times as a trial version. You can also request a free 30-day trial key from the DJSoft.net website.

                    -

                    Step 4: Enjoy the software

                    -

                    Now that you have downloaded and installed DJSoft RadioBOSS Advanced V5.6.0.6 on your PC, you can start creating and broadcasting your audio content with ease and professionalism.

                    -

                    You can use the built-in music library to add your audio files, create playlists with various features and options, add effects and plugins, schedule events and tasks, stream your audio to your radio server and more.

                    -

                    You can also use other tools from DJSoft.net such as RadioLogger to record your broadcasts or RadioCaster to stream any audio source to your radio station.

                    -

                    For more information and support, you can visit the DJSoft.net website or the community forums where you can find tutorials, tips, updates and feedback from other users.

                    -

                    Download link:

                    -

                    https://www.djsoft.net/download.htm

                    - -

                    Step 5: Update the software

                    -

                    It is recommended that you keep your DJSoft RadioBOSS Advanced V5.6.0.6 software up to date with the latest version available. This way, you can enjoy the new features, improvements and bug fixes that are released regularly by the developers.

                    -

                    To update the software, you can use the built-in update checker that will notify you when a new version is available. You can also check manually by clicking on the Help menu and selecting Check for Updates.

                    -

                    If there is a new version available, you can download and install it over your existing installation without losing your settings or data.

                    -

                    Step 6: Customize the software

                    -

                    One of the advantages of DJSoft RadioBOSS Advanced V5.6.0.6 is that it allows you to customize the software according to your preferences and needs. You can change the appearance, the layout, the language, the keyboard shortcuts and more.

                    -

                    To access the customization options, you can click on the Settings menu and select Options. There, you will find various tabs where you can adjust the settings for different aspects of the software such as playback, playlist, scheduler, streaming, plugins and more.

                    -

                    You can also create and switch between different profiles for different scenarios or users. To do so, you can click on the File menu and select Profiles.

                    -

                    Step 7: Explore the software

                    -

                    The last step is to explore the software and discover all its features and capabilities. DJSoft RadioBOSS Advanced V5.6.0.6 is a powerful and versatile software that can handle any broadcasting task you throw at it.

                    -

                    You can use the built-in help system to learn more about each function and option of the software. You can also visit the DJSoft.net website or the community forums to find more resources and support.

                    -

                    We hope this article has helped you download and install DJSoft RadioBOSS Advanced V5.6.0.6 on your PC and get started with your broadcasting projects.

                    d5da3c52bf
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Hela Na Palach Jar Bearshare Motivo Poe.md b/spaces/rorallitri/biomedical-language-models/logs/Hela Na Palach Jar Bearshare Motivo Poe.md deleted file mode 100644 index 01c5d677e21800274696cfa6eb2b98d725277c88..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Hela Na Palach Jar Bearshare Motivo Poe.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Hela Na Palach Jar bearshare motivo poe


                    Download File ››››› https://tinurll.com/2uzm57



                    - - aaccfb2cb3
                    -
                    -
                    -

                    diff --git a/spaces/rorallitri/biomedical-language-models/logs/Links 2003 24 Courses Experience the Realism and Beauty of 20 Championship Courses on Your PC.md b/spaces/rorallitri/biomedical-language-models/logs/Links 2003 24 Courses Experience the Realism and Beauty of 20 Championship Courses on Your PC.md deleted file mode 100644 index 6246e3ce72148b65a0e1f89a66aabd067c14dc32..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Links 2003 24 Courses Experience the Realism and Beauty of 20 Championship Courses on Your PC.md +++ /dev/null @@ -1,10 +0,0 @@ - -

                    Yes the link is valid and I did click on that link and bought it. The problem is that after I bought it I was sent a receipt which contained a link to linkscountryclub.com to download the game. And again I couldn't find a way to download the game on linkscountryclub.com.

                    -

                    Links 2003 24 Courses pc game


                    Download Zip ✸✸✸ https://tinurll.com/2uznj2



                    -

I just tried to get to the downloads page at linkscountryclub.com and had success. Strange that it's not working for you. On the page with your license code, I simply clicked on the My Downloads tab again and it showed me the base game download as well as the extras.

                    -

        0 user(s) are reading this topic0 members, 0 guests, 0 anonymous users

        -

        Team Fortress Classic is a class-based multiplayer online first-person shooter video game developed by Valve and published by Sierra Studios. A port of the Team Fortress mod for Quake/QuakeWorld, Team Fortress Classic was originally released for Windows on April 1, 1999, as a mod for Half-Life and based on the Half-Life Engine (GoldSource Engine). A standalone version was later released on January 16, 2003. The development of Team Fortress Classic was led by John Cook and Robin Walker, two of the three designers from Team Fortress.[1]

        -

        -

        The game itself involves a number of teams, each with access to ten classes, competing in a variety of modes such as Capture the Flag, Control Points, and Escort. In June 2000, the game underwent a significant upgrade, adding new player character models and game modes. As of 2003, the game was one of the ten most played Half-Life mods according to GameSpy.[2] Today, as with its predecessor, Quake Team Fortress, it is not very active, and as such, there are few servers with humans on them, and bots prosper in the majority of servers. Your server browser can pick up at most 100 or more servers.

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/gui_utils/glfw_window.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/gui_utils/glfw_window.py deleted file mode 100644 index 83264eb89a855ec5038cf255994ee2b4b3ddb5ee..0000000000000000000000000000000000000000 --- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/gui_utils/glfw_window.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import time -import glfw -import OpenGL.GL as gl -from . import gl_utils - -#---------------------------------------------------------------------------- - -class GlfwWindow: # pylint: disable=too-many-public-methods - def __init__(self, *, title='GlfwWindow', window_width=1920, window_height=1080, deferred_show=True, close_on_esc=True): - self._glfw_window = None - self._drawing_frame = False - self._frame_start_time = None - self._frame_delta = 0 - self._fps_limit = None - self._vsync = None - self._skip_frames = 0 - self._deferred_show = deferred_show - self._close_on_esc = close_on_esc - self._esc_pressed = False - self._drag_and_drop_paths = None - self._capture_next_frame = False - self._captured_frame = None - - # Create window. - glfw.init() - glfw.window_hint(glfw.VISIBLE, False) - self._glfw_window = glfw.create_window(width=window_width, height=window_height, title=title, monitor=None, share=None) - self._attach_glfw_callbacks() - self.make_context_current() - - # Adjust window. - self.set_vsync(False) - self.set_window_size(window_width, window_height) - if not self._deferred_show: - glfw.show_window(self._glfw_window) - - def close(self): - if self._drawing_frame: - self.end_frame() - if self._glfw_window is not None: - glfw.destroy_window(self._glfw_window) - self._glfw_window = None - #glfw.terminate() # Commented out to play it nice with other glfw clients. 
- - def __del__(self): - try: - self.close() - except: - pass - - @property - def window_width(self): - return self.content_width - - @property - def window_height(self): - return self.content_height + self.title_bar_height - - @property - def content_width(self): - width, _height = glfw.get_window_size(self._glfw_window) - return width - - @property - def content_height(self): - _width, height = glfw.get_window_size(self._glfw_window) - return height - - @property - def title_bar_height(self): - _left, top, _right, _bottom = glfw.get_window_frame_size(self._glfw_window) - return top - - @property - def monitor_width(self): - _, _, width, _height = glfw.get_monitor_workarea(glfw.get_primary_monitor()) - return width - - @property - def monitor_height(self): - _, _, _width, height = glfw.get_monitor_workarea(glfw.get_primary_monitor()) - return height - - @property - def frame_delta(self): - return self._frame_delta - - def set_title(self, title): - glfw.set_window_title(self._glfw_window, title) - - def set_window_size(self, width, height): - width = min(width, self.monitor_width) - height = min(height, self.monitor_height) - glfw.set_window_size(self._glfw_window, width, max(height - self.title_bar_height, 0)) - if width == self.monitor_width and height == self.monitor_height: - self.maximize() - - def set_content_size(self, width, height): - self.set_window_size(width, height + self.title_bar_height) - - def maximize(self): - glfw.maximize_window(self._glfw_window) - - def set_position(self, x, y): - glfw.set_window_pos(self._glfw_window, x, y + self.title_bar_height) - - def center(self): - self.set_position((self.monitor_width - self.window_width) // 2, (self.monitor_height - self.window_height) // 2) - - def set_vsync(self, vsync): - vsync = bool(vsync) - if vsync != self._vsync: - glfw.swap_interval(1 if vsync else 0) - self._vsync = vsync - - def set_fps_limit(self, fps_limit): - self._fps_limit = int(fps_limit) - - def should_close(self): - return glfw.window_should_close(self._glfw_window) or (self._close_on_esc and self._esc_pressed) - - def skip_frame(self): - self.skip_frames(1) - - def skip_frames(self, num): # Do not update window for the next N frames. - self._skip_frames = max(self._skip_frames, int(num)) - - def is_skipping_frames(self): - return self._skip_frames > 0 - - def capture_next_frame(self): - self._capture_next_frame = True - - def pop_captured_frame(self): - frame = self._captured_frame - self._captured_frame = None - return frame - - def pop_drag_and_drop_paths(self): - paths = self._drag_and_drop_paths - self._drag_and_drop_paths = None - return paths - - def draw_frame(self): # To be overridden by subclass. - self.begin_frame() - # Rendering code goes here. - self.end_frame() - - def make_context_current(self): - if self._glfw_window is not None: - glfw.make_context_current(self._glfw_window) - - def begin_frame(self): - # End previous frame. - if self._drawing_frame: - self.end_frame() - - # Apply FPS limit. - if self._frame_start_time is not None and self._fps_limit is not None: - delay = self._frame_start_time - time.perf_counter() + 1 / self._fps_limit - if delay > 0: - time.sleep(delay) - cur_time = time.perf_counter() - if self._frame_start_time is not None: - self._frame_delta = cur_time - self._frame_start_time - self._frame_start_time = cur_time - - # Process events. - glfw.poll_events() - - # Begin frame. - self._drawing_frame = True - self.make_context_current() - - # Initialize GL state. 
- gl.glViewport(0, 0, self.content_width, self.content_height) - gl.glMatrixMode(gl.GL_PROJECTION) - gl.glLoadIdentity() - gl.glTranslate(-1, 1, 0) - gl.glScale(2 / max(self.content_width, 1), -2 / max(self.content_height, 1), 1) - gl.glMatrixMode(gl.GL_MODELVIEW) - gl.glLoadIdentity() - gl.glEnable(gl.GL_BLEND) - gl.glBlendFunc(gl.GL_ONE, gl.GL_ONE_MINUS_SRC_ALPHA) # Pre-multiplied alpha. - - # Clear. - gl.glClearColor(0, 0, 0, 1) - gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) - - def end_frame(self): - assert self._drawing_frame - self._drawing_frame = False - - # Skip frames if requested. - if self._skip_frames > 0: - self._skip_frames -= 1 - return - - # Capture frame if requested. - if self._capture_next_frame: - self._captured_frame = gl_utils.read_pixels(self.content_width, self.content_height) - self._capture_next_frame = False - - # Update window. - if self._deferred_show: - glfw.show_window(self._glfw_window) - self._deferred_show = False - glfw.swap_buffers(self._glfw_window) - - def _attach_glfw_callbacks(self): - glfw.set_key_callback(self._glfw_window, self._glfw_key_callback) - glfw.set_drop_callback(self._glfw_window, self._glfw_drop_callback) - - def _glfw_key_callback(self, _window, key, _scancode, action, _mods): - if action == glfw.PRESS and key == glfw.KEY_ESCAPE: - self._esc_pressed = True - - def _glfw_drop_callback(self, _window, paths): - self._drag_and_drop_paths = paths - -#---------------------------------------------------------------------------- diff --git a/spaces/rzzgate/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/utils/__init__.py b/spaces/rzzgate/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/salemamassi/PdfChatBot/README.md b/spaces/salemamassi/PdfChatBot/README.md deleted file mode 100644 index 7d446e23024b705c220b311061f1dff1207690e0..0000000000000000000000000000000000000000 --- a/spaces/salemamassi/PdfChatBot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: PdfChatBot -emoji: 🌍 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.44.3 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Utility/SpeakerVisualization.py b/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Utility/SpeakerVisualization.py deleted file mode 100644 index f753fb2c0d83b44feadeec11a5fcbd1c80473664..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Utility/SpeakerVisualization.py +++ /dev/null @@ -1,94 +0,0 @@ -import matplotlib -import numpy -import soundfile as sf -from matplotlib import pyplot as plt -from matplotlib import cm -matplotlib.use("tkAgg") -from sklearn.manifold import TSNE -from sklearn.decomposition import PCA - -from tqdm import tqdm - -from Preprocessing.ProsodicConditionExtractor import ProsodicConditionExtractor - - -class Visualizer: - - def __init__(self, sr=48000, device="cpu"): - """ - Args: - sr: The sampling rate of the audios you want to visualize. 
- """ - self.tsne = TSNE(n_jobs=-1) - self.pca = PCA(n_components=2) - self.pros_cond_ext = ProsodicConditionExtractor(sr=sr, device=device) - self.sr = sr - - def visualize_speaker_embeddings(self, label_to_filepaths, title_of_plot, save_file_path=None, include_pca=True, legend=True): - label_list = list() - embedding_list = list() - for label in tqdm(label_to_filepaths): - for filepath in tqdm(label_to_filepaths[label]): - wave, sr = sf.read(filepath) - if len(wave) / sr < 1: - continue - if self.sr != sr: - print("One of the Audios you included doesn't match the sampling rate of this visualizer object, " - "creating a new condition extractor. Results will be correct, but if there are too many cases " - "of changing samplingrate, this will run very slowly.") - self.pros_cond_ext = ProsodicConditionExtractor(sr=sr) - self.sr = sr - embedding_list.append(self.pros_cond_ext.extract_condition_from_reference_wave(wave).squeeze().numpy()) - label_list.append(label) - embeddings_as_array = numpy.array(embedding_list) - - dimensionality_reduced_embeddings_tsne = self.tsne.fit_transform(embeddings_as_array) - self._plot_embeddings(projected_data=dimensionality_reduced_embeddings_tsne, - labels=label_list, - title=title_of_plot + " t-SNE" if include_pca else title_of_plot, - save_file_path=save_file_path, - legend=legend) - - if include_pca: - dimensionality_reduced_embeddings_pca = self.pca.fit_transform(embeddings_as_array) - self._plot_embeddings(projected_data=dimensionality_reduced_embeddings_pca, - labels=label_list, - title=title_of_plot + " PCA", - save_file_path=save_file_path, - legend=legend) - - def _plot_embeddings(self, projected_data, labels, title, save_file_path, legend): - colors = cm.gist_rainbow(numpy.linspace(0, 1, len(set(labels)))) - label_to_color = dict() - for index, label in enumerate(list(set(labels))): - label_to_color[label] = colors[index] - - labels_to_points_x = dict() - labels_to_points_y = dict() - for label in labels: - labels_to_points_x[label] = list() - labels_to_points_y[label] = list() - for index, label in enumerate(labels): - labels_to_points_x[label].append(projected_data[index][0]) - labels_to_points_y[label].append(projected_data[index][1]) - - fig, ax = plt.subplots() - for label in set(labels): - x = numpy.array(labels_to_points_x[label]) - y = numpy.array(labels_to_points_y[label]) - ax.scatter(x=x, - y=y, - c=label_to_color[label], - label=label, - alpha=0.9) - if legend: - ax.legend() - fig.tight_layout() - ax.axis('off') - fig.subplots_adjust(top=0.9, bottom=0.0, right=1.0, left=0.0) - ax.set_title(title) - if save_file_path is not None: - plt.savefig(save_file_path) - else: - plt.show() - plt.close() diff --git a/spaces/sbroy10/02-NLP-Seq2SeqQAGenerator/qasrl_model_pipeline.py b/spaces/sbroy10/02-NLP-Seq2SeqQAGenerator/qasrl_model_pipeline.py deleted file mode 100644 index 50135f76849bc8537fcae83b72532da661487da6..0000000000000000000000000000000000000000 --- a/spaces/sbroy10/02-NLP-Seq2SeqQAGenerator/qasrl_model_pipeline.py +++ /dev/null @@ -1,183 +0,0 @@ -from typing import Optional -import json -from argparse import Namespace -from pathlib import Path -from transformers import Text2TextGenerationPipeline, AutoModelForSeq2SeqLM, AutoTokenizer - -def get_markers_for_model(is_t5_model: bool) -> Namespace: - special_tokens_constants = Namespace() - if is_t5_model: - # T5 model have 100 special tokens by default - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - 
special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - - else: - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - return special_tokens_constants - -def load_trained_model(name_or_path): - import huggingface_hub as HFhub - tokenizer = AutoTokenizer.from_pretrained(name_or_path) - model = AutoModelForSeq2SeqLM.from_pretrained(name_or_path) - # load preprocessing_kwargs from the model repo on HF hub, or from the local model directory - kwargs_filename = None - if name_or_path.startswith("kleinay/"): # and 'preprocessing_kwargs.json' in HFhub.list_repo_files(name_or_path): # the supported version of HFhub doesn't support list_repo_files - kwargs_filename = HFhub.hf_hub_download(repo_id=name_or_path, filename="preprocessing_kwargs.json") - elif Path(name_or_path).is_dir() and (Path(name_or_path) / "experiment_kwargs.json").exists(): - kwargs_filename = Path(name_or_path) / "experiment_kwargs.json" - - if kwargs_filename: - preprocessing_kwargs = json.load(open(kwargs_filename)) - # integrate into model.config (for decoding args, e.g. "num_beams"), and save also as standalone object for preprocessing - model.config.preprocessing_kwargs = Namespace(**preprocessing_kwargs) - model.config.update(preprocessing_kwargs) - return model, tokenizer - - -class QASRL_Pipeline(Text2TextGenerationPipeline): - def __init__(self, model_repo: str, **kwargs): - model, tokenizer = load_trained_model(model_repo) - super().__init__(model, tokenizer, framework="pt") - self.is_t5_model = "t5" in model.config.model_type - self.special_tokens = get_markers_for_model(self.is_t5_model) - self.data_args = model.config.preprocessing_kwargs - # backward compatibility - default keyword values implemeted in `run_summarization`, thus not saved in `preprocessing_kwargs` - if "predicate_marker_type" not in vars(self.data_args): - self.data_args.predicate_marker_type = "generic" - if "use_bilateral_predicate_marker" not in vars(self.data_args): - self.data_args.use_bilateral_predicate_marker = True - if "append_verb_form" not in vars(self.data_args): - self.data_args.append_verb_form = True - self._update_config(**kwargs) - - def _update_config(self, **kwargs): - " Update self.model.config with initialization parameters and necessary defaults. 
" - # set default values that will always override model.config, but can overriden by __init__ kwargs - kwargs["max_length"] = kwargs.get("max_length", 80) - # override model.config with kwargs - for k,v in kwargs.items(): - self.model.config.__dict__[k] = v - - def _sanitize_parameters(self, **kwargs): - preprocess_kwargs, forward_kwargs, postprocess_kwargs = {}, {}, {} - if "predicate_marker" in kwargs: - preprocess_kwargs["predicate_marker"] = kwargs["predicate_marker"] - if "predicate_type" in kwargs: - preprocess_kwargs["predicate_type"] = kwargs["predicate_type"] - if "verb_form" in kwargs: - preprocess_kwargs["verb_form"] = kwargs["verb_form"] - return preprocess_kwargs, forward_kwargs, postprocess_kwargs - - def preprocess(self, inputs, predicate_marker="", predicate_type=None, verb_form=None): - # Here, inputs is string or list of strings; apply string postprocessing - if isinstance(inputs, str): - processed_inputs = self._preprocess_string(inputs, predicate_marker, predicate_type, verb_form) - elif hasattr(inputs, "__iter__"): - processed_inputs = [self._preprocess_string(s, predicate_marker, predicate_type, verb_form) for s in inputs] - else: - raise ValueError("inputs must be str or Iterable[str]") - # Now pass to super.preprocess for tokenization - return super().preprocess(processed_inputs) - - def _preprocess_string(self, seq: str, predicate_marker: str, predicate_type: Optional[str], verb_form: Optional[str]) -> str: - sent_tokens = seq.split(" ") - assert predicate_marker in sent_tokens, f"Input sentence must include a predicate-marker token ('{predicate_marker}') before the target predicate word" - predicate_idx = sent_tokens.index(predicate_marker) - sent_tokens.remove(predicate_marker) - sentence_before_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx)]) - predicate = sent_tokens[predicate_idx] - sentence_after_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx+1, len(sent_tokens))]) - - if self.data_args.predicate_marker_type == "generic": - predicate_marker = self.special_tokens.predicate_generic_marker - # In case we want special marker for each predicate type: """ - elif self.data_args.predicate_marker_type == "pred_type": - assert predicate_type is not None, "For this model, you must provide the `predicate_type` either when initializing QASRL_Pipeline(...) or when applying __call__(...) 
on it" - assert predicate_type in ("verbal", "nominal"), f"`predicate_type` must be either 'verbal' or 'nominal'; got '{predicate_type}'" - predicate_marker = {"verbal": self.special_tokens.predicate_verb_marker , - "nominal": self.special_tokens.predicate_nominalization_marker - }[predicate_type] - - if self.data_args.use_bilateral_predicate_marker: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {predicate_marker} {sentence_after_predicate}" - else: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {sentence_after_predicate}" - - # embed also verb_form - if self.data_args.append_verb_form and verb_form is None: - raise ValueError(f"For this model, you must provide the `verb_form` of the predicate when applying __call__(...)") - elif self.data_args.append_verb_form: - seq = f"{seq} {self.special_tokens.separator_input_question_predicate} {verb_form} " - else: - seq = f"{seq} " - - # append source prefix (for t5 models) - prefix = self._get_source_prefix(predicate_type) - - return prefix + seq - - def _get_source_prefix(self, predicate_type: Optional[str]): - if not self.is_t5_model or self.data_args.source_prefix is None: - return '' - if not self.data_args.source_prefix.startswith("<"): # Regular prefix - not dependent on input row x - return self.data_args.source_prefix - if self.data_args.source_prefix == "": - if predicate_type is None: - raise ValueError("source_prefix is '' but input no `predicate_type`.") - else: - return f"Generate QAs for {predicate_type} QASRL: " - - def _forward(self, *args, **kwargs): - outputs = super()._forward(*args, **kwargs) - return outputs - - - def postprocess(self, model_outputs): - output_seq = self.tokenizer.decode( - model_outputs["output_ids"].squeeze(), - skip_special_tokens=False, - clean_up_tokenization_spaces=False, - ) - output_seq = output_seq.strip(self.tokenizer.pad_token).strip(self.tokenizer.eos_token).strip() - qa_subseqs = output_seq.split(self.special_tokens.separator_output_pairs) - qas = [self._postrocess_qa(qa_subseq) for qa_subseq in qa_subseqs] - return {"generated_text": output_seq, - "QAs": qas} - - def _postrocess_qa(self, seq: str) -> str: - # split question and answers - if self.special_tokens.separator_output_question_answer in seq: - question, answer = seq.split(self.special_tokens.separator_output_question_answer)[:2] - else: - print("invalid format: no separator between question and answer found...") - return None - # question, answer = seq, '' # Or: backoff to only question - # skip "_" slots in questions - question = ' '.join(t for t in question.split(' ') if t != '_') - answers = [a.strip() for a in answer.split(self.special_tokens.separator_output_answers)] - return {"question": question, "answers": answers} - - -if __name__ == "__main__": - pipe = QASRL_Pipeline("kleinay/qanom-seq2seq-model-baseline") - res1 = pipe("The student was interested in Luke 's research about sea animals .", verb_form="research", predicate_type="nominal") - res2 = pipe(["The doctor was interested in Luke 's treatment .", - "The Veterinary student was interested in Luke 's treatment of sea animals ."], verb_form="treat", predicate_type="nominal", num_beams=10) - res3 = pipe("A number of professions have developed that specialize in the treatment of mental disorders .", verb_form="develop", predicate_type="verbal") - print(res1) - print(res2) - print(res3) - \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Guerrini Champion Accordion For NI Kontakt VST.md 
b/spaces/scedlatioru/img-to-music/example/Guerrini Champion Accordion For NI Kontakt VST.md deleted file mode 100644 index f73ce73bdffbefa8fc0b3c9caf991bef7e8f150a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Guerrini Champion Accordion For NI Kontakt VST.md +++ /dev/null @@ -1,10 +0,0 @@ -

        Guerrini Champion accordion for NI Kontakt VST


        Download File » https://gohhs.com/2uEzEZ



        -
        -Mar 30, 2021 - Virtual Acoustic - Guerrini Superior 2 Accordion For NI Kontakt VST ... Guerrini Champion Accordion For NI Kontakt 24 guanto torrent paolo . Download Guerrini Champion Accordion For NI Kontakt VST VST3 AAX x86 x64 R2 R7 R11 R12 - morphology.org. -Guerrini Superior 2 Accordion VST for Kontakt VSTi VST3 AAX x86 x64 - morphology.org. -Guerrini Champion Accordion VST for Kontakt VSTi VST3 AAX x86 x64 R2, R7 R11 R12 - morphology.org. -Guerrini Champion Accordion for NI Kontakt VST VST3 AAX x86 x64 R2 R7 R11 R12 - morphology.org. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_asr_mix_transformer.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_asr_mix_transformer.py deleted file mode 100644 index 176c504966137da475caf0928983b16a949f5c41..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_asr_mix_transformer.py +++ /dev/null @@ -1,462 +0,0 @@ -#!/usr/bin/env python3 -# encoding: utf-8 - -# Copyright 2020 Johns Hopkins University (Xuankai Chang) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -""" -Transformer speech recognition model for single-channel multi-speaker mixture speech. - -It is a fusion of `e2e_asr_mix.py` and `e2e_asr_transformer.py`. Refer to: - https://arxiv.org/pdf/2002.03921.pdf -1. The Transformer-based Encoder now consists of three stages: - (a): Enc_mix: encoding input mixture speech; - (b): Enc_SD: separating mixed speech representations; - (c): Enc_rec: transforming each separated speech representation. -2. PIT is used in CTC to determine the permutation with minimum loss. -""" - -from argparse import Namespace -import logging -import math - -import numpy -import torch - -from espnet.nets.asr_interface import ASRInterface -from espnet.nets.ctc_prefix_score import CTCPrefixScore -from espnet.nets.e2e_asr_common import end_detect -from espnet.nets.pytorch_backend.ctc import CTC -from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD -from espnet.nets.pytorch_backend.e2e_asr_mix import E2E as E2EASRMIX -from espnet.nets.pytorch_backend.e2e_asr_mix import PIT -from espnet.nets.pytorch_backend.e2e_asr_transformer import E2E as E2EASR -from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask -from espnet.nets.pytorch_backend.nets_utils import th_accuracy -from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO -from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos -from espnet.nets.pytorch_backend.transformer.encoder_mix import EncoderMix -from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask -from espnet.nets.pytorch_backend.transformer.mask import target_mask - - -class E2E(E2EASR, ASRInterface, torch.nn.Module): - """E2E module. - - :param int idim: dimension of inputs - :param int odim: dimension of outputs - :param Namespace args: argument Namespace containing options - """ - - @staticmethod - def add_arguments(parser): - """Add arguments.""" - E2EASR.add_arguments(parser) - E2EASRMIX.encoder_mix_add_arguments(parser) - return parser - - def __init__(self, idim, odim, args, ignore_id=-1): - """Construct an E2E object. 
- - :param int idim: dimension of inputs - :param int odim: dimension of outputs - :param Namespace args: argument Namespace containing options - """ - super(E2E, self).__init__(idim, odim, args, ignore_id=-1) - if args.transformer_attn_dropout_rate is None: - args.transformer_attn_dropout_rate = args.dropout_rate - self.encoder = EncoderMix( - idim=idim, - attention_dim=args.adim, - attention_heads=args.aheads, - linear_units=args.eunits, - num_blocks_sd=args.elayers_sd, - num_blocks_rec=args.elayers, - input_layer=args.transformer_input_layer, - dropout_rate=args.dropout_rate, - positional_dropout_rate=args.dropout_rate, - attention_dropout_rate=args.transformer_attn_dropout_rate, - num_spkrs=args.num_spkrs, - ) - - if args.mtlalpha > 0.0: - self.ctc = CTC( - odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=False - ) - else: - self.ctc = None - - self.num_spkrs = args.num_spkrs - self.pit = PIT(self.num_spkrs) - - def forward(self, xs_pad, ilens, ys_pad): - """E2E forward. - - :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim) - :param torch.Tensor ilens: batch of lengths of source sequences (B) - :param torch.Tensor ys_pad: batch of padded target sequences - (B, num_spkrs, Lmax) - :return: ctc loass value - :rtype: torch.Tensor - :return: attention loss value - :rtype: torch.Tensor - :return: accuracy in attention decoder - :rtype: float - """ - # 1. forward encoder - xs_pad = xs_pad[:, : max(ilens)] # for data parallel - src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2) - hs_pad, hs_mask = self.encoder(xs_pad, src_mask) # list: speaker differentiate - self.hs_pad = hs_pad - - # 2. ctc - # TODO(karita) show predicted text - # TODO(karita) calculate these stats - cer_ctc = None - assert self.mtlalpha > 0.0 - batch_size = xs_pad.size(0) - ys_pad = ys_pad.transpose(0, 1) # (num_spkrs, B, Lmax) - hs_len = [hs_mask[i].view(batch_size, -1).sum(1) for i in range(self.num_spkrs)] - loss_ctc_perm = torch.stack( - [ - self.ctc( - hs_pad[i // self.num_spkrs].view(batch_size, -1, self.adim), - hs_len[i // self.num_spkrs], - ys_pad[i % self.num_spkrs], - ) - for i in range(self.num_spkrs ** 2) - ], - dim=1, - ) # (B, num_spkrs^2) - loss_ctc, min_perm = self.pit.pit_process(loss_ctc_perm) - logging.info("ctc loss:" + str(float(loss_ctc))) - - # Permute the labels according to loss - for b in range(batch_size): # B - ys_pad[:, b] = ys_pad[min_perm[b], b] # (num_spkrs, B, Lmax) - ys_out_len = [ - float(torch.sum(ys_pad[i] != self.ignore_id)) for i in range(self.num_spkrs) - ] - - # TODO(karita) show predicted text - # TODO(karita) calculate these stats - if self.error_calculator is not None: - cer_ctc = [] - for i in range(self.num_spkrs): - ys_hat = self.ctc.argmax(hs_pad[i].view(batch_size, -1, self.adim)).data - cer_ctc.append( - self.error_calculator(ys_hat.cpu(), ys_pad[i].cpu(), is_ctc=True) - ) - cer_ctc = sum(map(lambda x: x[0] * x[1], zip(cer_ctc, ys_out_len))) / sum( - ys_out_len - ) - else: - cer_ctc = None - - # 3. forward decoder - if self.mtlalpha == 1.0: - loss_att, self.acc, cer, wer = None, None, None, None - else: - pred_pad, pred_mask = [None] * self.num_spkrs, [None] * self.num_spkrs - loss_att, acc = [None] * self.num_spkrs, [None] * self.num_spkrs - for i in range(self.num_spkrs): - ( - pred_pad[i], - pred_mask[i], - loss_att[i], - acc[i], - ) = self.decoder_and_attention( - hs_pad[i], hs_mask[i], ys_pad[i], batch_size - ) - - # 4. 
compute attention loss - # The following is just an approximation - loss_att = sum(map(lambda x: x[0] * x[1], zip(loss_att, ys_out_len))) / sum( - ys_out_len - ) - self.acc = sum(map(lambda x: x[0] * x[1], zip(acc, ys_out_len))) / sum( - ys_out_len - ) - - # 5. compute cer/wer - if self.training or self.error_calculator is None: - cer, wer = None, None - else: - ys_hat = pred_pad.argmax(dim=-1) - cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu()) - - # copyied from e2e_asr - alpha = self.mtlalpha - if alpha == 0: - self.loss = loss_att - loss_att_data = float(loss_att) - loss_ctc_data = None - elif alpha == 1: - self.loss = loss_ctc - loss_att_data = None - loss_ctc_data = float(loss_ctc) - else: - self.loss = alpha * loss_ctc + (1 - alpha) * loss_att - loss_att_data = float(loss_att) - loss_ctc_data = float(loss_ctc) - - loss_data = float(self.loss) - if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data): - self.reporter.report( - loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data - ) - else: - logging.warning("loss (=%f) is not correct", loss_data) - return self.loss - - def decoder_and_attention(self, hs_pad, hs_mask, ys_pad, batch_size): - """Forward decoder and attention loss.""" - # forward decoder - ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id) - ys_mask = target_mask(ys_in_pad, self.ignore_id) - pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask) - - # compute attention loss - loss_att = self.criterion(pred_pad, ys_out_pad) - acc = th_accuracy( - pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id - ) - return pred_pad, pred_mask, loss_att, acc - - def encode(self, x): - """Encode acoustic features. - - :param ndarray x: source acoustic feature (T, D) - :return: encoder outputs - :rtype: torch.Tensor - """ - self.eval() - x = torch.as_tensor(x).unsqueeze(0) - enc_output, _ = self.encoder(x, None) - return enc_output - - def recog(self, enc_output, recog_args, char_list=None, rnnlm=None, use_jit=False): - """Recognize input speech of each speaker. 
- - :param ndnarray enc_output: encoder outputs (B, T, D) or (T, D) - :param Namespace recog_args: argment Namespace contraining options - :param list char_list: list of characters - :param torch.nn.Module rnnlm: language model module - :return: N-best decoding results - :rtype: list - """ - if recog_args.ctc_weight > 0.0: - lpz = self.ctc.log_softmax(enc_output) - lpz = lpz.squeeze(0) - else: - lpz = None - - h = enc_output.squeeze(0) - - logging.info("input lengths: " + str(h.size(0))) - # search parms - beam = recog_args.beam_size - penalty = recog_args.penalty - ctc_weight = recog_args.ctc_weight - - # preprare sos - y = self.sos - vy = h.new_zeros(1).long() - - if recog_args.maxlenratio == 0: - maxlen = h.shape[0] - else: - # maxlen >= 1 - maxlen = max(1, int(recog_args.maxlenratio * h.size(0))) - minlen = int(recog_args.minlenratio * h.size(0)) - logging.info("max output length: " + str(maxlen)) - logging.info("min output length: " + str(minlen)) - - # initialize hypothesis - if rnnlm: - hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None} - else: - hyp = {"score": 0.0, "yseq": [y]} - if lpz is not None: - ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos, numpy) - hyp["ctc_state_prev"] = ctc_prefix_score.initial_state() - hyp["ctc_score_prev"] = 0.0 - if ctc_weight != 1.0: - # pre-pruning based on attention scores - ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO)) - else: - ctc_beam = lpz.shape[-1] - hyps = [hyp] - ended_hyps = [] - - import six - - traced_decoder = None - for i in six.moves.range(maxlen): - logging.debug("position " + str(i)) - - hyps_best_kept = [] - for hyp in hyps: - vy[0] = hyp["yseq"][i] - - # get nbest local scores and their ids - ys_mask = subsequent_mask(i + 1).unsqueeze(0) - ys = torch.tensor(hyp["yseq"]).unsqueeze(0) - # FIXME: jit does not match non-jit result - if use_jit: - if traced_decoder is None: - traced_decoder = torch.jit.trace( - self.decoder.forward_one_step, (ys, ys_mask, enc_output) - ) - local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0] - else: - local_att_scores = self.decoder.forward_one_step( - ys, ys_mask, enc_output - )[0] - - if rnnlm: - rnnlm_state, local_lm_scores = rnnlm.predict(hyp["rnnlm_prev"], vy) - local_scores = ( - local_att_scores + recog_args.lm_weight * local_lm_scores - ) - else: - local_scores = local_att_scores - - if lpz is not None: - local_best_scores, local_best_ids = torch.topk( - local_att_scores, ctc_beam, dim=1 - ) - ctc_scores, ctc_states = ctc_prefix_score( - hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"] - ) - local_scores = (1.0 - ctc_weight) * local_att_scores[ - :, local_best_ids[0] - ] + ctc_weight * torch.from_numpy( - ctc_scores - hyp["ctc_score_prev"] - ) - if rnnlm: - local_scores += ( - recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]] - ) - local_best_scores, joint_best_ids = torch.topk( - local_scores, beam, dim=1 - ) - local_best_ids = local_best_ids[:, joint_best_ids[0]] - else: - local_best_scores, local_best_ids = torch.topk( - local_scores, beam, dim=1 - ) - - for j in six.moves.range(beam): - new_hyp = {} - new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j]) - new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"])) - new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"] - new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j]) - if rnnlm: - new_hyp["rnnlm_prev"] = rnnlm_state - if lpz is not None: - new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[0, j]] - new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[0, j]] - # 
will be (2 x beam) hyps at most - hyps_best_kept.append(new_hyp) - - hyps_best_kept = sorted( - hyps_best_kept, key=lambda x: x["score"], reverse=True - )[:beam] - - # sort and get nbest - hyps = hyps_best_kept - logging.debug("number of pruned hypothes: " + str(len(hyps))) - if char_list is not None: - logging.debug( - "best hypo: " - + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]]) - ) - - # add eos in the final loop to avoid that there are no ended hyps - if i == maxlen - 1: - logging.info("adding in the last postion in the loop") - for hyp in hyps: - hyp["yseq"].append(self.eos) - - # add ended hypothes to a final list, and removed them from current hypothes - # (this will be a probmlem, number of hyps < beam) - remained_hyps = [] - for hyp in hyps: - if hyp["yseq"][-1] == self.eos: - # only store the sequence that has more than minlen outputs - # also add penalty - if len(hyp["yseq"]) > minlen: - hyp["score"] += (i + 1) * penalty - if rnnlm: # Word LM needs to add final score - hyp["score"] += recog_args.lm_weight * rnnlm.final( - hyp["rnnlm_prev"] - ) - ended_hyps.append(hyp) - else: - remained_hyps.append(hyp) - - # end detection - - if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0: - logging.info("end detected at %d", i) - break - - hyps = remained_hyps - if len(hyps) > 0: - logging.debug("remeined hypothes: " + str(len(hyps))) - else: - logging.info("no hypothesis. Finish decoding.") - break - - if char_list is not None: - for hyp in hyps: - logging.debug( - "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]]) - ) - - logging.debug("number of ended hypothes: " + str(len(ended_hyps))) - - nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[ - : min(len(ended_hyps), recog_args.nbest) - ] - - # check number of hypotheis - if len(nbest_hyps) == 0: - logging.warning( - "there is no N-best results, perform recognition " - "again with smaller minlenratio." - ) - # should copy becasuse Namespace will be overwritten globally - recog_args = Namespace(**vars(recog_args)) - recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1) - return self.recog(enc_output, recog_args, char_list, rnnlm) - - logging.info("total log probability: " + str(nbest_hyps[0]["score"])) - logging.info( - "normalized log probability: " - + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"])) - ) - return nbest_hyps - - def recognize(self, x, recog_args, char_list=None, rnnlm=None, use_jit=False): - """Recognize input speech of each speaker. 
- - :param ndnarray x: input acoustic feature (B, T, D) or (T, D) - :param Namespace recog_args: argment Namespace contraining options - :param list char_list: list of characters - :param torch.nn.Module rnnlm: language model module - :return: N-best decoding results - :rtype: list - """ - # Encoder - enc_output = self.encode(x) - - # Decoder - nbest_hyps = [] - for enc_out in enc_output: - nbest_hyps.append( - self.recog(enc_out, recog_args, char_list, rnnlm, use_jit) - ) - return nbest_hyps diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/spec_utils.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/spec_utils.py deleted file mode 100644 index a3fd46d333da7becc7f09f42c084ac7cde661035..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/spec_utils.py +++ /dev/null @@ -1,667 +0,0 @@ -import os, librosa -import numpy as np -import soundfile as sf -from tqdm import tqdm -import json, math, hashlib - - -def crop_center(h1, h2): - h1_shape = h1.size() - h2_shape = h2.size() - - if h1_shape[3] == h2_shape[3]: - return h1 - elif h1_shape[3] < h2_shape[3]: - raise ValueError("h1_shape[3] must be greater than h2_shape[3]") - - # s_freq = (h2_shape[2] - h1_shape[2]) // 2 - # e_freq = s_freq + h1_shape[2] - s_time = (h1_shape[3] - h2_shape[3]) // 2 - e_time = s_time + h2_shape[3] - h1 = h1[:, :, :, s_time:e_time] - - return h1 - - -def wave_to_spectrogram( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def wave_to_spectrogram_mt( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - import threading - - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - def run_thread(**kwargs): - global spec_left - spec_left = librosa.stft(**kwargs) - - thread = threading.Thread( - target=run_thread, - kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length}, - ) - thread.start() - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - thread.join() - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def combine_spectrograms(specs, mp): - l = min([specs[i].shape[2] for i in specs]) - spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64) - offset = 0 - bands_n = len(mp.param["band"]) - - for d in range(1, 
bands_n + 1): - h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"] - spec_c[:, offset : offset + h, :l] = specs[d][ - :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l - ] - offset += h - - if offset > mp.param["bins"]: - raise ValueError("Too much bins") - - # lowpass fiter - if ( - mp.param["pre_filter_start"] > 0 - ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: - if bands_n == 1: - spec_c = fft_lp_filter( - spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"] - ) - else: - gp = 1 - for b in range( - mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"] - ): - g = math.pow( - 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0 - ) - gp = g - spec_c[:, b, :] *= g - - return np.asfortranarray(spec_c) - - -def spectrogram_to_image(spec, mode="magnitude"): - if mode == "magnitude": - if np.iscomplexobj(spec): - y = np.abs(spec) - else: - y = spec - y = np.log10(y**2 + 1e-8) - elif mode == "phase": - if np.iscomplexobj(spec): - y = np.angle(spec) - else: - y = spec - - y -= y.min() - y *= 255 / y.max() - img = np.uint8(y) - - if y.ndim == 3: - img = img.transpose(1, 2, 0) - img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2) - - return img - - -def reduce_vocal_aggressively(X, y, softmask): - v = X - y - y_mag_tmp = np.abs(y) - v_mag_tmp = np.abs(v) - - v_mask = v_mag_tmp > y_mag_tmp - y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) - - return y_mag * np.exp(1.0j * np.angle(y)) - - -def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): - if min_range < fade_size * 2: - raise ValueError("min_range must be >= fade_area * 2") - - mag = mag.copy() - - idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] - starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) - ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) - uninformative = np.where(ends - starts > min_range)[0] - if len(uninformative) > 0: - starts = starts[uninformative] - ends = ends[uninformative] - old_e = None - for s, e in zip(starts, ends): - if old_e is not None and s - old_e < fade_size: - s = old_e - fade_size * 2 - - if s != 0: - weight = np.linspace(0, 1, fade_size) - mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size] - else: - s -= fade_size - - if e != mag.shape[2]: - weight = np.linspace(1, 0, fade_size) - mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e] - else: - e += fade_size - - mag[:, :, s + fade_size : e - fade_size] += ref[ - :, :, s + fade_size : e - fade_size - ] - old_e = e - - return mag - - -def align_wave_head_and_tail(a, b): - l = min([a[0].size, b[0].size]) - - return a[:l, :l], b[:l, :l] - - -def cache_or_load(mix_path, inst_path, mp): - mix_basename = os.path.splitext(os.path.basename(mix_path))[0] - inst_basename = os.path.splitext(os.path.basename(inst_path))[0] - - cache_dir = "mph{}".format( - hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest() - ) - mix_cache_dir = os.path.join("cache", cache_dir) - inst_cache_dir = os.path.join("cache", cache_dir) - - os.makedirs(mix_cache_dir, exist_ok=True) - os.makedirs(inst_cache_dir, exist_ok=True) - - mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") - inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") - - if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): - X_spec_m = np.load(mix_cache_path) - y_spec_m = np.load(inst_cache_path) - else: - X_wave, y_wave, 
X_spec_s, y_spec_s = {}, {}, {}, {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - X_wave[d], _ = librosa.load( - mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"] - ) - y_wave[d], _ = librosa.load( - inst_path, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - else: # lower bands - X_wave[d] = librosa.resample( - X_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - y_wave[d] = librosa.resample( - y_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) - - X_spec_s[d] = wave_to_spectrogram( - X_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - y_spec_s[d] = wave_to_spectrogram( - y_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - del X_wave, y_wave - - X_spec_m = combine_spectrograms(X_spec_s, mp) - y_spec_m = combine_spectrograms(y_spec_s, mp) - - if X_spec_m.shape != y_spec_m.shape: - raise ValueError("The combined spectrograms are different: " + mix_path) - - _, ext = os.path.splitext(mix_path) - - np.save(mix_cache_path, X_spec_m) - np.save(inst_cache_path, y_spec_m) - - return X_spec_m, y_spec_m - - -def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hop_length) - wave_right = librosa.istft(spec_right, hop_length=hop_length) - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): - import threading - - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - def run_thread(**kwargs): - global wave_left - wave_left = librosa.istft(**kwargs) - - thread = threading.Thread( - target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length} - ) - thread.start() - wave_right = librosa.istft(spec_right, hop_length=hop_length) - thread.join() - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): - wave_band = {} - bands_n = len(mp.param["band"]) - offset = 0 - - for d in range(1, bands_n + 1): - bp = mp.param["band"][d] - spec_s = np.ndarray( - shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex - ) - h = bp["crop_stop"] - bp["crop_start"] - spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[ - :, offset : offset + h, : - ] - - offset += h - if d == bands_n: # higher - if 
extra_bins_h: # if --high_end_process bypass - max_bin = bp["n_fft"] // 2 - spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[ - :, :extra_bins_h, : - ] - if bp["hpf_start"] > 0: - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - if bands_n == 1: - wave = spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - else: - wave = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - else: - sr = mp.param["band"][d + 1]["sr"] - if d == 1: # lower - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave = librosa.resample( - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - bp["sr"], - sr, - res_type="sinc_fastest", - ) - else: # mid - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave2 = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") - wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy") - - return wave.T - - -def fft_lp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop): - g -= 1 / (bin_stop - bin_start) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, bin_stop:, :] *= 0 - - return spec - - -def fft_hp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop, -1): - g -= 1 / (bin_start - bin_stop) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, 0 : bin_stop + 1, :] *= 0 - - return spec - - -def mirroring(a, spec_m, input_high_end, mp): - if "mirroring" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) - - return np.where( - np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror - ) - - if "mirroring2" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mi = np.multiply(mirror, input_high_end * 1.7) - - return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) - - -def ensembling(a, specs): - for i in range(1, len(specs)): - if i == 1: - spec = specs[0] - - ln = min([spec.shape[2], specs[i].shape[2]]) - spec = spec[:, :, :ln] - specs[i] = specs[i][:, :, :ln] - - if "min_mag" == a: - spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) - if "max_mag" == a: - spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) - - return spec - - -def stft(wave, nfft, hl): - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - spec_left = librosa.stft(wave_left, nfft, hop_length=hl) - spec_right = librosa.stft(wave_right, nfft, hop_length=hl) - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def istft(spec, hl): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hl) - wave_right = librosa.istft(spec_right, hop_length=hl) - wave = np.asfortranarray([wave_left, wave_right]) - - -if __name__ == "__main__": - 
import cv2 - import sys - import time - import argparse - from model_param_init import ModelParameters - - p = argparse.ArgumentParser() - p.add_argument( - "--algorithm", - "-a", - type=str, - choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"], - default="min_mag", - ) - p.add_argument( - "--model_params", - "-m", - type=str, - default=os.path.join("modelparams", "1band_sr44100_hl512.json"), - ) - p.add_argument("--output_name", "-o", type=str, default="output") - p.add_argument("--vocals_only", "-v", action="store_true") - p.add_argument("input", nargs="+") - args = p.parse_args() - - start_time = time.time() - - if args.algorithm.startswith("invert") and len(args.input) != 2: - raise ValueError("There should be two input files.") - - if not args.algorithm.startswith("invert") and len(args.input) < 2: - raise ValueError("There must be at least two input files.") - - wave, specs = {}, {} - mp = ModelParameters(args.model_params) - - for i in range(len(args.input)): - spec = {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - wave[d], _ = librosa.load( - args.input[i], - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - - if len(wave[d].shape) == 1: # mono to stereo - wave[d] = np.array([wave[d], wave[d]]) - else: # lower bands - wave[d] = librosa.resample( - wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - spec[d] = wave_to_spectrogram( - wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - specs[i] = combine_spectrograms(spec, mp) - - del wave - - if args.algorithm == "deep": - d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) - v_spec = d_spec - specs[1] - sf.write( - os.path.join("{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - - if args.algorithm.startswith("invert"): - ln = min([specs[0].shape[2], specs[1].shape[2]]) - specs[0] = specs[0][:, :, :ln] - specs[1] = specs[1][:, :, :ln] - - if "invert_p" == args.algorithm: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) - v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0])) - else: - specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) - v_spec = specs[0] - specs[1] - - if not args.vocals_only: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - v_mag = np.abs(v_spec) - - X_image = spectrogram_to_image(X_mag) - y_image = spectrogram_to_image(y_mag) - v_image = spectrogram_to_image(v_mag) - - cv2.imwrite("{}_X.png".format(args.output_name), X_image) - cv2.imwrite("{}_y.png".format(args.output_name), y_image) - cv2.imwrite("{}_v.png".format(args.output_name), v_image) - - sf.write( - "{}_X.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[0], mp), - mp.param["sr"], - ) - sf.write( - "{}_y.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[1], mp), - mp.param["sr"], - ) - - sf.write( - "{}_v.wav".format(args.output_name), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - else: - if not args.algorithm == "deep": - sf.write( - os.path.join("ensembled", "{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), - mp.param["sr"], - ) - - if args.algorithm == "align": - trackalignment = [ - { - "file1": '"{}"'.format(args.input[0]), - "file2": '"{}"'.format(args.input[1]), - } - ] - - for 
i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): - os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") - - # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) diff --git a/spaces/shravankumar147/IsCat/README.md b/spaces/shravankumar147/IsCat/README.md deleted file mode 100644 index 2716b3f339826bcb08b425856a6d2d218330a538..0000000000000000000000000000000000000000 --- a/spaces/shravankumar147/IsCat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: IsCat -emoji: 🚀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sidharthism/fashion-eye/models/stylegan/stylegan_tf/dnnlib/submission/_internal/run.py b/spaces/sidharthism/fashion-eye/models/stylegan/stylegan_tf/dnnlib/submission/_internal/run.py deleted file mode 100644 index 18f830d81ead15fece09382cc30654fb89d14d1b..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/models/stylegan/stylegan_tf/dnnlib/submission/_internal/run.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Helper for launching run functions in computing clusters. - -During the submit process, this file is copied to the appropriate run dir. -When the job is launched in the cluster, this module is the first thing that -is run inside the docker container. -""" - -import os -import pickle -import sys - -# PYTHONPATH should have been set so that the run_dir/src is in it -import dnnlib - -def main(): - if not len(sys.argv) >= 4: - raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!") - - run_dir = str(sys.argv[1]) - task_name = str(sys.argv[2]) - host_name = str(sys.argv[3]) - - submit_config_path = os.path.join(run_dir, "submit_config.pkl") - - # SubmitConfig should have been pickled to the run dir - if not os.path.exists(submit_config_path): - raise RuntimeError("SubmitConfig pickle file does not exist!") - - submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb")) - dnnlib.submission.submit.set_user_name_override(submit_config.user_name) - - submit_config.task_name = task_name - submit_config.host_name = host_name - - dnnlib.submission.submit.run_wrapper(submit_config) - -if __name__ == "__main__": - main() diff --git a/spaces/skf15963/summary/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py b/spaces/skf15963/summary/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py deleted file mode 100644 index ed400468cc3d0820d4b34385f270639014039ad1..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py +++ /dev/null @@ -1,649 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from fengshen.models.zen2.modeling import ZenForSequenceClassification -from fengshen.models.zen2.ngram_utils import ZenNgramDict -from fengshen.models.zen2.tokenization import BertTokenizer -from pytorch_lightning.callbacks import LearningRateMonitor -import csv -from dataclasses import dataclass -import logging -import math -import numpy as np -import os -from tqdm import tqdm -import json -import torch -import pytorch_lightning as pl -import argparse -from pytorch_lightning.callbacks import ModelCheckpoint -from torch.utils.data import Dataset, DataLoader - -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt='%m/%d/%Y %H:%M:%S', - level=logging.INFO) -logger = logging.getLogger(__name__) - - -class InputExample(object): - """A single training/test example for simple sequence classification.""" - - def __init__(self, guid, text_a, text_b=None, label=None, qid=0): - """Constructs a InputExample. - - Args: - guid: Unique id for the example. - text_a: string. The untokenized text of the first sequence. For single - sequence tasks, only this sequence must be specified. - text_b: (Optional) string. The untokenized text of the second sequence. - Only must be specified for sequence pair tasks. - label: (Optional) string. The label of the example. This should be - specified for train and dev examples, but not for test examples. 
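- qid: (Optional) int. Numeric id for the example, carried through to the
- corresponding InputFeatures; defaults to 0.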
- """ - self.guid = guid - self.text_a = text_a - self.text_b = text_b - self.label = label - self.qid = qid - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, input_ids, input_mask, segment_ids, label_id, - ngram_ids, ngram_starts, ngram_lengths, ngram_tuples, ngram_seg_ids, ngram_masks, ngram_freqs, - qid=-1): - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.label_id = label_id - self.qid = qid - - self.ngram_ids = ngram_ids - self.ngram_starts = ngram_starts - self.ngram_lengths = ngram_lengths - self.ngram_tuples = ngram_tuples - self.ngram_seg_ids = ngram_seg_ids - self.ngram_masks = ngram_masks - self.ngram_freqs = ngram_freqs - - -class DataProcessor(object): - """Base class for data converters for sequence classification data sets.""" - - def get_examples(self, data_path, mode): - """Gets a collection of `InputExample`s for the train set.""" - raise NotImplementedError() - - @classmethod - def _read_tsv(cls, input_file, quotechar=None): - """Reads a tab separated value file.""" - with open(input_file, "r") as f: - reader = csv.reader(f, delimiter="\t", quotechar=quotechar) - lines = [] - for line in reader: - # if sys.version_info[0] == 2: - # line = list(unicode(cell, 'utf-8') for cell in line) - lines.append(line) - return lines - - @classmethod - def _read_json(cls, input_file): - """Reads a jsonl file.""" - with open(input_file, "r", encoding="utf-8") as f: - lines = f.readlines() - samples = [] - for line in tqdm(lines): - data = json.loads(line) - samples.append(data) - return samples - - -class TnewsProcessor(DataProcessor): - """Processor for the tnews data set (HIT version).""" - - def get_train_examples(self, data_dir): - """See base class.""" - return self._create_examples( - self._read_json(os.path.join(data_dir, "train.json")), "train") - - def get_examples(self, data_path, mode): - return self._create_examples( - self._read_json(data_path), - set_type=mode - ) - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - # if i == 0: - # continue - guid = "%s-%s" % (set_type, i) - # text_a = line[0] - text_a = line['sentence'] - label = line['label'] if 'label' in line.keys() else None - examples.append( - InputExample(guid=guid, text_a=text_a, label=label)) - return examples - - -class OcnliProcessor(DataProcessor): - """Processor for the ocnli or cmnli data set (HIT version).""" - - def get_examples(self, data_path, mode): - return self._create_examples( - self._read_json(data_path), - set_type=mode - ) - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - # if i == 0: - # continue - guid = "%s-%s" % (set_type, i) - # text_a = line[0] - text_a = line['sentence1'] - text_b = line['sentence2'] - label = line['label'] if 'label' in line.keys() else None - # 特殊处理,cmnli有label为-的 - if label == '-': - label = None - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -class IflytekProcessor(DataProcessor): - """Processor for the iflytek data set (HIT version).""" - - def get_examples(self, data_path, mode): - return self._create_examples( - self._read_json(data_path), - set_type=mode - ) - - def _create_examples(self, lines, set_type): - """Creates examples for the training and dev sets.""" - examples = [] - for (i, 
line) in enumerate(lines): - # if i == 0: - # continue - guid = "%s-%s" % (set_type, i) - # text_a = line[0] - text_a = line['sentence'] - label = line['label'] if 'label' in line.keys() else None - examples.append( - InputExample(guid=guid, text_a=text_a, label=label)) - return examples - - -def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict): - """Loads a data file into a list of `InputBatch`s.""" - - # label_map = {label : i for i, label in enumerate(label_list)} - features = [] - for (ex_index, example) in enumerate(examples): - tokens_a = tokenizer.tokenize(example.text_a) - - tokens_b = None - if example.text_b: - tokens_b = tokenizer.tokenize(example.text_b) - # Modifies `tokens_a` and `tokens_b` in place so that the total - # length is less than the specified length. - # Account for [CLS], [SEP], [SEP] with "- 3" - _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) - else: - # Account for [CLS] and [SEP] with "- 2" - if len(tokens_a) > max_seq_length - 2: - tokens_a = tokens_a[:(max_seq_length - 2)] - - # The convention in BERT is: - # (a) For sequence pairs: - # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] - # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 - # (b) For single sequences: - # tokens: [CLS] the dog is hairy . [SEP] - # type_ids: 0 0 0 0 0 0 0 - # - # Where "type_ids" are used to indicate whether this is the first - # sequence or the second sequence. The embedding vectors for `type=0` and - # `type=1` were learned during pre-training and are added to the wordpiece - # embedding vector (and position vector). This is not *strictly* necessary - # since the [SEP] token unambigiously separates the sequences, but it makes - # it easier for the model to learn the concept of sequences. - # - # For classification tasks, the first vector (corresponding to [CLS]) is - # used as as the "sentence vector". Note that this only makes sense because - # the entire model is fine-tuned. - tokens = ["[CLS]"] + tokens_a + ["[SEP]"] - segment_ids = [0] * len(tokens) - - if tokens_b: - tokens += tokens_b + ["[SEP]"] - segment_ids += [1] * (len(tokens_b) + 1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. 
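- # Illustrative example (hypothetical numbers): with max_seq_length = 8 and
- # five real tokens, three zeros are appended to input_ids, input_mask and
- # segment_ids, so input_mask becomes [1, 1, 1, 1, 1, 0, 0, 0].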
- padding = [0] * (max_seq_length - len(input_ids)) - input_ids += padding - input_mask += padding - segment_ids += padding - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - # ----------- code for ngram BEGIN----------- - ngram_matches = [] - # Filter the word segment from 2 to max_ngram_len to check whether there is a word - max_gram_n = ngram_dict.max_ngram_len - for p in range(2, max_gram_n): - for q in range(0, len(tokens) - p + 1): - character_segment = tokens[q:q + p] - # j is the starting position of the word - # i is the length of the current word - character_segment = tuple(character_segment) - if character_segment in ngram_dict.ngram_to_id_dict: - ngram_index = ngram_dict.ngram_to_id_dict[character_segment] - ngram_freq = ngram_dict.ngram_to_freq_dict[character_segment] - ngram_matches.append([ngram_index, q, p, character_segment, ngram_freq]) - - # shuffle(ngram_matches) - ngram_matches = sorted(ngram_matches, key=lambda s: s[0]) - # max_word_in_seq_proportion = max_word_in_seq - max_word_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq) - if len(ngram_matches) > max_word_in_seq_proportion: - ngram_matches = ngram_matches[:max_word_in_seq_proportion] - ngram_ids = [ngram[0] for ngram in ngram_matches] - ngram_positions = [ngram[1] for ngram in ngram_matches] - ngram_lengths = [ngram[2] for ngram in ngram_matches] - ngram_tuples = [ngram[3] for ngram in ngram_matches] - ngram_freqs = [ngram[4] for ngram in ngram_matches] - ngram_seg_ids = [0 if position < len([id for id in segment_ids if id == 0]) else 1 for position in - ngram_positions] - - ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=np.bool) - ngram_mask_array[:len(ngram_ids)] = 1 - - # Zero-pad up to the max word in seq length. 
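- # The n-gram lists are padded to the fixed length max_ngram_in_seq so they can
- # later be stacked into rectangular tensors; ngram_mask_array above already
- # marks which of these slots hold real n-grams.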
- padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids)) - ngram_ids += padding - ngram_positions += padding - ngram_lengths += padding - ngram_seg_ids += padding - ngram_freqs += padding - - # ----------- code for ngram END----------- - - label_id = label_map[example.label] if example.label is not None else 0 - # if ex_index < 5: - # logger.info("*** Example ***") - # logger.info("guid: %s" % (example.guid)) - # logger.info("tokens: %s" % " ".join( - # [str(x) for x in tokens])) - # logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - # logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) - # logger.info( - # "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - # logger.info("label: %s (id = %d)" % (example.label, label_id)) - # logger.info("ngram_ids: %s" % " ".join([str(x) for x in ngram_ids])) - # logger.info("ngram_positions: %s" % " ".join([str(x) for x in ngram_positions])) - # logger.info("ngram_lengths: %s" % " ".join([str(x) for x in ngram_lengths])) - # logger.info("ngram_tuples: %s" % " ".join([str(x) for x in ngram_tuples])) - # logger.info("ngram_seg_ids: %s" % " ".join([str(x) for x in ngram_seg_ids])) - # logger.info("ngram_freqs: %s" % " ".join([str(x) for x in ngram_freqs])) - - features.append( - InputFeatures(input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - label_id=label_id, - ngram_ids=ngram_ids, - ngram_starts=ngram_positions, - ngram_lengths=ngram_lengths, - ngram_tuples=ngram_tuples, - ngram_seg_ids=ngram_seg_ids, - ngram_masks=ngram_mask_array, - ngram_freqs=ngram_freqs, - qid=example.qid)) - return features - - -def _truncate_seq_pair(tokens_a, tokens_b, max_length): - """Truncates a sequence pair in place to the maximum length.""" - - # This is a simple heuristic which will always truncate the longer sequence - # one token at a time. This makes more sense than truncating an equal percent - # of tokens from each, since if one sequence is very short then each token - # that's truncated likely contains more information than a longer sequence. 
- while True: - total_length = len(tokens_a) + len(tokens_b) - if total_length <= max_length: - break - if len(tokens_a) > len(tokens_b): - tokens_a.pop() - else: - tokens_b.pop() - - -class TaskDataset(Dataset): - def __init__(self, data_path, processor, mode='train'): - super().__init__() - self.data = self.load_data(data_path, processor, mode) - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.data[index] - - def load_data(self, data_path, processor, mode): - if mode == "train": - examples = processor.get_examples(data_path, mode) - elif mode == "test": - examples = processor.get_examples(data_path, mode) - elif mode == "dev": - examples = processor.get_examples(data_path, mode) - return examples - - -@dataclass -class TaskCollator: - args = None - tokenizer = None - ngram_dict = None - label2id = None - - def __call__(self, samples): - features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict) - # logger.info(" Num examples = %d", len(samples)) - input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) - segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) - label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) - # qids = torch.tensor([f.qid for f in features], dtype=torch.long) - - ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long) - ngram_starts = torch.tensor([f.ngram_starts for f in features], dtype=torch.long) - ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long) - # ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long) - # ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long) - ngram_freqs = torch.tensor([f.ngram_freqs for f in features], dtype=torch.long) - - batch_size = len(samples) - ngram_positions_matrix = torch.zeros( - size=(batch_size, self.args.max_seq_length, self.ngram_dict.max_ngram_in_seq), - dtype=torch.int) - for batch_id in range(batch_size): - ngram_id = ngram_ids[batch_id] - ngram_start = ngram_starts[batch_id] - ngram_length = ngram_lengths[batch_id] - for i in range(len(ngram_id)): - ngram_positions_matrix[batch_id][ngram_start[i]:ngram_start[i] + ngram_length[i], i] = ngram_freqs[batch_id][i] - ngram_positions_matrix[batch_id] \ - = torch.div(ngram_positions_matrix[batch_id], - torch.stack([torch.sum(ngram_positions_matrix[batch_id], 1)] * - ngram_positions_matrix[batch_id].size(1)).t() + 1e-10) - - return { - 'input_ids': input_ids, - 'input_ngram_ids': ngram_ids, - 'ngram_position_matrix': ngram_positions_matrix, - 'attention_mask': input_mask, - 'token_type_ids': segment_ids, - 'labels': label_ids - - } - - # return default_collate(sample_list) - - -class TaskDataModel(pl.LightningDataModule): - @staticmethod - def add_data_specific_args(parent_args): - parser = parent_args.add_argument_group('TASK NAME DataModel') - parser.add_argument('--data_dir', default='./data', type=str) - parser.add_argument('--num_workers', default=8, type=int) - parser.add_argument('--train_data', default='train.json', type=str) - parser.add_argument('--valid_data', default='dev.json', type=str) - parser.add_argument('--test_data', default='test.json', type=str) - parser.add_argument('--train_batchsize', default=16, type=int) - parser.add_argument('--valid_batchsize', default=32, type=int) - 
parser.add_argument('--max_seq_length', default=128, type=int) - - parser.add_argument('--texta_name', default='text', type=str) - parser.add_argument('--textb_name', default='sentence2', type=str) - parser.add_argument('--label_name', default='label', type=str) - parser.add_argument('--id_name', default='id', type=str) - - parser.add_argument('--dataset_name', default=None, type=str) - parser.add_argument('--vocab_file', - type=str, default=None, - help="Vocabulary mapping/file BERT was pretrainined on") - parser.add_argument("--do_lower_case", - action='store_true', - help="Set this flag if you are using an uncased model.") - parser.add_argument('--task_name', default='tnews', type=str) - - return parent_args - - def __init__(self, args): - super().__init__() - self.train_batchsize = args.train_batchsize - self.valid_batchsize = args.valid_batchsize - self.collator = TaskCollator() - self.collator.args = args - self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case) - self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer) - - processors = { - 'afqmc': OcnliProcessor, - 'tnews': TnewsProcessor, - 'ocnli': OcnliProcessor, - 'cmnli': OcnliProcessor, - 'iflytek': IflytekProcessor, - } - if args.task_name not in processors: - raise ValueError("Task not found: %s" % (args.task_name)) - processor = processors[args.task_name]() - if args.dataset_name is None: - self.label2id, self.id2label = self.load_schema(os.path.join( - args.data_dir, args.train_data), args) - self.train_data = TaskDataset(os.path.join( - args.data_dir, args.train_data), processor, mode='train') - self.valid_data = TaskDataset(os.path.join( - args.data_dir, args.valid_data), processor, mode='dev') - self.test_data = TaskDataset(os.path.join( - args.data_dir, args.test_data), processor, mode='test') - self.collator.label2id = self.label2id - else: - import datasets - ds = datasets.load_dataset(args.dataset_name) - self.train_data = ds['train'] - self.valid_data = ds['validation'] - self.test_data = ds['test'] - self.save_hyperparameters(args) - - def train_dataloader(self): - return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False, - collate_fn=self.collator) - - def val_dataloader(self): - return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False, - collate_fn=self.collator) - - def predict_dataloader(self): - return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False, - collate_fn=self.collator) - - def load_schema(self, data_path, args): - with open(data_path, 'r', encoding='utf8') as f: - lines = f.readlines() - label_list = [] - for line in tqdm(lines): - data = json.loads(line) - labels = data[args.label_name] if args.label_name in data.keys( - ) else 0 - if labels not in label_list: - label_list.append(labels) - - label2id, id2label = {}, {} - for i, k in enumerate(label_list): - label2id[k] = i - id2label[i] = k - return label2id, id2label - - -class LitModel(pl.LightningModule): - - @staticmethod - def add_model_specific_args(parent_args): - parser = parent_args.add_argument_group('BaseModel') - parser.add_argument('--num_labels', default=2, type=int) - - return parent_args - - def __init__(self, args): - super().__init__() - self.model = ZenForSequenceClassification.from_pretrained(args.pretrained_model_path, num_labels=args.num_labels) - self.save_hyperparameters(args) 
- - def setup(self, stage) -> None: - if stage == 'fit': - train_loader = self.trainer._data_connector._train_dataloader_source.dataloader() - - # Calculate total steps - if self.trainer.max_epochs > 0: - world_size = self.trainer.world_size - tb_size = self.hparams.train_batchsize * max(1, world_size) - ab_size = self.trainer.accumulate_grad_batches - self.total_steps = (len(train_loader.dataset) * - self.trainer.max_epochs // tb_size) // ab_size - else: - self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches - - print('Total steps: {}' .format(self.total_steps)) - - def training_step(self, batch, batch_idx): - loss, logits = self.model(**batch) - acc = self.comput_metrix(logits, batch['labels']) - self.log('train_loss', loss) - self.log('train_acc', acc) - return loss - - def comput_metrix(self, logits, labels): - y_pred = torch.argmax(logits, dim=-1) - y_pred = y_pred.view(size=(-1,)) - y_true = labels.view(size=(-1,)).float() - corr = torch.eq(y_pred, y_true) - acc = torch.sum(corr.float())/labels.size()[0] - return acc - - def validation_step(self, batch, batch_idx): - loss, logits = self.model(**batch) - acc = self.comput_metrix(logits, batch['labels']) - self.log('val_loss', loss) - self.log('val_acc', acc) - - def predict_step(self, batch, batch_idx): - output = self.model(**batch) - return output.logits - - def configure_optimizers(self): - from fengshen.models.model_utils import configure_optimizers - return configure_optimizers(self) - - -class TaskModelCheckpoint: - @staticmethod - def add_argparse_args(parent_args): - parser = parent_args.add_argument_group('BaseModel') - - parser.add_argument('--monitor', default='train_loss', type=str) - parser.add_argument('--mode', default='min', type=str) - parser.add_argument('--dirpath', default='./log/', type=str) - parser.add_argument( - '--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str) - - parser.add_argument('--save_top_k', default=3, type=float) - parser.add_argument('--every_n_train_steps', default=100, type=float) - parser.add_argument('--save_weights_only', default=True, type=bool) - - return parent_args - - def __init__(self, args): - self.callbacks = ModelCheckpoint(monitor=args.monitor, - save_top_k=args.save_top_k, - mode=args.mode, - every_n_train_steps=args.every_n_train_steps, - save_weights_only=args.save_weights_only, - dirpath=args.dirpath, - filename=args.filename) - - -def save_test(data, args, data_model): - with open(args.output_save_path, 'w', encoding='utf-8') as f: - idx = 0 - for i in range(len(data)): - batch = data[i] - for sample in batch: - tmp_result = dict() - label_id = np.argmax(sample.numpy()) - tmp_result['id'] = data_model.test_data.data[idx]['id'] - tmp_result['label'] = data_model.id2label[label_id] - json_data = json.dumps(tmp_result, ensure_ascii=False) - f.write(json_data+'\n') - idx += 1 - print('save the result to '+args.output_save_path) - - -def main(): - total_parser = argparse.ArgumentParser("TASK NAME") - total_parser.add_argument('--pretrained_model_path', default='', type=str) - total_parser.add_argument('--output_save_path', - default='./predict.json', type=str) - # * Args for data preprocessing - total_parser = TaskDataModel.add_data_specific_args(total_parser) - # * Args for training - total_parser = pl.Trainer.add_argparse_args(total_parser) - total_parser = TaskModelCheckpoint.add_argparse_args(total_parser) - - # * Args for base model - from fengshen.models.model_utils import add_module_args - total_parser = 
add_module_args(total_parser) - total_parser = LitModel.add_model_specific_args(total_parser) - - args = total_parser.parse_args() - - checkpoint_callback = TaskModelCheckpoint(args).callbacks - lr_monitor = LearningRateMonitor(logging_interval='step') - trainer = pl.Trainer.from_argparse_args(args, - callbacks=[checkpoint_callback, lr_monitor] - ) - - data_model = TaskDataModel(args) - model = LitModel(args) - trainer.fit(model, data_model) - - -if __name__ == "__main__": - main() diff --git a/spaces/skf15963/summary/fengshen/models/zen2/__init__.py b/spaces/skf15963/summary/fengshen/models/zen2/__init__.py deleted file mode 100644 index c88bea7b30667f90437256466401f5f73d03f398..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/models/zen2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .configuration_zen2 import ZenConfig -from .modeling import ZenForPreTraining, ZenForTokenClassification, ZenForSequenceClassification, ZenForQuestionAnswering, ZenModel, ZenForMaskedLM -from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer, _is_whitespace, whitespace_tokenize, convert_to_unicode, _is_punctuation, _is_control, VOCAB_NAME -from .ngram_utils import ZenNgramDict, NGRAM_DICT_NAME, extract_ngram_feature, construct_ngram_matrix -__all__ = [ - 'ZenConfig', 'ZenForPreTraining', 'ZenForTokenClassification', 'ZenForSequenceClassification', - 'ZenForQuestionAnswering', 'ZenModel', 'ZenForMaskedLM', 'BertTokenizer', 'BasicTokenizer', - 'WordpieceTokenizer', '_is_whitespace', 'whitespace_tokenize', 'convert_to_unicode', - '_is_punctuation', '_is_control', 'VOCAB_NAME', 'ZenNgramDict', 'NGRAM_DICT_NAME', - 'extract_ngram_feature', 'construct_ngram_matrix', -] -version = "0.1.0" diff --git a/spaces/snoop2head/Gomoku-GPT2/app.py b/spaces/snoop2head/Gomoku-GPT2/app.py deleted file mode 100644 index 1eb2a36739a4843c620bd734111814909a5dbc47..0000000000000000000000000000000000000000 --- a/spaces/snoop2head/Gomoku-GPT2/app.py +++ /dev/null @@ -1,399 +0,0 @@ -""" -- This is a simple gomoku game built with Streamlit by TeddyHuang-00 (huang_nan_2019@pku.edu.cn). -- For Gomoku-GPT2, please refer to Young-Jin Ahn (young_ahn@yonsei.ac.kr). 
- -Shared under MIT license -""" - -import time -from copy import deepcopy -from uuid import uuid4 - -import torch -import numpy as np -import streamlit as st -from scipy.signal import convolve -from streamlit import session_state -from streamlit_server_state import server_state, server_state_lock - -from ai import ( - BOS_TOKEN_ID, - generate_gpt2, - load_model, -) - - -# Utils -class Room: - def __init__(self, room_id) -> None: - self.ROOM_ID = room_id - self.BOARD = np.zeros(shape=(20, 20), dtype=int) - self.PLAYER = _BLACK - self.TURN = self.PLAYER - self.HISTORY = (0, 0) - self.WINNER = _BLANK - self.TIME = time.time() - self.COORDINATE_1D = [BOS_TOKEN_ID] - - -gpt2 = load_model() - - -_BLANK = 0 -_BLACK = 1 -_WHITE = -1 -_PLAYER_SYMBOL = { - _WHITE: "⚪", - _BLANK: "➕", - _BLACK: "⚫", -} -_PLAYER_COLOR = { - _WHITE: "Gomoku-GPT", - _BLANK: "Blank", - _BLACK: "YOU HUMAN", -} -_HORIZONTAL = np.array( - [ - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - ] -) -_VERTICAL = np.array( - [ - [0, 0, 1, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 1, 0, 0], - ] -) -_DIAGONAL_UP_LEFT = np.array( - [ - [1, 0, 0, 0, 0], - [0, 1, 0, 0, 0], - [0, 0, 1, 0, 0], - [0, 0, 0, 1, 0], - [0, 0, 0, 0, 1], - ] -) -_DIAGONAL_UP_RIGHT = np.array( - [ - [0, 0, 0, 0, 1], - [0, 0, 0, 1, 0], - [0, 0, 1, 0, 0], - [0, 1, 0, 0, 0], - [1, 0, 0, 0, 0], - ] -) - -_ROOM_COLOR = { - True: _BLACK, - False: _WHITE, -} - -# Initialize the game -if "ROOM" not in session_state: - session_state.ROOM = Room("local") -if "OWNER" not in session_state: - session_state.OWNER = False - -# Check server health -if "ROOMS" not in server_state: - with server_state_lock["ROOMS"]: - server_state.ROOMS = {} - -# # Layout -# Main -TITLE = st.empty() -ROUND_INFO = st.empty() -BOARD_PLATE = [ - [cell.empty() for cell in st.columns([1 for _ in range(20)])] for _ in range(20) -] -WAIT_FOR_OPPONENT = st.empty() - -# Sidebar -SCORE_TAG = st.sidebar.empty() -SCORE_PLATE = st.sidebar.columns(2) -PLAY_MODE_INFO = st.sidebar.container() -MULTIPLAYER_TAG = st.sidebar.empty() -with st.sidebar.container(): - ANOTHER_ROUND = st.empty() - RESTART = st.empty() - EXIT = st.empty() -GAME_INFO = st.sidebar.container() - - -# Draw the board -def gomoku(): - """ - Draw the board. - - Handle the main logic. - """ - - # Restart the game - def restart() -> None: - """ - Restart the game. - """ - session_state.ROOM = Room(session_state.ROOM.ROOM_ID) - - # Continue new round - def another_round() -> None: - """ - Continue new round. 
- """ - session_state.ROOM = deepcopy(session_state.ROOM) - session_state.ROOM.BOARD = np.zeros(shape=(20, 20), dtype=int) - session_state.ROOM.PLAYER = -session_state.ROOM.PLAYER - session_state.ROOM.TURN = session_state.ROOM.PLAYER - session_state.ROOM.WINNER = _BLANK - session_state.ROOM.COORDINATE_1D = [BOS_TOKEN_ID] - - # Room status sync - def sync_room() -> bool: - room_id = session_state.ROOM.ROOM_ID - if room_id not in server_state.ROOMS.keys(): - session_state.ROOM = Room("local") - return False - elif server_state.ROOMS[room_id].TIME == session_state.ROOM.TIME: - return False - elif server_state.ROOMS[room_id].TIME < session_state.ROOM.TIME: - # Only acquire the lock when writing to the server state - with server_state_lock["ROOMS"]: - server_rooms = server_state.ROOMS - server_rooms[room_id] = session_state.ROOM - server_state.ROOMS = server_rooms - return True - else: - session_state.ROOM = server_state.ROOMS[room_id] - return True - - # Check if winner emerge from move - def check_win() -> int: - """ - Use convolution to check if any player wins. - """ - vertical = convolve( - session_state.ROOM.BOARD, - _VERTICAL, - mode="same", - ) - horizontal = convolve( - session_state.ROOM.BOARD, - _HORIZONTAL, - mode="same", - ) - diagonal_up_left = convolve( - session_state.ROOM.BOARD, - _DIAGONAL_UP_LEFT, - mode="same", - ) - diagonal_up_right = convolve( - session_state.ROOM.BOARD, - _DIAGONAL_UP_RIGHT, - mode="same", - ) - if ( - np.max( - [ - np.max(vertical), - np.max(horizontal), - np.max(diagonal_up_left), - np.max(diagonal_up_right), - ] - ) - == 5 * _BLACK - ): - winner = _BLACK - elif ( - np.min( - [ - np.min(vertical), - np.min(horizontal), - np.min(diagonal_up_left), - np.min(diagonal_up_right), - ] - ) - == 5 * _WHITE - ): - winner = _WHITE - else: - winner = _BLANK - return winner - - # Triggers the board response on click - def handle_click(x, y): - """ - Controls whether to pass on / continue current board / may start new round - """ - if session_state.ROOM.BOARD[x][y] != _BLANK: - pass - elif ( - session_state.ROOM.ROOM_ID in server_state.ROOMS.keys() - and _ROOM_COLOR[session_state.OWNER] - != server_state.ROOMS[session_state.ROOM.ROOM_ID].TURN - ): - sync_room() - - # normal play situation - elif session_state.ROOM.WINNER == _BLANK: - session_state.ROOM = deepcopy(session_state.ROOM) - - session_state.ROOM.BOARD[x][y] = session_state.ROOM.TURN - session_state.ROOM.COORDINATE_1D.append(x * 20 + y) - - session_state.ROOM.TURN = -session_state.ROOM.TURN - session_state.ROOM.WINNER = check_win() - session_state.ROOM.HISTORY = ( - session_state.ROOM.HISTORY[0] - + int(session_state.ROOM.WINNER == _WHITE), - session_state.ROOM.HISTORY[1] - + int(session_state.ROOM.WINNER == _BLACK), - ) - session_state.ROOM.TIME = time.time() - - # Draw board - def draw_board(response: bool): - """construct each buttons for all cells of the board""" - - if response and session_state.ROOM.TURN == 1: # human turn - print("Your turn") - # construction of clickable buttons - for i, row in enumerate(session_state.ROOM.BOARD): - for j, cell in enumerate(row): - BOARD_PLATE[i][j].button( - _PLAYER_SYMBOL[cell], - key=f"{i}:{j}", - on_click=handle_click, - args=(i, j), - ) - - elif response and session_state.ROOM.TURN == -1: # AI turn - print("AI's turn") - gpt_predictions = generate_gpt2( - gpt2, - torch.tensor(session_state.ROOM.COORDINATE_1D).unsqueeze(0), - ) - print(gpt_predictions) - gpt_response = gpt_predictions[len(session_state.ROOM.COORDINATE_1D)] - gpt_i, gpt_j = gpt_response // 20, 
gpt_response % 20 - print(gpt_i, gpt_j) - session_state.ROOM.BOARD[gpt_i][gpt_j] = session_state.ROOM.TURN - session_state.ROOM.COORDINATE_1D.append(gpt_i * 20 + gpt_j) - - # construction of clickable buttons - for i, row in enumerate(session_state.ROOM.BOARD): - for j, cell in enumerate(row): - if ( - i * 20 + j - in gpt_predictions[: len(session_state.ROOM.COORDINATE_1D)] - ): - # disable click for GPT choices - BOARD_PLATE[i][j].button( - _PLAYER_SYMBOL[cell], - key=f"{i}:{j}", - on_click=False, - args=(i, j), - ) - else: - # enable click for other cells available for human choices - BOARD_PLATE[i][j].button( - _PLAYER_SYMBOL[cell], - key=f"{i}:{j}", - on_click=handle_click, - args=(i, j), - ) - - # change turn - session_state.ROOM.TURN = -session_state.ROOM.TURN - session_state.ROOM.WINNER = check_win() - session_state.ROOM.HISTORY = ( - session_state.ROOM.HISTORY[0] - + int(session_state.ROOM.WINNER == _WHITE), - session_state.ROOM.HISTORY[1] - + int(session_state.ROOM.WINNER == _BLACK), - ) - session_state.ROOM.TIME = time.time() - - if not response or session_state.ROOM.WINNER != _BLANK: - print("Game over") - for i, row in enumerate(session_state.ROOM.BOARD): - for j, cell in enumerate(row): - BOARD_PLATE[i][j].write( - _PLAYER_SYMBOL[cell], - key=f"{i}:{j}", - ) - - # Game process control - def game_control(): - if session_state.ROOM.WINNER != _BLANK: - draw_board(False) - else: - draw_board(True) - if session_state.ROOM.WINNER != _BLANK or 0 not in session_state.ROOM.BOARD: - ANOTHER_ROUND.button( - "Play Next round!", - on_click=another_round, - help="Clear board and swap first player", - ) - if session_state.ROOM.ROOM_ID == "local" or session_state.OWNER: - RESTART.button( - "Reset", - on_click=restart, - help="Clear the board as well as the scores", - ) - - # Infos - def draw_info() -> None: - # Text information - TITLE.subheader("**🤖 Do you wanna have a bad time?**") - PLAY_MODE_INFO.write("---\n\n**You are Black, AI is White.**") - GAME_INFO.markdown( - """ - --- - - ## Freestyle Gomoku game. 
- - - Freestyle Gomoku - - - no restrictions - - no regrets - - swap players after one round is over - - ##### Design by TeddyHuang-00Github repo - ##### Gomoku-GPT by snoop2headGithub repo - - """, - unsafe_allow_html=True, - ) - # History scores - SCORE_TAG.subheader("Scores") - SCORE_PLATE[0].metric("Gomoku-GPT", session_state.ROOM.HISTORY[0]) - SCORE_PLATE[1].metric("Black", session_state.ROOM.HISTORY[1]) - - # Additional information - if session_state.ROOM.WINNER != _BLANK: - st.balloons() - ROUND_INFO.write( - f"#### **{_PLAYER_COLOR[session_state.ROOM.WINNER]} wins!**\n**Click buttons on the left for more plays.**" - ) - - elif 0 not in session_state.ROOM.BOARD: - ROUND_INFO.write("#### **Tie**") - else: - ROUND_INFO.write( - f"#### **{_PLAYER_SYMBOL[session_state.ROOM.TURN]} {_PLAYER_COLOR[session_state.ROOM.TURN]}'s turn...**" - ) - - # The main game loop - game_control() - draw_info() - - -if __name__ == "__main__": - gomoku() diff --git a/spaces/sriramelango/Social_Classification_Public/data/mm_data/__init__.py b/spaces/sriramelango/Social_Classification_Public/data/mm_data/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/noising.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/noising.py deleted file mode 100644 index 2b1cc347203bfbdc9f1cba29e2e36427b7b5be57..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/noising.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -from fairseq.data import data_utils - - -class WordNoising(object): - """Generate a noisy version of a sentence, without changing words themselves.""" - - def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None): - self.dictionary = dictionary - self.bpe_end = None - if bpe_cont_marker: - self.bpe_end = np.array( - [ - not self.dictionary[i].endswith(bpe_cont_marker) - for i in range(len(self.dictionary)) - ] - ) - elif bpe_end_marker: - self.bpe_end = np.array( - [ - self.dictionary[i].endswith(bpe_end_marker) - for i in range(len(self.dictionary)) - ] - ) - - self.get_word_idx = ( - self._get_bpe_word_idx if self.bpe_end is not None else self._get_token_idx - ) - - def noising(self, x, lengths, noising_prob=0.0): - raise NotImplementedError() - - def _get_bpe_word_idx(self, x): - """ - Given a list of BPE tokens, for every index in the tokens list, - return the index of the word grouping that it belongs to. - For example, for input x corresponding to ["how", "are", "y@@", "ou"], - return [[0], [1], [2], [2]]. - """ - # x: (T x B) - bpe_end = self.bpe_end[x] - - if x.size(0) == 1 and x.size(1) == 1: - # Special case when we only have one word in x. If x = [[N]], - # bpe_end is a scalar (bool) instead of a 2-dim array of bools, - # which makes the sum operation below fail. - return np.array([[0]]) - - # do a reduce front sum to generate word ids - word_idx = bpe_end[::-1].cumsum(0)[::-1] - word_idx = word_idx.max(0)[None, :] - word_idx - return word_idx - - def _get_token_idx(self, x): - """ - This is to extend noising functions to be able to apply to non-bpe - tokens, e.g. word or characters. 
- """ - x = torch.t(x) - word_idx = np.array([range(len(x_i)) for x_i in x]) - return np.transpose(word_idx) - - -class WordDropout(WordNoising): - """Randomly drop input words. If not passing blank_idx (default is None), - then dropped words will be removed. Otherwise, it will be replaced by the - blank_idx.""" - - def __init__( - self, - dictionary, - default_dropout_prob=0.1, - bpe_cont_marker="@@", - bpe_end_marker=None, - ): - super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) - self.default_dropout_prob = default_dropout_prob - - def noising(self, x, lengths, dropout_prob=None, blank_idx=None): - if dropout_prob is None: - dropout_prob = self.default_dropout_prob - # x: (T x B), lengths: B - if dropout_prob == 0: - return x, lengths - - assert 0 < dropout_prob < 1 - - # be sure to drop entire words - word_idx = self.get_word_idx(x) - sentences = [] - modified_lengths = [] - for i in range(lengths.size(0)): - # Since dropout probabilities need to apply over non-pad tokens, - # it is not trivial to generate the keep mask without consider - # input lengths; otherwise, this could be done outside the loop - - # We want to drop whole words based on word_idx grouping - num_words = max(word_idx[:, i]) + 1 - - # ith example: [x0, x1, ..., eos, pad, ..., pad] - # We should only generate keep probs for non-EOS tokens. Thus if the - # input sentence ends in EOS, the last word idx is not included in - # the dropout mask generation and we append True to always keep EOS. - # Otherwise, just generate the dropout mask for all word idx - # positions. - has_eos = x[lengths[i] - 1, i] == self.dictionary.eos() - if has_eos: # has eos? - keep = np.random.rand(num_words - 1) >= dropout_prob - keep = np.append(keep, [True]) # keep EOS symbol - else: - keep = np.random.rand(num_words) >= dropout_prob - - words = x[: lengths[i], i].tolist() - - # TODO: speed up the following loop - # drop words from the input according to keep - new_s = [ - w if keep[word_idx[j, i]] else blank_idx for j, w in enumerate(words) - ] - new_s = [w for w in new_s if w is not None] - # we need to have at least one word in the sentence (more than the - # start / end sentence symbols) - if len(new_s) <= 1: - # insert at beginning in case the only token left is EOS - # EOS should be at end of list. - new_s.insert(0, words[np.random.randint(0, len(words))]) - assert len(new_s) >= 1 and ( - not has_eos # Either don't have EOS at end or last token is EOS - or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos()) - ), "New sentence is invalid." 
- sentences.append(new_s) - modified_lengths.append(len(new_s)) - # re-construct input - modified_lengths = torch.LongTensor(modified_lengths) - modified_x = torch.LongTensor( - modified_lengths.max(), modified_lengths.size(0) - ).fill_(self.dictionary.pad()) - for i in range(modified_lengths.size(0)): - modified_x[: modified_lengths[i], i].copy_(torch.LongTensor(sentences[i])) - - return modified_x, modified_lengths - - -class WordShuffle(WordNoising): - """Shuffle words by no more than k positions.""" - - def __init__( - self, - dictionary, - default_max_shuffle_distance=3, - bpe_cont_marker="@@", - bpe_end_marker=None, - ): - super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) - self.default_max_shuffle_distance = 3 - - def noising(self, x, lengths, max_shuffle_distance=None): - if max_shuffle_distance is None: - max_shuffle_distance = self.default_max_shuffle_distance - # x: (T x B), lengths: B - if max_shuffle_distance == 0: - return x, lengths - - # max_shuffle_distance < 1 will return the same sequence - assert max_shuffle_distance > 1 - - # define noise word scores - noise = np.random.uniform( - 0, - max_shuffle_distance, - size=(x.size(0), x.size(1)), - ) - noise[0] = -1 # do not move start sentence symbol - # be sure to shuffle entire words - word_idx = self.get_word_idx(x) - x2 = x.clone() - for i in range(lengths.size(0)): - length_no_eos = lengths[i] - if x[lengths[i] - 1, i] == self.dictionary.eos(): - length_no_eos = lengths[i] - 1 - # generate a random permutation - scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i] - # ensure no reordering inside a word - scores += 1e-6 * np.arange(length_no_eos.item()) - permutation = scores.argsort() - # shuffle words - x2[:length_no_eos, i].copy_( - x2[:length_no_eos, i][torch.from_numpy(permutation)] - ) - return x2, lengths - - -class UnsupervisedMTNoising(WordNoising): - """ - Implements the default configuration for noising in UnsupervisedMT - (github.com/facebookresearch/UnsupervisedMT) - """ - - def __init__( - self, - dictionary, - max_word_shuffle_distance, - word_dropout_prob, - word_blanking_prob, - bpe_cont_marker="@@", - bpe_end_marker=None, - ): - super().__init__(dictionary) - self.max_word_shuffle_distance = max_word_shuffle_distance - self.word_dropout_prob = word_dropout_prob - self.word_blanking_prob = word_blanking_prob - - self.word_dropout = WordDropout( - dictionary=dictionary, - bpe_cont_marker=bpe_cont_marker, - bpe_end_marker=bpe_end_marker, - ) - self.word_shuffle = WordShuffle( - dictionary=dictionary, - bpe_cont_marker=bpe_cont_marker, - bpe_end_marker=bpe_end_marker, - ) - - def noising(self, x, lengths): - # 1. Word Shuffle - noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising( - x=x, - lengths=lengths, - max_shuffle_distance=self.max_word_shuffle_distance, - ) - # 2. Word Dropout - noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( - x=noisy_src_tokens, - lengths=noisy_src_lengths, - dropout_prob=self.word_dropout_prob, - ) - # 3. 
Word Blanking - noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( - x=noisy_src_tokens, - lengths=noisy_src_lengths, - dropout_prob=self.word_blanking_prob, - blank_idx=self.dictionary.unk(), - ) - - return noisy_src_tokens - - -class NoisingDataset(torch.utils.data.Dataset): - def __init__( - self, - src_dataset, - src_dict, - seed, - noiser=None, - noising_class=UnsupervisedMTNoising, - **kwargs - ): - """ - Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the - samples based on the supplied noising configuration. - - Args: - src_dataset (~torch.utils.data.Dataset): dataset to wrap. - to build self.src_dataset -- - a LanguagePairDataset with src dataset as the source dataset and - None as the target dataset. Should NOT have padding so that - src_lengths are accurately calculated by language_pair_dataset - collate function. - We use language_pair_dataset here to encapsulate the tgt_dataset - so we can re-use the LanguagePairDataset collater to format the - batches in the structure that SequenceGenerator expects. - src_dict (~fairseq.data.Dictionary): source dictionary - seed (int): seed to use when generating random noise - noiser (WordNoising): a pre-initialized :class:`WordNoising` - instance. If this is None, a new instance will be created using - *noising_class* and *kwargs*. - noising_class (class, optional): class to use to initialize a - default :class:`WordNoising` instance. - kwargs (dict, optional): arguments to initialize the default - :class:`WordNoising` instance given by *noiser*. - """ - self.src_dataset = src_dataset - self.src_dict = src_dict - self.seed = seed - self.noiser = ( - noiser - if noiser is not None - else noising_class( - dictionary=src_dict, - **kwargs, - ) - ) - self.sizes = src_dataset.sizes - - - def __getitem__(self, index): - """ - Returns a single noisy sample. Multiple samples are fed to the collater - create a noising dataset batch. - """ - src_tokens = self.src_dataset[index] - src_lengths = torch.LongTensor([len(src_tokens)]) - src_tokens = src_tokens.unsqueeze(0) - - # Transpose src tokens to fit expected shape of x in noising function - # (batch size, sequence length) -> (sequence length, batch size) - src_tokens_t = torch.t(src_tokens) - - with data_utils.numpy_seed(self.seed + index): - noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths) - - # Transpose back to expected src_tokens format - # (sequence length, 1) -> (1, sequence length) - noisy_src_tokens = torch.t(noisy_src_tokens) - return noisy_src_tokens[0] - - def __len__(self): - """ - The length of the noising dataset is the length of src. - """ - return len(self.src_dataset) - - @property - def supports_prefetch(self): - return self.src_dataset.supports_prefetch - - def prefetch(self, indices): - if self.src_dataset.supports_prefetch: - self.src_dataset.prefetch(indices) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/resampling_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/resampling_dataset.py deleted file mode 100644 index 3d3b993164dc3962df48bacff26714328e843e80..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/resampling_dataset.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging - -import numpy as np -from fairseq.data import BaseWrapperDataset, plasma_utils - - -logger = logging.getLogger(__name__) - - -class ResamplingDataset(BaseWrapperDataset): - """Randomly samples from a given dataset at each epoch. - - Sampling is done with or without replacement, depending on the "replace" - parameter. - - Optionally, the epoch size can be rescaled. This is potentially desirable - to increase per-epoch coverage of the base dataset (since sampling with - replacement means that many items in the dataset will be left out). In the - case of sampling without replacement, size_ratio should be strictly less - than 1. - - Args: - dataset (~torch.utils.data.Dataset): dataset on which to sample. - weights (List[float]): list of probability weights - (default: None, which corresponds to uniform sampling). - replace (bool): sampling mode; True for "with replacement", or False - for "without replacement" (default: True) - size_ratio (float): the ratio to subsample to; must be positive - (default: 1.0). - batch_by_size (bool): whether or not to batch by sequence length - (default: True). - seed (int): RNG seed to use (default: 0). - epoch (int): starting epoch number (default: 1). - """ - - def __init__( - self, - dataset, - weights=None, - replace=True, - size_ratio=1.0, - batch_by_size=True, - seed=0, - epoch=1, - ): - super().__init__(dataset) - - if weights is None: - self.weights = None - - else: - assert len(weights) == len(dataset) - weights_arr = np.array(weights, dtype=np.float64) - weights_arr /= weights_arr.sum() - self.weights = plasma_utils.PlasmaArray(weights_arr) - - self.replace = replace - - assert size_ratio > 0.0 - if not self.replace: - assert size_ratio < 1.0 - self.size_ratio = float(size_ratio) - self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int) - - self.batch_by_size = batch_by_size - self.seed = seed - - self._cur_epoch = None - self._cur_indices = None - - self.set_epoch(epoch) - - def __getitem__(self, index): - return self.dataset[self._cur_indices.array[index]] - - def __len__(self): - return self.actual_size - - @property - def sizes(self): - if isinstance(self.dataset.sizes, list): - return [s[self._cur_indices.array] for s in self.dataset.sizes] - return self.dataset.sizes[self._cur_indices.array] - - def num_tokens(self, index): - return self.dataset.num_tokens(self._cur_indices.array[index]) - - def size(self, index): - return self.dataset.size(self._cur_indices.array[index]) - - def ordered_indices(self): - if self.batch_by_size: - order = [ - np.arange(len(self)), - self.sizes, - ] # No need to handle `self.shuffle == True` - return np.lexsort(order) - else: - return np.arange(len(self)) - - def prefetch(self, indices): - self.dataset.prefetch(self._cur_indices.array[indices]) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return False - - def set_epoch(self, epoch): - logger.debug("ResamplingDataset.set_epoch: {}".format(epoch)) - super().set_epoch(epoch) - - if epoch == self._cur_epoch: - return - - self._cur_epoch = epoch - - # Generate a weighted sample of indices as a function of the - # random seed and the current epoch. 
- - rng = np.random.RandomState( - [ - 42, # magic number - self.seed % (2 ** 32), # global seed - self._cur_epoch, # epoch index - ] - ) - self._cur_indices = plasma_utils.PlasmaArray( - rng.choice( - len(self.dataset), - self.actual_size, - replace=self.replace, - p=(None if self.weights is None else self.weights.array), - ) - ) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Easeus Data Recovery Wizard 8.5 Keygen LINK Torrent.md b/spaces/stomexserde/gpt4-ui/Examples/Easeus Data Recovery Wizard 8.5 Keygen LINK Torrent.md deleted file mode 100644 index c6b4408bf942407053b3ae7332aab976c017187d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Easeus Data Recovery Wizard 8.5 Keygen LINK Torrent.md +++ /dev/null @@ -1,34 +0,0 @@ - -

        How to Recover Lost Data with Easeus Data Recovery Wizard 8.5 Keygen Torrent

        -

        Have you ever lost your important files, photos, videos, or documents due to accidental deletion, formatting, virus attack, or partition loss? If yes, then you might be looking for a reliable and effective data recovery solution to get back your lost data. One of the most popular data recovery tools on the market is Easeus Data Recovery Wizard 8.5, which can recover unlimited types of files from various storage devices, such as PC, laptop, hard drive, USB drive, SD card, digital camera, etc. However, Easeus Data Recovery Wizard 8.5 is not a free software, and you need to purchase a license code to activate it and enjoy its full features. But what if you don't want to spend money on it? Is there a way to use Easeus Data Recovery Wizard 8.5 for free?

        -

        The answer is yes, but it comes with a risk. Some people may try to download Easeus Data Recovery Wizard 8.5 keygen torrent from the internet, which is a program that can generate a fake license code for Easeus Data Recovery Wizard 8.5 and bypass its security system. However, using Easeus Data Recovery Wizard 8.5 keygen torrent is not recommended for several reasons:

        -

        Easeus Data Recovery Wizard 8.5 Keygen Torrent


        Download Ziphttps://urlgoal.com/2uIaLL



        -
          -
        • It is illegal and unethical to use a pirated software without paying for it.
        • -
        • It may contain viruses, malware, or spyware that can harm your computer or steal your personal information.
        • -
        • It may not work properly or cause more data loss or damage to your storage device.
        • -
        • It may not be compatible with the latest version of Easeus Data Recovery Wizard 8.5 or the latest Windows operating system.
        • -
        • It may not provide any technical support or customer service in case of any problem.
        • -
        -

        Therefore, it is better to avoid using Easeus Data Recovery Wizard 8.5 keygen torrent and look for a safer and more reliable alternative. One of the best alternatives is to use the official trial version of Easeus Data Recovery Wizard 8.5, which allows you to scan and preview your lost files for free. You can also recover up to 2 GB of data for free with the trial version. If you need to recover more data, you can upgrade to the full version by purchasing a valid license code from the official website of Easeus.

        -

        By using the official trial version of Easeus Data Recovery Wizard 8.5, you can enjoy the following benefits:

        -
          -
        • You can download it from a trusted source without any risk of virus or malware infection.
        • -
        • You can use it legally and ethically without violating any copyright laws.
        • -
        • You can use it smoothly and effectively without any errors or glitches.
        • -
        • You can use it with the latest version of Easeus Data Recovery Wizard 8.5 and the latest Windows operating system.
        • -
        • You can get professional technical support and customer service from Easeus in case of any problem.
        • -
        -

        To use the official trial version of Easeus Data Recovery Wizard 8.5, you need to follow these simple steps:

        -
          -
        1. Download Easeus Data Recovery Wizard 8.5 from the official website of Easeus and install it on your computer.
        2. -
        3. Launch Easeus Data Recovery Wizard 8.5 and select the location where you lost your data.
        4. -
        5. Click "Scan" to start scanning your storage device for lost files.
        6. -
        7. After the scan is completed, preview and select the files you want to recover.
        8. -
        9. Click "Recover" to save your recovered files to another location.
        10. -
        -

        Note: Do not save your recovered files to the same location where you lost them, as it may overwrite your original data and make them unrecoverable.

        -

        In conclusion, Easeus Data Recovery Wizard 8.5 is a powerful and easy-to-use data recovery tool that can help you recover your lost data in various situations. However, using Easeus Data Recovery Wizard 8.5 keygen torrent is not a wise choice, as it may

        -

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Edraw Max 7 6 Crack Keygen Serial 17.md b/spaces/stomexserde/gpt4-ui/Examples/Edraw Max 7 6 Crack Keygen Serial 17.md deleted file mode 100644 index 597abde8ea8613c8c02b73704129ad9295297006..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Edraw Max 7 6 Crack Keygen Serial 17.md +++ /dev/null @@ -1,31 +0,0 @@ - -

        How to Download and Install Edraw Max 7 6 Crack Keygen Serial 17

        -

        Edraw Max is a popular diagramming software that allows you to create professional-looking graphics, charts, diagrams, and more. It has a user-friendly interface and a rich set of features that make it easy to design and edit various types of diagrams. However, if you want to use Edraw Max without any limitations, you need to purchase a license key or activate it with a crack.

        -

        Edraw Max 7 6 Crack Keygen Serial 17


        Download File » https://urlgoal.com/2uIaFd



        -

        In this article, we will show you how to download and install Edraw Max 7 6 Crack Keygen Serial 17, which is a cracked version of Edraw Max that can bypass the activation process and unlock all the features of the software. This way, you can use Edraw Max for free and enjoy its full potential.

        -

        Step 1: Download Edraw Max 7 6 Crack Keygen Serial 17

        -

        The first step is to download Edraw Max 7 6 Crack Keygen Serial 17 from a reliable source. You can find many websites that offer this cracked version of Edraw Max, but some of them may contain viruses or malware that can harm your computer. Therefore, you should be careful and choose a trusted site that has positive reviews and feedback from other users.

        -

        One of the sites that we recommend is [^1^], which is a Wix site that provides a direct link to download Edraw Max 7 6 Crack Keygen Serial 17. You can access this site by clicking on the reference number below or by typing the URL in your browser. Once you are on the site, you will see a button that says "Download". Click on it and wait for the download to start.

        -

        Step 2: Install Edraw Max 7 6 Crack Keygen Serial 17

        -

        After you have downloaded Edraw Max 7 6 Crack Keygen Serial 17, you need to install it on your computer. To do this, follow these steps:

        -

        -
          -
        • Locate the downloaded file on your computer. It should be a ZIP file named "EdrawMax76FullCrack.zip".
        • -
        • Extract the ZIP file using a program like WinRAR or 7-Zip. You will get a folder named "EdrawMax76FullCrack".
        • -
        • Open the folder and double-click on the file named "Setup.exe". This will launch the installation wizard of Edraw Max.
        • -
        • Follow the instructions on the screen and choose the destination folder where you want to install Edraw Max.
        • -
        • When the installation is complete, do not run Edraw Max yet. You need to apply the crack first.
        • -
        -

        Step 3: Apply the Crack

        -

        The final step is to apply the crack that will activate Edraw Max and unlock all its features. To do this, follow these steps:

        -
          -
        • Go back to the folder "EdrawMax76FullCrack" and open the subfolder named "Crack".
        • -
        • Copy the file named "EDRAW.EXE" and paste it in the installation folder of Edraw Max. This is usually located at "C:\Program Files\EdrawSoft\EdrawMax".
        • -
        • When prompted, choose to replace the existing file with the copied one.
        • -
        • Run Edraw Max from your desktop or start menu. You should see a message that says "Edraw Max has been activated successfully".
        • -
        • Congratulations! You have successfully installed Edraw Max 7 6 Crack Keygen Serial 17 and can use it for free.
        • -
        -

        Conclusion

        -

        Edraw Max is a powerful diagramming software that can help you create stunning graphics, charts, diagrams, and more. However, it is not free and requires a license key or activation code to use it without any limitations. If you want to use Edraw Max for free, you can download and install Edraw Max 7 6 Crack Keygen Serial 17, which is a cracked version of Edraw Max that can bypass the activation process and unlock all

        e93f5a0c3f
        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Free BEST Download Ek Anokhi Dulhan Saavi Movies 720p.md b/spaces/stomexserde/gpt4-ui/Examples/Free BEST Download Ek Anokhi Dulhan Saavi Movies 720p.md deleted file mode 100644 index fa94510160a76b8794c4d66bd36d8e01b459c485..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Free BEST Download Ek Anokhi Dulhan Saavi Movies 720p.md +++ /dev/null @@ -1,17 +0,0 @@ - -

        Free Download Ek Anokhi Dulhan Saavi Movies 720p

        -

        Ek Anokhi Dulhan Saavi is a Hindi drama movie that was released in 2017. The movie is directed by Rajeev Kumar and stars Jaspinder Cheema, Nirmal Rishi, Ashish Duggal, Manni Boparai, Mahabir Bhullar, Jeet Soni and Ambrish Saxena. The movie tells the story of Saavi, a young woman who is forced to marry a tree as part of a tradition in her village. She faces many challenges and struggles to find her identity and happiness in a patriarchal society.

        -

        Free Download Ek Anokhi Dulhan Saavi Movies 720p


        Download File ->->->-> https://urlgoal.com/2uI7sN



        -

        If you are interested in watching this movie, you can download it for free in 720p quality from this link: https://example.com/download/ek-anokhi-dulhan-saavi-720p. This link is safe and secure and does not contain any viruses or malware. You can enjoy this movie on your laptop, tablet or smartphone with a good internet connection.

        -

        Ek Anokhi Dulhan Saavi is a movie that explores the issues of gender discrimination, social norms and human rights. It is a movie that will make you think and feel for the characters and their situations. It is a movie that will inspire you to stand up for your rights and dignity. Download Ek Anokhi Dulhan Saavi today and watch this powerful and emotional drama.

        - -

        Ek Anokhi Dulhan Saavi is not just a movie, but a message to the society. It shows the harsh realities of some of the customs and traditions that are still prevalent in some parts of India. It shows how women are treated as objects and commodities and how they are denied their basic rights and freedoms. It shows how Saavi, the protagonist, fights against all odds and tries to break free from the shackles of oppression and injustice.

        -

        The movie also showcases the brilliant performances of the actors, especially Jaspinder Cheema, who plays the role of Saavi. She portrays the emotions and expressions of Saavi with utmost sincerity and conviction. She makes the audience empathize with her character and root for her. The other actors also do justice to their roles and bring out the essence of the story. The movie also has a good music score by Harpreet Singh, Siddharth Sharma, Shashwant Sachdev and R Sheen, which adds to the mood and atmosphere of the movie.

        -

        -

        Ek Anokhi Dulhan Saavi is a movie that deserves to be watched by everyone who believes in equality, justice and humanity. It is a movie that will touch your heart and soul and make you appreciate the value of life and freedom. It is a movie that will make you proud of being a woman or respect women more. Download Ek Anokhi Dulhan Saavi now and witness this amazing and inspiring movie.

        - -

        Ek Anokhi Dulhan Saavi is a movie that has received positive reviews from critics and audiences alike. The movie has been praised for its realistic and authentic portrayal of the social issues and the rural setting. The movie has also been appreciated for its bold and courageous theme and its strong message. The movie has been nominated for several awards and has won some of them, such as the Best Actress award for Jaspinder Cheema at the 2018 PTC Punjabi Film Awards.

        -

        Ek Anokhi Dulhan Saavi is a movie that is not only entertaining, but also educational and enlightening. It is a movie that raises awareness and creates dialogue about the problems that many women face in India and around the world. It is a movie that encourages people to question and challenge the norms and traditions that are harmful and oppressive to women. It is a movie that empowers women to stand up for themselves and their rights.

        -

        Ek Anokhi Dulhan Saavi is a movie that you should not miss. It is a movie that will make you laugh, cry, angry, hopeful and inspired. It is a movie that will make you a better person. Download Ek Anokhi Dulhan Saavi today and watch this masterpiece of cinema.

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/audiocraft/data/audio.py b/spaces/studiobrn/SplitTrack/audiocraft/data/audio.py deleted file mode 100644 index 1829d7db4ef832ad65598b471caa7d256a06d012..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/audiocraft/data/audio.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Audio IO methods are defined in this module (info, read, write), -We rely on av library for faster read when possible, otherwise on torchaudio. -""" - -from dataclasses import dataclass -from pathlib import Path -import logging -import typing as tp - -import numpy as np -import soundfile -import torch -from torch.nn import functional as F -import torchaudio as ta - -import av - -from .audio_utils import f32_pcm, i16_pcm, normalize_audio - - -_av_initialized = False - - -def _init_av(): - global _av_initialized - if _av_initialized: - return - logger = logging.getLogger('libav.mp3') - logger.setLevel(logging.ERROR) - _av_initialized = True - - -@dataclass(frozen=True) -class AudioFileInfo: - sample_rate: int - duration: float - channels: int - - -def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sample_rate = stream.codec_context.sample_rate - duration = float(stream.duration * stream.time_base) - channels = stream.channels - return AudioFileInfo(sample_rate, duration, channels) - - -def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - info = soundfile.info(filepath) - return AudioFileInfo(info.samplerate, info.duration, info.channels) - - -def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - # torchaudio no longer returns useful duration informations for some formats like mp3s. - filepath = Path(filepath) - if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info - # ffmpeg has some weird issue with flac. - return _soundfile_info(filepath) - else: - return _av_info(filepath) - - -def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]: - """FFMPEG-based audio file reading using PyAV bindings. - Soundfile cannot read mp3 and av_read is more efficient than torchaudio. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate - """ - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sr = stream.codec_context.sample_rate - num_frames = int(sr * duration) if duration >= 0 else -1 - frame_offset = int(sr * seek_time) - # we need a small negative offset otherwise we get some edge artifact - # from the mp3 decoder. 
- af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream) - frames = [] - length = 0 - for frame in af.decode(streams=stream.index): - current_offset = int(frame.rate * frame.pts * frame.time_base) - strip = max(0, frame_offset - current_offset) - buf = torch.from_numpy(frame.to_ndarray()) - if buf.shape[0] != stream.channels: - buf = buf.view(-1, stream.channels).t() - buf = buf[:, strip:] - frames.append(buf) - length += buf.shape[1] - if num_frames > 0 and length >= num_frames: - break - assert frames - # If the above assert fails, it is likely because we seeked past the end of file point, - # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp. - # This will need proper debugging, in due time. - wav = torch.cat(frames, dim=1) - assert wav.shape[0] == stream.channels - if num_frames > 0: - wav = wav[:, :num_frames] - return f32_pcm(wav), sr - - -def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0., - duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]: - """Read audio by picking the most appropriate backend tool based on the audio format. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - pad (bool): Pad output audio if not reaching expected duration. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate. - """ - fp = Path(filepath) - if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg - # There is some bug with ffmpeg and reading flac - info = _soundfile_info(filepath) - frames = -1 if duration <= 0 else int(duration * info.sample_rate) - frame_offset = int(seek_time * info.sample_rate) - wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32) - assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}" - wav = torch.from_numpy(wav).t().contiguous() - if len(wav.shape) == 1: - wav = torch.unsqueeze(wav, 0) - elif ( - fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats() - and duration <= 0 and seek_time == 0 - ): - # Torchaudio is faster if we load an entire file at once. - wav, sr = ta.load(fp) - else: - wav, sr = _av_read(filepath, seek_time, duration) - if pad and duration > 0: - expected_frames = int(duration * sr) - wav = F.pad(wav, (0, expected_frames - wav.shape[-1])) - return wav, sr - - -def audio_write(stem_name: tp.Union[str, Path], - wav: torch.Tensor, sample_rate: int, - format: str = 'wav', mp3_rate: int = 320, normalize: bool = True, - strategy: str = 'peak', peak_clip_headroom_db: float = 1, - rms_headroom_db: float = 18, loudness_headroom_db: float = 14, - log_clipping: bool = True, make_parent_dir: bool = True, - add_suffix: bool = True) -> Path: - """Convenience function for saving audio to disk. Returns the filename the audio was written to. - - Args: - stem_name (str or Path): Filename without extension which will be added automatically. - format (str): Either "wav" or "mp3". - mp3_rate (int): kbps when using mp3s. - normalize (bool): if `True` (default), normalizes according to the prescribed - strategy (see after). If `False`, the strategy is only used in case clipping - would happen. - strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak', - i.e. audio is normalized by its largest value. 
RMS normalizes by root-mean-square - with extra headroom to avoid clipping. 'clip' just clips. - peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy. - rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger - than the `peak_clip` one to avoid further clipping. - loudness_headroom_db (float): Target loudness for loudness normalization. - log_clipping (bool): If True, basic logging on stderr when clipping still - occurs despite strategy (only for 'rms'). - make_parent_dir (bool): Make parent directory if it doesn't exist. - Returns: - Path: Path of the saved audio. - """ - assert wav.dtype.is_floating_point, "wav is not floating point" - if wav.dim() == 1: - wav = wav[None] - elif wav.dim() > 2: - raise ValueError("Input wav should be at most 2 dimension.") - assert wav.isfinite().all() - wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db, - rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping, - sample_rate=sample_rate, stem_name=str(stem_name)) - kwargs: dict = {} - if format == 'mp3': - suffix = '.mp3' - kwargs.update({"compression": mp3_rate}) - elif format == 'wav': - wav = i16_pcm(wav) - suffix = '.wav' - kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16}) - else: - raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.") - if not add_suffix: - suffix = '' - path = Path(str(stem_name) + suffix) - if make_parent_dir: - path.parent.mkdir(exist_ok=True, parents=True) - try: - ta.save(path, wav, sample_rate, **kwargs) - except Exception: - if path.exists(): - # we do not want to leave half written files around. - path.unlink() - raise - return path diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_web_browser_engine_selenium.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_web_browser_engine_selenium.py deleted file mode 100644 index 278c35c91b2c174f1ff3d543b055a0ebff05ea9f..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_web_browser_engine_selenium.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation. 
-""" - -import pytest - -from metagpt.config import Config -from metagpt.tools import web_browser_engine_selenium - - -@pytest.mark.asyncio -@pytest.mark.parametrize( - "browser_type, use_proxy, url, urls", - [ - ("chrome", True, "https://fuzhi.ai", ("https://fuzhi.ai",)), - ("firefox", False, "https://fuzhi.ai", ("https://fuzhi.ai",)), - ("edge", False, "https://fuzhi.ai", ("https://fuzhi.ai",)), - ], - ids=["chrome-normal", "firefox-normal", "edge-normal"], -) -async def test_scrape_web_page(browser_type, use_proxy, url, urls, proxy, capfd): - conf = Config() - global_proxy = conf.global_proxy - try: - if use_proxy: - conf.global_proxy = proxy - browser = web_browser_engine_selenium.SeleniumWrapper(options=conf.runtime_options, browser_type=browser_type) - result = await browser.run(url) - result = result.inner_text - assert isinstance(result, str) - assert "Deepwisdom" in result - - if urls: - results = await browser.run(url, *urls) - assert isinstance(results, list) - assert len(results) == len(urls) + 1 - assert all(("Deepwisdom" in i.inner_text) for i in results) - if use_proxy: - assert "Proxy:" in capfd.readouterr().out - finally: - conf.global_proxy = global_proxy diff --git a/spaces/suchun/chatGPT_acdemic/config.py b/spaces/suchun/chatGPT_acdemic/config.py deleted file mode 100644 index 3c4370af8f61e4c9256f38cc33b8b5c5e66c21ce..0000000000000000000000000000000000000000 --- a/spaces/suchun/chatGPT_acdemic/config.py +++ /dev/null @@ -1,63 +0,0 @@ -# [step 1]>> 例如: API_KEY = "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" (此key无效) -import os -API_KEY = os.environ.get("key", "请输入您的KEY") # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2" - -# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改 -USE_PROXY = False -if USE_PROXY: - # 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改 - # 例如 "socks5h://localhost:11284" - # [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http - # [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上) - # [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上 - - # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284) - proxies = { - # [协议]:// [地址] :[端口] - "http": "socks5h://localhost:11284", - "https": "socks5h://localhost:11284", - } -else: - proxies = None - -# [step 3]>> 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次 -# 一言以蔽之:免费用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview -DEFAULT_WORKER_NUM = 3 - - -# [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改 -# 对话窗的高度 -CHATBOT_HEIGHT = 1115 - -# 代码高亮 -CODE_HIGHLIGHT = True - -# 窗口布局 -LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局) - -# 发送请求到OpenAI后,等待多久判定为超时 -TIMEOUT_SECONDS = 30 - -# 网页的端口, -1代表随机端口 -WEB_PORT = -1 - -# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 -MAX_RETRY = 2 - -# OpenAI模型选择是(gpt4现在只对申请成功的人开放) -if API_KEY.startswith("fk"): - LLM_MODEL = "api2d-gpt-3.5-turbo" # 可选 "chatglm" -else: - LLM_MODEL = "gpt-3.5-turbo" # 可选 "gpt-4" -print(f"using {LLM_MODEL}") -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"] - -# 本地LLM模型如ChatGLM的执行方式 CPU/GPU -LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" - -# 设置gradio的并行线程数(不需要修改) -CONCURRENT_COUNT = 100 - -# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个) -# [("username", "password"), ("username2", "password2"), ...] 
-AUTHENTICATION = [] diff --git a/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/attentions.py b/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input 
- h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
- scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Iveco Power Truck 012013 Torrent.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Iveco Power Truck 012013 Torrent.md deleted file mode 100644 index 08f6df2b99a177757d911eaadd59b936b85608ce..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Iveco Power Truck 012013 Torrent.md +++ /dev/null @@ -1,12 +0,0 @@ -

        Iveco Power Truck 012013 Torrent


DOWNLOAD https://cinurl.com/2uEYmU



        - -iveco power truck 012013 › logbook › iveco stralis repair, diagnostics and maintenance. -Diagnostics and maintenance as well as repair. -iveco stralis repair and iveco eurocargo repair. -iveco daily repair.iveco daily repair. -iveco daily repair. -The accumulation of experience in the operation and repair of iveco daily cars in our car service has allowed us to create the most complete and high-quality list of services for the maintenance and repair of iveco daily cars, as well as its modifications. Repair iveco daily. -Repair iveco daily. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Full Movie In Tamil Dubbed Download.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Full Movie In Tamil Dubbed Download.md deleted file mode 100644 index 08f0dddbc5c829c5d8c53871c2720b13e98cd2fe..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Full Movie In Tamil Dubbed Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

        aashiqui 2 full movie in tamil dubbed download


        DOWNLOAD ☆☆☆☆☆ https://urluss.com/2uCFlA



        - -(Tum Hi Ho Tamil Version) Aashiqui 2 | Aditya Roy Kapur, Shraddha Kapoor ... Aashiqui 3 - 2018 Full Hindi Dubbed Movie | New Hindi Romantic ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aastha In The Prison Of Spring 1997 Hindi Movie DvDRip XviD.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aastha In The Prison Of Spring 1997 Hindi Movie DvDRip XviD.md deleted file mode 100644 index 6d52d329a9872fe04438a97823c350f9e1cb014f..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aastha In The Prison Of Spring 1997 Hindi Movie DvDRip XviD.md +++ /dev/null @@ -1,6 +0,0 @@ -
        -

        cdvipcrack 4.3 crackwblivehifi dvdrip
        cantata (tuotekaniemaim-hu) 三位牧师Podcast MagnetRcok
        When it comes to home security, it's vital that you find the best security cameras for your home. These high-quality video surveillance systems are designed to be a great investment that you can depend on for years to come.
        Download Para Windows 7 Pro 64bit Iso
        Download Nissan Skyline R34 GT-R Cracked/Activated iSO
        Autodesk AutoCAD 2020 Crack Keygen Key Download Pc
        Download OFX Toolkit 5.0.5 Crack Full Version Free
        Bleach 2.0.1 Full Game Hd Game And Torrent
        Putlocker Movie Torrent
        ", sender, reciever, startTime, endTime); } //Method for reading data from the socket protected void processRead(Socket socket) { StringBuilder stringBuilder = new StringBuilder(); try { DataInputStream dataInputStream = new DataInputStream(socket.

        -

        5bd35b6a26 Free 49 Pros 4 Cons Men At Work 2 Full Movie Download 720p Subtitre In French 720p Online Watch Free Version 32bit.rar Exe File Jost de Rider "Hapbilen" Top Lyrics Torrent Download.rar V13.5 Crack Serial Number Vw State Farm 1.5.2 Code Generator Another Rar Junta Pro 5 Patch.rar Watch Online Free Full Movie For Windows Mac Os Free Full Version Trademarks Hd Download Utorrent 320 Iso Windows 7 Pro SoftLokalisation MPEG4 4Klippe Download Full Watch Online Zippyshare Com Download Free Full Version Driver.rar Hello My Name Is Free Trial Full Version Pdf 3.0.2 rar Watch Online Free Full Movie For Windows Mac Os Movie Download In Hindi 1080p Torrent Free Full Version For All PC 720p Full cracked 60 Free Download.rar Ebook PDF Dvd Non Aptai Error Code Otorrent Falstar Vol 7 Chk Full Version Free All Version Crack Download.rar Movie Online Downloader License Keygen Mac In Hindi Version Drivers 64 Free Download.rar What Is This All About Mac Free Full Version Windows 10 Dvd Alternative Xvid Full Download For Windows 7 32 Bit.rar Watch Online Free Full Movie For Windows Mac Os Free Full Movie Download In Hindi 1080p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent.rar Windows 8.1 Offline Install Pdf Tutorial How To Download Paid Full Version Mac How To Use 1000 Word Thesaurus X32 X64rar Mac Full Version Crack Download Iso - ThumbsUp Viral Videos Le Canvas Pro Activation Code Fire Mobile Download Utorrent 32 X32 Latest Crack With Keygen Mac Full Version Windows 10 Download.rar Cheap Coffee Break Free Download Full Version Iso Pdf 720p Watch Online Bc Google Docs 2017 Mac Version FREE Torrent.rar All I Need is Music Amiibos Free Download Full Version With Cracked Mod X32 Full.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Free Full Movie Download In Hindi 1080p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Free Full Movie Download In Hindi 1080p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Whispers of War Dunfell Free Full Download Pdf Watch Online FREE Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.rar Watch Online Free Full Movie For Windows Mac Os Full Episode Download 720p Torrent Free Full Version For All PC.

        -

        Aastha In The Prison Of Spring 1997 Hindi Movie DvDRip XviD


Download Zip https://urluss.com/2uCH1m



        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Activar Adobe Acrobat 11 Crack ((HOT)).md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Activar Adobe Acrobat 11 Crack ((HOT)).md deleted file mode 100644 index 8105964bfe23e46063f40846f40a5418bdee4e0f..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Activar Adobe Acrobat 11 Crack ((HOT)).md +++ /dev/null @@ -1,6 +0,0 @@ -

        activar adobe acrobat 11 crack


        DOWNLOAD ✪✪✪ https://urluss.com/2uCFtH



        - - d5da3c52bf
        -
        -
        -

        diff --git a/spaces/suvash/food-101-resnet50/app.py b/spaces/suvash/food-101-resnet50/app.py deleted file mode 100644 index 3651e326670cae37d70c68195862b40892c92dac..0000000000000000000000000000000000000000 --- a/spaces/suvash/food-101-resnet50/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import gradio -from fastai.vision.all import * - -MODELS_PATH = Path('./models') -EXAMPLES_PATH = Path('./examples') - -# Required function expected by fastai learn object -# it wasn't exported as a part of the pickle -# as it was defined externally to the learner object -# during the training time dataloaders setup -def label_func(filepath): - return filepath.parent.name - -LEARN = load_learner(MODELS_PATH/'food-101-resnet50.pkl') -LABELS = LEARN.dls.vocab - -def gradio_predict(img): - img = PILImage.create(img) - _pred, _pred_idx, probs = LEARN.predict(img) - labels_probs = {LABELS[i]: float(probs[i]) for i, _ in enumerate(LABELS)} - return labels_probs - -with open('gradio_article.md') as f: - article = f.read() - -interface_options = { - "title": "Food Image Classifier (Food-101|ResNet50|fast.ai)", - "description": "A food image classifier trained on the Food-101 dataset, using ResNet50 via fast.ai.(Dataset from : https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/)", - "article": article, - "examples" : [f'{EXAMPLES_PATH}/{f.name}' for f in EXAMPLES_PATH.iterdir()], - "layout": "horizontal", - "theme": "default", -} - -demo = gradio.Interface(fn=gradio_predict, - inputs=gradio.inputs.Image(shape=(512, 512)), - outputs=gradio.outputs.Label(num_top_classes=5), - **interface_options) - -launch_options = { - "enable_queue": True, - "share": False, -} - -demo.launch(**launch_options) diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/da_head.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/da_head.py deleted file mode 100644 index 5cd49fcfdc7c0a70f9485cc71843dcf3e0cb1774..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/da_head.py +++ /dev/null @@ -1,178 +0,0 @@ -import torch -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule, Scale -from torch import nn - -from annotator.uniformer.mmseg.core import add_prefix -from ..builder import HEADS -from ..utils import SelfAttentionBlock as _SelfAttentionBlock -from .decode_head import BaseDecodeHead - - -class PAM(_SelfAttentionBlock): - """Position Attention Module (PAM) - - Args: - in_channels (int): Input channels of key/query feature. - channels (int): Output channels of key/query transform. 
- """ - - def __init__(self, in_channels, channels): - super(PAM, self).__init__( - key_in_channels=in_channels, - query_in_channels=in_channels, - channels=channels, - out_channels=in_channels, - share_key_query=False, - query_downsample=None, - key_downsample=None, - key_query_num_convs=1, - key_query_norm=False, - value_out_num_convs=1, - value_out_norm=False, - matmul_norm=False, - with_out=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None) - - self.gamma = Scale(0) - - def forward(self, x): - """Forward function.""" - out = super(PAM, self).forward(x, x) - - out = self.gamma(out) + x - return out - - -class CAM(nn.Module): - """Channel Attention Module (CAM)""" - - def __init__(self): - super(CAM, self).__init__() - self.gamma = Scale(0) - - def forward(self, x): - """Forward function.""" - batch_size, channels, height, width = x.size() - proj_query = x.view(batch_size, channels, -1) - proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1) - energy = torch.bmm(proj_query, proj_key) - energy_new = torch.max( - energy, -1, keepdim=True)[0].expand_as(energy) - energy - attention = F.softmax(energy_new, dim=-1) - proj_value = x.view(batch_size, channels, -1) - - out = torch.bmm(attention, proj_value) - out = out.view(batch_size, channels, height, width) - - out = self.gamma(out) + x - return out - - -@HEADS.register_module() -class DAHead(BaseDecodeHead): - """Dual Attention Network for Scene Segmentation. - - This head is the implementation of `DANet - `_. - - Args: - pam_channels (int): The channels of Position Attention Module(PAM). - """ - - def __init__(self, pam_channels, **kwargs): - super(DAHead, self).__init__(**kwargs) - self.pam_channels = pam_channels - self.pam_in_conv = ConvModule( - self.in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.pam = PAM(self.channels, pam_channels) - self.pam_out_conv = ConvModule( - self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.pam_conv_seg = nn.Conv2d( - self.channels, self.num_classes, kernel_size=1) - - self.cam_in_conv = ConvModule( - self.in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.cam = CAM() - self.cam_out_conv = ConvModule( - self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.cam_conv_seg = nn.Conv2d( - self.channels, self.num_classes, kernel_size=1) - - def pam_cls_seg(self, feat): - """PAM feature classification.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.pam_conv_seg(feat) - return output - - def cam_cls_seg(self, feat): - """CAM feature classification.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.cam_conv_seg(feat) - return output - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - pam_feat = self.pam_in_conv(x) - pam_feat = self.pam(pam_feat) - pam_feat = self.pam_out_conv(pam_feat) - pam_out = self.pam_cls_seg(pam_feat) - - cam_feat = self.cam_in_conv(x) - cam_feat = self.cam(cam_feat) - cam_feat = self.cam_out_conv(cam_feat) - cam_out = self.cam_cls_seg(cam_feat) - - feat_sum = pam_feat + cam_feat - pam_cam_out = self.cls_seg(feat_sum) - - return pam_cam_out, pam_out, cam_out - - def forward_test(self, inputs, img_metas, test_cfg): - """Forward function for testing, only 
``pam_cam`` is used.""" - return self.forward(inputs)[0] - - def losses(self, seg_logit, seg_label): - """Compute ``pam_cam``, ``pam``, ``cam`` loss.""" - pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit - loss = dict() - loss.update( - add_prefix( - super(DAHead, self).losses(pam_cam_seg_logit, seg_label), - 'pam_cam')) - loss.update( - add_prefix( - super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam')) - loss.update( - add_prefix( - super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam')) - return loss diff --git a/spaces/tabeina/bingo1/src/app/page.tsx b/spaces/tabeina/bingo1/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> -
        - - - ) -} diff --git a/spaces/tabeina/bingo1/src/components/tone-selector.tsx b/spaces/tabeina/bingo1/src/components/tone-selector.tsx deleted file mode 100644 index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/src/components/tone-selector.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React from 'react' -import { BingConversationStyle } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' - -type ToneItem = { - type: BingConversationStyle, - name: string -} - -const ToneList: ToneItem[] = [ - { name: '有创造力', type: BingConversationStyle.Creative }, - { name: '更平衡', type: BingConversationStyle.Balanced }, - { name: '更精确', type: BingConversationStyle.Precise } -] - -interface ToneSelectorProps { - type: BingConversationStyle | '' - onChange?: (type: BingConversationStyle) => void -} - -export function ToneSelector({ type, onChange }: ToneSelectorProps) { - return ( -
        -
        - 选择对话样式 -
        -
        -
          - { - ToneList.map(tone => ( -
        • onChange?.(tone.type)}> - -
        • - )) - } -
        -
        -
        - ) -} diff --git a/spaces/terfces0erbo/CollegeProjectV2/All Activation Windows 7-8-10 V10.5 Office Activator [SadeemPC FREE Crack.md b/spaces/terfces0erbo/CollegeProjectV2/All Activation Windows 7-8-10 V10.5 Office Activator [SadeemPC FREE Crack.md deleted file mode 100644 index 2263500de40aba7db7052bf58917138720c86d77..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/All Activation Windows 7-8-10 V10.5 Office Activator [SadeemPC FREE Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

        All Activation Windows 7-8-10 v10.5 Office Activator [SadeemPC crack


        Download ::: https://bytlly.com/2uGleo



        - -February 5, 2020 ... Windows KMS Activator Ultimate 2019 Crack is the most Simple ... All Activation Windows 7-8-10 v19.6.2018 (Windows & Office Activator). 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/teticio/audio-diffusion/README.md b/spaces/teticio/audio-diffusion/README.md deleted file mode 100644 index 9763d53cadc8547a633a978be2d4f9219e2479be..0000000000000000000000000000000000000000 --- a/spaces/teticio/audio-diffusion/README.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: Audio Diffusion -emoji: 🎵 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: gpl-3.0 ---- -# audio-diffusion [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/gradio_app.ipynb) - -## Apply diffusion models to synthesize music instead of images using the new Hugging Face [diffusers](https://github.com/huggingface/diffusers) package - ---- -#### Sample automatically generated loop - -https://user-images.githubusercontent.com/44233095/204103172-27f25d63-5e77-40ca-91ab-d04a45d4726f.mp4 - -Go to https://soundcloud.com/teticio2/sets/audio-diffusion-loops for more examples. - ---- -#### Updates - -**25/12/2022**. Now it is possible to train models conditional on an encoding (of text or audio, for example). See the section on Conditional Audio Generation below. - -**5/12/2022**. 🤗 Exciting news! `AudioDiffusionPipeline` has been migrated to the Hugging Face `diffusers` package so that it is even easier for others to use and contribute. - -**2/12/2022**. Added Mel to pipeline and updated the pretrained models to save Mel config (they are now no longer compatible with previous versions of this repo). It is relatively straightforward to migrate previously trained models to the new format (see https://huggingface.co/teticio/audio-diffusion-256). - -**7/11/2022**. Added pre-trained latent audio diffusion models [teticio/latent-audio-diffusion-256](https://huggingface.co/teticio/latent-audio-diffusion-256) and [teticio/latent-audio-diffusion-ddim-256](https://huggingface.co/teticio/latent-audio-diffusion-ddim-256). You can use the pre-trained VAE to train your own latent diffusion models on a different set of audio files. - -**22/10/2022**. Added DDIM encoder and ability to interpolate between audios in latent "noise" space. Mel spectrograms no longer have to be square (thanks to Tristan for this one), so you can set the vertical (frequency) and horizontal (time) resolutions independently. - -**15/10/2022**. Added latent audio diffusion (see below). Also added the possibility to train a DDIM ([De-noising Diffusion Implicit Models](https://arxiv.org/pdf/2010.02502.pdf)). These have the benefit that samples can be generated with much fewer steps (~50) than used in training. - -**4/10/2022**. It is now possible to mask parts of the input audio during generation which means you can stitch several samples together (think "out-painting"). - -**27/9/2022**. You can now generate an audio based on a previous one. You can use this to generate variations of the same audio or even to "remix" a track (via a sort of "style transfer"). You can find examples of how to do this in the [`test_model.ipynb`](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/test_model.ipynb) notebook. 
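For orientation, the snippet below is a minimal sketch of loading one of the pre-trained models and generating a sample with the Hugging Face `diffusers` package, where `AudioDiffusionPipeline` now lives. The output attribute names (`images`, `audios`) and the `Mel` helper method reflect the `diffusers` port at the time of writing and may differ between versions, so check them against your installed release.

```python
# Minimal sketch: generate one audio sample from a pre-trained model.
# Output attribute names may vary between diffusers versions.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device)

output = pipe()                           # one full denoising run
image = output.images[0]                  # mel spectrogram as a PIL image
audio = output.audios[0]                  # reconstructed waveform (numpy array)
sample_rate = pipe.mel.get_sample_rate()  # sample rate used by the Mel helper
```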
- ---- - -![mel spectrogram](https://user-images.githubusercontent.com/44233095/205305826-8b39c917-26c5-49b4-887c-776f5d69e970.png) - ---- - -## DDPM ([De-noising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239)) - -Audio can be represented as images by transforming to a [mel spectrogram](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum), such as the one shown above. The class `Mel` in `mel.py` can convert a slice of audio into a mel spectrogram of `x_res` x `y_res` and vice versa. The higher the resolution, the less audio information will be lost. You can see how this works in the [`test_mel.ipynb`](https://github.com/teticio/audio-diffusion/blob/main/notebooks/test_mel.ipynb) notebook. - -A DDPM is trained on a set of mel spectrograms that have been generated from a directory of audio files. It is then used to synthesize similar mel spectrograms, which are then converted back into audio. - -You can play around with some pre-trained models on [Google Colab](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/test_model.ipynb) or [Hugging Face spaces](https://huggingface.co/spaces/teticio/audio-diffusion). Check out some automatically generated loops [here](https://soundcloud.com/teticio2/sets/audio-diffusion-loops). - -| Model | Dataset | Description | -|-------|---------|-------------| -| [teticio/audio-diffusion-256](https://huggingface.co/teticio/audio-diffusion-256) | [teticio/audio-diffusion-256](https://huggingface.co/datasets/teticio/audio-diffusion-256) | My "liked" Spotify playlist | -| [teticio/audio-diffusion-breaks-256](https://huggingface.co/teticio/audio-diffusion-breaks-256) | [teticio/audio-diffusion-breaks-256](https://huggingface.co/datasets/teticio/audio-diffusion-breaks-256) | Samples that have been used in music, sourced from [WhoSampled](https://whosampled.com) and [YouTube](https://youtube.com) | -| [teticio/audio-diffusion-instrumental-hiphop-256](https://huggingface.co/teticio/audio-diffusion-instrumental-hiphop-256) | [teticio/audio-diffusion-instrumental-hiphop-256](https://huggingface.co/datasets/teticio/audio-diffusion-instrumental-hiphop-256) | Instrumental Hip Hop music | -| [teticio/audio-diffusion-ddim-256](https://huggingface.co/teticio/audio-diffusion-ddim-256) | [teticio/audio-diffusion-256](https://huggingface.co/datasets/teticio/audio-diffusion-256) | De-noising Diffusion Implicit Model | -| [teticio/latent-audio-diffusion-256](https://huggingface.co/teticio/latent-audio-diffusion-256) | [teticio/audio-diffusion-256](https://huggingface.co/datasets/teticio/audio-diffusion-256) | Latent Audio Diffusion model | -| [teticio/latent-audio-diffusion-ddim-256](https://huggingface.co/teticio/latent-audio-diffusion-ddim-256) | [teticio/audio-diffusion-256](https://huggingface.co/datasets/teticio/audio-diffusion-256) | Latent Audio Diffusion Implicit Model | -| [teticio/conditional-latent-audio-diffusion-512](https://huggingface.co/teticio/latent-audio-diffusion-512) | [teticio/audio-diffusion-512](https://huggingface.co/datasets/teticio/audio-diffusion-512) | Conditional Latent Audio Diffusion Model | - ---- - -## Generate Mel spectrogram dataset from directory of audio files - -#### Install from GitHub (includes training scripts) - -```bash -git clone https://github.com/teticio/audio-diffusion.git -cd audio-diffusion -pip install . 
-``` - -#### Install from PyPI - -```bash -pip install audiodiffusion -``` - -#### Training can be run with Mel spectrograms of resolution 64x64 on a single commercial grade GPU (e.g. RTX 2080 Ti). The `hop_length` should be set to 1024 for better results - -```bash -python scripts/audio_to_images.py \ ---resolution 64,64 \ ---hop_length 1024 \ ---input_dir path-to-audio-files \ ---output_dir path-to-output-data -``` - -#### Generate dataset of 256x256 Mel spectrograms and push to hub (you will need to be authenticated with `huggingface-cli login`) - -```bash -python scripts/audio_to_images.py \ ---resolution 256 \ ---input_dir path-to-audio-files \ ---output_dir data/audio-diffusion-256 \ ---push_to_hub teticio/audio-diffusion-256 -``` - -Note that the default `sample_rate` is 22050 and audios will be resampled if they are at a different rate. If you change this value, you may find that the results in the `test_mel.ipynb` notebook are not good (for example, if `sample_rate` is 48000) and that it is necessary to adjust `n_fft` (for example, to 2000 instead of the default value of 2048; alternatively, you can resample to a `sample_rate` of 44100). Make sure you use the same parameters for training and inference. You should also bear in mind that not all resolutions work with the neural network architecture as currently configured - you should be safe if you stick to powers of 2. - -## Train model - -#### Run training on local machine - -```bash -accelerate launch --config_file config/accelerate_local.yaml \ -scripts/train_unet.py \ ---dataset_name data/audio-diffusion-64 \ ---hop_length 1024 \ ---output_dir models/ddpm-ema-audio-64 \ ---train_batch_size 16 \ ---num_epochs 100 \ ---gradient_accumulation_steps 1 \ ---learning_rate 1e-4 \ ---lr_warmup_steps 500 \ ---mixed_precision no -``` - -#### Run training on local machine with `batch_size` of 2 and `gradient_accumulation_steps` 8 to compensate, so that 256x256 resolution model fits on commercial grade GPU and push to hub - -```bash -accelerate launch --config_file config/accelerate_local.yaml \ -scripts/train_unet.py \ ---dataset_name teticio/audio-diffusion-256 \ ---output_dir models/audio-diffusion-256 \ ---num_epochs 100 \ ---train_batch_size 2 \ ---eval_batch_size 2 \ ---gradient_accumulation_steps 8 \ ---learning_rate 1e-4 \ ---lr_warmup_steps 500 \ ---mixed_precision no \ ---push_to_hub True \ ---hub_model_id audio-diffusion-256 \ ---hub_token $(cat $HOME/.huggingface/token) -``` - -#### Run training on SageMaker - -```bash -accelerate launch --config_file config/accelerate_sagemaker.yaml \ -scripts/train_unet.py \ ---dataset_name teticio/audio-diffusion-256 \ ---output_dir models/ddpm-ema-audio-256 \ ---train_batch_size 16 \ ---num_epochs 100 \ ---gradient_accumulation_steps 1 \ ---learning_rate 1e-4 \ ---lr_warmup_steps 500 \ ---mixed_precision no -``` - -## DDIM ([De-noising Diffusion Implicit Models](https://arxiv.org/pdf/2010.02502.pdf)) - -#### A DDIM can be trained by adding the parameter - -```bash ---scheduler ddim -``` - -Inference can the be run with far fewer steps than the number used for training (e.g., ~50), allowing for much faster generation. Without retraining, the parameter `eta` can be used to replicate a DDPM if it is set to 1 or a DDIM if it is set to 0, with all values in between being valid. When `eta` is 0 (the default value), the de-noising procedure is deterministic, which means that it can be run in reverse as a kind of encoder that recovers the original noise used in generation. 
A function `encode` has been added to `AudioDiffusionPipeline` for this purpose. It is then possible to interpolate between audios in the latent "noise" space using the function `slerp` (Spherical Linear intERPolation). - -## Latent Audio Diffusion - -Rather than de-noising images directly, it is interesting to work in the "latent space" after first encoding images using an autoencoder. This has a number of advantages. Firstly, the information in the images is compressed into a latent space of a much lower dimension, so it is much faster to train de-noising diffusion models and run inference with them. Secondly, similar images tend to be clustered together and interpolating between two images in latent space can produce meaningful combinations. - -At the time of writing, the Hugging Face `diffusers` library is geared towards inference and lacking in training functionality (rather like its cousin `transformers` in the early days of development). In order to train a VAE (Variational AutoEncoder), I use the [stable-diffusion](https://github.com/CompVis/stable-diffusion) repo from CompVis and convert the checkpoints to `diffusers` format. Note that it uses a perceptual loss function for images; it would be nice to try a perceptual *audio* loss function. - -#### Train latent diffusion model using pre-trained VAE - -```bash -accelerate launch ... -... ---vae teticio/latent-audio-diffusion-256 -``` - -#### Install dependencies to train with Stable Diffusion - -```bash -pip install omegaconf pytorch_lightning==1.7.7 torchvision einops -pip install -e git+https://github.com/CompVis/stable-diffusion.git@main#egg=latent-diffusion -pip install -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers -``` - -#### Train an autoencoder - -```bash -python scripts/train_vae.py \ ---dataset_name teticio/audio-diffusion-256 \ ---batch_size 2 \ ---gradient_accumulation_steps 12 -``` - -#### Train latent diffusion model - -```bash -accelerate launch ... -... ---vae models/autoencoder-kl -``` - -## Conditional Audio Generation - -We can generate audio conditional on a text prompt - or indeed anything which can be encoded into a bunch of numbers - much like DALL-E2, Midjourney and Stable Diffusion. It is generally harder to find good quality datasets of audios together with descriptions, although the people behind the dataset used to train Stable Diffusion are making some very interesting progress [here](https://github.com/LAION-AI/audio-dataset). I have chosen to encode the audio directly instead based on "how it sounds", using a [model which I trained on hundreds of thousands of Spotify playlists](https://github.com/teticio/Deej-AI). To encode an audio into a 100 dimensional vector - -```python -from audiodiffusion.audio_encoder import AudioEncoder - -audio_encoder = AudioEncoder.from_pretrained("teticio/audio-encoder") -audio_encoder.encode(['/home/teticio/Music/liked/Agua Re - Holy Dance - Large Sound Mix.mp3']) -``` - -Once you have prepared a dataset, you can encode the audio files with this script - -```bash -python scripts/encode_audio \ ---dataset_name teticio/audio-diffusion-256 \ ---out_file data/encodings.p -``` - -Then you can train a model with - -```bash -accelerate launch ... -... ---encodings data/encodings.p -``` - -When generating audios, you will need to pass an `encodings` Tensor. 
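As a rough sketch of how these pieces could fit together at generation time, the snippet below loads the conditional model listed in the table above and passes an encoding of a reference track. The `encoding` keyword argument, the conversion of the encoder output to a `torch.Tensor`, and the file path are assumptions to verify against the current pipeline signature.

```python
# Sketch only: conditional generation from an audio encoding.
# The `encoding` keyword and expected tensor shape are assumptions; check the
# pipeline's __call__ signature in your installed version.
import torch
from diffusers import DiffusionPipeline
from audiodiffusion.audio_encoder import AudioEncoder

# repo id as listed in the model table above
pipe = DiffusionPipeline.from_pretrained("teticio/conditional-latent-audio-diffusion-512")
audio_encoder = AudioEncoder.from_pretrained("teticio/audio-encoder")

# 100-dimensional encoding of a reference track (path is illustrative)
encoding = audio_encoder.encode(["reference_track.mp3"])
encoding = torch.as_tensor(encoding)

output = pipe(encoding=encoding)
audio = output.audios[0]
```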
See the [`conditional_generation.ipynb`](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/conditional_generation.ipynb) notebook for an example that uses encodings of Spotify track previews to influence the generation. diff --git a/spaces/teticio/inBERTolate/app.py b/spaces/teticio/inBERTolate/app.py deleted file mode 100644 index 90e1960f8294b9e0208b7ef0baacbee0f4a042be..0000000000000000000000000000000000000000 --- a/spaces/teticio/inBERTolate/app.py +++ /dev/null @@ -1,205 +0,0 @@ -# force update - -import argparse - -import nltk -import torch -import numpy as np -import gradio as gr -from nltk import sent_tokenize - -from transformers import ( - RobertaTokenizer, - RobertaForMaskedLM, - LogitsProcessorList, - TopKLogitsWarper, - TemperatureLogitsWarper, - TypicalLogitsWarper, -) - -nltk.download('punkt') - -device = "cuda" if torch.cuda.is_available() else "cpu" -pretrained = "roberta-large" if device == "cuda" else "roberta-base" -tokenizer = RobertaTokenizer.from_pretrained(pretrained) -model = RobertaForMaskedLM.from_pretrained(pretrained) -model = model.to(device) - -max_len = 20 -top_k = 100 -temperature = 1 -typical_p = 0 -burnin = 250 -max_iter = 500 - - -# adapted from https://github.com/nyu-dl/bert-gen -def generate_step(out: object, - gen_idx: int, - top_k: int = top_k, - temperature: float = temperature, - typical_p: float = typical_p, - sample: bool = False) -> list: - """ Generate a word from from out[gen_idx] - - args: - - out (torch.Tensor): tensor of logits of size batch_size x seq_len x vocab_size - - gen_idx (int): location for which to generate - - top_k (int): if >0, only sample from the top k most probable words - - temperature (float): sampling temperature - - typical_p (float): if >0 use typical sampling - - sample (bool): if True, sample from full distribution. 
- - returns: - - list: batch_size tokens - """ - logits = out.logits[:, gen_idx] - warpers = LogitsProcessorList() - if temperature: - warpers.append(TemperatureLogitsWarper(temperature)) - if top_k > 0: - warpers.append(TopKLogitsWarper(top_k)) - if typical_p > 0: - if typical_p >= 1: - typical_p = 0.999 - warpers.append(TypicalLogitsWarper(typical_p)) - logits = warpers(None, logits) - - if sample: - probs = torch.nn.functional.softmax(logits, dim=-1) - next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) - else: - next_tokens = torch.argmax(logits, dim=-1) - - return next_tokens.tolist() - - -# adapted from https://github.com/nyu-dl/bert-gen -def parallel_sequential_generation(seed_text: str, - seed_end_text: str, - max_len: int = max_len, - top_k: int = top_k, - temperature: float = temperature, - typical_p: float = typical_p, - max_iter: int = max_iter, - burnin: int = burnin) -> str: - """ Generate text consistent with preceding and following text - - Args: - - seed_text (str): preceding text - - seed_end_text (str): following text - - top_k (int): if >0, only sample from the top k most probable words - - temperature (float): sampling temperature - - typical_p (float): if >0 use typical sampling - - max_iter (int): number of iterations in MCMC - - burnin: during burn-in period, sample from full distribution; afterwards take argmax - - Returns: - - string: generated text to insert between seed_text and seed_end_text - """ - inp = tokenizer(seed_text + tokenizer.mask_token * max_len + seed_end_text, - return_tensors='pt') - masked_tokens = np.where( - inp['input_ids'][0].numpy() == tokenizer.mask_token_id)[0] - seed_len = masked_tokens[0] - inp = inp.to(device) - - for ii in range(max_iter): - kk = np.random.randint(0, max_len) - idxs = generate_step(model(**inp), - gen_idx=seed_len + kk, - top_k=top_k if (ii >= burnin) else 0, - temperature=temperature, - typical_p=typical_p, - sample=(ii < burnin)) - inp['input_ids'][0][seed_len + kk] = idxs[0] - - tokens = inp['input_ids'].cpu().numpy()[0][masked_tokens] - tokens = tokens[(np.where((tokens != tokenizer.eos_token_id) - & (tokens != tokenizer.bos_token_id)))] - return tokenizer.decode(tokens) - - -def inbertolate(doc: str, - max_len: int = max_len, - top_k: int = top_k, - temperature: float = temperature, - typical_p: float = typical_p, - max_iter: int = max_iter, - burnin: int = burnin) -> str: - """ Pad out document generating every other sentence - - Args: - - doc (str): document text - - max_len (int): number of tokens to insert between sentences - - top_k (int): if >0, only sample from the top k most probable words - - temperature (float): sampling temperature - - typical_p (float): if >0 use typical sampling - - max_iter (int): number of iterations in MCMC - - burnin: during burn-in period, sample from full distribution; afterwards take argmax - - Returns: - - string: generated text to insert between seed_text and seed_end_text - """ - new_doc = '' - paras = doc.split('\n') - - for para in paras: - para = sent_tokenize(para) - if para == '': - new_doc += '\n' - continue - para += [''] - - for sentence in range(len(para) - 1): - new_doc += para[sentence] + ' ' - new_doc += parallel_sequential_generation( - para[sentence], - para[sentence + 1], - max_len=max_len, - top_k=top_k, - temperature=float(temperature), - typical_p=typical_p, - burnin=burnin, - max_iter=max_iter) + ' ' - - new_doc += '\n' - return new_doc - -demo = gr.Interface( - fn=inbertolate, - title="inBERTolate", - description=f"Hit your word count by 
using BERT ({pretrained}) to pad out your essays!", - inputs=[ - gr.Textbox(label="Text", lines=10), - gr.Slider(label="Maximum length to insert between sentences", - minimum=1, - maximum=40, - step=1, - value=max_len), - gr.Slider(label="Top k", minimum=0, maximum=200, value=top_k), - gr.Slider(label="Temperature", - minimum=0, - maximum=2, - value=temperature), - gr.Slider(label="Typical p", - minimum=0, - maximum=1, - value=typical_p), - gr.Slider(label="Maximum iterations", - minimum=0, - maximum=1000, - value=max_iter), - gr.Slider(label="Burn-in", - minimum=0, - maximum=500, - value=burnin), - ], - outputs=gr.Textbox(label="Expanded text", lines=30)) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--port', type=int) - parser.add_argument('--server', type=int) - args = parser.parse_args() - demo.launch(server_name=args.server or '0.0.0.0', server_port=args.port) diff --git a/spaces/thliang01/Dogs-V-Cats-Classifier/app.py b/spaces/thliang01/Dogs-V-Cats-Classifier/app.py deleted file mode 100644 index b65eb52b1f52bcae549fe8ac6ff5bf7769b552ea..0000000000000000000000000000000000000000 --- a/spaces/thliang01/Dogs-V-Cats-Classifier/app.py +++ /dev/null @@ -1,34 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -def is_cat(x): - return x[0].isupper() - -learn = load_learner('model.pkl') - -categories = ('Dog', 'Cat') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - -image = gr.inputs.Image(shape=(192, 192)) -label = gr.outputs.Label() -examples = ['Dog.jpg', 'cat.jpg', 'dunno.jpg'] -title = "Dogs V Cats Classifier" -description = "A classifier trained on the Oxford Pets dataset with fastai. Created as a demo for Gradio and HuggingFace Spaces." -interpretation='default' -enable_queue=True - -intf = gr.Interface( - fn=classify_image, - inputs=image, - outputs=label, - examples=examples, - title=title, - description=description, - interpretation=interpretation, - enable_queue=enable_queue -) - -intf.launch(inline=False) \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/Enemy Territory Quake Wars License Code.md b/spaces/tialenAdioni/chat-gpt-api/Enemy Territory Quake Wars License Code.md deleted file mode 100644 index f04fcaee91017c4c02853dde98725a7e0128004f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/Enemy Territory Quake Wars License Code.md +++ /dev/null @@ -1,76 +0,0 @@ -## Enemy Territory Quake Wars License Code - - - - - - - - - -**LINK ……… [https://ekporriola.blogspot.com/?c=2txKmc](https://ekporriola.blogspot.com/?c=2txKmc)** - - - - - - - - - - - - - -# How to Get a License Code for Enemy Territory: Quake Wars - - - -Enemy Territory: Quake Wars is a first-person shooter game that pits two factions, the Global Defense Force and the Strogg, against each other in a futuristic war. The game features both single-player and multiplayer modes, as well as a variety of weapons, vehicles and deployables. - - - -If you want to play Enemy Territory: Quake Wars, you need a license code to activate the game. A license code is a unique alphanumeric string that verifies your ownership of the game. Without a license code, you cannot install or run the game. - - - -There are different ways to get a license code for Enemy Territory: Quake Wars, depending on how you acquired the game. 
Here are some of them: - - - -- If you bought a physical copy of the game on DVD, you can find the license code inside the game case or on the back of the manual. You need to enter this code during the installation process or when prompted by the game launcher. - -- If you bought a digital copy of the game from an online retailer, such as Steam or Amazon, you can find the license code in your account page or in your email confirmation. You need to enter this code when activating the game on your computer or when prompted by the game launcher. - -- If you downloaded a pirated copy of the game from an illegal source, such as a torrent site or a file-sharing network, you may not have a valid license code at all. In this case, you are violating the terms of service and the intellectual property rights of the game developers and publishers. You may also expose your computer to viruses, malware and other security risks. We do not recommend or condone this method of obtaining the game. - - - -If you have lost or forgotten your license code, you may be able to retrieve it by contacting the customer support of the game developer (Splash Damage) or publisher (Activision). You may need to provide proof of purchase, such as a receipt, an invoice or a registration email. Alternatively, you may need to buy a new copy of the game with a new license code. - - - -We hope this article has helped you understand how to get a license code for Enemy Territory: Quake Wars. Enjoy playing this exciting and action-packed game! - - - -Enemy Territory: Quake Wars is a game that requires both skill and strategy to succeed. You can choose to play as either the human Global Defense Force or the alien Strogg, each with their own strengths and weaknesses. You can also customize your character with different classes, weapons and abilities. - - - -The game features 12 maps, each with a different setting and objective. Some maps require you to capture or defend a territory, while others require you to escort or destroy a vehicle. You can also play in different modes, such as campaign, stopwatch or objective. - - - -Enemy Territory: Quake Wars is a game that supports online multiplayer for up to 32 players. You can join a server and play with or against other people from around the world. You can also create your own server and invite your friends to join. You can communicate with your teammates using voice chat or text chat. - - - -Enemy Territory: Quake Wars is a game that offers a lot of fun and challenge for fans of first-person shooters. If you want to learn more about the game, you can visit the official website or the wiki page. You can also watch some gameplay videos or read some reviews online. - - 1b8d091108 - - - - - diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/(PDF) SOLUCIONARIO KENNETH WARK TERMODINMICA SEXTA EDICIN DESARROLLADO INACAP SEDE COPIAP PRIMAVERA 2011[1].md b/spaces/tialenAdioni/chat-gpt-api/logs/(PDF) SOLUCIONARIO KENNETH WARK TERMODINMICA SEXTA EDICIN DESARROLLADO INACAP SEDE COPIAP PRIMAVERA 2011[1].md deleted file mode 100644 index f8b2b2488a7e7491b057e87dcb332afa532e64bd..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/(PDF) SOLUCIONARIO KENNETH WARK TERMODINMICA SEXTA EDICIN DESARROLLADO INACAP SEDE COPIAP PRIMAVERA 2011[1].md +++ /dev/null @@ -1,74 +0,0 @@ -
        -

Where can you find the solutions manual for Wark's Thermodynamics, 6th edition, for free?

        -

Thermodynamics is a branch of physics that studies the relationships between heat, work, and energy. It is a fundamental subject for engineering students, because it allows them to understand and analyze the thermal processes that occur in machines and energy systems.

        -

        Solucionario Termodinamica Wark 6 Edicion Gratis


        Download Zip - https://urlcod.com/2uK8y5



        -

One of the most widely used textbooks for learning thermodynamics is the one by Kenneth Wark Jr. and Donald E. Richards, whose sixth edition offers a clear and rigorous presentation of the concepts and tools of the discipline, along with a wide variety of solved and proposed exercises for practicing and reinforcing what has been learned.

        -

However, many students run into the problem of not having access to the book's solutions manual, which would let them check their answers and correct their mistakes. Where can the solutions manual for Wark's Thermodynamics, 6th edition, be found for free?

        -

There are some websites that offer the solutions manual as a PDF to download or open online, for example:

        - -

These sites claim that the manual contains all the solutions to the exercises in the official book, with detailed, step-by-step explanations. Keep in mind, however, that these sites are not official and are not authorized by the publisher or the book's authors, so they may contain errors or inaccuracies. They may also violate copyright or exist for commercial or advertising purposes.

        -


        -

Students are therefore advised to use the solutions manual with caution and responsibility, and to consult it only as a supplementary aid rather than as a substitute for their own study and reasoning. They are also encouraged to buy the original book and the official solutions manual if they can, since this supports the authors' work and the quality of teaching.

        - -

Beyond helping students solve the exercises in Wark's book, studying thermodynamics has many other benefits, both academic and personal. Thermodynamics is a science that develops critical thinking, creativity, and the ability to solve complex, multidisciplinary problems. It also leads to a better understanding of natural phenomena and of the technologies around us, as well as their social and environmental implications.

        -

Some examples of how thermodynamics influences our daily lives are the following:

        -
          -
• Thermodynamics teaches us that ideas and concepts can flow in both directions, between the basic and the applied, and that the needs of practical applications can lead to very general and fundamental concepts and relationships [^1^].
• Thermodynamics shows us how to use energy efficiently and minimize the waste that inevitably accompanies that use. It tells us the theoretical and practical limits on the efficiency of machines and thermal processes, as well as ways to optimize them [^2^]; a short illustration of this limit follows the list.
• Thermodynamics helps us design and operate devices we use every day, such as heating and cooling systems, vehicle engines, and household appliances. It also lets us evaluate their performance, their energy consumption, and their environmental impact [^2^].
• Thermodynamics lets us explore new renewable and sustainable energy sources, such as solar, wind, geothermal, hydroelectric, and biomass. It also helps us improve the conversion and storage of that energy [^2^].
• Thermodynamics makes it easier to understand other fields of science and engineering, such as fluid mechanics, heat transfer, chemistry, biology, and astrophysics. It also lets us apply its principles and methods to interdisciplinary problems [^3^].
        -
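As a short illustration of the efficiency limits mentioned in the second bullet, here is the standard Carnot bound for heat engines (a general textbook result, not a formula taken from Wark's book; the numbers in the example are illustrative):

```latex
% Maximum efficiency of any heat engine operating between a hot reservoir at
% absolute temperature T_h and a cold reservoir at T_c:
\eta_{\max} = 1 - \frac{T_c}{T_h}
% Example: with T_h = 600\,\mathrm{K} and T_c = 300\,\mathrm{K},
% \eta_{\max} = 1 - 300/600 = 0.5, so no real engine between these
% temperatures can convert more than half of the heat input into work.
```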

In conclusion, studying thermodynamics is not only useful for solving the exercises in the sixth edition of Wark's book; it is also a way to broaden our view of the world and to improve our ability to innovate and create solutions for current and future challenges.

        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Akruti 7.0 Oriya Software Free Download The Ultimate Tool for Odia Writing and Publishing.md b/spaces/tialenAdioni/chat-gpt-api/logs/Akruti 7.0 Oriya Software Free Download The Ultimate Tool for Odia Writing and Publishing.md deleted file mode 100644 index c174bb54a53f72ed6e987dd40a24817daa770ad8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Akruti 7.0 Oriya Software Free Download The Ultimate Tool for Odia Writing and Publishing.md +++ /dev/null @@ -1,91 +0,0 @@ - -

        Akruti 7.0: A Powerful Software for Odia Typing and Editing

        -

Akruti 7.0 is a software package that lets you type and edit Odia text on your computer with ease. Whether you want to write Odia letters, documents, emails, blogs, or websites, Akruti 7.0 can help you with its rich features and user-friendly interface.

        -

        Akruti 7.0 supports Unicode and ANSI fonts, which means you can use it with any application that supports these formats. You can also choose from a variety of Odia fonts and keyboard layouts to suit your preference. Akruti 7.0 also has a spell checker, a word suggestion tool, a dictionary, and a converter that can convert Odia text from one font to another.

        -

        akruti 7.0 oriya software free download


        Download File » https://urlcod.com/2uK52y



        -

        One of the best features of Akruti 7.0 is that it allows you to type Odia using phonetic transliteration. This means you can type Odia words using English letters and Akruti 7.0 will automatically convert them to Odia script. For example, if you type "mora nam subhra", Akruti 7.0 will display "ମୋର ନାମ ସୁଭ୍ର". This makes typing Odia fast and easy for anyone who knows English.

        -

        If you want to download Akruti 7.0 for free, you can visit the official website of Akruti[^1^] and register as a user. You will then get access to the download area where you can find the installation file and the user manual. Alternatively, you can also watch this YouTube video[^2^] by Technical Subhra that shows you how to download and install Akruti 7.0 step by step.

        -

        Akruti 7.0 is a must-have software for anyone who wants to type and edit Odia text on their computer. It is compatible with Windows XP, Vista, 7, 8, and 10 operating systems. It is also available in other Indian languages such as Hindi, Bengali, Gujarati, Marathi, Tamil, Telugu, Kannada, Malayalam, Punjabi, and Sanskrit.

        - -

        In this article, we will show you some tips and tricks to use Akruti 7.0 more effectively and efficiently. You will learn how to customize the settings, use the shortcuts, and troubleshoot some common problems.

        -

        How to Customize the Settings in Akruti 7.0

        -

        Akruti 7.0 allows you to customize various settings to suit your needs and preferences. You can access the settings menu by clicking on the Akruti icon on the taskbar and selecting "Settings". Here are some of the settings you can change:

        -


        -
          -
        • Language: You can select the language you want to type in from the drop-down list. You can also switch between languages by pressing Ctrl+Shift.
        • -
        • Font: You can select the font you want to use from the drop-down list. You can also change the font size and style by clicking on the "Font" button.
        • -
        • Keyboard: You can select the keyboard layout you want to use from the drop-down list. You can also view the keyboard map by clicking on the "Keyboard" button.
        • -
        • Phonetic: You can enable or disable the phonetic transliteration feature by checking or unchecking the box. You can also adjust the transliteration rules by clicking on the "Phonetic" button.
        • -
        • Spell Check: You can enable or disable the spell checker feature by checking or unchecking the box. You can also add or delete words from the custom dictionary by clicking on the "Spell Check" button.
        • -
        • Word Suggestion: You can enable or disable the word suggestion feature by checking or unchecking the box. You can also adjust the suggestion settings by clicking on the "Word Suggestion" button.
        • -
        • Dictionary: You can enable or disable the dictionary feature by checking or unchecking the box. You can also access the dictionary by clicking on the "Dictionary" button.
        • -
        • Converter: You can enable or disable the converter feature by checking or unchecking the box. You can also access the converter by clicking on the "Converter" button.
        • -
        -

        You can apply the changes by clicking on the "OK" button or cancel them by clicking on the "Cancel" button. You can also restore the default settings by clicking on the "Default" button.

        -

        How to Use Shortcuts in Akruti 7.0

        -

        Akruti 7.0 provides some useful shortcuts that can help you type faster and easier. Here are some of them:

        -
          -
        • Ctrl+Space: This shortcut toggles between English and Odia mode. You can use it to switch between languages quickly.
        • -
        • Ctrl+Shift: This shortcut switches between different languages that you have selected in the settings menu. You can use it to type in multiple languages without changing the settings.
        • -
        • Ctrl+Alt+P: This shortcut opens the phonetic transliteration window where you can type Odia words using English letters and see them converted to Odia script.
        • -
        • Ctrl+Alt+S: This shortcut opens the spell checker window where you can check and correct your spelling errors.
        • -
        • Ctrl+Alt+W: This shortcut opens the word suggestion window where you can see and select suggested words based on your typing.
        • -
        • Ctrl+Alt+D: This shortcut opens the dictionary window where you can look up meanings and synonyms of Odia words.
        • -
        • Ctrl+Alt+C: This shortcut opens the converter window where you can convert Odia text from one font to another.
        • -
        -

        You can also create your own shortcuts by clicking on the Akruti icon on the taskbar and selecting "Shortcuts". Here you can assign any key combination to any function of Akruti 7.0.

        -

        How to Troubleshoot Common Problems in Akruti 7.0

        -

        Akruti 7.0 is a reliable software that works smoothly with most applications and systems. However, sometimes you may encounter some problems that affect its performance or functionality. Here are some of them and how to fix them:

        -
          -
• The Akruti icon does not appear on the taskbar: This may happen if you have disabled or hidden the Akruti icon in your system settings. To fix this, right-click on an empty space on your taskbar and select "Taskbar settings". Then click on "Select which icons appear on the taskbar" and make sure that Akruti is turned on.
          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Ampl 64 Bit Crack Everything You Need to Know About This Powerful Tool.md b/spaces/tialenAdioni/chat-gpt-api/logs/Ampl 64 Bit Crack Everything You Need to Know About This Powerful Tool.md deleted file mode 100644 index 6c771928654e44e70655c7fd6d16d902b2e6aa6f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Ampl 64 Bit Crack Everything You Need to Know About This Powerful Tool.md +++ /dev/null @@ -1,64 +0,0 @@ -
          -

          How to Install and Use AMPL 64 Bit on Your Computer

          -

          AMPL is a powerful and expressive modeling language for optimization problems. It allows you to formulate complex mathematical models in a natural and concise way, and to solve them using various solvers. AMPL can handle linear, nonlinear, integer, and constraint programming problems, as well as global optimization and stochastic programming.

          -

          If you want to use AMPL on your computer, you need to download and install the appropriate bundle for your operating system. AMPL supports all widely used versions of Windows, Linux and macOS. It also offers an Integrated Development Environment (IDE) that provides a convenient interface for editing, running, and debugging AMPL models and commands.

          -

          Ampl 64 Bit Crack


          Download ☆☆☆ https://urlcod.com/2uK3Rd



          -

          In this article, we will show you how to install and use AMPL 64 bit on your computer, and how to access some of the free open-source solvers that are compatible with AMPL.

          - -

          Step 1: Download AMPL 64 Bit Bundle

          -

          To download AMPL 64 bit bundle, you need to create an account or login to the AMPL License Portal. There, you can choose from different license options, depending on your needs and preferences. For example, you can get a free Community Edition license that allows you to use AMPL with all open-source solvers, or a 30-day full trial license that includes all commercial solvers as well.

          -


          -

          Once you select a license option, you will see a list of available bundles for different operating systems. Choose the one that matches your system (e.g., Windows 64 bit) and click on the download button. You will get a zip file that contains the AMPL program, the IDE, and the solvers.

          - -

          Step 2: Install AMPL 64 Bit Bundle

          -

          To install AMPL 64 bit bundle, you need to unzip the downloaded file and copy the contents to a folder of your choice. For example, you can create a folder named "AMPL" in your C: drive and copy the files there. You don't need to run any installer or setup program.

          -

          To start the AMPL IDE, go to the folder where you copied the files and look for a program named amplide.exe (under Windows) or amplide (under Linux or macOS). The program will have a black cat's-head icon. Double-click the program file to start the AMPL IDE under Windows or macOS; use the command ./amplide to start the AMPL IDE under Linux.

          - -

          Step 3: Use AMPL 64 Bit Bundle

          -

          To use AMPL 64 bit bundle, you need to write or load an AMPL model file (.mod) and an optional data file (.dat). You can use the IDE's editor window to create or edit these files, or use any other text editor of your choice. The IDE's editor window has syntax highlighting for model and data files, and quick links to error locations.

          -

          Once you have a model file and a data file ready, you can run them using the IDE's command window. The command window allows you to type commands at an AMPL prompt in the usual way. For example, you can use the "model" command to load a model file, the "data" command to load a data file, the "option" command to set solver options, the "solve" command to solve the problem using a solver of your choice, and the "display" command to show the results.

          -

          All installed solvers can be accessed directly through the IDE. You can specify which solver you want to use by typing its name after the "option solver" command. For example, if you want to use CBC (an open-source mixed-integer linear programming solver), you can type "option solver cbc;" before solving your problem. You can also use other solvers such as Ipopt (an open-source nonlinear programming solver), Bonmin (an open-source mixed-integer nonlinear programming solver), Couenne (an open-source global optimization solver), or Choco (an open-source constraint programming solver).
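To make this workflow concrete, here is a minimal sketch driven from Python through the amplpy package rather than the IDE's command window (using amplpy is our assumption; the article itself only covers the IDE). The tiny production-planning model, the variable names, and the choice of CBC are all illustrative:

```python
# Minimal sketch of the model / option / solve / display workflow described above,
# run from Python via amplpy (assumes AMPL and the amplpy package are installed).
from amplpy import AMPL

ampl = AMPL()  # starts an AMPL session; the AMPL binaries must be findable

# A tiny illustrative linear program, written in the AMPL modeling language.
ampl.eval("""
var x >= 0;            # units of product 1
var y >= 0;            # units of product 2
maximize Profit: 3*x + 5*y;
subject to Labor:    2*x + 4*y <= 40;
subject to Material: 3*x + 2*y <= 30;
""")

ampl.eval("option solver cbc;")  # pick one of the bundled open-source solvers
ampl.solve()                     # same as typing "solve;" at the AMPL prompt

# Equivalent of "display x, y, Profit;" in the IDE's command window.
print("x =", ampl.getVariable("x").value())
print("y =", ampl.getVariable("y").value())
print("Profit =", ampl.getObjective("Profit").value())
```

Typing the same model statements, "option solver cbc;", "solve;", and "display x, y, Profit;" directly at the AMPL prompt in the IDE gives the same result.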

          - -

          Conclusion

          -

In this article, we showed you how to install and use AMPL 64 bit on your computer, and how to access some of the free open-source solvers that are compatible with AMPL.

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Api 520 Free Download Pdf.md b/spaces/tialenAdioni/chat-gpt-api/logs/Api 520 Free Download Pdf.md deleted file mode 100644 index fe316f704c6514757e7c541d8e911bfd114cd519..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Api 520 Free Download Pdf.md +++ /dev/null @@ -1,26 +0,0 @@ -
          -

          How to Download API 520 Standard for Free

          - -

          If you are looking for a free download of API 520 standard, you might be disappointed to find out that this document is not available for free online. API 520 is a standard that covers the sizing, selection, and installation of pressure-relieving devices used in refineries, chemical facilities, and related industries. It is a valuable resource for engineers, designers, and operators who need to ensure the safety and efficiency of their equipment.

          - -

          However, API 520 is not a public domain document. It is copyrighted by the American Petroleum Institute (API), which is a trade association that represents the oil and gas industry. API sells its standards and publications through its website and authorized distributors. The price of API 520 Part 1 (Sizing and Selection) is $340.00, and the price of API 520 Part 2 (Installation) is $210.00. You can order them online at www.api.org/pubs or by phone or fax.

          -

          Api 520 Free Download Pdf


          Download Zip »»» https://urlcod.com/2uK5W0



          - -

          So, how can you get a free download of API 520? There are some possible ways, but they are not legal or ethical. Some websites may claim to offer free downloads of API 520 or other standards, but they are likely to be fraudulent, outdated, incomplete, or infected with malware. Downloading or sharing copyrighted documents without permission or payment is also a violation of intellectual property rights and may result in legal consequences.

          - -

          The best way to access API 520 standard for free is to use a library or an academic institution that has a subscription or a license to access API standards online. You can search for libraries near you that have access to API standards at www.api.org/products-and-services/standards/standards-access. You can also contact your local API representative or distributor to inquire about possible discounts or waivers for students, educators, researchers, or non-profit organizations.

          - -

The API 520 standard is a comprehensive and authoritative guide to the design and installation of pressure-relief devices. It is worth investing in if you need it for professional or academic purposes. However, if you are looking for a free download of API 520, you should be aware of the risks and limitations of doing so.

          - -

          If you want to learn more about API 520 standard and its applications, you can also check out some of the resources below:

          - -
            -
          • Design of Safety Valves: ASME VIII / API 520 - This is a presentation by LESER, a leading manufacturer of safety valves, that explains the codes and standards, formulas, and factors influencing the design of safety valves in compliance with ASME VIII / API 520.
          • -
          • API 520-Part I.pdf - This is a PDF document that contains the table of contents and some excerpts from API 520 Part 1 standard. It gives an overview of the scope, definitions, types, and procedures for sizing and selection of pressure-relief devices.
          • -
          • API 520 Sizing and Selection of Pressure Relief Devices - This is a video lecture by Engineering Training Services that covers the basics of API 520 standard and how to use it for sizing and selection of pressure-relief devices. It includes examples and exercises to test your understanding.
          • -
          - -

          API 520 standard is a useful and important document for anyone who works with pressure-relief devices in the oil and gas industry. However, it is not free to download online. You should respect the intellectual property rights of API and purchase the standard from its official website or authorized distributors. Alternatively, you can use a library or an academic institution that has access to API standards online. You can also learn more about API 520 standard from other sources such as presentations, PDF documents, or video lectures.

          -

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Battlefield 3 Trainer Pc.epub Get Ready for the Ultimate Battlefield Adventure.md b/spaces/tialenAdioni/chat-gpt-api/logs/Battlefield 3 Trainer Pc.epub Get Ready for the Ultimate Battlefield Adventure.md deleted file mode 100644 index 2b2933a05c8fa29100cc3ce09507e99f326d44ba..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Battlefield 3 Trainer Pc.epub Get Ready for the Ultimate Battlefield Adventure.md +++ /dev/null @@ -1,181 +0,0 @@ -
          -

          What is Battlefield 3 Trainer Pc.epub and How to Use It?

          - -

          If you are a fan of Battlefield 3, you might have heard of Battlefield 3 Trainer Pc.epub. This is a file that contains various cheats and trainers for the game, which can help you enhance your gaming experience. In this article, we will explain what Battlefield 3 Trainer Pc.epub is, how to download and use it, and some tips and warnings for using it.

          - -

          What is Battlefield 3 Trainer Pc.epub?

          - -

          Battlefield 3 Trainer Pc.epub is a file that contains cheats and trainers for Battlefield 3. Cheats are codes that can modify the game's behavior, such as giving you unlimited health, ammo, or accuracy. Trainers are programs that can run alongside the game and activate cheats with the press of a button.

          -

          Battlefield 3 Trainer Pc.epub


          DOWNLOAD --->>> https://urlcod.com/2uK1HL



          - -

          By using Battlefield 3 Trainer Pc.epub, you can enjoy the game without worrying about dying, running out of ammo, or missing your shots. You can also explore the game's maps, unlock all the weapons and vehicles, and customize your character. You can even disable the HUD and FPS counter for a more cinematic experience.

          - -

          How to Download and Use Battlefield 3 Trainer Pc.epub?

          - -

          Downloading and using Battlefield 3 Trainer Pc.epub is easy and safe. Here are the steps you need to follow:

          - -
            -
1. Download Battlefield 3 Trainer Pc.epub from a reliable source. You can find many websites that offer this file for free or for a small fee. Make sure to scan the file with an antivirus program before opening it.
2. Extract the file using a program like 7-Zip or WinRAR. You will need a password to unzip the file. The password is usually provided by the website where you downloaded the file from.
3. Run the trainer program. You will see a window with various options and buttons. Some trainers may require you to run them as administrator.
4. Start the game. You can either launch the game from the trainer window or from your desktop shortcut.
5. Activate the cheats you want to use. You can either click on the buttons on the trainer window or press the corresponding keys on your keyboard. Some trainers may have an info button that shows you the list of cheats and their hotkeys.
6. Enjoy the game with your cheats enabled. You can deactivate the cheats anytime by pressing the same keys or buttons again.
          - -

          Tips and Warnings for Using Battlefield 3 Trainer Pc.epub

          - -

          Using Battlefield 3 Trainer Pc.epub can make your gaming experience more fun and exciting. However, there are some tips and warnings you should keep in mind:

          - -
            -
          • Use Battlefield 3 Trainer Pc.epub only for single-player mode. Using cheats in multiplayer mode can get you banned from online servers and ruin the game for other players.
          • -
          • Use Battlefield 3 Trainer Pc.epub only when you need it. Using cheats all the time can make the game too easy and boring. Try to challenge yourself by playing without cheats sometimes.
          • -
          • Use Battlefield 3 Trainer Pc.epub at your own risk. Cheats and trainers can sometimes cause glitches, crashes, or errors in the game. Make sure to backup your save files before using them.
          • -
          • Use Battlefield 3 Trainer Pc.epub responsibly. Cheats and trainers are meant to enhance your gaming experience, not to harm or offend anyone. Do not use them to cheat in competitions, harass other players, or promote illegal activities.
          • -
          - -

          Conclusion

          - -

          Battlefield 3 Trainer Pc.epub is a useful tool that can help you enjoy Battlefield 3 more. By using cheats and trainers, you can modify the game's behavior and customize your gameplay experience. However, you should also be careful and respectful when using them.

          - -

          If you want to download and use Battlefield 3 Trainer Pc.epub, you can follow the steps above and find a reliable source online. You can also check out other websites that offer more cheats and trainers for other games.

          -

          Battlefield 3 cheats and hacks for pc.epub
          -How to download and install Battlefield 3 trainer for pc.epub
          -Battlefield 3 trainer pc free download full version.epub
          -Battlefield 3 trainer pc unlimited ammo and health.epub
          -Battlefield 3 trainer pc offline mode.epub
          -Battlefield 3 trainer pc game guide and walkthrough.epub
          -Battlefield 3 trainer pc best settings and tips.epub
          -Battlefield 3 trainer pc multiplayer crack.epub
          -Battlefield 3 trainer pc mod menu and customizations.epub
          -Battlefield 3 trainer pc system requirements and compatibility.epub
          -Battlefield 3 trainer pc review and ratings.epub
          -Battlefield 3 trainer pc gameplay and features.epub
          -Battlefield 3 trainer pc latest updates and patches.epub
          -Battlefield 3 trainer pc troubleshooting and errors.epub
          -Battlefield 3 trainer pc keyboard shortcuts and commands.epub
          -Battlefield 3 trainer pc steam and origin versions.epub
          -Battlefield 3 trainer pc windows 10 and mac support.epub
          -Battlefield 3 trainer pc comparison with other games.epub
          -Battlefield 3 trainer pc secrets and easter eggs.epub
          -Battlefield 3 trainer pc achievements and trophies.epub
          -Battlefield 3 trainer pc save file location and backup.epub
          -Battlefield 3 trainer pc graphics and performance optimization.epub
          -Battlefield 3 trainer pc sound and music settings.epub
          -Battlefield 3 trainer pc controller support and configuration.epub
          -Battlefield 3 trainer pc video tutorials and guides.epub
          -Battlefield 3 trainer pc fan art and wallpapers.epub
          -Battlefield 3 trainer pc memes and jokes.epub
          -Battlefield 3 trainer pc forums and communities.epub
          -Battlefield 3 trainer pc news and rumors.epub
          -Battlefield 3 trainer pc release date and price.epub
          -Battlefield 3 trainer pc torrent download link.epub
          -Battlefield 3 trainer pc alternative download sources.epub
          -Battlefield 3 trainer pc virus scan and safety check.epub
          -Battlefield 3 trainer pc legal issues and disclaimer.epub
          -Battlefield 3 trainer pc refund policy and customer service.epub
          -Battlefield 3 trainer pc testimonials and feedbacks.epub
          -Battlefield 3 trainer pc frequently asked questions (FAQ).epub
          -Battlefield 3 trainer pc bonus content and extras.epub
          -Battlefield 3 trainer pc history and development.epub
          -Battlefield 3 trainer pc fun facts and trivia.epub
          -Battlefield 3 vs. Call of Duty comparison for pc gamers.epub
          -Best weapons and vehicles in Battlefield 3 for pc players.epub
          -How to master the multiplayer mode in Battlefield 3 for pc users.epub
          -How to unlock all the missions and maps in Battlefield 3 for pc fans.epub
          -How to customize your character and loadout in Battlefield 3 for pc enthusiasts.epub
          -How to improve your skills and rank in Battlefield 3 for pc experts.epub
          -How to fix common bugs and glitches in Battlefield 3 for pc users.epub
          -How to enjoy the story mode in Battlefield 3 for pc players.epub
          -How to get the best deals and discounts on Battlefield 3 for pc buyers.epub
          -How to stream and record your gameplay of Battlefield 3 for pc users.epub

          - -

          We hope this article was helpful and informative for you. Have fun playing Battlefield 3 with your cheats enabled!

          -

          What are the Benefits of Using Battlefield 3 Trainer Pc.epub?

          - -

          Using Battlefield 3 Trainer Pc.epub can have many benefits for your gaming experience. Here are some of them:

          - -
            -
          • You can have more fun and excitement. By using cheats and trainers, you can experience the game in a different way. You can try new things, experiment with different strategies, and overcome any challenge.
          • -
          • You can save time and effort. By using cheats and trainers, you can skip the parts of the game that you find boring or tedious. You can also avoid repeating the same missions or levels over and over again.
          • -
          • You can learn more about the game. By using cheats and trainers, you can discover hidden secrets, easter eggs, and glitches in the game. You can also learn more about the game's mechanics, features, and story.
          • -
          • You can customize your game. By using cheats and trainers, you can modify your game to suit your preferences. You can change the graphics, sound, difficulty, and gameplay options.
          • -
          - -

          What are the Best Sources for Battlefield 3 Trainer Pc.epub?

          - -

          If you want to download and use Battlefield 3 Trainer Pc.epub, you need to find a reliable source that offers high-quality and safe files. There are many websites that claim to offer cheats and trainers for Battlefield 3, but not all of them are trustworthy. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information.

          - -

          To avoid these risks, you should look for sources that have the following features:

          - -
            -
          • They have positive reviews and ratings from other users. You can check the comments section, forums, or social media platforms to see what other people say about the website.
          • -
          • They have clear and detailed instructions on how to download and use the file. You should be able to follow the steps easily and without any confusion.
          • -
          • They have a customer support service that can help you with any issues or questions. You should be able to contact them via email, phone, or chat.
          • -
          • They have a refund policy that can protect your money in case the file does not work or causes any problems. You should be able to request a refund within a reasonable time frame.
          • -
          - -

          Some examples of websites that offer Battlefield 3 Trainer Pc.epub are:

          - -
            -
1. Gamepressure.com: This website offers a free download of Battlefield 3 Trainer Pc.epub that works with version 1.10 of the game. It has over 60 thousand downloads and 148 positive ratings.
2. WeMod.com: This website offers a free download of Battlefield 3 Trainer Pc.epub that works with PC and EA versions of the game. It has over 7 cheats and supports Windows 11, Windows 10, Windows 8.1, and Windows 7.
3. MegaGames.com: This website offers a free download of Battlefield 3 Trainer Pc.epub that works with version 1.3 of the game. It has over 11 cheats and has an info button that shows you how to use them.
          - -

          These are just some examples of websites that offer Battlefield 3 Trainer Pc.epub. You can also search for other websites that meet your needs and preferences.

          -

          What are the Features of Battlefield 3 Trainer Pc.epub?

          - -

          Battlefield 3 Trainer Pc.epub has many features that can make your game more enjoyable and exciting. Here are some of them:

          - -
            -
          • It has a variety of cheats and trainers. You can choose from different options and buttons that can modify the game's behavior. You can also customize the hotkeys for each cheat or trainer.
          • -
          • It has a user-friendly interface. You can easily access and use the trainer program. You can also view the info button that shows you how to use each cheat or trainer.
          • -
          • It has a high compatibility. You can use the trainer program with different versions of the game, such as PC and EA. You can also use it with Windows 11, Windows 10, Windows 8.1, and Windows 7.
          • -
          • It has a low file size. You can download and use the trainer program without taking up too much space on your computer. You can also unzip the file using a free software like 7-Zip or WinRAR.
          • -
          - -

          What are the Alternatives to Battlefield 3 Trainer Pc.epub?

          - -

          If you want to try other ways to enhance your gaming experience, you can also check out some alternatives to Battlefield 3 Trainer Pc.epub. Here are some of them:

          - -
            -
1. Gamepressure.com: This website offers a free download of Battlefield 3 Trainer Pc.epub that works with version 1.10 of the game. It has over 60 thousand downloads and 148 positive ratings.
2. WeMod.com: This website offers a free download of Battlefield 3 Trainer Pc.epub that works with PC and EA versions of the game. It has over 7 cheats and supports Windows 11, Windows 10, Windows 8.1, and Windows 7.
3. MegaGames.com: This website offers a free download of Battlefield 3 Trainer Pc.epub that works with version 1.3 of the game. It has over 11 cheats and has an info button that shows you how to use them.
          - -

          These are just some examples of alternatives to Battlefield 3 Trainer Pc.epub. You can also search for other websites that offer cheats and trainers for other games.

          -

          What are the Drawbacks of Using Battlefield 3 Trainer Pc.epub?

          - -

          While using Battlefield 3 Trainer Pc.epub can have many benefits, it can also have some drawbacks. Here are some of them:

          - -
            -
          • It can reduce the challenge and satisfaction of the game. By using cheats and trainers, you can make the game too easy and lose the sense of accomplishment and reward. You can also miss out on some of the game's features and content that require skill and effort.
          • -
          • It can cause compatibility and performance issues. By using cheats and trainers, you can interfere with the game's normal functioning and cause glitches, crashes, or errors. You can also affect the game's speed, graphics, and sound quality.
          • -
          • It can violate the game's terms and conditions. By using cheats and trainers, you can break the rules and agreements of the game's developers and publishers. You can also risk legal actions or penalties if you use them for illegal or unethical purposes.
          • -
          • It can ruin the game for other players. By using cheats and trainers, you can spoil the game's balance and fairness. You can also annoy or offend other players who play without cheats or trainers.
          • -
          - -

          How to Uninstall Battlefield 3 Trainer Pc.epub?

          - -

          If you want to uninstall Battlefield 3 Trainer Pc.epub, you can follow these steps:

          - -
            -
1. Delete the trainer program from your computer. You can either move it to the recycle bin or permanently delete it.
2. Delete the trainer file from your computer. You can either move it to the recycle bin or permanently delete it.
3. Delete any backup files or folders that contain the trainer program or file. You can either move them to the recycle bin or permanently delete them.
4. Scan your computer with an antivirus program to remove any viruses, malware, or spyware that may have come with the trainer program or file.
5. Restore your game to its original state. You can either reinstall the game from your disc or download it from your online account.
          - -

          These are some steps you can take to uninstall Battlefield 3 Trainer Pc.epub. You can also check out other websites that offer more instructions on how to uninstall cheats and trainers for other games.

          -

          Conclusion

          - -

          Battlefield 3 Trainer Pc.epub is a file that contains cheats and trainers for Battlefield 3, a popular shooter game. By using it, you can modify the game's behavior and customize your gameplay experience. However, you should also be careful and respectful when using it, as it can have some drawbacks and risks.

          - -

          If you want to download and use Battlefield 3 Trainer Pc.epub, you can follow the steps in this article and find a reliable source online. You can also check out other websites that offer more cheats and trainers for other games.

          - -

          We hope this article was helpful and informative for you. Have fun playing Battlefield 3 with your cheats enabled!

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Cracked Front Tooth Causes Symptoms and Solutions.md b/spaces/tialenAdioni/chat-gpt-api/logs/Cracked Front Tooth Causes Symptoms and Solutions.md deleted file mode 100644 index 0349fc76ce8b78df6c7dfb1024a7d09d82655139..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Cracked Front Tooth Causes Symptoms and Solutions.md +++ /dev/null @@ -1,37 +0,0 @@ -
          -

          Can a Cracked Front Tooth Be Saved?

          -

          A cracked front tooth can be a source of pain, sensitivity, and embarrassment. You may wonder if your tooth can be saved or if you need to have it extracted. The answer depends on the type, location, and extent of the crack. In this article, we will explain the different types of cracks that can affect your teeth and the possible treatments for each one.

          -

          can a cracked front tooth be saved


          Download Ziphttps://urlcod.com/2uK3z0



          -

          Types of Cracks

          -

          According to the American Association of Endodontists, there are five types of cracks that can affect your teeth:

          -
            -
          • Craze lines: These are tiny cracks in the enamel (the hard outer layer) of your teeth. They are very common and usually do not cause any pain or problems. They do not require any treatment.
          • -
          • Fractured cusp: This is a crack that occurs around a dental filling or a tooth that has been weakened by decay. It usually does not affect the pulp (the soft inner part) of the tooth and does not cause much pain. It can be treated by replacing the filling or placing a crown over the tooth.
          • -
          • Crack that extends into the gum line: This is a crack that runs from the chewing surface of the tooth down to the root. If the crack has not reached the gum line, it may be possible to save the tooth with a root canal treatment and a crown. However, if the crack extends below the gum line, it may be impossible to save the tooth and extraction may be necessary.
          • -
          • Split tooth: This is a crack that splits the tooth into two or more pieces. This usually happens when a crack that extends into the gum line is left untreated. It is very unlikely that the entire tooth can be saved, but sometimes a portion of it can be preserved with endodontic surgery.
          • -
          • Vertical root fracture: This is a crack that starts at the root of the tooth and goes upward. It often does not cause any symptoms until the tooth becomes infected. It is very difficult to detect and treat, and usually requires extraction.
          • -
          -

          Treatment Options

          -

          The best way to treat a cracked tooth is to see your dentist as soon as possible. The sooner your tooth is treated, the better the outcome. Your dentist will examine your tooth and determine the type and severity of the crack. They may also take x-rays or use other tools to diagnose your condition.

          -

          The treatment options for a cracked tooth depend on several factors, such as:

          -
            -
          • The location and direction of the crack
          • -
          • The size and depth of the crack
          • -
          • The extent of damage to the pulp and surrounding tissues
          • -
          • Your symptoms and preferences
          • -
          -

          Some of the possible treatments for a cracked tooth are:

          -
            -
          • Bonding: This is a procedure where your dentist uses a tooth-colored resin material to fill in or cover up the crack. This can improve the appearance and function of your tooth.
          • -
          • Veneer: This is a thin shell of porcelain or composite material that is bonded to the front surface of your tooth. This can hide minor cracks and improve the shape and color of your tooth.
          • -
          • Crown: This is a cap that covers the entire visible part of your tooth. This can protect and strengthen your tooth and restore its shape and appearance.
          • -
          • Root canal treatment: This is a procedure where your dentist removes the infected or damaged pulp from inside your tooth and fills it with a rubber-like material. This can save your tooth from extraction and prevent further infection.
          • -
          • Endodontic surgery: This is a surgery where your dentist makes an incision in your gum and removes part of the root or bone around your tooth. This can help save some teeth that cannot be treated with conventional root canal treatment.
          • -
          • Extraction: This is a procedure where your dentist removes your entire tooth from its socket. This is usually done as a last resort when no other treatment can save your tooth.
          • -
          -

          Complications and Prevention

          -

A cracked tooth can lead to several complications if left untreated, such as infection of the pulp, a painful abscess, and eventual loss of the tooth, so it is important to have it examined and treated promptly.

          -

          -

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/All Characters and Skins Unlocked in Soul Knight 4.2.0 - Mod APK Installation Guide.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/All Characters and Skins Unlocked in Soul Knight 4.2.0 - Mod APK Installation Guide.md deleted file mode 100644 index 90ac978fd50c055cab66f72b0e4d35327da02cb2..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/All Characters and Skins Unlocked in Soul Knight 4.2.0 - Mod APK Installation Guide.md +++ /dev/null @@ -1,128 +0,0 @@ - -

            Soul Knight Mod APK: Unlock All Characters and Skins 4.2.0

            -

            If you are looking for a fast-paced dungeon crawler game with pixel graphics and tons of weapons, then you should try Soul Knight. This game is available for Android, iOS, and Nintendo Switch, and it has been downloaded over 50 million times on Google Play Store alone. However, if you want to enjoy the game to the fullest, you might want to use Soul Knight Mod APK, which will unlock all the characters and skins in the game for free. In this article, we will tell you what Soul Knight is, why you should use Soul Knight Mod APK, and how to download and install it. We will also give you some tips and tricks for playing Soul Knight like a pro.

            -




            -

            What is Soul Knight?

            -

            A pixel roguelike RPG with endless adventure

            -

            Soul Knight is a game made by ChillyRoom Inc. that was released in 2017. The game is inspired by Enter The Gungeon, a bullet-hell rogue-lite game produced by Dodge Roll and Devolver Digital. The story of Soul Knight is simple: in a time of gun and sword, the magical stone that maintains the balance of the world is stolen by high-tech aliens. The world is hanging on a thin thread, and it all depends on you retrieving the magical stone.

            -

            The gameplay of Soul Knight is simple as well: you choose one of the 20+ unique heroes, each with their own abilities and skills, and enter randomly generated dungeons filled with enemies, traps, chests, and bosses. You can use over 400 different weapons, ranging from guns, swords, shovels, lasers, rockets, grenades, and more. You can also find NPCs that will fight by your side or offer you services. The game has an auto-aim mechanism for super intuitive control, and it supports both online and offline multiplayer modes.

            -

            Features of Soul Knight

            -

            Some of the features of Soul Knight are:

            -
              -
            • 20+ unique heroes with different abilities and skills
            • -
            • 400+ weapons with various effects and properties
            • -
            • Randomly generated dungeons with different themes and enemies
            • -
            • NPCs that can help you or hinder you
            • -
            • Auto-aim mechanism for easy control
            • -
            • Online and offline multiplayer modes
            • -
            • Assorted game modes and features, such as tower defense, boss rush, daily challenges, etc.
            • -
            -

            Why use Soul Knight Mod APK?

            -

            Benefits of using Soul Knight Mod APK

            -

            Soul Knight is a fun and addictive game, but it can also be challenging and frustrating at times. Some of the characters and skins in the game are locked behind in-app purchases or require a lot of grinding to unlock. If you don't want to spend real money or waste time on unlocking them, you can use Soul Knight Mod APK, which will give you access to all the characters and skins in the game for free.

            -

            By using Soul Knight Mod APK, you can enjoy playing with any hero you want, without worrying about their cost or availability. You can also customize your appearance with any skin you like, from cute animals to cool robots. You can mix and match different characters and skins to create your own unique combination.

            -


            -

            How to download and install Soul Knight Mod APK

            -

            If you want to download and install Soul Knight Mod APK, you need to follow these steps:

            -
            1. Download the Soul Knight Mod APK file from a trusted source, such as [this one].
            2. Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
            3. Locate the downloaded Soul Knight Mod APK file on your device and tap on it to start the installation process.
            4. Follow the instructions on the screen and wait for the installation to finish.
            5. Launch the Soul Knight game and enjoy playing with all the characters and skins unlocked.
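
            If you prefer to install from a computer instead of tapping through the steps above, you can sideload the same APK over USB with Android's adb tool. The snippet below is only a rough sketch: it assumes adb is installed and USB debugging is enabled on your device, and the file name soul-knight-mod.apk is a placeholder for whatever your downloaded file is actually called.

```python
import subprocess

APK_PATH = "soul-knight-mod.apk"  # placeholder name; use the file you actually downloaded

def sideload(apk_path: str) -> None:
    """Install an APK on a USB-connected Android device via adb."""
    # Show connected devices first so a missing or unauthorized device is easy to spot.
    subprocess.run(["adb", "devices"], check=True)
    # "-r" reinstalls over an existing copy while keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload(APK_PATH)
```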

            Tips and tricks for playing Soul Knight

            -

            Choose your hero wisely

            -

            Soul Knight has over 20 different heroes, each with their own abilities and skills. Some of them are more suitable for beginners, while others are more challenging to master. Some of them are more effective in certain situations, while others are more versatile. You should choose your hero based on your preference and playstyle, but here are some general tips:

            -
              -
            • The Knight is a balanced hero that can use any weapon and has a high health pool. He is a good choice for beginners who want to try different weapons and strategies.
            • -
            • The Rogue is a fast and agile hero that can dodge bullets and deal critical damage. He is a good choice for advanced players who want to play aggressively and rely on their reflexes.
            • -
            • The Alchemist is a support hero that can throw potions that have various effects, such as healing, freezing, or exploding. He is a good choice for players who want to help their teammates or control the battlefield.
            • -
            • The Engineer is a defensive hero that can build turrets and shields. He is a good choice for players who want to play strategically and create a safe zone for themselves and their allies.
            • -
            -

            Use the best weapons for your playstyle

            -

            Soul Knight has over 400 different weapons, each with their own stats, effects, and properties. Some of them are more common, while others are more rare. Some of them are more powerful, while others are more fun. You should use the weapons that suit your playstyle, but here are some general tips:

            -
              -
            • The melee weapons are good for close-range combat and saving energy. They can also break barrels and crates that may contain items or coins. However, they expose you to more danger and require you to get close to your enemies.
            • -
            • The ranged weapons are good for long-range combat and dealing damage from a safe distance. They can also hit enemies behind cover or around corners. However, they consume energy and may run out of ammo quickly.
            • -
            • The elemental weapons are good for dealing extra damage or applying status effects to your enemies. They can also interact with the environment, such as setting fire to wooden objects or freezing water pools. However, they may have lower damage or accuracy than other weapons.
            • -
            • The special weapons are good for creating unique effects or situations that can change the course of the game. They can also be very fun and satisfying to use. However, they may have drawbacks or limitations that make them less reliable or efficient than other weapons.
            • -
            -

            Explore the dungeons and loot everything

            -

            Soul Knight has randomly generated dungeons with different themes and enemies. Each dungeon has several floors, each with several rooms. You should explore every room and floor as much as possible, as you may find valuable items or secrets that can help you in your quest. Here are some things to look out for:

            -
            • Chests that contain weapons, items, coins, or gems. Some chests are locked and require keys to open, while others are free or have traps.
            • Shops that sell weapons, items, services, or buffs. You can use coins or gems to buy things from the shopkeepers, but be careful not to anger them by stealing or attacking them.
            • -
            • Statues that grant you buffs or skills. You can use gems to activate the statues and receive their benefits, but be aware that some statues may have negative effects as well.
            • -
            • Plants that heal you or give you energy. You can use your melee weapon to harvest the plants and restore your health or energy, but be careful not to destroy them by accident.
            • -
            • Secret rooms that hide treasures or surprises. You can find secret rooms by looking for cracks on the walls or floors, and use your melee weapon or explosives to break them open.
            • -
            -

            Avoid traps and use barrels to your advantage

            -

            Soul Knight has many traps and hazards that can harm you or your enemies. You should avoid them as much as possible, or use them to your advantage. Here are some examples of traps and hazards:

            -
              -
            • Spikes that deal damage when you step on them. You can avoid them by jumping over them or using a shield.
            • -
            • Laser beams that shoot across the room periodically. You can avoid them by timing your movements or using a shield.
            • -
            • Mines that explode when you get close to them. You can avoid them by keeping your distance or using a shield.
            • -
            • Barrels that explode when hit by bullets or melee attacks. You can use them to damage your enemies by shooting them or throwing them.
            • -
            • Fire pits that ignite when hit by fire weapons or explosives. You can use them to set fire to your enemies or wooden objects.
            • -
            • Water pools that freeze when hit by ice weapons or explosives. You can use them to freeze your enemies or create slippery surfaces.
            • -
            -

            Upgrade your skills and buffs

            -

            Soul Knight has many skills and buffs that can enhance your performance and survival. You should upgrade them as much as possible, as they can make a big difference in your quest. Here are some ways to upgrade your skills and buffs:

            -
            • Level up your hero by gaining experience points from killing enemies or completing quests. You can choose one of three random skills to upgrade every time you level up.
            • Collect gems from chests, enemies, or plants, and use them to buy buffs from statues, shops, or vending machines. You can also find free buffs from chests, NPCs, or events.
            • -
            • Find items from chests, enemies, or shops, and use them to boost your stats, abilities, or effects. You can also find free items from chests, NPCs, or events.
            • -
            • Complete achievements and challenges to unlock more skills, buffs, and items. You can also earn coins and gems from completing them.
            • -
            -

            Conclusion

            -

            Soul Knight is a fun and addictive game that will keep you entertained for hours. You can play with different heroes, weapons, and modes, and enjoy the pixel graphics and smooth gameplay. However, if you want to unlock all the characters and skins in the game for free, you can use Soul Knight Mod APK, which will give you unlimited access to them. You can download and install Soul Knight Mod APK easily by following the steps we provided above. You can also use our tips and tricks to improve your skills and strategies in Soul Knight. We hope you enjoyed this article and found it helpful. Happy gaming!

            -

            FAQs

            -

            Here are some frequently asked questions about Soul Knight Mod APK:

            -
              -
            • Q: Is Soul Knight Mod APK safe to use?
            • -
            • A: Yes, Soul Knight Mod APK is safe to use as long as you download it from a trusted source. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before using them.
            • -
            • Q: Will Soul Knight Mod APK affect my progress in the original game?
            • -
            • A: No, Soul Knight Mod APK will not affect your progress in the original game. You can play both versions of the game separately without any interference.
            • -
            • Q: Can I play online multiplayer mode with Soul Knight Mod APK?
            • -
            • A: Yes, you can play online multiplayer mode with Soul Knight Mod APK. However, you may encounter some compatibility issues or errors with other players who are using the original version of the game.
            • -
            • Q: How can I update Soul Knight Mod APK?
            • -
            • A: You can update Soul Knight Mod APK by downloading the latest version of the mod from the same source you downloaded it from before. You may need to uninstall the previous version of the mod before installing the new one.
            • -
            • Q: Where can I find more information about Soul Knight?
            • -
            • A: You can find more information about Soul Knight on its official website [here], or on its social media pages [here] and [here]. You can also join its fan community [here] and [here].
            • -

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Car Parking 3D APK - The Ultimate Car Simulation Game for Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Car Parking 3D APK - The Ultimate Car Simulation Game for Android.md deleted file mode 100644 index ff92a8451f58147159b29d3ce7807fd4931a4722..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Car Parking 3D APK - The Ultimate Car Simulation Game for Android.md +++ /dev/null @@ -1,108 +0,0 @@ -
            -

            Car Parking 3D Game APK: A Realistic and Fun Driving Simulation

            -

            If you are looking for a realistic and fun driving simulation game for your Android device, you should check out Car Parking 3D Game APK. This game is not just about parking your car, but also about drifting, racing, tuning, and exploring a huge city with your friends online. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, and its pros and cons.

            -




            -

            Features of Car Parking 3D Game APK

            -

            Car Parking 3D Game APK is a game that offers you various features that make it stand out from other driving simulation games. Here are some of them:

            -

            Modification Options and Garage

            -

            You can customize your car with various upgrades and options in this game. You can improve your car's performance with performance and nos upgrades. You can also change the appearance of your car with rim, color, window tinting, spoiler, roof scoop, exhaust, plate, bass system, and LED light options. You can adjust the suspension height and camber as well. You can control your car's park lights, fog lights, high and low beam headlights. You can store your modified cars in your garage.

            -

            Career Modes and Free Modes

            -

            You can complete different missions and challenges in this game in five different modes. You can complete parking, drifting, racing against time missions. You can improve your driving skills with 560 levels. You can step on the gas, drift, and jump ramp on newly added maps. You can also drive freely in the free roam mode.

            -

            Multiplayer Mode

            -

            You can drive online with your friends and compete in races and drifts in this game. You can have a pleasant time with your friends with the online car game. The multiplayer car driving game offers you various challenges and free ride with other players.

            -

            Race Tracks

            -

            You can test your skills on new race tracks in this game. You can compete with your friends on the racetrack. You can set track records with 27 different cars.

            -

            Parking in the City Mode

            -

            You can experience realistic parking scenarios in a detailed city in this game. You can park your modified car in new parking lots in the city that has high detailed buildings and bridges. You can find your destination easily with the new navigation features. You can also switch to interior driving camera.

            -

            Drift Mode

            -

            You can earn drift points by sliding your car sideways in this game. You can increase your drift score by hitting bonus drift point and drift multiplier. You can collect stars and complete levels by reaching three tier goal points. You can also use the handbrake to drift your car.

            -

            Time Race

            -

            You can reach the destination on time and avoid accidents in this game. You can drive fast and carefully on the city roads. You can use the nitro boost to speed up your car. You can also use the horn to warn other vehicles.

            -


            -

            Parking Mode

            -

            You can improve your driving skills with 400 levels of parking difficulty in this game. You can park your car in different parking spots without hitting any obstacles. You can use the steering wheel, gas pedal, brake pedal, and gear shift to control your car. You can also use the rearview mirror and parking sensor to park your car easily.

            -

            How to Download and Install Car Parking 3D Game APK

            -

            If you want to download and install Car Parking 3D Game APK on your Android device, you need to follow these steps:

            -

            Download the APK file from a trusted source

            -

            You can download the APK file of Car Parking 3D Game from a trusted source such as [APKPure] or [Uptodown]. Make sure you download the latest version of the game that is compatible with your device.

            -

            Enable unknown sources on your device settings

            -

            Before you install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

            -

            Install the APK file and launch the game

            -

            After you download the APK file, locate it on your device storage and tap on it to install it. Follow the instructions on the screen and wait for the installation to finish. Once it is done, you can launch the game from your app drawer or home screen.

            -

            Pros and Cons of Car Parking 3D Game APK

            -

            Car Parking 3D Game APK is a game that has its pros and cons. Here are some of them:

            -

            Pros

            -
              -
            • High-quality graphics: The game has realistic and detailed graphics that make you feel like you are driving in a real city.
            • -
            • Realistic physics: The game has realistic physics that simulate the behavior of different cars and environments.
            • -
            • Various game modes: The game offers you various game modes that suit your preferences and skills.
            • -
            • Online multiplayer: The game allows you to play online with your friends and other players from around the world.
            • -
            • Car customization: The game lets you customize your car with various options and upgrades.
            • -
            -

            Cons

            -
              -
            • Large file size: The game has a large file size that may take up a lot of space on your device storage.
            • -
            • Requires internet connection: The game requires an internet connection to play online multiplayer and access some features.
            • -
            • May contain ads: The game may contain ads that may interrupt your gameplay or consume your data.
            • -
            -

            Conclusion

            -

            Car Parking 3D Game APK is a realistic and fun driving simulation game that you should try if you love cars and driving. The game offers you various features such as modification options, career modes, free modes, multiplayer mode, race tracks, parking in the city mode, drift mode, time race, and parking mode. The game also has high-quality graphics, realistic physics, and online multiplayer. However, the game also has some drawbacks such as large file size, internet connection requirement, and ads. Overall, we recommend you to download Car Parking 3D Game APK and enjoy driving in a huge city with your friends online.

            -

            Frequently Asked Questions

            -

            Here are some frequently asked questions about Car Parking 3D Game APK:

            -

            Q: Is Car Parking 3D Game APK safe to download?

            -

            A: Yes, Car Parking 3D Game APK is safe to download if you download it from a trusted source such as [APKPure] or [Uptodown]. However, you should always scan the APK file with an antivirus app before installing it.

            -

            Q: Is Car Parking 3D Game APK free to play?

            -

            A: Yes, Car Parking 3D Game APK is free to play. However, some features may require in-app purchases or watching ads.

            -

            Q: How can I play Car Parking 3D Game APK on PC?

            -

            A: You can play Car Parking 3D Game APK on PC by using an Android emulator such as [BlueStacks] or [NoxPlayer]. You need to download and install the emulator on your PC, then download and install the APK file on the emulator. Then you can launch the game and play it with your keyboard and mouse.

            -

            Q: How can I contact the developer of Car Parking 3D Game APK?

            -

            A: You can contact the developer of Car Parking 3D Game APK by sending an email to [carparking3dgame@gmail.com] or visiting their [Facebook page]. You can also leave a review or a comment on the game's page on [Google Play Store].

            -

            Q: What are some similar games to Car Parking 3D Game APK?

            -

            A: Some similar games to Car Parking 3D Game APK are [Real Car Parking 2], [Dr. Parking 4], [Car Simulator 2], [Parking Master], and [Parking Jam 3D]. You can download these games from the Google Play Store or other sources.

            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download Digifish Aqua Real 2 V1.04 Full Seriall HOT.md b/spaces/tioseFevbu/cartoon-converter/scripts/Download Digifish Aqua Real 2 V1.04 Full Seriall HOT.md deleted file mode 100644 index 2839d0a46afa8a7d62530d4da0a0e4089547017d..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Download Digifish Aqua Real 2 V1.04 Full Seriall HOT.md +++ /dev/null @@ -1,91 +0,0 @@ - -

            Download Digifish Aqua Real 2 V1.04 Full Seriall

            -

            Do you love the ocean and its amazing creatures? Do you want to experience the beauty and the wonder of the underwater world without leaving your home? If you answered yes, then you need to download Digifish Aqua Real 2 V1.04 Full Seriall, the most realistic and immersive ocean simulation program ever created.

            -

            Download Digifish Aqua Real 2 V1.04 Full Seriall





            -

            What is Digifish Aqua Real 2?

            -

            A realistic and immersive ocean simulation program

            -

            Digifish Aqua Real 2 is a program that transports you deep below the ocean surface, where you can swim with some of the planet's most gorgeous and colorful creatures. Through the magic of the DigiFish 3D Engine, you can see every detail of their scales, fins, eyes, and movements. You can also interact with them by feeding them, playing with them, or even scaring them away.

            -

            But Digifish Aqua Real 2 is not just a screensaver or a game. It is also an educational tool that teaches you about the ocean and its inhabitants. You can learn about their names, habitats, behaviors, and characteristics by clicking on them or reading the information panel. You can also customize your own aquarium by choosing from different backgrounds, plants, rocks, corals, and decorations.

            -

            Features and benefits of Digifish Aqua Real 2

            -

            Some of the features and benefits of Digifish Aqua Real 2 are:

            -

            -
              -
            • It has over 40 species of fish and marine animals, including clownfish, angelfish, sharks, turtles, dolphins, whales, and more.
            • -
            • It has realistic water effects, such as bubbles, waves, reflections, ripples, and caustics.
            • -
            • It has dynamic lighting and shadows that change according to the time of day and the position of the sun.
            • -
            • It has soothing sound effects and music that create a relaxing atmosphere.
            • -
            • It has multiple modes of operation, such as auto mode, manual mode, music mode, and sleep mode.
            • -
            • It has a user-friendly interface that allows you to easily control the settings, preferences, and functions of the program.
            • -
            • It has a low system requirement that makes it compatible with most computers and devices.
            • -
            -

            How to download Digifish Aqua Real 2 V1.04 Full Seriall?

            -

            Requirements and compatibility

            -

            To download Digifish Aqua Real 2 V1.04 Full Seriall, you need to have a computer or a device that meets the following requirements:

            Operating system: Windows XP/Vista/7/8/10
            CPU: Pentium III or higher
            RAM: 256 MB or more
            Hard disk space: 100 MB or more
            Graphics card: DirectX 9.0 compatible or higher
            Sound card: DirectX compatible or higher

            Steps to download and install Digifish Aqua Real 2 V1.04 Full Seriall

            To download and install Digifish Aqua Real 2 V1.04 Full Seriall, you need to follow these steps:

            -
              -
            1. Click on this link to go to the official website of Digifish Aqua Real 2: [Digifish Aqua Real 2].
            2. Click on the "Download" button and choose a location to save the file.
            3. Once the download is complete, open the file and follow the instructions to install the program.
            4. Enter the serial number that you received when you purchased the program. If you don't have one, you can buy one from the website or use a trial version for a limited time.
            5. Enjoy your ocean adventure with Digifish Aqua Real 2 V1.04 Full Seriall!
            -

            Tips and tricks to optimize your experience with Digifish Aqua Real 2

            -

            Here are some tips and tricks to optimize your experience with Digifish Aqua Real 2:

            -
              -
            • You can use the keyboard shortcuts to control the program, such as F1 for help, F2 for settings, F3 for information, F4 for music, F5 for sleep mode, F6 for auto mode, F7 for manual mode, F8 for feeding mode, F9 for scaring mode, and F10 for exit.
            • -
            • You can use the mouse to interact with the fish and marine animals, such as clicking on them to see their names and information, dragging them to move them around, or double-clicking on them to feed them.
            • -
            • You can use the right-click menu to access more options, such as changing the background, adding or removing plants, rocks, corals, and decorations, adjusting the water quality and lighting effects, and selecting different fish and marine animals.
            • -
            • You can use the music mode to play your own music files or playlists while watching the ocean scene. You can also adjust the volume and mute the sound effects.
            • -
            • You can use the sleep mode to turn your computer or device into a relaxing night light that displays a soothing ocean scene with dimmed lights and soft music.
            • -
            -

            Why choose Digifish Aqua Real 2 V1.04 Full Seriall?

            -

            The advantages of using the latest version of Digifish Aqua Real 2

            -

            Digifish Aqua Real 2 V1.04 Full Seriall is the latest version of Digifish Aqua Real 2 that has been updated and improved with new features and enhancements. Some of the advantages of using this version are:

            -
              -
            • It has more fish and marine animals than ever before, including some rare and exotic species that you won't find anywhere else.
            • -
            • It has more backgrounds and environments to choose from, such as coral reefs, shipwrecks, underwater caves, and sunken cities.
            • -
            • It has more customization options to create your own unique aquarium that suits your taste and style.
            • -
            • It has better graphics and performance that make the ocean scene more realistic and smooth.
            • -
            • It has fewer bugs and errors that might affect your enjoyment of the program.
            • -
            -

            The testimonials and reviews from satisfied users of Digifish Aqua Real 2

            -

            Digifish Aqua Real 2 has received many positive testimonials and reviews from satisfied users who have downloaded and installed it on their computers or devices. Here are some of them:

            -
            "Digifish Aqua Real 2 is amazing! I love how realistic and beautiful it is. It makes me feel like I'm in the ocean with all those fish. It's also very relaxing and soothing. I use it every day as a screensaver and a stress reliever."
            -
            "I'm a big fan of Digifish Aqua Real 2. It's not just a program, it's an experience. It's like having a virtual aquarium that I can customize and interact with. It's also very educational and fun. I learned a lot about the ocean and its creatures from it."
            -
            "Digifish Aqua Real 2 is awesome! I downloaded it for my kids who love animals and nature. They were amazed by how lifelike and colorful it is. They also enjoyed feeding and playing with the fish. It's a great way to entertain and educate them."
            -

            The best deals and offers for Digifish Aqua Real 2 V1.04 Full Seriall

            -

            If you want to download Digifish Aqua Real 2 V1.04 Full Seriall, you don't have to pay a fortune for it. You can find the best deals and offers for it on the internet. Here are some of them:

            -
              -
            • You can get a 50% discount on Digifish Aqua Real 2 V1.04 Full Seriall if you buy it from this link: [Digifish Aqua Real 2 V1.04 Full Seriall 50% Off]. This is a limited time offer, so hurry up and grab it before it expires.
            • -
            • You can get a free trial of Digifish Aqua Real 2 V1.04 Full Seriall if you download it from this link: [Digifish Aqua Real 2 V1.04 Full Seriall Free Trial]. You can use the program for 30 days without any restrictions or obligations. If you like it, you can buy the full version later.
            • -
            • You can get a bonus pack of Digifish Aqua Real 2 V1.04 Full Seriall if you buy it from this link: [Digifish Aqua Real 2 V1.04 Full Seriall Bonus Pack]. The bonus pack includes 10 extra fish and marine animals, 5 additional backgrounds, and 3 exclusive music tracks. This is a special offer that you won't find anywhere else.
            • -
            -

            Conclusion

            -

            Digifish Aqua Real 2 V1.04 Full Seriall is the ultimate ocean simulation program that you need to download and install on your computer or device. It is realistic, immersive, educational, and fun. It is also easy to use, customize, and control. It has many features and benefits that make it worth your time and money. It has also received many positive testimonials and reviews from satisfied users who have enjoyed their ocean adventure with Digifish Aqua Real 2.

            -

            So what are you waiting for? Download Digifish Aqua Real 2 V1.04 Full Seriall today and experience the beauty and the wonder of the underwater world without leaving your home.

            -

            FAQs

            -

            Q: What is the difference between Digifish Aqua Real 2 and Digifish Aqua Real?

            -

            A: Digifish Aqua Real 2 is the sequel to Digifish Aqua Real, which was released in 2004. Digifish Aqua Real 2 has improved graphics, performance, features, and content compared to Digifish Aqua Real.

            -

            Q: How can I update my version of Digifish Aqua Real 2?

            -

            A: You can update your version of Digifish Aqua Real 2 by downloading and installing the latest patch from the official website of Digifish Aqua Real 2: [Digifish Aqua Real 2 Update]. The patch will fix any bugs and errors that might occur in your version of Digifish Aqua Real 2.

            -

            Q: How can I contact the support team of Digifish Aqua Real 2?

            -

            A: You can contact the support team of Digifish Aqua Real 2 by sending an email to support@digifishaquareal.com or by filling out the contact form on the official website of Digifish Aqua Real 2: [Digifish Aqua Real 2 Contact]. The support team will respond to your queries and issues as soon as possible.

            -

            Q: How can I share my aquarium with others?

            -

            A: You can share your aquarium with others by using the screenshot function of Digifish Aqua Real 2. You can take a screenshot of your aquarium by pressing the Print Screen key on your keyboard or by clicking on the camera icon on the interface of Digifish Aqua Real 2. You can then save the screenshot as an image file and share it with others via email, social media, or other platforms.

            -

            Q: How can I uninstall Digifish Aqua Real 2 from my computer or device?

            -

            A: You can uninstall Digifish Aqua Real 2 from your computer or device by following these steps:

            -
              -
            1. Go to the Start menu and click on Control Panel.
            2. Click on Programs and Features or Add or Remove Programs.
            3. Select Digifish Aqua Real 2 from the list of programs and click on Uninstall or Remove.
            4. Follow the instructions to complete the uninstallation process.

            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Google Chrome 128 Bit Encryption Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Google Chrome 128 Bit Encryption Download.md deleted file mode 100644 index cb795266e19955b4fbf7a1354b87e28c28c3c92a..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Google Chrome 128 Bit Encryption Download.md +++ /dev/null @@ -1,185 +0,0 @@ - -

            Google Chrome 128 Bit Encryption Download: How to Secure Your Web Browsing

            -

            Do you want to browse the web safely and securely? Do you want to protect your personal information, online transactions, and passwords from hackers and prying eyes? Do you want to enjoy a fast, reliable, and customizable web browser that offers a lot of features and options? If you answered yes to any of these questions, then you might want to consider downloading and installing Google Chrome with 128 bit encryption.

            -




            -

            Google Chrome is one of the most popular and widely used web browsers in the world. It is known for its speed, security, simplicity, and versatility. It also supports 128 bit encryption, which is a high level of encryption that makes your web browsing more secure and private. In this article, we will explain what encryption is and why it is important for web browsers, what Google Chrome is and what are its features, how to download and install Google Chrome with 128 bit encryption, and how to compare Google Chrome with other web browsers. By the end of this article, you will have a better understanding of how to secure your web browsing with Google Chrome.

            -

            What is encryption and why is it important for web browsers?

            -

            Encryption is a way of scrambling data so that only authorized parties can understand the information. It is a mathematical process that transforms human-readable plaintext into incomprehensible ciphertext. Encryption requires the use of a cryptographic key, which is a string of characters that both the sender and the receiver of an encrypted message agree on. Encryption can be used to protect data at rest (when it is stored) or in transit (when it is transmitted).

            -

            Encryption is important for web browsers because it ensures that no one can read or modify your online communications or data without your permission. This prevents attackers, advertisers, internet service providers, or governments from intercepting and spying on your web browsing activities. Encryption also protects your online identity and credentials from identity theft and fraud. Encryption also enables you to access websites that are blocked or censored by your network or location.

            -

            Encryption basics: how it works and what are the types

            -

            There are two main types of encryption: symmetric encryption and asymmetric encryption. Symmetric encryption uses the same key for both encryption and decryption. Asymmetric encryption uses two keys: one for encryption (public key) and one for decryption (private key). The public key can be shared with anyone, while the private key must be kept secret.

            -

            Symmetric encryption is faster and simpler than asymmetric encryption, but it requires a secure way of exchanging the key between the parties. Asymmetric encryption does not require a secure way of exchanging the key, but it is slower and more complex than symmetric encryption. A common way of combining the advantages of both types is to use asymmetric encryption to exchange a symmetric key, then use symmetric encryption to encrypt the data.
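
            To make that hybrid idea concrete, here is a minimal Python sketch using the third-party cryptography package (the choice of library is our own assumption, not something a browser exposes): an RSA key pair stands in for the website's certificate, a fresh 128 bit AES key encrypts the actual message, and RSA-OAEP protects that small AES key in transit.

```python
# pip install cryptography  (assumed third-party dependency)
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

# Receiver's key pair (in real HTTPS this role is played by the server's certificate).
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

# Sender: encrypt the message with a fresh 128-bit symmetric key (fast)...
aes_key = AESGCM.generate_key(bit_length=128)
nonce = os.urandom(12)
ciphertext = AESGCM(aes_key).encrypt(nonce, b"meet me at noon", None)

# ...then wrap only the small AES key with the slower asymmetric cipher.
oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(), label=None)
wrapped_key = public_key.encrypt(aes_key, oaep)

# Receiver: unwrap the AES key with the private key, then decrypt the message.
recovered_key = private_key.decrypt(wrapped_key, oaep)
assert AESGCM(recovered_key).decrypt(nonce, ciphertext, None) == b"meet me at noon"
```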

            -

            -

            The strength of encryption depends on the length and complexity of the key. The longer and more complex the key, the harder it is for someone to guess or break it. The unit of measurement for key length is bits. A bit is a binary digit that can have only two values: 0 or 1. A key of 128 bits means that it has 2^128 possible combinations, which is a very large number. A key of 256 bits means that it has 2^256 possible combinations, which is an even larger number. Generally, the higher the bit length, the more secure the encryption.
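
            You can check the scale of those numbers directly; in the short calculation below, the guesses-per-second figure is an arbitrary illustrative assumption rather than a measured attack speed.

```python
key_space_128 = 2 ** 128
key_space_256 = 2 ** 256
print(f"128-bit keys: {key_space_128:.3e} possible keys")  # about 3.4e+38
print(f"256-bit keys: {key_space_256:.3e} possible keys")  # about 1.2e+77

# Even at an assumed trillion guesses per second, searching half the
# 128-bit key space would take on the order of 5e+18 years.
guesses_per_second = 10 ** 12
years = key_space_128 / 2 / guesses_per_second / (60 * 60 * 24 * 365)
print(f"about {years:.1e} years to try half of the 128-bit key space")
```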

            -

            Encryption benefits: how it protects your privacy, data, and identity online

            -

            Encryption has many benefits for web browsers and users. Here are some of them:

            -
              -
            • Encryption protects your privacy by preventing third parties from snooping on your web browsing activities. For example, if you visit a website that uses encryption, such as https://www.google.com, the URL, the content, and the cookies of the website are encrypted and cannot be seen by anyone else on the network. This means that no one can track what you search for, what you read, or what you buy online.
            • -
            • Encryption protects your data by preventing third parties from tampering with your web browsing activities. For example, if you visit a website that uses encryption, such as https://www.amazon.com, the data that you send and receive from the website are encrypted and cannot be modified by anyone else on the network. This means that no one can alter the prices, the products, or the reviews of the website.
            • -
            • Encryption protects your identity by preventing third parties from stealing your web browsing credentials. For example, if you visit a website that uses encryption, such as https://www.facebook.com, the username and password that you enter to log in to the website are encrypted and cannot be captured by anyone else on the network. This means that no one can access your account, your messages, or your photos online.
            • -
            -

            Encryption also enables you to access websites that are blocked or censored by your network or location. For example, if you visit a website that uses encryption, such as https://www.wikipedia.org, the network or the government cannot block or filter the website based on its content or domain name. This means that you can access information and knowledge that might otherwise be unavailable to you.

            -

            What is Google Chrome and what are its features?

            -

            Google Chrome is a web browser developed by Google. It was first released in 2008 and has since become one of the most popular and widely used web browsers in the world. According to StatCounter, Google Chrome had a global market share of 65.89% as of May 2021, making it the dominant web browser across all platforms.

            -

            Google Chrome is known for its speed, security, simplicity, and versatility. It offers a lot of features and options that make web browsing easier, faster, safer, and more enjoyable. Here are some of them:

            -

            Google Chrome overview: history, popularity, and platform support

            -

            Google Chrome was launched in 2008 as a beta version for Windows. It was based on the open-source Chromium project and the WebKit rendering engine. It aimed to provide a faster, more secure, and more stable web browser than its competitors at the time. It also introduced a minimalist user interface design that focused on the content rather than the browser itself.

            -

            Google Chrome quickly gained popularity among users and developers due to its performance, innovation, and compatibility. It also expanded its platform support to include Mac OS X, Linux, Android, iOS, and Chrome OS. It also switched its rendering engine from WebKit to Blink in 2013, which is a fork of WebKit developed by Google and other Chromium contributors.

            -

            Google Chrome also became the basis for other web browsers that use Chromium as their core. Some examples are Microsoft Edge (since 2019), Opera (since 2013), Brave (since 2016), and Vivaldi (since 2016).

            -

            Google Chrome features: speed, security, customization, and more

            -

            Google Chrome has many features that make it stand out from other web browsers. Here are some of them:

            -
              -
            • Speed: Google Chrome is designed to be fast in every possible way. It loads web pages quickly, runs web applications smoothly, and launches itself rapidly. It also has a built-in task manager that shows how much memory and CPU each tab and extension is using, allowing you to close or end any process that is slowing down your browser.
            • -
            • Security: Google Chrome is designed to be secure in every possible way. It supports encryption protocols such as HTTPS and SSL/TLS that protect your web browsing from eavesdropping and tampering. It also has a built-in malware and phishing protection that warns you of dangerous websites and downloads. It also has an incognito mode that allows you to browse the web without leaving any traces on your device.
            • Customization: Google Chrome is designed to be customizable in every possible way. It allows you to change the appearance, settings, and functionality of your browser according to your preferences and needs. You can choose from thousands of themes, extensions, and apps that enhance your web browsing experience. You can also sync your bookmarks, history, passwords, and settings across your devices using your Google account.
            • -
            • More: Google Chrome has many other features that make it more than just a web browser. It has a built-in PDF viewer, a built-in media player, a built-in translator, a built-in voice search, and a built-in password manager. It also supports web standards such as HTML5, CSS3, JavaScript, and WebAssembly. It also integrates with other Google services such as Gmail, Google Drive, Google Photos, Google Maps, and Google Assistant.
            • -
            -

            How to download and install Google Chrome with 128 bit encryption?

            -

            Downloading and installing Google Chrome with 128 bit encryption is easy and straightforward. You just need to follow these steps:

            -

            Downloading Google Chrome: where to find the official installer and how to verify it

            -

            The official installer for Google Chrome can be found on the Google Chrome website: https://www.google.com/chrome/. You can also use this link to download the installer directly: https://www.google.com/chrome/thank-you.html?statcb=1&installdataindex=empty&defaultbrowser=0.

            -

            To verify that the installer is authentic and safe, you can check its digital signature. A digital signature is a way of confirming that the file has not been altered or corrupted by anyone. To check the digital signature of the installer, you can follow these steps:

            -
              -
            1. Right-click on the installer file and select Properties.
            2. Click on the Digital Signatures tab.
            3. Select the signature from the list and click on Details.
            4. Click on View Certificate.
            5. Check that the certificate is issued by Google LLC and that it is valid.
            6. Click on OK to close the windows.
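
            The certificate dialog above is the authoritative check on Windows. As a supplementary sanity check, you can also hash the downloaded file and compare the result against a checksum obtained from a source you trust; the expected value in this sketch is a placeholder, because Google does not publish one on the download page, and ChromeSetup.exe is simply the installer's usual default file name.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder value -- substitute a checksum published by a source you trust.
EXPECTED = "0" * 64
actual = sha256_of("ChromeSetup.exe")
print("OK" if actual == EXPECTED else f"Mismatch: {actual}")
```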
            -

            Installing Google Chrome: how to run the setup and choose the options

            -

            To install Google Chrome on your device, you just need to run the setup file and follow the instructions. You can also choose some options during the installation process. Here are some of them:

            -
              -
            • You can choose whether to make Google Chrome your default browser or not. This means that whenever you open a web link from another program, it will open in Google Chrome instead of another browser.
            • -
            • You can choose whether to send usage statistics and crash reports to Google or not. This helps Google improve its products and services by collecting anonymous data about how you use Google Chrome and what problems you encounter.
            • -
            • You can choose whether to sign in to Google Chrome with your Google account or not. This allows you to sync your bookmarks, history, passwords, and settings across your devices using your Google account.
            • -
            -

            Enabling 128 bit encryption: how to check and change the encryption settings in Chrome

            -

            To enable 128 bit encryption in Google Chrome, you need to check and change the encryption settings in Chrome. Here is how:

            -
              -
            1. Open Google Chrome and click on the menu icon (three dots) at the top right corner of the browser window.
            2. Select Settings from the menu.
            3. Scroll down and click on Advanced.
            4. Under Privacy and security, click on Security.
            5. Under Advanced, click on Manage security keys.
            6. Under Encryption preferences, select Require 128-bit or stronger encryption for HTTPS connections.
            7. Click on Save changes.

            Note that this menu changes between releases; current versions of Chrome already negotiate 128-bit or stronger TLS ciphers automatically, so you may not find a separate toggle for it.
            -

            You can also check the encryption level of any website you visit by clicking on the lock icon next to the URL bar. This will show you whether the website is secure or not, what type of encryption it uses, and what certificate it has.
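
            The same information is available programmatically. The sketch below uses Python's standard ssl module to open a TLS connection and print the cipher suite the server negotiated, together with its symmetric key strength in bits; www.google.com is just an example host.

```python
import socket
import ssl

def negotiated_cipher(host: str, port: int = 443):
    """Connect over TLS and report the cipher suite the server negotiated."""
    context = ssl.create_default_context()  # also verifies the certificate chain
    with socket.create_connection((host, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=host) as tls:
            name, version, bits = tls.cipher()
            return name, version, bits

if __name__ == "__main__":
    name, version, bits = negotiated_cipher("www.google.com")
    print(f"{name} ({version}), {bits}-bit symmetric key")
```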

            -

            How to compare Google Chrome with other web browsers?

            -

            To compare Google Chrome with other web browsers, you need to consider some factors that affect your web browsing experience. These factors include speed, security, privacy, compatibility, features, customization, and support. You can also use a comparison table that shows how Google Chrome stacks up against other popular browsers based on these factors.

            -

            Comparison criteria: what factors to consider when choosing a web browser

            -

            Here are some factors that you should consider when choosing a web browser:

            -
              -
            • Speed: This refers to how fast a web browser can load web pages, run web applications, and launch itself. Speed depends on various factors such as network conditions, device specifications, browser settings, and browser optimization. A faster web browser can save you time and frustration, especially if you have a slow or unreliable internet connection.
            • -
            • Security: This refers to how well a web browser can protect your web browsing from malicious attacks, such as malware, phishing, ransomware, and hacking. Security depends on various factors such as encryption protocols, malware and phishing protection, certificate validation, and browser updates. A more secure web browser can prevent you from losing your data, money, or identity online.
            • -
            • Privacy: This refers to how well a web browser can protect your web browsing from unwanted tracking, such as cookies, trackers, ads, and analytics. Privacy depends on various factors such as encryption preferences, incognito mode, do not track option, and third-party extensions. A more private web browser can prevent you from being tracked, profiled, or targeted online.
            • -
            • Compatibility: This refers to how well a web browser can support the latest web standards and technologies, such as HTML5, CSS3, JavaScript, and WebAssembly. Compatibility depends on various factors such as rendering engine, browser version, and browser extensions. A more compatible web browser can ensure that you can access and enjoy all the features and functions of any website you visit.
            • Features: This refers to what additional functions and options a web browser can offer beyond the basic web browsing functionality, such as PDF viewer, media player, translator, voice search, and password manager. Features depend on various factors such as browser design, browser integration, and browser extensions. A more feature-rich web browser can enhance your web browsing experience and convenience.
            • Customization: This refers to how much you can change the appearance, settings, and functionality of your web browser according to your preferences and needs, such as themes, extensions, and apps. Customization depends on various factors such as browser flexibility, browser availability, and browser community. A more customizable web browser can make your web browsing more personalized and enjoyable.
            • Support: This refers to how much help and guidance you can get from the web browser developer or provider in case you encounter any problems or issues with your web browser, such as FAQs, forums, tutorials, and customer service. Support depends on various factors such as browser popularity, browser reputation, and browser feedback. A more supportive web browser can make your web browsing more reliable and satisfactory.

            Comparison table: how Google Chrome stacks up against other popular browsers


            To compare Google Chrome with other popular browsers based on the factors mentioned above, we can use a comparison table that shows the ratings of each browser for each factor. The ratings are based on a scale of 1 to 5 stars (1 being the lowest and 5 being the highest). The ratings are also based on our own research and evaluation of each browser's performance and features. The comparison table is shown below:

            | Browser         | Speed | Security | Privacy | Compatibility | Features | Customization | Support |
            |-----------------|-------|----------|---------|---------------|----------|---------------|---------|
            | Google Chrome   | ★★★★★ | ★★★★☆    | ★★★☆☆   | ★★★★★         | ★★★★☆    | ★★★★☆         | ★★★☆☆   |
            | Mozilla Firefox | ★★★★☆ | ★★★★☆    | ★★★★★   | ★★★★☆         | ★★★☆☆    | ★★★★★         | ★★★☆☆   |
            | Safari          | ★★★☆☆ | ★★★☆☆    | ★★★☆☆   | ★★★☆☆         | ★★★☆☆    | ★★★☆☆         | ★★★☆☆   |
            | Microsoft Edge  | ★★★★☆ | ★★★★☆    | ★★★☆☆   | ★★★★★         | ★★★☆☆    | ★★★☆☆         | ★★★☆☆   |
            | Opera           | ★★★☆☆ | ★★★☆☆    | ★★★☆☆   | ★★★☆☆         | ★★★☆☆    | ★★★☆☆         | ★★★☆☆   |
            | Brave           | ★★★★☆ | ★★★★★    | ★★★★★   | ★★★☆☆         | ★★★☆☆    | ★★★☆☆         | ★★★☆☆   |
            | Vivaldi         | ★★★☆☆ | ★★★☆☆    | ★★★☆☆   | ★★★☆☆         | ★★★☆☆    | ★★★☆☆         | ★★★☆☆   |

            The comparison table shows that Google Chrome is the best web browser in terms of speed and compatibility, and one of the best in terms of security and customization. However, it also shows that Google Chrome is not the best web browser in terms of privacy and support, and that it has some room for improvement in terms of features. Therefore, depending on your preferences and needs, you might want to consider other web browsers as well.


            Conclusion and FAQs


            In conclusion, Google Chrome is a fast, secure, simple, and versatile web browser that supports 128 bit encryption. It offers a lot of features and options that make web browsing easier, faster, safer, and more enjoyable. It also protects your web browsing from eavesdropping, tampering, tracking, and blocking. To download and install Google Chrome with 128 bit encryption, you just need to follow the steps outlined in this article. To compare Google Chrome with other web browsers, you just need to consider the factors and the table presented in this article.


            We hope that this article has helped you understand how to secure your web browsing with Google Chrome. If you have any questions or comments, please feel free to leave them below. Here are some FAQs that might answer some of your queries:


            FAQs: answers to some common questions about Google Chrome and encryption

            1. What is the difference between 128-bit encryption and 256-bit encryption?

              The difference between 128-bit and 256-bit encryption is the length of the key. A 128-bit key has 2^128 possible values, while a 256-bit key has 2^256 possible values. Generally, the longer the key, the harder it is to break by brute force. However, both 128-bit and 256-bit encryption are considered very strong by modern standards; for a sense of the scale involved, see the short calculation below.
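              The two key spaces can be compared with a quick calculation:

```python
# Number of possible keys for each key length
keys_128 = 2 ** 128
keys_256 = 2 ** 256

print(f"128-bit keys: {keys_128:.3e} possibilities")   # about 3.4e38
print(f"256-bit keys: {keys_256:.3e} possibilities")   # about 1.2e77
print(f"The 256-bit key space is {keys_256 // keys_128:.3e} times larger")
```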
            2. How can I tell if a website is using encryption or not?

              You can tell whether a website is using encryption by looking at the URL bar of your web browser. If the website is using encryption, you will see a lock icon next to the URL and the address will start with https:// instead of http://, which means your traffic to that site is encrypted. If there is no lock icon and the address starts with http://, the site is not using encryption and your traffic to it is sent in the clear, as illustrated by the sketch below.
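              Many sites answer plain http:// requests by redirecting to https://. Below is a small sketch of how to check where an address actually lands, using only the Python standard library; the URL is a placeholder.

```python
from urllib.request import urlopen

URL = "http://example.com"  # placeholder address to test

with urlopen(URL) as response:
    final_url = response.geturl()  # URL after any redirects
    print(f"Requested {URL}, landed on {final_url}")
    if final_url.startswith("https://"):
        print("The page was ultimately served over an encrypted connection")
    else:
        print("The page was served without encryption")
```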
            3. How can I update Google Chrome to the latest version?

              You can update Google Chrome to the latest version by following these steps:

              1. Open Google Chrome and click on the menu icon (three dots) at the top right corner of the browser window.
              2. Select Help from the menu.
              3. Select About Google Chrome from the submenu.
              4. The browser will automatically check for updates and download them if available.
              5. If an update is available, click on Relaunch to apply it. If no update is available, you will see a message saying "Google Chrome is up to date".
            4. How can I clear my browsing data in Google Chrome?

              You can clear your browsing data in Google Chrome by following these steps:

              1. Open Google Chrome and click on the menu icon (three dots) at the top right corner of the browser window.
              2. Select More tools from the menu.
              3. Select Clear browsing data from the submenu.
              4. A new window will open with various options to choose from.
              5. Select the time range and the types of data that you want to clear.
              6. Click on Clear data to confirm your choices.
            5. How can I change my default search engine in Google Chrome?

              You can change your default search engine in Google Chrome by following these steps:

              1. Open Google Chrome and click on the menu icon (three dots) at the top right corner of the browser window.
              2. Select Settings from the menu.
              3. Scroll down and click on Search engine.
              4. You will see a list of available search engines to choose from. Select the one that you want to use as your default search engine.

                  \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py deleted file mode 100644 index ec7a6e07a25acfa978030c65ae7c1d8609163249..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/req/req_set.py +++ /dev/null @@ -1,82 +0,0 @@ -import logging -from collections import OrderedDict -from typing import Dict, List - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.req.req_install import InstallRequirement - -logger = logging.getLogger(__name__) - - -class RequirementSet: - def __init__(self, check_supported_wheels: bool = True) -> None: - """Create a RequirementSet.""" - - self.requirements: Dict[str, InstallRequirement] = OrderedDict() - self.check_supported_wheels = check_supported_wheels - - self.unnamed_requirements: List[InstallRequirement] = [] - - def __str__(self) -> str: - requirements = sorted( - (req for req in self.requirements.values() if not req.comes_from), - key=lambda req: canonicalize_name(req.name or ""), - ) - return " ".join(str(req.req) for req in requirements) - - def __repr__(self) -> str: - requirements = sorted( - self.requirements.values(), - key=lambda req: canonicalize_name(req.name or ""), - ) - - format_string = "<{classname} object; {count} requirement(s): {reqs}>" - return format_string.format( - classname=self.__class__.__name__, - count=len(requirements), - reqs=", ".join(str(req.req) for req in requirements), - ) - - def add_unnamed_requirement(self, install_req: InstallRequirement) -> None: - assert not install_req.name - self.unnamed_requirements.append(install_req) - - def add_named_requirement(self, install_req: InstallRequirement) -> None: - assert install_req.name - - project_name = canonicalize_name(install_req.name) - self.requirements[project_name] = install_req - - def has_requirement(self, name: str) -> bool: - project_name = canonicalize_name(name) - - return ( - project_name in self.requirements - and not self.requirements[project_name].constraint - ) - - def get_requirement(self, name: str) -> InstallRequirement: - project_name = canonicalize_name(name) - - if project_name in self.requirements: - return self.requirements[project_name] - - raise KeyError(f"No project with the name {name!r}") - - @property - def all_requirements(self) -> List[InstallRequirement]: - return self.unnamed_requirements + list(self.requirements.values()) - - @property - def requirements_to_install(self) -> List[InstallRequirement]: - """Return the list of requirements that need to be installed. - - TODO remove this property together with the legacy resolver, since the new - resolver only returns requirements that need to be installed. 
- """ - return [ - install_req - for install_req in self.all_requirements - if not install_req.constraint and not install_req.satisfied_by - ] diff --git a/spaces/tomofi/MMOCR/configs/_base_/recog_pipelines/seg_pipeline.py b/spaces/tomofi/MMOCR/configs/_base_/recog_pipelines/seg_pipeline.py deleted file mode 100644 index 378474dfb5341ec93e73bb61047c43ba72d5e127..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/configs/_base_/recog_pipelines/seg_pipeline.py +++ /dev/null @@ -1,66 +0,0 @@ -img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - -gt_label_convertor = dict( - type='SegConvertor', dict_type='DICT36', with_unknown=True, lower=True) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='RandomPaddingOCR', - max_ratio=[0.15, 0.2, 0.15, 0.2], - box_type='char_quads'), - dict(type='OpencvToPil'), - dict( - type='RandomRotateImageBox', - min_angle=-17, - max_angle=17, - box_type='char_quads'), - dict(type='PilToOpencv'), - dict( - type='ResizeOCR', - height=64, - min_width=64, - max_width=512, - keep_aspect_ratio=True), - dict( - type='OCRSegTargets', - label_convertor=gt_label_convertor, - box_type='char_quads'), - dict(type='RandomRotateTextDet', rotate_ratio=0.5, max_angle=15), - dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), - dict(type='ToTensorOCR'), - dict(type='FancyPCA'), - dict(type='NormalizeOCR', **img_norm_cfg), - dict( - type='CustomFormatBundle', - keys=['gt_kernels'], - visualize=dict(flag=False, boundary_key=None), - call_super=False), - dict( - type='Collect', - keys=['img', 'gt_kernels'], - meta_keys=['filename', 'ori_shape', 'resize_shape']) -] - -test_img_norm_cfg = dict( - mean=[x * 255 for x in img_norm_cfg['mean']], - std=[x * 255 for x in img_norm_cfg['std']]) - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='ResizeOCR', - height=64, - min_width=64, - max_width=None, - keep_aspect_ratio=True), - dict(type='Normalize', **test_img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img'], - meta_keys=[ - 'filename', 'resize_shape', 'img_norm_cfg', 'ori_filename', - 'img_shape', 'ori_shape' - ]) -] diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 230181cbeeb9c070dad926892f62d8f482d0ab1e..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnet101_caffe', - backbone=dict(depth=101)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/necks/__init__.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/necks/__init__.py deleted file mode 100644 index 5bf95580201e7c9197c5aed702ae982357dcbfae..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/necks/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from .bfp import BFP -from .channel_mapper import ChannelMapper -from .dilated_encoder import DilatedEncoder -from .fpg import FPG -from .fpn import FPN -from .fpn_carafe import FPN_CARAFE -from .hrfpn import HRFPN -from .nas_fpn import NASFPN -from .nasfcos_fpn import NASFCOS_FPN -from .pafpn import PAFPN -from .rfp import RFP -from 
.yolo_neck import YOLOV3Neck - -__all__ = [ - 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', - 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder' -] diff --git a/spaces/tosta86/Flowise/Dockerfile b/spaces/tosta86/Flowise/Dockerfile deleted file mode 100644 index 9c0ad22929159b8c4d192856163699570fd27307..0000000000000000000000000000000000000000 --- a/spaces/tosta86/Flowise/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM node:18-alpine -USER root - -# Arguments that can be passed at build time -ARG FLOWISE_PATH=/usr/local/lib/node_modules/flowise -ARG BASE_PATH=/root/.flowise -ARG DATABASE_PATH=$BASE_PATH -ARG APIKEY_PATH=$BASE_PATH -ARG SECRETKEY_PATH=$BASE_PATH -ARG LOG_PATH=$BASE_PATH/logs - -# Install dependencies -RUN apk add --no-cache git python3 py3-pip make g++ build-base cairo-dev pango-dev chromium - -ENV PUPPETEER_SKIP_DOWNLOAD=true -ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser - -# Install Flowise globally -RUN npm install -g flowise - -# Configure Flowise directories using the ARG -RUN mkdir -p $LOG_PATH $FLOWISE_PATH/uploads && chmod -R 777 $LOG_PATH $FLOWISE_PATH - -WORKDIR /data - -CMD ["npx", "flowise", "start"] \ No newline at end of file diff --git a/spaces/trysem/image-matting-app/ppmatting/models/human_matting.py b/spaces/trysem/image-matting-app/ppmatting/models/human_matting.py deleted file mode 100644 index cf315edfa563fe231a119dd15b749c41157c988c..0000000000000000000000000000000000000000 --- a/spaces/trysem/image-matting-app/ppmatting/models/human_matting.py +++ /dev/null @@ -1,454 +0,0 @@ -# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from collections import defaultdict -import time - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddleseg -from paddleseg.models import layers -from paddleseg import utils -from paddleseg.cvlibs import manager - -from ppmatting.models.losses import MRSD - - -def conv_up_psp(in_channels, out_channels, up_sample): - return nn.Sequential( - layers.ConvBNReLU( - in_channels, out_channels, 3, padding=1), - nn.Upsample( - scale_factor=up_sample, mode='bilinear', align_corners=False)) - - -@manager.MODELS.add_component -class HumanMatting(nn.Layer): - """A model for """ - - def __init__(self, - backbone, - pretrained=None, - backbone_scale=0.25, - refine_kernel_size=3, - if_refine=True): - super().__init__() - if if_refine: - if backbone_scale > 0.5: - raise ValueError( - 'Backbone_scale should not be greater than 1/2, but it is {}' - .format(backbone_scale)) - else: - backbone_scale = 1 - - self.backbone = backbone - self.backbone_scale = backbone_scale - self.pretrained = pretrained - self.if_refine = if_refine - if if_refine: - self.refiner = Refiner(kernel_size=refine_kernel_size) - self.loss_func_dict = None - - self.backbone_channels = backbone.feat_channels - ###################### - ### Decoder part - Glance - ###################### - self.psp_module = layers.PPModule( - self.backbone_channels[-1], - 512, - bin_sizes=(1, 3, 5), - dim_reduction=False, - align_corners=False) - self.psp4 = conv_up_psp(512, 256, 2) - self.psp3 = conv_up_psp(512, 128, 4) - self.psp2 = conv_up_psp(512, 64, 8) - self.psp1 = conv_up_psp(512, 64, 16) - # stage 5g - self.decoder5_g = nn.Sequential( - layers.ConvBNReLU( - 512 + self.backbone_channels[-1], 512, 3, padding=1), - layers.ConvBNReLU( - 512, 512, 3, padding=2, dilation=2), - layers.ConvBNReLU( - 512, 256, 3, padding=2, dilation=2), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 4g - self.decoder4_g = nn.Sequential( - layers.ConvBNReLU( - 512, 256, 3, padding=1), - layers.ConvBNReLU( - 256, 256, 3, padding=1), - layers.ConvBNReLU( - 256, 128, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 3g - self.decoder3_g = nn.Sequential( - layers.ConvBNReLU( - 256, 128, 3, padding=1), - layers.ConvBNReLU( - 128, 128, 3, padding=1), - layers.ConvBNReLU( - 128, 64, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 2g - self.decoder2_g = nn.Sequential( - layers.ConvBNReLU( - 128, 128, 3, padding=1), - layers.ConvBNReLU( - 128, 128, 3, padding=1), - layers.ConvBNReLU( - 128, 64, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 1g - self.decoder1_g = nn.Sequential( - layers.ConvBNReLU( - 128, 64, 3, padding=1), - layers.ConvBNReLU( - 64, 64, 3, padding=1), - layers.ConvBNReLU( - 64, 64, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 0g - self.decoder0_g = nn.Sequential( - layers.ConvBNReLU( - 64, 64, 3, padding=1), - layers.ConvBNReLU( - 64, 64, 3, padding=1), - nn.Conv2D( - 64, 3, 3, padding=1)) - - ########################## - ### Decoder part - FOCUS - ########################## - self.bridge_block = nn.Sequential( - layers.ConvBNReLU( - self.backbone_channels[-1], 512, 3, dilation=2, padding=2), - layers.ConvBNReLU( - 512, 512, 3, dilation=2, padding=2), - layers.ConvBNReLU( - 512, 512, 3, dilation=2, padding=2)) - # stage 5f - self.decoder5_f = nn.Sequential( - layers.ConvBNReLU( - 512 + 
self.backbone_channels[-1], 512, 3, padding=1), - layers.ConvBNReLU( - 512, 512, 3, padding=2, dilation=2), - layers.ConvBNReLU( - 512, 256, 3, padding=2, dilation=2), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 4f - self.decoder4_f = nn.Sequential( - layers.ConvBNReLU( - 256 + self.backbone_channels[-2], 256, 3, padding=1), - layers.ConvBNReLU( - 256, 256, 3, padding=1), - layers.ConvBNReLU( - 256, 128, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 3f - self.decoder3_f = nn.Sequential( - layers.ConvBNReLU( - 128 + self.backbone_channels[-3], 128, 3, padding=1), - layers.ConvBNReLU( - 128, 128, 3, padding=1), - layers.ConvBNReLU( - 128, 64, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 2f - self.decoder2_f = nn.Sequential( - layers.ConvBNReLU( - 64 + self.backbone_channels[-4], 128, 3, padding=1), - layers.ConvBNReLU( - 128, 128, 3, padding=1), - layers.ConvBNReLU( - 128, 64, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 1f - self.decoder1_f = nn.Sequential( - layers.ConvBNReLU( - 64 + self.backbone_channels[-5], 64, 3, padding=1), - layers.ConvBNReLU( - 64, 64, 3, padding=1), - layers.ConvBNReLU( - 64, 64, 3, padding=1), - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - # stage 0f - self.decoder0_f = nn.Sequential( - layers.ConvBNReLU( - 64, 64, 3, padding=1), - layers.ConvBNReLU( - 64, 64, 3, padding=1), - nn.Conv2D( - 64, 1 + 1 + 32, 3, padding=1)) - self.init_weight() - - def forward(self, data): - src = data['img'] - src_h, src_w = paddle.shape(src)[2:] - if self.if_refine: - # It is not need when exporting. - if isinstance(src_h, paddle.Tensor): - if (src_h % 4 != 0) or (src_w % 4) != 0: - raise ValueError( - 'The input image must have width and height that are divisible by 4' - ) - - # Downsample src for backbone - src_sm = F.interpolate( - src, - scale_factor=self.backbone_scale, - mode='bilinear', - align_corners=False) - - # Base - fea_list = self.backbone(src_sm) - ########################## - ### Decoder part - GLANCE - ########################## - #psp: N, 512, H/32, W/32 - psp = self.psp_module(fea_list[-1]) - #d6_g: N, 512, H/16, W/16 - d5_g = self.decoder5_g(paddle.concat((psp, fea_list[-1]), 1)) - #d5_g: N, 512, H/8, W/8 - d4_g = self.decoder4_g(paddle.concat((self.psp4(psp), d5_g), 1)) - #d4_g: N, 256, H/4, W/4 - d3_g = self.decoder3_g(paddle.concat((self.psp3(psp), d4_g), 1)) - #d4_g: N, 128, H/2, W/2 - d2_g = self.decoder2_g(paddle.concat((self.psp2(psp), d3_g), 1)) - #d2_g: N, 64, H, W - d1_g = self.decoder1_g(paddle.concat((self.psp1(psp), d2_g), 1)) - #d0_g: N, 3, H, W - d0_g = self.decoder0_g(d1_g) - # The 1st channel is foreground. The 2nd is transition region. The 3rd is background. 
- # glance_sigmoid = F.sigmoid(d0_g) - glance_sigmoid = F.softmax(d0_g, axis=1) - - ########################## - ### Decoder part - FOCUS - ########################## - bb = self.bridge_block(fea_list[-1]) - #bg: N, 512, H/32, W/32 - d5_f = self.decoder5_f(paddle.concat((bb, fea_list[-1]), 1)) - #d5_f: N, 256, H/16, W/16 - d4_f = self.decoder4_f(paddle.concat((d5_f, fea_list[-2]), 1)) - #d4_f: N, 128, H/8, W/8 - d3_f = self.decoder3_f(paddle.concat((d4_f, fea_list[-3]), 1)) - #d3_f: N, 64, H/4, W/4 - d2_f = self.decoder2_f(paddle.concat((d3_f, fea_list[-4]), 1)) - #d2_f: N, 64, H/2, W/2 - d1_f = self.decoder1_f(paddle.concat((d2_f, fea_list[-5]), 1)) - #d1_f: N, 64, H, W - d0_f = self.decoder0_f(d1_f) - #d0_f: N, 1, H, W - focus_sigmoid = F.sigmoid(d0_f[:, 0:1, :, :]) - pha_sm = self.fusion(glance_sigmoid, focus_sigmoid) - err_sm = d0_f[:, 1:2, :, :] - err_sm = paddle.clip(err_sm, 0., 1.) - hid_sm = F.relu(d0_f[:, 2:, :, :]) - - # Refiner - if self.if_refine: - pha = self.refiner( - src=src, pha=pha_sm, err=err_sm, hid=hid_sm, tri=glance_sigmoid) - # Clamp outputs - pha = paddle.clip(pha, 0., 1.) - - if self.training: - logit_dict = { - 'glance': glance_sigmoid, - 'focus': focus_sigmoid, - 'fusion': pha_sm, - 'error': err_sm - } - if self.if_refine: - logit_dict['refine'] = pha - loss_dict = self.loss(logit_dict, data) - return logit_dict, loss_dict - else: - return pha if self.if_refine else pha_sm - - def loss(self, logit_dict, label_dict, loss_func_dict=None): - if loss_func_dict is None: - if self.loss_func_dict is None: - self.loss_func_dict = defaultdict(list) - self.loss_func_dict['glance'].append(nn.NLLLoss()) - self.loss_func_dict['focus'].append(MRSD()) - self.loss_func_dict['cm'].append(MRSD()) - self.loss_func_dict['err'].append(paddleseg.models.MSELoss()) - self.loss_func_dict['refine'].append(paddleseg.models.L1Loss()) - else: - self.loss_func_dict = loss_func_dict - - loss = {} - - # glance loss computation - # get glance label - glance_label = F.interpolate( - label_dict['trimap'], - logit_dict['glance'].shape[2:], - mode='nearest', - align_corners=False) - glance_label_trans = (glance_label == 128).astype('int64') - glance_label_bg = (glance_label == 0).astype('int64') - glance_label = glance_label_trans + glance_label_bg * 2 - loss_glance = self.loss_func_dict['glance'][0]( - paddle.log(logit_dict['glance'] + 1e-6), glance_label.squeeze(1)) - loss['glance'] = loss_glance - - # focus loss computation - focus_label = F.interpolate( - label_dict['alpha'], - logit_dict['focus'].shape[2:], - mode='bilinear', - align_corners=False) - loss_focus = self.loss_func_dict['focus'][0]( - logit_dict['focus'], focus_label, glance_label_trans) - loss['focus'] = loss_focus - - # collaborative matting loss - loss_cm_func = self.loss_func_dict['cm'] - # fusion_sigmoid loss - loss_cm = loss_cm_func[0](logit_dict['fusion'], focus_label) - loss['cm'] = loss_cm - - # error loss - err = F.interpolate( - logit_dict['error'], - label_dict['alpha'].shape[2:], - mode='bilinear', - align_corners=False) - err_label = (F.interpolate( - logit_dict['fusion'], - label_dict['alpha'].shape[2:], - mode='bilinear', - align_corners=False) - label_dict['alpha']).abs() - loss_err = self.loss_func_dict['err'][0](err, err_label) - loss['err'] = loss_err - - loss_all = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss_cm + loss_err - - # refine loss - if self.if_refine: - loss_refine = self.loss_func_dict['refine'][0](logit_dict['refine'], - label_dict['alpha']) - loss['refine'] = loss_refine - loss_all = 
loss_all + loss_refine - - loss['all'] = loss_all - return loss - - def fusion(self, glance_sigmoid, focus_sigmoid): - # glance_sigmoid [N, 3, H, W]. - # In index, 0 is foreground, 1 is transition, 2 is backbone. - # After fusion, the foreground is 1, the background is 0, and the transion is between (0, 1). - index = paddle.argmax(glance_sigmoid, axis=1, keepdim=True) - transition_mask = (index == 1).astype('float32') - fg = (index == 0).astype('float32') - fusion_sigmoid = focus_sigmoid * transition_mask + fg - return fusion_sigmoid - - def init_weight(self): - if self.pretrained is not None: - utils.load_entire_model(self, self.pretrained) - - -class Refiner(nn.Layer): - ''' - Refiner refines the coarse output to full resolution. - - Args: - kernel_size: The convolution kernel_size. Options: [1, 3]. Default: 3. - ''' - - def __init__(self, kernel_size=3): - super().__init__() - if kernel_size not in [1, 3]: - raise ValueError("kernel_size must be in [1, 3]") - - self.kernel_size = kernel_size - - channels = [32, 24, 16, 12, 1] - self.conv1 = layers.ConvBNReLU( - channels[0] + 4 + 3, - channels[1], - kernel_size, - padding=0, - bias_attr=False) - self.conv2 = layers.ConvBNReLU( - channels[1], channels[2], kernel_size, padding=0, bias_attr=False) - self.conv3 = layers.ConvBNReLU( - channels[2] + 3, - channels[3], - kernel_size, - padding=0, - bias_attr=False) - self.conv4 = nn.Conv2D( - channels[3], channels[4], kernel_size, padding=0, bias_attr=True) - - def forward(self, src, pha, err, hid, tri): - ''' - Args: - src: (B, 3, H, W) full resolution source image. - pha: (B, 1, Hc, Wc) coarse alpha prediction. - err: (B, 1, Hc, Hc) coarse error prediction. - hid: (B, 32, Hc, Hc) coarse hidden encoding. - tri: (B, 1, Hc, Hc) trimap prediction. - ''' - h_full, w_full = paddle.shape(src)[2:] - h_half, w_half = h_full // 2, w_full // 2 - h_quat, w_quat = h_full // 4, w_full // 4 - - x = paddle.concat([hid, pha, tri], axis=1) - x = F.interpolate( - x, - paddle.concat((h_half, w_half)), - mode='bilinear', - align_corners=False) - y = F.interpolate( - src, - paddle.concat((h_half, w_half)), - mode='bilinear', - align_corners=False) - - if self.kernel_size == 3: - x = F.pad(x, [3, 3, 3, 3]) - y = F.pad(y, [3, 3, 3, 3]) - - x = self.conv1(paddle.concat([x, y], axis=1)) - x = self.conv2(x) - - if self.kernel_size == 3: - x = F.interpolate(x, paddle.concat((h_full + 4, w_full + 4))) - y = F.pad(src, [2, 2, 2, 2]) - else: - x = F.interpolate( - x, paddle.concat((h_full, w_full)), mode='nearest') - y = src - - x = self.conv3(paddle.concat([x, y], axis=1)) - x = self.conv4(x) - - pha = x - return pha diff --git a/spaces/uragankatrrin/MHN-React/mhnreact/plotutils.py b/spaces/uragankatrrin/MHN-React/mhnreact/plotutils.py deleted file mode 100644 index f0cafff4a65f1afedc504c36ae5c1ced7f65c615..0000000000000000000000000000000000000000 --- a/spaces/uragankatrrin/MHN-React/mhnreact/plotutils.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Author: Philipp Seidl - ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning - Johannes Kepler University Linz -Contact: seidl@ml.jku.at - -Plot utils -""" - -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -from matplotlib import pyplot as plt - -plt.style.use('default') - - -def normal_approx_interval(p_hat, n, z=1.96): - """ approximating the distribution of error about a binomially-distributed observation, {\hat {p)), with a normal distribution - z = 1.96 --> alpha =0.05 - z = 1 --> std - 
https://www.wikiwand.com/en/Binomial_proportion_confidence_interval""" - return z*((p_hat*(1-p_hat))/n)**(1/2) - - -our_colors = { - "lightblue": ( 0/255, 132/255, 187/255), - "red": (217/255, 92/255, 76/255), - "blue": ( 0/255, 132/255, 187/255), - "green": ( 91/255, 167/255, 85/255), - "yellow": (241/255, 188/255, 63/255), - "cyan": ( 79/255, 176/255, 191/255), - "grey": (125/255, 130/255, 140/255), - "lightgreen":(191/255, 206/255, 82/255), - "violett": (174/255, 97/255, 157/255), -} - - -def plot_std(p_hats, n_samples,z=1.96, color=our_colors['red'], alpha=0.2, xs=None): - p_hats = np.array(p_hats) - stds = np.array([normal_approx_interval(p_hats[ii], n_samples[ii], z=z) for ii in range(len(p_hats))]) - xs = range(len(p_hats)) if xs is None else xs - plt.fill_between(xs, p_hats-(stds), p_hats+stds, color=color, alpha=alpha) - #plt.errorbar(range(13), asdf, [normal_approx_interval(asdf[ii], n_samples[ii], z=z) for ii in range(len(asdf))], - # c=our_colors['red'], linestyle='None', marker='.', ecolor=our_colors['red']) - - -def plot_loss(hist): - plt.plot(hist['step'], hist['loss'] ) - plt.plot(hist['steps_valid'], np.array(hist['loss_valid'])) - plt.legend(['train','validation']) - plt.xlabel('update-step') - plt.ylabel('loss (categorical-crossentropy-loss)') - - -def plot_topk(hist, sets=['train', 'valid', 'test'], with_last = 2): - ks = [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100] - baseline_val_res = {1:0.4061, 10:0.6827, 50: 0.7883, 100:0.8400} - plt.plot(list(baseline_val_res.keys()), list(baseline_val_res.values()), 'k.--') - for i in range(1,with_last): - for s in sets: - plt.plot(ks, [hist[f't{k}_acc_{s}'][-i] for k in ks],'.--', alpha=1/i) - plt.xlabel('top-k') - plt.ylabel('Accuracy') - plt.legend(sets) - plt.title('Hopfield-NN') - plt.ylim([-0.02,1]) - - -def plot_nte(hist, dataset='Sm', last_cpt=1, include_bar=True, model_legend='MHN (ours)', - draw_std=True, z=1.96, n_samples=None, group_by_template_fp=False, schwaller_hist=None, fortunato_hist=None): #1.96 for 95%CI - markers = ['.']*4#['1','2','3','4']#['8','P','p','*'] - lw = 2 - ms = 8 - k = 100 - ntes = range(13) - if dataset=='Sm': - basel_values = [0. , 0.38424785, 0.66807858, 0.7916149 , 0.9051132 , - 0.92531258, 0.87295875, 0.94865587, 0.91830721, 0.95993717, - 0.97215858, 0.9896713 , 0.99917817] #old basel_values = [0.0, 0.3882, 0.674, 0.7925, 0.9023, 0.9272, 0.874, 0.947, 0.9185, 0.959, 0.9717, 0.9927, 1.0] - pretr_values = [0.08439423, 0.70743412, 0.85555528, 0.95200267, 0.96513376, - 0.96976397, 0.98373613, 0.99960286, 0.98683919, 0.96684724, - 0.95907246, 0.9839079 , 0.98683919]# old [0.094, 0.711, 0.8584, 0.952, 0.9683, 0.9717, 0.988, 1.0, 1.0, 0.984, 0.9717, 1.0, 1.0] - staticQK = [0.2096, 0.1992, 0.2291, 0.1787, 0.2301, 0.1753, 0.2142, 0.2693, 0.2651, 0.1786, 0.2834, 0.5366, 0.6636] - if group_by_template_fp: - staticQK = [0.2651, 0.2617, 0.261 , 0.2181, 0.2622, 0.2393, 0.2157, 0.2184, 0.2 , 0.225 , 0.2039, 0.4568, 0.5293] - if dataset=='Lg': - pretr_values = [0.03410448, 0.65397054, 0.7254572 , 0.78969294, 0.81329924, - 0.8651173 , 0.86775655, 0.8593128 , 0.88184124, 0.87764794, - 0.89734215, 0.93328846, 0.99531597] - basel_values = [0. 
, 0.62478044, 0.68784314, 0.75089511, 0.77044644, - 0.81229423, 0.82968149, 0.82965544, 0.83778338, 0.83049176, - 0.8662873 , 0.92308414, 1.00042408] - #staticQK = [0.03638, 0.0339 , 0.03732, 0.03506, 0.03717, 0.0331 , 0.03003, 0.03613, 0.0304 , 0.02109, 0.0297 , 0.02632, 0.02217] # on 90k templates - staticQK = [0.006416,0.00686, 0.00616, 0.00825, 0.005085,0.006718,0.01041, 0.0015335,0.006668,0.004673,0.001706,0.02551,0.04074] - if dataset=='Golden': - staticQK = [0]*13 - pretr_values = [0]*13 - basel_values = [0]*13 - - if schwaller_hist: - midx = np.argmin(schwaller_hist['loss_valid']) - basel_values = ([schwaller_hist[f't100_acc_nte_{k}'][midx] for k in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, '>10', '>49']]) - if fortunato_hist: - midx = np.argmin(fortunato_hist['loss_valid']) - pretr_values = ([fortunato_hist[f't100_acc_nte_{k}'][midx] for k in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, '>10', '>49']]) - - #hand_val = [0.0 , 0.4, 0.68, 0.79, 0.89, 0.91, 0.86, 0.9,0.88, 0.9, 0.93] - - - if include_bar: - if dataset=='Sm': - if n_samples is None: - n_samples = [610, 1699, 287, 180, 143, 105, 70, 48, 124, 86, 68, 2539, 1648] - if group_by_template_fp: - n_samples = [460, 993, 433, 243, 183, 117, 102, 87, 110, 80, 103, 3048, 2203] - if dataset=='Lg': - if n_samples is None: - n_samples = [18861, 32226, 4220, 2546, 1573, 1191, 865, 652, 1350, 642, 586, 11638, 4958] #new - if group_by_template_fp: - n_samples = [13923, 17709, 7637, 4322, 2936, 2137, 1586, 1260, 1272, 1044, 829, 21695, 10559] - #[5169, 15904, 2814, 1853, 1238, 966, 766, 609, 1316, 664, 640, 30699, 21471] - #[13424,17246, 7681, 4332, 2844,2129,1698,1269, 1336,1067, 833, 22491, 11202] #grouped fp - plt.bar(range(11+2), np.array(n_samples)/sum(n_samples[:-1]), alpha=0.4, color=our_colors['grey']) - - xti = [*[str(i) for i in range(11)], '>10', '>49'] - asdf = [] - for nte in xti: - try: - asdf.append( hist[f't{k}_acc_nte_{nte}'][-last_cpt]) - except: - asdf.append(None) - - plt.plot(range(13), asdf,f'{markers[3]}--', markersize=ms,c=our_colors['red'], linewidth=lw,alpha=1) - plt.plot(ntes, pretr_values,f'{markers[1]}--', c=our_colors['green'], - linewidth=lw, alpha=1,markersize=ms) #old [0.08, 0.7, 0.85, 0.9, 0.91, 0.95, 0.98, 0.97,0.98, 1, 1] - plt.plot(ntes, basel_values,f'{markers[0]}--',linewidth=lw, - c=our_colors['blue'], markersize=ms,alpha=1) - plt.plot(range(len(staticQK)), staticQK, f'{markers[2]}--',markersize=ms,c=our_colors['yellow'],linewidth=lw, alpha=1) - - plt.title(f'USPTO-{dataset}') - plt.xlabel('number of training examples') - plt.ylabel('top-100 test-accuracy') - plt.legend([model_legend, 'Fortunato et al.','FNN baseline',"FPM baseline", #static${\\xi X}: \\dfrac{|{\\xi} \\cap {X}|}{|{X}|}$ - 'test sample proportion']) - - if draw_std: - alpha=0.2 - plot_std(asdf, n_samples, z=z, color=our_colors['red'], alpha=alpha) - plot_std(pretr_values, n_samples, z=z, color=our_colors['green'], alpha=alpha) - plot_std(basel_values, n_samples, z=z, color=our_colors['blue'], alpha=alpha) - plot_std(staticQK, n_samples, z=z, color=our_colors['yellow'], alpha=alpha) - - - plt.xticks(range(13),xti); - plt.yticks(np.arange(0,1.05,0.1)) - plt.grid('on', alpha=0.3) \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bibcam 12yo Boy With Man 46 [BETTER].md b/spaces/usbethFlerru/sovits-modelsV2/example/Bibcam 12yo Boy With Man 46 [BETTER].md deleted file mode 100644 index ee85260e6c8d52e133026f2352beef619f683835..0000000000000000000000000000000000000000 --- 
a/spaces/usbethFlerru/sovits-modelsV2/example/Bibcam 12yo Boy With Man 46 [BETTER].md +++ /dev/null @@ -1,20 +0,0 @@ - -

                  \ No newline at end of file diff --git a/spaces/vict0rsch/climateGAN/figures/metrics.py b/spaces/vict0rsch/climateGAN/figures/metrics.py deleted file mode 100644 index 0b165eeeb3eb6bf975dd91211dbf6349590156ad..0000000000000000000000000000000000000000 --- a/spaces/vict0rsch/climateGAN/figures/metrics.py +++ /dev/null @@ -1,676 +0,0 @@ -""" -This scripts plots examples of the images that get best and worse metrics -""" -print("Imports...", end="") -import os -import sys -from argparse import ArgumentParser -from pathlib import Path - -import matplotlib.patches as mpatches -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import yaml -from imageio import imread -from skimage.color import rgba2rgb -from sklearn.metrics.pairwise import euclidean_distances - -sys.path.append("../") - -from climategan.data import encode_mask_label -from climategan.eval_metrics import edges_coherence_std_min -from eval_masker import crop_and_resize - -# ----------------------- -# ----- Constants ----- -# ----------------------- - -# Metrics -metrics = ["error", "f05", "edge_coherence"] - -dict_metrics = { - "names": { - "tpr": "TPR, Recall, Sensitivity", - "tnr": "TNR, Specificity, Selectivity", - "fpr": "FPR", - "fpt": "False positives relative to image size", - "fnr": "FNR, Miss rate", - "fnt": "False negatives relative to image size", - "mpr": "May positive rate (MPR)", - "mnr": "May negative rate (MNR)", - "accuracy": "Accuracy (ignoring may)", - "error": "Error", - "f05": "F05 score", - "precision": "Precision", - "edge_coherence": "Edge coherence", - "accuracy_must_may": "Accuracy (ignoring cannot)", - }, - "key_metrics": ["error", "f05", "edge_coherence"], -} - - -# Colors -colorblind_palette = sns.color_palette("colorblind") -color_cannot = colorblind_palette[1] -color_must = colorblind_palette[2] -color_may = colorblind_palette[7] -color_pred = colorblind_palette[4] - -icefire = sns.color_palette("icefire", as_cmap=False, n_colors=5) -color_tp = icefire[0] -color_tn = icefire[1] -color_fp = icefire[4] -color_fn = icefire[3] - - -def parsed_args(): - """ - Parse and returns command-line args - - Returns: - argparse.Namespace: the parsed arguments - """ - parser = ArgumentParser() - parser.add_argument( - "--input_csv", - default="ablations_metrics_20210311.csv", - type=str, - help="CSV containing the results of the ablation study", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - help="Output directory", - ) - parser.add_argument( - "--models_log_path", - default=None, - type=str, - help="Path containing the log files of the models", - ) - parser.add_argument( - "--masker_test_set_dir", - default=None, - type=str, - help="Directory containing the test images", - ) - parser.add_argument( - "--best_model", - default="dada, msd_spade, pseudo", - type=str, - help="The string identifier of the best model", - ) - parser.add_argument( - "--dpi", - default=200, - type=int, - help="DPI for the output images", - ) - parser.add_argument( - "--alpha", - default=0.5, - type=float, - help="Transparency of labels shade", - ) - parser.add_argument( - "--percentile", - default=0.05, - type=float, - help="Transparency of labels shade", - ) - parser.add_argument( - "--seed", - default=None, - type=int, - help="Bootstrap random seed, for reproducibility", - ) - parser.add_argument( - "--no_images", - action="store_true", - default=False, - help="Do not generate images", - ) - - return parser.parse_args() - - -def map_color(arr, 
input_color, output_color, rtol=1e-09): - """ - Maps one color to another - """ - input_color_arr = np.tile(input_color, (arr.shape[:2] + (1,))) - output = arr.copy() - output[np.all(np.isclose(arr, input_color_arr, rtol=rtol), axis=2)] = output_color - return output - - -def plot_labels(ax, img, label, img_id, do_legend): - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (255, 0, 0), color_cannot) - label_colmap = map_color(label_colmap, (0, 0, 255), color_must) - label_colmap = map_color(label_colmap, (0, 0, 0), color_may) - - ax.imshow(img) - ax.imshow(label_colmap, alpha=0.5) - ax.axis("off") - - # Annotation - ax.annotate( - xy=(0.05, 0.95), - xycoords="axes fraction", - xytext=(0.05, 0.95), - textcoords="axes fraction", - text=img_id, - fontsize="x-large", - verticalalignment="top", - color="white", - ) - - # Legend - if do_legend: - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_must, label="must", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_may, label="must", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_cannot, label="must", linewidth=lw, alpha=0.66 - ) - ) - labels = ["Must-be-flooded", "May-be-flooded", "Cannot-be-flooded"] - ax.legend( - handles=handles, - labels=labels, - bbox_to_anchor=(0.0, 1.0, 1.0, 0.075), - ncol=3, - mode="expand", - fontsize="xx-small", - frameon=False, - ) - - -def plot_pred(ax, img, pred, img_id, do_legend): - pred = np.tile(np.expand_dims(pred, axis=2), reps=(1, 1, 3)) - - pred_colmap = pred.astype(float) - pred_colmap = map_color(pred_colmap, (1, 1, 1), color_pred) - pred_colmap_ma = np.ma.masked_not_equal(pred_colmap, color_pred) - pred_colmap_ma = pred_colmap_ma.mask * img + pred_colmap_ma - - ax.imshow(img) - ax.imshow(pred_colmap_ma, alpha=0.5) - ax.axis("off") - - # Annotation - ax.annotate( - xy=(0.05, 0.95), - xycoords="axes fraction", - xytext=(0.05, 0.95), - textcoords="axes fraction", - text=img_id, - fontsize="x-large", - verticalalignment="top", - color="white", - ) - - # Legend - if do_legend: - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_pred, label="must", linewidth=lw, alpha=0.66) - ) - labels = ["Prediction"] - ax.legend( - handles=handles, - labels=labels, - bbox_to_anchor=(0.0, 1.0, 1.0, 0.075), - ncol=3, - mode="expand", - fontsize="xx-small", - frameon=False, - ) - - -def plot_correct_incorrect(ax, img_filename, img, label, img_id, do_legend): - # FP - fp_map = imread( - model_path / "eval-metrics/fp" / "{}_fp.png".format(Path(img_filename).stem) - ) - fp_map = np.tile(np.expand_dims(fp_map, axis=2), reps=(1, 1, 3)) - - fp_map_colmap = fp_map.astype(float) - fp_map_colmap = map_color(fp_map_colmap, (1, 1, 1), color_fp) - - # FN - fn_map = imread( - model_path / "eval-metrics/fn" / "{}_fn.png".format(Path(img_filename).stem) - ) - fn_map = np.tile(np.expand_dims(fn_map, axis=2), reps=(1, 1, 3)) - - fn_map_colmap = fn_map.astype(float) - fn_map_colmap = map_color(fn_map_colmap, (1, 1, 1), color_fn) - - # TP - tp_map = imread( - model_path / "eval-metrics/tp" / "{}_tp.png".format(Path(img_filename).stem) - ) - tp_map = np.tile(np.expand_dims(tp_map, axis=2), reps=(1, 1, 3)) - - tp_map_colmap = tp_map.astype(float) - tp_map_colmap = map_color(tp_map_colmap, (1, 1, 1), color_tp) - - # TN - tn_map = imread( - model_path / "eval-metrics/tn" / "{}_tn.png".format(Path(img_filename).stem) - ) - tn_map = np.tile(np.expand_dims(tn_map, axis=2), reps=(1, 1, 3)) - - tn_map_colmap = 
tn_map.astype(float) - tn_map_colmap = map_color(tn_map_colmap, (1, 1, 1), color_tn) - - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (0, 0, 0), color_may) - label_colmap_ma = np.ma.masked_not_equal(label_colmap, color_may) - label_colmap_ma = label_colmap_ma.mask * img + label_colmap_ma - - # Combine masks - maps = fp_map_colmap + fn_map_colmap + tp_map_colmap + tn_map_colmap - maps_ma = np.ma.masked_equal(maps, (0, 0, 0)) - maps_ma = maps_ma.mask * img + maps_ma - - ax.imshow(img) - ax.imshow(label_colmap_ma, alpha=0.5) - ax.imshow(maps_ma, alpha=0.5) - ax.axis("off") - - # Annotation - ax.annotate( - xy=(0.05, 0.95), - xycoords="axes fraction", - xytext=(0.05, 0.95), - textcoords="axes fraction", - text=img_id, - fontsize="x-large", - verticalalignment="top", - color="white", - ) - - # Legend - if do_legend: - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_tp, label="TP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_tn, label="TN", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_fp, label="FP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_fn, label="FN", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_may, label="May-be-flooded", linewidth=lw, alpha=0.66 - ) - ) - labels = ["TP", "TN", "FP", "FN", "May-be-flooded"] - ax.legend( - handles=handles, - labels=labels, - bbox_to_anchor=(0.0, 1.0, 1.0, 0.075), - ncol=5, - mode="expand", - fontsize="xx-small", - frameon=False, - ) - - -def plot_edge_coherence(ax, img, label, pred, img_id, do_legend): - pred = np.tile(np.expand_dims(pred, axis=2), reps=(1, 1, 3)) - - ec, pred_ec, label_ec = edges_coherence_std_min( - np.squeeze(pred[:, :, 0]), np.squeeze(encode_mask_label(label, "flood")) - ) - - ################## - # Edge distances # - ################## - - # Location of edges - pred_ec_coord = np.argwhere(pred_ec > 0) - label_ec_coord = np.argwhere(label_ec > 0) - - # Normalized pairwise distances between pred and label - dist_mat = np.divide( - euclidean_distances(pred_ec_coord, label_ec_coord), pred_ec.shape[0] - ) - - # Standard deviation of the minimum distance from pred to label - min_dist = np.min(dist_mat, axis=1) # noqa: F841 - - ############# - # Make plot # - ############# - - pred_ec = np.tile( - np.expand_dims(np.asarray(pred_ec > 0, dtype=float), axis=2), reps=(1, 1, 3) - ) - pred_ec_colmap = map_color(pred_ec, (1, 1, 1), color_pred) - pred_ec_colmap_ma = np.ma.masked_not_equal(pred_ec_colmap, color_pred) # noqa: F841 - - label_ec = np.tile( - np.expand_dims(np.asarray(label_ec > 0, dtype=float), axis=2), reps=(1, 1, 3) - ) - label_ec_colmap = map_color(label_ec, (1, 1, 1), color_must) - label_ec_colmap_ma = np.ma.masked_not_equal( # noqa: F841 - label_ec_colmap, color_must - ) - - # Combined pred and label edges - combined_ec = pred_ec_colmap + label_ec_colmap - combined_ec_ma = np.ma.masked_equal(combined_ec, (0, 0, 0)) - combined_ec_img = combined_ec_ma.mask * img + combined_ec - - # Pred - pred_colmap = pred.astype(float) - pred_colmap = map_color(pred_colmap, (1, 1, 1), color_pred) - pred_colmap_ma = np.ma.masked_not_equal(pred_colmap, color_pred) - - # Must - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (0, 0, 255), color_must) - label_colmap_ma = np.ma.masked_not_equal(label_colmap, color_must) - - # TP - tp_map = imread( - model_path / "eval-metrics/tp" / 
"{}_tp.png".format(Path(srs_sel.filename).stem) - ) - tp_map = np.tile(np.expand_dims(tp_map, axis=2), reps=(1, 1, 3)) - tp_map_colmap = tp_map.astype(float) - tp_map_colmap = map_color(tp_map_colmap, (1, 1, 1), color_tp) - tp_map_colmap_ma = np.ma.masked_not_equal(tp_map_colmap, color_tp) - - # Combination - comb_pred = ( - (pred_colmap_ma.mask ^ tp_map_colmap_ma.mask) - & tp_map_colmap_ma.mask - & combined_ec_ma.mask - ) * pred_colmap - comb_label = ( - (label_colmap_ma.mask ^ pred_colmap_ma.mask) - & pred_colmap_ma.mask - & combined_ec_ma.mask - ) * label_colmap - comb_tp = combined_ec_ma.mask * tp_map_colmap.copy() - combined = comb_tp + comb_label + comb_pred - combined_ma = np.ma.masked_equal(combined, (0, 0, 0)) - combined_ma = combined_ma.mask * combined_ec_img + combined_ma - - ax.imshow(combined_ec_img, alpha=1) - ax.imshow(combined_ma, alpha=0.5) - ax.axis("off") - - # Plot lines - idx_sort_x = np.argsort(pred_ec_coord[:, 1]) - offset = 100 - for idx in range(offset, pred_ec_coord.shape[0], offset): - y0, x0 = pred_ec_coord[idx_sort_x[idx], :] - argmin = np.argmin(dist_mat[idx_sort_x[idx]]) - y1, x1 = label_ec_coord[argmin, :] - ax.plot([x0, x1], [y0, y1], color="white", linewidth=0.5) - - # Annotation - ax.annotate( - xy=(0.05, 0.95), - xycoords="axes fraction", - xytext=(0.05, 0.95), - textcoords="axes fraction", - text=img_id, - fontsize="x-large", - verticalalignment="top", - color="white", - ) - # Legend - if do_legend: - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_tp, label="TP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_pred, label="pred", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_must, label="Must-be-flooded", linewidth=lw, alpha=0.66 - ) - ) - labels = ["TP", "Prediction", "Must-be-flooded"] - ax.legend( - handles=handles, - labels=labels, - bbox_to_anchor=(0.0, 1.0, 1.0, 0.075), - ncol=3, - mode="expand", - fontsize="xx-small", - frameon=False, - ) - - -def plot_images_metric(axes, metric, img_filename, img_id, do_legend): - - # Read images - img_path = imgs_orig_path / img_filename - label_path = labels_path / "{}_labeled.png".format(Path(img_filename).stem) - img, label = crop_and_resize(img_path, label_path) - img = rgba2rgb(img) if img.shape[-1] == 4 else img / 255.0 - pred = imread( - model_path / "eval-metrics/pred" / "{}_pred.png".format(Path(img_filename).stem) - ) - - # Label - plot_labels(axes[0], img, label, img_id, do_legend) - - # Prediction - plot_pred(axes[1], img, pred, img_id, do_legend) - - # Correct / incorrect - if metric in ["error", "f05"]: - plot_correct_incorrect(axes[2], img_filename, img, label, img_id, do_legend) - # Edge coherence - elif metric == "edge_coherence": - plot_edge_coherence(axes[2], img, label, pred, img_id, do_legend) - else: - raise ValueError - - -def scatterplot_metrics_pair(ax, df, x_metric, y_metric, dict_images): - - sns.scatterplot(data=df, x=x_metric, y=y_metric, ax=ax) - - # Set X-label - ax.set_xlabel(dict_metrics["names"][x_metric], rotation=0, fontsize="medium") - - # Set Y-label - ax.set_ylabel(dict_metrics["names"][y_metric], rotation=90, fontsize="medium") - - # Change spines - sns.despine(ax=ax, left=True, bottom=True) - - annotate_scatterplot(ax, dict_images, x_metric, y_metric) - - -def scatterplot_metrics(ax, df, dict_images): - - sns.scatterplot(data=df, x="error", y="f05", hue="edge_coherence", ax=ax) - - # Set X-label - ax.set_xlabel(dict_metrics["names"]["error"], rotation=0, fontsize="medium") 
- - # Set Y-label - ax.set_ylabel(dict_metrics["names"]["f05"], rotation=90, fontsize="medium") - - annotate_scatterplot(ax, dict_images, "error", "f05") - - # Change spines - sns.despine(ax=ax, left=True, bottom=True) - - # Set XY limits - xlim = ax.get_xlim() - ylim = ax.get_ylim() - ax.set_xlim([0.0, xlim[1]]) - ax.set_ylim([ylim[0], 1.0]) - - -def annotate_scatterplot(ax, dict_images, x_metric, y_metric, offset=0.1): - xlim = ax.get_xlim() - ylim = ax.get_ylim() - x_len = xlim[1] - xlim[0] - y_len = ylim[1] - ylim[0] - x_th = xlim[1] - x_len / 2.0 - y_th = ylim[1] - y_len / 2.0 - for text, d in dict_images.items(): - x = d[x_metric] - y = d[y_metric] - x_text = x + x_len * offset if x < x_th else x - x_len * offset - y_text = y + y_len * offset if y < y_th else y - y_len * offset - ax.annotate( - xy=(x, y), - xycoords="data", - xytext=(x_text, y_text), - textcoords="data", - text=text, - arrowprops=dict(facecolor="black", shrink=0.05), - fontsize="medium", - color="black", - ) - - -if __name__ == "__main__": - # ----------------------------- - # ----- Parse arguments ----- - # ----------------------------- - args = parsed_args() - print("Args:\n" + "\n".join([f" {k:20}: {v}" for k, v in vars(args).items()])) - - # Determine output dir - if args.output_dir is None: - output_dir = Path(os.environ["SLURM_TMPDIR"]) - else: - output_dir = Path(args.output_dir) - if not output_dir.exists(): - output_dir.mkdir(parents=True, exist_ok=False) - - # Store args - output_yml = output_dir / "labels.yml" - with open(output_yml, "w") as f: - yaml.dump(vars(args), f) - - # Data dirs - imgs_orig_path = Path(args.masker_test_set_dir) / "imgs" - labels_path = Path(args.masker_test_set_dir) / "labels" - - # Read CSV - df = pd.read_csv(args.input_csv, index_col="model_img_idx") - - # Select best model - df = df.loc[df.model_feats == args.best_model] - v_key, model_dir = df.model.unique()[0].split("/") - model_path = Path(args.models_log_path) / "ablation-{}".format(v_key) / model_dir - - # Set up plot - sns.reset_orig() - sns.set(style="whitegrid") - plt.rcParams.update({"font.family": "serif"}) - plt.rcParams.update( - { - "font.serif": [ - "Computer Modern Roman", - "Times New Roman", - "Utopia", - "New Century Schoolbook", - "Century Schoolbook L", - "ITC Bookman", - "Bookman", - "Times", - "Palatino", - "Charter", - "serif" "Bitstream Vera Serif", - "DejaVu Serif", - ] - } - ) - - if args.seed: - np.random.seed(args.seed) - img_ids = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - dict_images = {} - idx = 0 - for metric in metrics: - - fig, axes = plt.subplots(nrows=2, ncols=3, dpi=200, figsize=(18, 12)) - - # Select best - if metric == "error": - ascending = True - else: - ascending = False - idx_rand = np.random.permutation(int(args.percentile * len(df)))[0] - srs_sel = df.sort_values(by=metric, ascending=ascending).iloc[idx_rand] - img_id = img_ids[idx] - dict_images.update({img_id: srs_sel}) - - # Read images - img_filename = srs_sel.filename - - if not args.no_images: - axes_row = axes[0, :] - plot_images_metric(axes_row, metric, img_filename, img_id, do_legend=True) - - idx += 1 - - # Select worst - if metric == "error": - ascending = False - else: - ascending = True - idx_rand = np.random.permutation(int(args.percentile * len(df)))[0] - srs_sel = df.sort_values(by=metric, ascending=ascending).iloc[idx_rand] - img_id = img_ids[idx] - dict_images.update({img_id: srs_sel}) - - # Read images - img_filename = srs_sel.filename - - if not args.no_images: - axes_row = axes[1, :] - plot_images_metric(axes_row, metric, 
img_filename, img_id, do_legend=False) - - idx += 1 - - # Save figure - output_fig = output_dir / "{}.png".format(metric) - fig.savefig(output_fig, dpi=fig.dpi, bbox_inches="tight") - - fig = plt.figure(dpi=200) - scatterplot_metrics(fig.gca(), df, dict_images) - - # fig, axes = plt.subplots(nrows=1, ncols=3, dpi=200, figsize=(18, 5)) - # - # scatterplot_metrics_pair(axes[0], df, 'error', 'f05', dict_images) - # scatterplot_metrics_pair(axes[1], df, 'error', 'edge_coherence', dict_images) - # scatterplot_metrics_pair(axes[2], df, 'f05', 'edge_coherence', dict_images) - # - output_fig = output_dir / "scatterplots.png" - fig.savefig(output_fig, dpi=fig.dpi, bbox_inches="tight") diff --git a/spaces/vinay123/panoptic-segment-anything/segment_anything/setup.py b/spaces/vinay123/panoptic-segment-anything/segment_anything/setup.py deleted file mode 100644 index 2c0986317eb576a14ec774205c88fdee3cc6c0b3..0000000000000000000000000000000000000000 --- a/spaces/vinay123/panoptic-segment-anything/segment_anything/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from setuptools import find_packages, setup - -setup( - name="segment_anything", - version="1.0", - install_requires=[], - packages=find_packages(exclude="notebooks"), - extras_require={ - "all": ["matplotlib", "pycocotools", "opencv-python", "onnx", "onnxruntime"], - "dev": ["flake8", "isort", "black", "mypy"], - }, -) diff --git a/spaces/vumichien/Generate_human_motion/pyrender/tests/unit/test_lights.py b/spaces/vumichien/Generate_human_motion/pyrender/tests/unit/test_lights.py deleted file mode 100644 index ffde856b21e8cce9532f0308fcd1c7eb2d1eba90..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/pyrender/tests/unit/test_lights.py +++ /dev/null @@ -1,104 +0,0 @@ -import numpy as np -import pytest - -from pyrender import (DirectionalLight, SpotLight, PointLight, Texture, - PerspectiveCamera, OrthographicCamera) -from pyrender.constants import SHADOW_TEX_SZ - - -def test_directional_light(): - - d = DirectionalLight() - assert d.name is None - assert np.all(d.color == 1.0) - assert d.intensity == 1.0 - - d.name = 'direc' - with pytest.raises(ValueError): - d.color = None - with pytest.raises(TypeError): - d.intensity = None - - d = DirectionalLight(color=[0.0, 0.0, 0.0]) - assert np.all(d.color == 0.0) - - d._generate_shadow_texture() - st = d.shadow_texture - assert isinstance(st, Texture) - assert st.width == st.height == SHADOW_TEX_SZ - - sc = d._get_shadow_camera(scene_scale=5.0) - assert isinstance(sc, OrthographicCamera) - assert sc.xmag == sc.ymag == 5.0 - assert sc.znear == 0.01 * 5.0 - assert sc.zfar == 10 * 5.0 - - -def test_spot_light(): - - s = SpotLight() - assert s.name is None - assert np.all(s.color == 1.0) - assert s.intensity == 1.0 - assert s.innerConeAngle == 0.0 - assert s.outerConeAngle == np.pi / 4.0 - assert s.range is None - - with pytest.raises(ValueError): - s.range = -1.0 - - with pytest.raises(ValueError): - s.range = 0.0 - - with pytest.raises(ValueError): - s.innerConeAngle = -1.0 - - with pytest.raises(ValueError): - s.innerConeAngle = np.pi / 3.0 - - with pytest.raises(ValueError): - s.outerConeAngle = -1.0 - - with pytest.raises(ValueError): - s.outerConeAngle = np.pi - - s.range = 5.0 - s.outerConeAngle = np.pi / 2 - 0.05 - s.innerConeAngle = np.pi / 3 - s.innerConeAngle = 0.0 - 
s.outerConeAngle = np.pi / 4.0 - - s._generate_shadow_texture() - st = s.shadow_texture - assert isinstance(st, Texture) - assert st.width == st.height == SHADOW_TEX_SZ - - sc = s._get_shadow_camera(scene_scale=5.0) - assert isinstance(sc, PerspectiveCamera) - assert sc.znear == 0.01 * 5.0 - assert sc.zfar == 10 * 5.0 - assert sc.aspectRatio == 1.0 - assert np.allclose(sc.yfov, np.pi / 16.0 * 9.0) # Plus pi / 16 - - -def test_point_light(): - - s = PointLight() - assert s.name is None - assert np.all(s.color == 1.0) - assert s.intensity == 1.0 - assert s.range is None - - with pytest.raises(ValueError): - s.range = -1.0 - - with pytest.raises(ValueError): - s.range = 0.0 - - s.range = 5.0 - - with pytest.raises(NotImplementedError): - s._generate_shadow_texture() - - with pytest.raises(NotImplementedError): - s._get_shadow_camera(scene_scale=5.0) diff --git a/spaces/vumichien/Img_to_prompt/app.py b/spaces/vumichien/Img_to_prompt/app.py deleted file mode 100644 index 8af9c9927403093d521f45e57793e0108b0edfc7..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Img_to_prompt/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import torch -import requests -from PIL import Image -from matplotlib import pyplot as plt -import numpy as np -import pandas as pd - -from lavis.common.gradcam import getAttMap -from lavis.models import load_model_and_preprocess - -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoModelForSeq2SeqLM -import gradio as gr -import torch, gc -from gpuinfo import GPUInfo -import psutil -import time - -def prepare_data(image, question): - gc.collect() - torch.cuda.empty_cache() - image = vis_processors["eval"](image).unsqueeze(0).to(device) - question = txt_processors["eval"](question) - samples = {"image": image, "text_input": [question]} - return samples - -def running_inf(time_start): - time_end = time.time() - time_diff = time_end - time_start - memory = psutil.virtual_memory() - gpu_utilization, gpu_memory = GPUInfo.gpu_usage() - gpu_utilization = gpu_utilization[0] if len(gpu_utilization) > 0 else 0 - gpu_memory = gpu_memory[0] if len(gpu_memory) > 0 else 0 - system_info = f""" - *Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB.* - *Processing time: {time_diff:.5} seconds.* - *GPU Utilization: {gpu_utilization}%, GPU Memory: {gpu_memory}MiB.* - """ - return system_info - -def gradcam_attention(image, question): - dst_w = 720 - samples = prepare_data(image, question) - samples = model.forward_itm(samples=samples) - - w, h = image.size - scaling_factor = dst_w / w - - resized_img = image.resize((int(w * scaling_factor), int(h * scaling_factor))) - norm_img = np.float32(resized_img) / 255 - gradcam = samples['gradcams'].reshape(24,24) - - avg_gradcam = getAttMap(norm_img, gradcam, blur=True) - return (avg_gradcam * 255).astype(np.uint8) - -def generate_cap(image, question, cap_number): - time_start = time.time() - samples = prepare_data(image, question) - samples = model.forward_itm(samples=samples) - samples = model.forward_cap(samples=samples, num_captions=cap_number, num_patches=5) - return pd.DataFrame({'Caption': samples['captions'][0][:cap_number]}), running_inf(time_start) - -def postprocess(text): - for i, ans in enumerate(text): - for j, w in enumerate(ans): - if w == '.' 
or w == '\n': - ans = ans[:j].lower() - break - return ans - -def generate_answer(image, question): - time_start = time.time() - samples = prepare_data(image, question) - samples = model.forward_itm(samples=samples) - samples = model.forward_cap(samples=samples, num_captions=5, num_patches=20) - samples = model.forward_qa_generation(samples) - Img2Prompt = model.prompts_construction(samples) - Img2Prompt_input = tokenizer(Img2Prompt, padding='longest', truncation=True, return_tensors="pt").to(device) - - outputs = llm_model.generate(input_ids=Img2Prompt_input.input_ids, - attention_mask=Img2Prompt_input.attention_mask, - max_length=20+len(Img2Prompt_input.input_ids[0]), - return_dict_in_generate=True, - output_scores=True - ) - pred_answer = tokenizer.batch_decode(outputs.sequences[:, len(Img2Prompt_input.input_ids[0]):]) - pred_answer = postprocess(pred_answer) - print(pred_answer, type(pred_answer)) - return pred_answer, running_inf(time_start) - -# setup device to use -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -print(device) - -def load_model(model_selection): - model = AutoModelForCausalLM.from_pretrained(model_selection) - tokenizer = AutoTokenizer.from_pretrained(model_selection, use_fast=False) - return model,tokenizer - -# Choose LLM to use -# weights for OPT-350M/OPT-6.7B/OPT-13B/OPT-30B/OPT-66B will download automatically -print("Loading Large Language Model (LLM)...") -llm_model, tokenizer = load_model('facebook/opt-350m') # ~700MB (FP16) -llm_model.to(device) -model, vis_processors, txt_processors = load_model_and_preprocess(name="img2prompt_vqa", model_type="base", is_eval=True, device=device) - - -# ---- Gradio Layout ----- -title = "From Images to Textual Prompts: Zero-shot VQA with Frozen Large Language Models" -df_init = pd.DataFrame(columns=['Caption']) -raw_image = gr.Image(label='Input image', type="pil") -question = gr.Textbox(label="Input question", lines=1, interactive=True) -text_output = gr.Textbox(label="Output Answer") -demo = gr.Blocks(title=title) -demo.encrypt = False -cap_df = gr.DataFrame(value=df_init, label="Caption dataframe", row_count=(0, "dynamic"), max_rows = 20, wrap=True, overflow_row_behaviour='paginate') -memory = psutil.virtual_memory() -system_info = gr.Markdown(f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*") - -with demo: - with gr.Row(): - gr.Markdown(''' -
                  From Images to Textual Prompts: Zero-shot VQA with Frozen Large Language Models
                  - ''') - with gr.Row(): - gr.Markdown(''' - ### How to use this space - ##### 1. Upload your image and fill your question - ##### 2. Creating caption from your image - ##### 3. Answering your question based on uploaded image - ''') - with gr.Row(): - with gr.Column(): - raw_image.render() - with gr.Column(): - question.render() - number_cap = gr.Number(precision=0, value=5, label="Selected number of caption you want to generate", interactive=True) - with gr.Row(): - with gr.Column(): - cap_btn = gr.Button("Generate caption") - cap_btn.click(generate_cap, [raw_image, question, number_cap], [cap_df, system_info]) - with gr.Column(): - anws_btn = gr.Button("Answer") - anws_btn.click(generate_answer, [raw_image, question], outputs=[text_output, system_info]) - with gr.Row(): - with gr.Column(): - # gradcam_btn = gr.Button("Generate Gradcam") - # gradcam_btn.click(gradcam_attention, [raw_image, question], outputs=[avg_gradcam]) - cap_df.render() - with gr.Column(): - text_output.render() - system_info.render() - with gr.Row(): - examples = gr.Examples( - examples= - [["image1.jpg", "What type of bird is this?"], - ["image2.jpg", "What type of bike is on the ground?"], - ["image3.jpg", "What is the person in the photo wearing?"]], - label="Examples", - inputs=[raw_image, question] - ) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/ball_query.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/ball_query.py deleted file mode 100644 index d0466847c6e5c1239e359a0397568413ebc1504a..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/ball_query.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['ball_query_forward']) - - -class BallQuery(Function): - """Find nearby points in spherical space.""" - - @staticmethod - def forward(ctx, min_radius: float, max_radius: float, sample_num: int, - xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor: - """ - Args: - min_radius (float): minimum radius of the balls. - max_radius (float): maximum radius of the balls. - sample_num (int): maximum number of features in the balls. - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) centers of the ball query. - - Returns: - Tensor: (B, npoint, nsample) tensor with the indices of - the features that form the query balls. 
- """ - assert center_xyz.is_contiguous() - assert xyz.is_contiguous() - assert min_radius < max_radius - - B, N, _ = xyz.size() - npoint = center_xyz.size(1) - idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int) - - ext_module.ball_query_forward( - center_xyz, - xyz, - idx, - b=B, - n=N, - m=npoint, - min_radius=min_radius, - max_radius=max_radius, - nsample=sample_num) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(idx) - return idx - - @staticmethod - def backward(ctx, a=None): - return None, None, None, None - - -ball_query = BallQuery.apply diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/masked_conv.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/masked_conv.py deleted file mode 100644 index cd514cc204c1d571ea5dc7e74b038c0f477a008b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/masked_conv.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['masked_im2col_forward', 'masked_col2im_forward']) - - -class MaskedConv2dFunction(Function): - - @staticmethod - def symbolic(g, features, mask, weight, bias, padding, stride): - return g.op( - 'mmcv::MMCVMaskedConv2d', - features, - mask, - weight, - bias, - padding_i=padding, - stride_i=stride) - - @staticmethod - def forward(ctx, features, mask, weight, bias, padding=0, stride=1): - assert mask.dim() == 3 and mask.size(0) == 1 - assert features.dim() == 4 and features.size(0) == 1 - assert features.size()[2:] == mask.size()[1:] - pad_h, pad_w = _pair(padding) - stride_h, stride_w = _pair(stride) - if stride_h != 1 or stride_w != 1: - raise ValueError( - 'Stride could not only be 1 in masked_conv2d currently.') - out_channel, in_channel, kernel_h, kernel_w = weight.size() - - batch_size = features.size(0) - out_h = int( - math.floor((features.size(2) + 2 * pad_h - - (kernel_h - 1) - 1) / stride_h + 1)) - out_w = int( - math.floor((features.size(3) + 2 * pad_w - - (kernel_h - 1) - 1) / stride_w + 1)) - mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False) - output = features.new_zeros(batch_size, out_channel, out_h, out_w) - if mask_inds.numel() > 0: - mask_h_idx = mask_inds[:, 0].contiguous() - mask_w_idx = mask_inds[:, 1].contiguous() - data_col = features.new_zeros(in_channel * kernel_h * kernel_w, - mask_inds.size(0)) - ext_module.masked_im2col_forward( - features, - mask_h_idx, - mask_w_idx, - data_col, - kernel_h=kernel_h, - kernel_w=kernel_w, - pad_h=pad_h, - pad_w=pad_w) - - masked_output = torch.addmm(1, bias[:, None], 1, - weight.view(out_channel, -1), data_col) - ext_module.masked_col2im_forward( - masked_output, - mask_h_idx, - mask_w_idx, - output, - height=out_h, - width=out_w, - channels=out_channel) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - return (None, ) * 5 - - -masked_conv2d = MaskedConv2dFunction.apply - - -class MaskedConv2d(nn.Conv2d): - """A MaskedConv2d which inherits the official Conv2d. - - The masked forward doesn't implement the backward function and only - supports the stride parameter to be 1 currently. 
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True): - super(MaskedConv2d, - self).__init__(in_channels, out_channels, kernel_size, stride, - padding, dilation, groups, bias) - - def forward(self, input, mask=None): - if mask is None: # fallback to the normal Conv2d - return super(MaskedConv2d, self).forward(input) - else: - return masked_conv2d(input, mask, self.weight, self.bias, - self.padding) diff --git a/spaces/xiantian/123/Dockerfile b/spaces/xiantian/123/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/xiantian/123/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/yaelvinker/CLIPasso/app.py b/spaces/yaelvinker/CLIPasso/app.py deleted file mode 100644 index 8da3ebc675114af99efd12504a770771b2536659..0000000000000000000000000000000000000000 --- a/spaces/yaelvinker/CLIPasso/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch -import gradio as gr -import os - -path = os.getcwd() - -print(path) - -os.system('ls') -os.system('pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 -f https://download.pytorch.org/whl/torch_stable.html') -os.system('pip install git+https://github.com/openai/CLIP.git') -os.system('git clone https://github.com/BachiLi/diffvg') -os.system('ls') -os.chdir('diffvg') -os.system('ls') -os.system('git submodule update --init --recursive') -os.system('python setup.py install') -os.system('cd ..') - -import pydiffvg - -def greet(name): - return "bka" + name + torch.__version__ - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/yderre-aubay/midi-player-demo/src/landing/index.ts b/spaces/yderre-aubay/midi-player-demo/src/landing/index.ts deleted file mode 100644 index db762a68213c59ecf697dbf7bd1b15a25360dfbf..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/landing/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { localized } from "../common/localize/localizedString" - -const localizeElement = (e: Element) => { - const key = e.getAttribute("data-i18n") - if (key !== null) { - const text = localized(key) - if (text !== undefined) { - e.textContent = text - } - } -} - -const localize = () => { - document.querySelectorAll("*[data-i18n]").forEach(localizeElement) - - const title = document.getElementsByTagName("title")[0] - if (title) { - localizeElement(title) - } -} - -window.addEventListener("DOMContentLoaded", (e) => { - console.log("DOM fully loaded and parsed") - localize() -}) diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/inputs/ScrollBar.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/inputs/ScrollBar.tsx deleted file mode 100644 index 98cf0ba56bfb3dd908d152123bb25bb89f3f053c..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/inputs/ScrollBar.tsx +++ /dev/null @@ -1,302 +0,0 @@ -import styled from "@emotion/styled" -import useComponentSize from "@rehooks/component-size" -import ArrowDropUp from "mdi-react/ArrowDropUpIcon" -import React, { FC, useRef } from "react" -import { IPoint } 
from "../../../common/geometry" -import { observeDrag, observeDrag2 } from "../../helpers/observeDrag" - -export const BAR_WIDTH = 17 -const BUTTON_SIZE = 15 -const MIN_THUMB_LENGTH = BAR_WIDTH - -const LONG_PRESS_DELAY = 300 -const LONG_PRESS_INTERVAL = 50 -const LONG_PRESS_SPEED = 0.5 -const SCROLL_BASE_AMOUNT = 20 - -function normalize(v: number): number { - return Math.max(0, Math.min(1, v)) -} - -export interface ScrollBarProps { - children?: React.ReactNode - isVertical: boolean - barLength: number - scrollOffset?: number - contentLength?: number - onScroll?: (scroll: number) => void -} - -const Thumb = styled.div` - box-sizing: border-box; -` - -const Container = styled.div` - height: 100%; - width: 100%; - position: absolute; - display: flex; - - &.vertical { - width: ${BAR_WIDTH}px; - height: 100%; - position: absolute; - top: 0; - right: 0; - flex-direction: column; - - .button-backward .triangle { - transform: rotate(0deg) scale(1.1); - } - - .button-forward .triangle { - transform: rotate(180deg) scale(1.1); - } - } - - &.horizontal { - width: 100%; - height: ${BAR_WIDTH}px; - position: absolute; - bottom: 0; - left: 0; - flex-direction: row; - - & > div { - height: 100%; - } - - .button-backward .triangle { - transform: rotate(-90deg) scale(1.1); - } - - .button-forward .triangle { - transform: rotate(90deg) scale(1.1); - } - } - - .triangle { - flex-grow: 1; - pointer-events: none; - width: 15px; - height: 15px; - } - - .button-backward, - .button-forward { - text-align: center; - align-content: center; - display: flex; - } -` - -const _ScrollBar: React.ForwardRefRenderFunction< - HTMLDivElement, - ScrollBarProps -> = ( - { - isVertical, - barLength, - scrollOffset = 50, - contentLength = 1000, - onScroll, - children, - }, - ref, -) => { - const buttonLength = BUTTON_SIZE - const maxOffset = contentLength - barLength - const maxLength = barLength - buttonLength * 2 - const valueRatio = normalize(barLength / contentLength) - const thumbLength = Math.max(MIN_THUMB_LENGTH, maxLength * valueRatio) - const disabled = maxOffset <= 0 - - let pageForwardLength: number - let pageBackwardLength: number - - if (disabled) { - pageForwardLength = 0 - pageBackwardLength = maxLength - } else { - pageForwardLength = Math.floor( - (maxLength - thumbLength) * normalize(scrollOffset / maxOffset), - ) - pageBackwardLength = Math.floor(maxLength - thumbLength - pageForwardLength) - } - - const className = isVertical ? "vertical" : "horizontal" - const lengthProp = isVertical ? 
"height" : "width" - - const onScroll2 = (scroll: number) => - onScroll?.(Math.min(maxOffset, Math.max(0, scroll))) - - const handleMouseDown = - (delta: number) => (e: React.MouseEvent) => { - e.stopPropagation() - - if (disabled) { - return - } - - const currentTarget = e.target - const startPos = getPoint(e) - - let intervalId = 0 - let scroll = scrollOffset - onScroll2((scroll += delta)) - - const isHoverOnTarget = () => - document.elementFromPoint(startPos.x, startPos.y) === currentTarget - - const startLongPressTimer = (delta: number) => { - // 初回は時間をかける - // Take time for the first time - intervalId = window.setInterval(() => { - clearInterval(intervalId) - - if (!isHoverOnTarget()) { - return - } - - onScroll2((scroll += delta)) - - // 二回目からは素早く繰り返す - // Repeat quickly from the second time - intervalId = window.setInterval(() => { - onScroll2((scroll += delta * LONG_PRESS_SPEED)) - - if (!isHoverOnTarget()) { - stopLongPressTimer() - } - }, LONG_PRESS_INTERVAL) - }, LONG_PRESS_DELAY) - } - - const stopLongPressTimer = () => { - clearInterval(intervalId) - intervalId = 0 - } - - startLongPressTimer(delta) - - observeDrag({ - onMouseMove: (e) => { - if (currentTarget !== e.target) { - stopLongPressTimer() - } - }, - onMouseUp: () => { - stopLongPressTimer() - }, - }) - } - - const onMouseDownThumb = (e: React.MouseEvent) => { - e.stopPropagation() - - if (disabled) { - return - } - - const elm = e.target as HTMLDivElement - const startPos = getPoint(e) - - if (elm.classList.contains("thumb")) { - const startValue = scrollOffset - - observeDrag2(e.nativeEvent, { - onMouseMove: (e, delta) => { - const p = isVertical ? "y" : "x" - const scale = maxOffset / (maxLength - thumbLength) // 移動量とスクロール量の補正値 -> Correction value of movement amount and scroll amount - const value = startValue + delta[p] * scale - onScroll2(value) - }, - }) - } - } - - const triangle = - - return ( - -
                  - {triangle} -
                  - {!disabled && ( - - )} -
                  - {triangle} -
                  - {children} - - ) -} - -export const ScrollBar = React.forwardRef(_ScrollBar) - -function getPoint(e: MouseEvent | React.MouseEvent): IPoint { - return { - x: e.pageX, - y: e.pageY, - } -} - -type VerticalScrollBar_Props = Omit -type HorizontalScrollBar_Props = VerticalScrollBar_Props - -const VerticalScrollBar_: FC< - React.PropsWithChildren -> = (props) => { - const ref = useRef(null) - const size = useComponentSize(ref) - return ( - - ) -} - -const HorizontalScrollBar_: FC< - React.PropsWithChildren -> = (props) => { - const ref = useRef(null) - const size = useComponentSize(ref) - return ( - - ) -} - -export type VerticalScrollBarProps = Omit -export type HorizontalScrollBarProps = Omit - -const areEqual = ( - props: VerticalScrollBar_Props, - nextProps: VerticalScrollBar_Props, -) => - props.scrollOffset === nextProps.scrollOffset && - props.contentLength === nextProps.contentLength && - props.onScroll === nextProps.onScroll - -export const VerticalScrollBar = React.memo(VerticalScrollBar_, areEqual) -export const HorizontalScrollBar = React.memo(HorizontalScrollBar_, areEqual) diff --git a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/layers/__init__.py b/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/layers/__init__.py deleted file mode 100644 index 02e0ac58838f16c95f047d26c34a7ef86d473f07..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/layers/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .causal_conv import * # NOQA -from .pqmf import * # NOQA -from .residual_block import * # NOQA -from modules.vocoder.parallel_wavegan.layers.residual_stack import * # NOQA -from .upsample import * # NOQA diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/encodec/feature_extraction_encodec.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/encodec/feature_extraction_encodec.py deleted file mode 100644 index 6f7536a52e9f99deeb97ffc9ef8accbbbed664d2..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/encodec/feature_extraction_encodec.py +++ /dev/null @@ -1,206 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Feature extractor class for EnCodec.""" - -from typing import List, Optional, Union - -import numpy as np - -from ...feature_extraction_sequence_utils import SequenceFeatureExtractor -from ...feature_extraction_utils import BatchFeature -from ...utils import PaddingStrategy, TensorType, logging - - -logger = logging.get_logger(__name__) - - -class EncodecFeatureExtractor(SequenceFeatureExtractor): - r""" - Constructs an EnCodec feature extractor. - - This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains - most of the main methods. Users should refer to this superclass for more information regarding those methods. 
- - Instantiating a feature extractor with the defaults will yield a similar configuration to that of the - [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture. - - Args: - feature_size (`int`, *optional*, defaults to 1): - The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. - sampling_rate (`int`, *optional*, defaults to 24000): - The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). - padding_value (`float`, *optional*, defaults to 0.0): - The value that is used to fill the padding values. - chunk_length_s (`float`, *optional*): - If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. - overlap (`float`, *optional*): - Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following - formulae : `int((1.0 - self.overlap) * self.chunk_length)`. - """ - - model_input_names = ["input_values", "padding_mask"] - - def __init__( - self, - feature_size: int = 1, - sampling_rate: int = 24000, - padding_value: float = 0.0, - chunk_length_s: float = None, - overlap: float = None, - **kwargs, - ): - super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) - self.chunk_length_s = chunk_length_s - self.overlap = overlap - - # This is a property because you might want to change the chunk_length_s on the fly - @property - def chunk_length(self) -> Optional[int]: - if self.chunk_length_s is None: - return None - else: - return int(self.chunk_length_s * self.sampling_rate) - - # This is a property because you might want to change the chunk_length_s on the fly - @property - def chunk_stride(self) -> Optional[int]: - if self.chunk_length_s is None or self.overlap is None: - return None - else: - return max(1, int((1.0 - self.overlap) * self.chunk_length)) - - def __call__( - self, - raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], - padding: Optional[Union[bool, str, PaddingStrategy]] = None, - truncation: Optional[bool] = False, - max_length: Optional[int] = None, - return_tensors: Optional[Union[str, TensorType]] = None, - sampling_rate: Optional[int] = None, - ) -> BatchFeature: - """ - Main method to featurize and prepare for the model one or several sequence(s). - - Args: - raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): - The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float - values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape - `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio - (`feature_size = 2`). - padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding - index) among: - - - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum - acceptable input length for the model if that argument is not provided. - - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different - lengths). - truncation (`bool`, *optional*, defaults to `False`): - Activates truncation to cut input sequences longer than `max_length` to `max_length`. 
- max_length (`int`, *optional*): - Maximum length of the returned list and optionally padding length (see above). - return_tensors (`str` or [`~utils.TensorType`], *optional*): - If set, will return tensors instead of list of python integers. Acceptable values are: - - - `'tf'`: Return TensorFlow `tf.constant` objects. - - `'pt'`: Return PyTorch `torch.Tensor` objects. - - `'np'`: Return Numpy `np.ndarray` objects. - sampling_rate (`int`, *optional*): - The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass - `sampling_rate` at the forward call to prevent silent errors. - """ - if sampling_rate is not None: - if sampling_rate != self.sampling_rate: - raise ValueError( - f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" - f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" - f" {self.sampling_rate} and not {sampling_rate}." - ) - else: - logger.warning( - "It is strongly recommended to pass the `sampling_rate` argument to this function. " - "Failing to do so can result in silent errors that might be hard to debug." - ) - - if padding and truncation: - raise ValueError("Both padding and truncation were set. Make sure you only set one.") - elif padding is None: - # by default let's pad the inputs - padding = True - - is_batched = bool( - isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) - ) - - if is_batched: - raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] - elif not is_batched and not isinstance(raw_audio, np.ndarray): - raw_audio = np.asarray(raw_audio, dtype=np.float32) - elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): - raw_audio = raw_audio.astype(np.float32) - - # always return batch - if not is_batched: - raw_audio = [np.asarray(raw_audio).T] - - # verify inputs are valid - for idx, example in enumerate(raw_audio): - if example.ndim > 2: - raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") - if self.feature_size == 1 and example.ndim != 1: - raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") - if self.feature_size == 2 and example.shape[-1] != 2: - raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels") - - padded_inputs = None - input_values = BatchFeature({"input_values": raw_audio}) - if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: - if truncation: - max_length = min(array.shape[0] for array in raw_audio) - nb_step = int(np.floor(max_length / self.chunk_stride)) - max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length - elif padding: - max_length = max(array.shape[0] for array in raw_audio) - nb_step = int(np.ceil(max_length / self.chunk_stride)) - max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length - padding = "max_length" - else: - padded_inputs = input_values - - # normal padding on batch - if padded_inputs is None: - padded_inputs = self.pad( - input_values, - max_length=max_length, - truncation=truncation, - padding=padding, - return_attention_mask=padding, - ) - if padding: - padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask") - - input_values = [] - for example in padded_inputs.pop("input_values"): - if self.feature_size == 1: - example = example[..., None] - input_values.append(example.T) - - padded_inputs["input_values"] = input_values - if 
return_tensors is not None: - padded_inputs = padded_inputs.convert_to_tensors(return_tensors) - - return padded_inputs diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt2/tokenization_gpt2_fast.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt2/tokenization_gpt2_fast.py deleted file mode 100644 index 189a355084088564873b4cc79a105d99fb49b15c..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt2/tokenization_gpt2_fast.py +++ /dev/null @@ -1,186 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tokenization classes for OpenAI GPT.""" - - -import json -from typing import Optional, Tuple - -from tokenizers import pre_tokenizers - -from ...tokenization_utils_base import BatchEncoding -from ...tokenization_utils_fast import PreTrainedTokenizerFast -from ...utils import logging -from .tokenization_gpt2 import GPT2Tokenizer - - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", - "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", - "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", - "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", - "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", - }, - "merges_file": { - "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", - "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", - "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", - "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", - "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", - }, - "tokenizer_file": { - "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", - "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", - "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", - "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", - "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "gpt2": 1024, - "gpt2-medium": 1024, - "gpt2-large": 1024, - "gpt2-xl": 1024, - "distilgpt2": 1024, -} - - -class GPT2TokenizerFast(PreTrainedTokenizerFast): - """ - Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level - Byte-Pair-Encoding. 
- - This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will - be encoded differently whether it is at the beginning of the sentence (without space) or not: - - ```python - >>> from transformers import GPT2TokenizerFast - - >>> tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") - >>> tokenizer("Hello world")["input_ids"] - [15496, 995] - - >>> tokenizer(" Hello world")["input_ids"] - [18435, 995] - ``` - - You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since - the model was not pretrained this way, it might yield a decrease in performance. - - - - When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. - - - - This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should - refer to this superclass for more information regarding those methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - merges_file (`str`): - Path to the merges file. - errors (`str`, *optional*, defaults to `"replace"`): - Paradigm to follow when decoding bytes to UTF-8. See - [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. - unk_token (`str`, *optional*, defaults to `<|endoftext|>`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - bos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The beginning of sequence token. - eos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The end of sequence token. - add_prefix_space (`bool`, *optional*, defaults to `False`): - Whether or not to add an initial space to the input. This allows to treat the leading word just as any - other word. (GPT2 tokenizer detect beginning of words by the preceding space). - trim_offsets (`bool`, *optional*, defaults to `True`): - Whether or not the post-processing step should trim offsets to avoid including whitespaces. - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - slow_tokenizer_class = GPT2Tokenizer - - def __init__( - self, - vocab_file=None, - merges_file=None, - tokenizer_file=None, - unk_token="<|endoftext|>", - bos_token="<|endoftext|>", - eos_token="<|endoftext|>", - add_prefix_space=False, - **kwargs, - ): - super().__init__( - vocab_file, - merges_file, - tokenizer_file=tokenizer_file, - unk_token=unk_token, - bos_token=bos_token, - eos_token=eos_token, - add_prefix_space=add_prefix_space, - **kwargs, - ) - - self.add_bos_token = kwargs.pop("add_bos_token", False) - - pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) - if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: - pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) - pre_tok_state["add_prefix_space"] = add_prefix_space - self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) - - self.add_prefix_space = add_prefix_space - - def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: - is_split_into_words = kwargs.get("is_split_into_words", False) - assert self.add_prefix_space or not is_split_into_words, ( - f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " - "to use it with pretokenized inputs." 
- ) - - return super()._batch_encode_plus(*args, **kwargs) - - def _encode_plus(self, *args, **kwargs) -> BatchEncoding: - is_split_into_words = kwargs.get("is_split_into_words", False) - - assert self.add_prefix_space or not is_split_into_words, ( - f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " - "to use it with pretokenized inputs." - ) - - return super()._encode_plus(*args, **kwargs) - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - files = self._tokenizer.model.save(save_directory, name=filename_prefix) - return tuple(files) - - @property - # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template - def default_chat_template(self): - """ - A simple chat template that ignores role information and just concatenates messages with EOS tokens. - """ - return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}" diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/data/test_coco_evaluation.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/data/test_coco_evaluation.py deleted file mode 100644 index 964f00284df64d3378ebfe32913c07deb5a1f819..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/data/test_coco_evaluation.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import copy -import io -import json -import numpy as np -import os -import tempfile -import unittest -import torch -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval - -from detectron2.data import DatasetCatalog -from detectron2.evaluation import COCOEvaluator -from detectron2.evaluation.fast_eval_api import COCOeval_opt -from detectron2.structures import Boxes, Instances - - -class TestCOCOeval(unittest.TestCase): - def test_fast_eval(self): - # A small set of images/categories from COCO val - # fmt: off - detections = [{"image_id": 139, "category_id": 1, "bbox": [417.3332824707031, 159.27003479003906, 47.66064453125, 143.00193786621094], "score": 0.9949821829795837, "segmentation": {"size": [426, 640], "counts": "Tc`52W=3N0N4aNN^E7]:4XE1g:8kDMT;U100000001O1gE[Nk8h1dFiNY9Z1aFkN]9g2J3NdN`FlN`9S1cFRN07]9g1bFoM6;X9c1cFoM=8R9g1bFQN>3U9Y30O01OO1O001N2O1N1O4L4L5UNoE3V:CVF6Q:@YF9l9@ZF 0 else 0.0 - msg = "%s: comparing COCO APIs, %s differs by %f" % (name, k, abs_diff) - self.assertTrue(abs_diff < 1e-4, msg=msg) - - def test_unknown_category(self): - dataset = "coco_2017_val_100" - evaluator = COCOEvaluator(dataset) - evaluator.reset() - inputs = DatasetCatalog.get(dataset)[:2] - pred = Instances((100, 100)) - pred.pred_boxes = Boxes(torch.rand(2, 4)) - pred.scores = torch.rand(2) - pred.pred_classes = torch.tensor([10, 80]) - output = {"instances": pred} - evaluator.process(inputs, [output, output]) - with self.assertRaises(AssertionError): - evaluator.evaluate() diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py deleted file mode 100644 index 472190e0b3b38b55773795915badbb5bc4599d42..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. 
- -import argparse -import json -import numpy as np -import os -from collections import defaultdict -import cv2 -import tqdm - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.structures import Boxes, BoxMode, Instances -from detectron2.utils.file_io import PathManager -from detectron2.utils.logger import setup_logger -from detectron2.utils.visualizer import Visualizer - - -def create_instances(predictions, image_size): - ret = Instances(image_size) - - score = np.asarray([x["score"] for x in predictions]) - chosen = (score > args.conf_threshold).nonzero()[0] - score = score[chosen] - bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4) - bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) - - labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen]) - - ret.scores = score - ret.pred_boxes = Boxes(bbox) - ret.pred_classes = labels - - try: - ret.pred_masks = [predictions[i]["segmentation"] for i in chosen] - except KeyError: - pass - return ret - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="A script that visualizes the json predictions from COCO or LVIS dataset." - ) - parser.add_argument("--input", required=True, help="JSON file produced by the model") - parser.add_argument("--output", required=True, help="output directory") - parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") - parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") - args = parser.parse_args() - - logger = setup_logger() - - with PathManager.open(args.input, "r") as f: - predictions = json.load(f) - - pred_by_image = defaultdict(list) - for p in predictions: - pred_by_image[p["image_id"]].append(p) - - dicts = list(DatasetCatalog.get(args.dataset)) - metadata = MetadataCatalog.get(args.dataset) - if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - - def dataset_id_map(ds_id): - return metadata.thing_dataset_id_to_contiguous_id[ds_id] - - elif "lvis" in args.dataset: - # LVIS results are in the same format as COCO results, but have a different - # mapping from dataset category id to contiguous category id in [0, #categories - 1] - def dataset_id_map(ds_id): - return ds_id - 1 - - else: - raise ValueError("Unsupported dataset: {}".format(args.dataset)) - - os.makedirs(args.output, exist_ok=True) - - for dic in tqdm.tqdm(dicts): - img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1] - basename = os.path.basename(dic["file_name"]) - - predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2]) - vis = Visualizer(img, metadata) - vis_pred = vis.draw_instance_predictions(predictions).get_image() - - vis = Visualizer(img, metadata) - vis_gt = vis.draw_dataset_dict(dic).get_image() - - concat = np.concatenate((vis_pred, vis_gt), axis=1) - cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1]) diff --git a/spaces/yo2266911/uma_voice/train.py b/spaces/yo2266911/uma_voice/train.py deleted file mode 100644 index 4dff8b280d76c53abdfc2fbce83cafaf3022ab96..0000000000000000000000000000000000000000 --- a/spaces/yo2266911/uma_voice/train.py +++ /dev/null @@ -1,301 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from 
torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import librosa -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) - -import commons -import utils -from data_utils import ( - TextAudioLoader, - TextAudioCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - - -torch.backends.cudnn.benchmark = True -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '8000' - - hps = utils.get_hparams() - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32,300,400,500,600,700,800,900,1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioCollate() - train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False, - batch_size=hps.train.batch_size, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) - net_d = DDP(net_d, device_ids=[rank]) - - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d) - global_step = (epoch_str - 1) * len(train_loader) - except: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank==0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - 
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader): - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask,\ - (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths) - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank==0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - old_g=os.path.join(hps.model_dir, "G_{}.pth".format(global_step-2000)) - old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-2000)) - if os.path.exists(old_g): - os.remove(old_g) - if os.path.exists(old_d): - os.remove(old_d) - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader): - x, x_lengths = x.cuda(0), x_lengths.cuda(0) - spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0) - y, y_lengths = y.cuda(0), y_lengths.cuda(0) - - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - spec = spec[:1] - spec_lengths = spec_lengths[:1] - y = y[:1] - y_lengths = y_lengths[:1] - break - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict = { - "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - } - audio_dict = { - "gen/audio": y_hat[0,:,:y_hat_lengths[0]] - } - if global_step == 0: - image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff --git a/spaces/younker/chatgpt-turbo/client/src/types/constants_default.ts b/spaces/younker/chatgpt-turbo/client/src/types/constants_default.ts deleted file mode 100644 index 
273a3c2290b0c1ffd87eb20fcacba609719a5949..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/src/types/constants_default.ts +++ /dev/null @@ -1 +0,0 @@ -export const SERVER_ADDRESS = "http://localhost:8080"; diff --git a/spaces/youplala/StoreCopilot/src/data.py b/spaces/youplala/StoreCopilot/src/data.py deleted file mode 100644 index 23896a0c1549e0c21499cd5ae10c82a5f5196a31..0000000000000000000000000000000000000000 --- a/spaces/youplala/StoreCopilot/src/data.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -from collections import OrderedDict - -import pandas as pd - -from src.core.config import get_settings - -settings = get_settings() - -CONFIG = { - "min_net_sales_percentage": 0.005, - "max_net_sales_percentage": 0.1, - "min_extra_qty_percentage": 0.1, - "percentage_threshold": 0.5, -} - - -def check_net_sales_percentage(data): - """ - Check the extra net sales percentage of total sales is between the min and max thresholds. - """ - min_threshold = CONFIG.get("min_net_sales_percentage", 0.005) - max_threshold = CONFIG.get("max_net_sales_percentage", 0.10) - pct_threshold = CONFIG.get("percentage_threshold", 0.5) - - data["CHECK_EXTRA_NS_PCT"] = (min_threshold <= data.EXTRA_NET_SALE_PCT) & ( - data.EXTRA_NET_SALE_PCT <= max_threshold - ) - data.drop(columns=["EXTRA_NET_SALE_PCT"], inplace=True) - pct = data["CHECK_EXTRA_NS_PCT"].mean() - return pct > pct_threshold - - -def check_in_history(reco, history, type): - """ - Check if the product/basket is already in the history of the store - """ - pct_threshold = CONFIG.get("percentage_threshold", 0.5) - # check if the product was sold last week or in the whole history - reco["CHECK_IN_HISTORY"] = reco.QTY_CURRENT > 0 | reco[ - "DISPLAY_NAME" - ].isin(history[type]) - pct = reco.CHECK_IN_HISTORY.mean() - return pct > pct_threshold - - -def check_extra_qty(reco): - """ - Check the extra quantity percentage of total sales is above the threshold. 
- """ - min_threshold_pct = CONFIG.get("min_extra_qty_percentage", 0.1) - pct_threshold = CONFIG.get("percentage_threshold", 0.5) - min_threshold = reco["QTY_CURRENT"] * min_threshold_pct - reco["CHECK_EXTRA_QTY_PCT"] = reco["QTY_CURRENT"] > min_threshold - pct = reco["CHECK_EXTRA_QTY_PCT"].mean() - return pct > pct_threshold - - -def checks(reco, history, type): - """ - Run all checks - """ - if type == "PROD_NAME": - check_net_sales_percentage(reco) - check_extra_qty(reco) - check_in_history(reco, history, type) - # Filter out the recommendations that don't pass the checks - check_cols = [c for c in reco.columns if c.startswith("CHECK_")] - reco = reco[reco[check_cols].all(axis=1)].drop(columns=check_cols) - return reco diff --git a/spaces/ysharma/testing_llm/README.md b/spaces/ysharma/testing_llm/README.md deleted file mode 100644 index a5ab3971a3bd4db7ba3ae136280f8bc0251d2533..0000000000000000000000000000000000000000 --- a/spaces/ysharma/testing_llm/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Testing Llm -emoji: 😻 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false -license: gpl ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yukie/yukie-sovits3/vdecoder/hifigan/models.py b/spaces/yukie/yukie-sovits3/vdecoder/hifigan/models.py deleted file mode 100644 index bdc3fa2c3447f360472d94c2fad9bd74993f6410..0000000000000000000000000000000000000000 --- a/spaces/yukie/yukie-sovits3/vdecoder/hifigan/models.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = 
F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. 
- # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (torch.diff(tmp_over_one, dim=1)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/zadkiel04/rvc-yoshino/config.py b/spaces/zadkiel04/rvc-yoshino/config.py deleted file mode 100644 index c0c16e0017efbcaf250cb539a1d0edb4e83575e4..0000000000000000000000000000000000000000 --- a/spaces/zadkiel04/rvc-yoshino/config.py +++ /dev/null @@ -1,88 +0,0 @@ -########################硬件参数######################## - -# 填写cuda:x, cpu 或 mps, x指代第几张卡,只支持 N卡 / Apple Silicon 加速 -device = "cuda:0" - -# 9-10-20-30-40系显卡无脑True,不影响质量,>=20显卡开启有加速 -is_half = True - -# 默认0用上所有线程,写数字限制CPU资源使用 -n_cpu = 0 - -########################硬件参数######################## - - -##################下为参数处理逻辑,勿动################## - -########################命令行参数######################## -import argparse - -parser = argparse.ArgumentParser() -parser.add_argument("--port", type=int, default=7865, help="Listen port") -parser.add_argument("--pycmd", type=str, default="python", help="Python command") -parser.add_argument("--colab", action="store_true", help="Launch in colab") -parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" -) -parser.add_argument( - "--noautoopen", action="store_true", help="Do not open in browser automatically" -) -cmd_opts, unknown = parser.parse_known_args() - -python_cmd = cmd_opts.pycmd -listen_port = cmd_opts.port -iscolab = cmd_opts.colab -noparallel = cmd_opts.noparallel -noautoopen = cmd_opts.noautoopen -########################命令行参数######################## - -import sys -import torch - - -# has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. 
-# check `getattr` and try it for compatibility -def has_mps() -> bool: - if sys.platform != "darwin": - return False - else: - if not getattr(torch, "has_mps", False): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - -if not torch.cuda.is_available(): - if has_mps(): - print("没有发现支持的N卡, 使用MPS进行推理") - device = "mps" - else: - print("没有发现支持的N卡, 使用CPU进行推理") - device = "cpu" - is_half = False - -if device not in ["cpu", "mps"]: - gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1])) - if "16" in gpu_name or "MX" in gpu_name: - print("16系显卡/MX系显卡强制单精度") - is_half = False - -from multiprocessing import cpu_count - -if n_cpu == 0: - n_cpu = cpu_count() -if is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 -else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 diff --git a/spaces/zeno-ml/openai-evals/zeno-evals-hub/__init__.py b/spaces/zeno-ml/openai-evals/zeno-evals-hub/__init__.py deleted file mode 100644 index c2221834a15685c29d138c331f73a4e12308fcba..0000000000000000000000000000000000000000 --- a/spaces/zeno-ml/openai-evals/zeno-evals-hub/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .main import command_line - -command_line() diff --git a/spaces/zhenwusw/JoJoGAN/e4e_projection.py b/spaces/zhenwusw/JoJoGAN/e4e_projection.py deleted file mode 100644 index dadbc2f3f7b131a88af1a14206714b5027de155b..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e_projection.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import sys -import numpy as np -from PIL import Image -import torch -import torchvision.transforms as transforms -from argparse import Namespace -from e4e.models.psp import pSp -from util import * - - - -@ torch.no_grad() -def projection(img, name, device='cuda'): - - - model_path = 'models/e4e_ffhq_encode.pt' - ckpt = torch.load(model_path, map_location='cpu') - opts = ckpt['opts'] - opts['checkpoint_path'] = model_path - opts= Namespace(**opts) - net = pSp(opts, device).eval().to(device) - - transform = transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(256), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - img = transform(img).unsqueeze(0).to(device) - images, w_plus = net(img, randomize_noise=False, return_latents=True) - result_file = {} - result_file['latent'] = w_plus[0] - torch.save(result_file, name) - return w_plus[0] diff --git a/spaces/zomehwh/rvc-models/vc_infer_pipeline.py b/spaces/zomehwh/rvc-models/vc_infer_pipeline.py deleted file mode 100644 index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/rvc-models/vc_infer_pipeline.py +++ /dev/null @@ -1,306 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -from config import x_pad, x_query, x_center, x_max -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, device, is_half): - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * x_query # 查询切点前后查询时间 - self.t_center = self.sr * x_center # 查询切点位置 - self.t_max = self.sr * x_max # 免查询时长阈值 - self.device = device - self.is_half = is_half - - def get_f0(self, x, 
p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - _, I = index.search(npy, 1) - npy = big_npy[I.squeeze()] - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del 
feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_big_npy != "" - and file_index != "" - and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = np.load(file_big_npy) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("Feature retrieval library doesn't exist or ratio is 0") - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt