diff --git a/README.md b/README.md
deleted file mode 100644
index 4126ed855606c38ab5861f8bc3ff8d6949156f77..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-configs:
-- config_name: default
- data_files:
- spaces.csv
-
-license: other
-language:
-- code
-size_categories:
-- 100K<n<1M
----
-
-```python
- try:
- try:
- a[i['id']] = {'sdk': i['sdk'], 'license': i['cardData']['license'], 'likes': i['likes']}
- except KeyError:
- a[i['id']] = {'sdk': i['sdk'], 'license': None, 'likes': i['likes']}
- except Exception:
- a[i['id']] = {'sdk': "Configuration error", 'license': "Configuration error", 'likes': i['likes']}
-
-data_list = [{'repository': key, 'sdk': value['sdk'], 'license': value['license'], 'likes': value['likes']} for key, value in a.items()]
-
-df = pd.DataFrame(data_list)
-```
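-
-The resulting frame can then be written out as the `spaces.csv` referenced in the `configs` section above (a minimal sketch; the exact export call used here is an assumption):
-
-```python
-df.to_csv("spaces.csv", index=False)
-```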
-
-3. Cloned spaces locally.
-
-```python
-from huggingface_hub import snapshot_download
-
-programming = ['.asm', '.bat', '.cmd', '.c', '.h', '.cs', '.cpp', '.hpp', '.c++', '.h++', '.cc', '.hh', '.C', '.H', '.cmake', '.css', '.dockerfile', 'Dockerfile', '.f90', '.f', '.f03', '.f08', '.f77', '.f95', '.for', '.fpp', '.go', '.hs', '.html', '.java', '.js', '.jl', '.lua', 'Makefile', '.md', '.markdown', '.php', '.php3', '.php4', '.php5', '.phps', '.phpt', '.pl', '.pm', '.pod', '.perl', '.ps1', '.psd1', '.psm1', '.py', '.rb', '.rs', '.sql', '.scala', '.sh', '.bash', '.command', '.zsh', '.ts', '.tsx', '.tex', '.vb']
-pattern = [f"*{i}" for i in programming]
-
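-# `repos` is assumed to be the list of repository ids gathered above (e.g. df["repository"].tolist())
-# allow_patterns restricts each download to files matching these glob patterns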
-for i in repos:
- snapshot_download(i, repo_type="space", local_dir=f"spaces/{i}", allow_patterns=pattern)
-```
-
-4. Processed the data to derive statistics.
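-
-A sketch of what this step could look like with pandas, assuming the `repository`/`sdk`/`license`/`likes` layout of the `spaces.csv` built above (the exact statistics derived are not shown in this README):
-
-```python
-import pandas as pd
-
-df = pd.read_csv("spaces.csv")
-
-# number of spaces per SDK and per license
-print(df["sdk"].value_counts())
-print(df["license"].value_counts(dropna=False))
-
-# total likes per SDK, most popular first
-print(df.groupby("sdk")["likes"].sum().sort_values(ascending=False))
-```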
\ No newline at end of file
diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/gptworldAi/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/gptworldAi/__init__.py
deleted file mode 100644
index e7f76c61209fabf224698949764155ac53cc7a6b..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/gptworldAi/__init__.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/23 13:37
-@Auth : Hp_mzx
-@File :__init__.py.py
-@IDE :PyCharm
-"""
-import json
-import uuid
-import random
-import binascii
-import requests
-import Crypto.Cipher.AES as AES
-from fake_useragent import UserAgent
-
-class ChatCompletion:
- @staticmethod
- def create(messages: list, proxy: str = None):
- url = "https://chat.getgpt.world/api/chat/stream"
- headers = {
- "Content-Type": "application/json",
- "Referer": "https://chat.getgpt.world/",
- 'user-agent': UserAgent().random,
- }
- proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
- data = json.dumps({
- "messages": messages,
- "frequency_penalty": 0,
- "max_tokens": 4000,
- "model": "gpt-3.5-turbo",
- "presence_penalty": 0,
- "temperature": 1,
- "top_p": 1,
- "stream": True,
- "uuid": str(uuid.uuid4())
- })
- signature = ChatCompletion.encrypt(data)
- res = requests.post(url, headers=headers, data=json.dumps({"signature": signature}), proxies=proxies, stream=True)
- res.raise_for_status()
- for chunk in res.iter_content(chunk_size=None):
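- # the endpoint streams SSE-style events ("data: {...}"); parse each JSON payload and yield the text deltas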
- datas = chunk.decode('utf-8').split('data: ')
- for data in datas:
- if not data or "[DONE]" in data:
- continue
- data_json = json.loads(data)
- content = data_json['choices'][0]['delta'].get('content')
- if content:
- yield content
-
-
- @staticmethod
- def random_token(e):
- token = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
- n = len(token)
- return "".join([token[random.randint(0, n - 1)] for i in range(e)])
-
- @staticmethod
- def encrypt(e):
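- # AES-128-CBC with a random 16-character key (t) and IV (n); both are appended in clear text after the hex-encoded ciphertext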
- t = ChatCompletion.random_token(16).encode('utf-8')
- n = ChatCompletion.random_token(16).encode('utf-8')
- r = e.encode('utf-8')
- cipher = AES.new(t, AES.MODE_CBC, n)
- ciphertext = cipher.encrypt(ChatCompletion.__pad_data(r))
- return binascii.hexlify(ciphertext).decode('utf-8') + t.decode('utf-8') + n.decode('utf-8')
-
- @staticmethod
- def __pad_data(data: bytes) -> bytes:
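- # PKCS#7-style padding: pad the plaintext to a multiple of the AES block size (each padding byte equals the pad length)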
- block_size = AES.block_size
- padding_size = block_size - len(data) % block_size
- padding = bytes([padding_size] * padding_size)
- return data + padding
-
-
-class Completion:
- @staticmethod
- def create(prompt:str,proxy:str=None):
- return ChatCompletion.create([
- {
- "content": "You are ChatGPT, a large language model trained by OpenAI.\nCarefully heed the user's instructions. \nRespond using Markdown.",
- "role": "system"
- },
- {"role": "user", "content": prompt}
- ], proxy)
-
-
-if __name__ == '__main__':
- # single completion
- text = ""
- for chunk in Completion.create("Who are you?", "127.0.0.1:7890"):
- text = text + chunk
- print(chunk, end="", flush=True)
- print()
-
-
- #chat completion
- message = []
- while True:
- prompt = input("Enter your question: ")
- message.append({"role": "user","content": prompt})
- text = ""
- for chunk in ChatCompletion.create(message,'127.0.0.1:7890'):
- text = text+chunk
- print(chunk, end="", flush=True)
- print()
- message.append({"role": "assistant", "content": text})
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Upgrade Your Mac Yet How to Use OBS Studio on Mac OS X 10.12.6.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Upgrade Your Mac Yet How to Use OBS Studio on Mac OS X 10.12.6.md
deleted file mode 100644
index 9ba304f2ccd35a74679d4dcf3c55a28a19d70f61..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Upgrade Your Mac Yet How to Use OBS Studio on Mac OS X 10.12.6.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# How to Download and Install OBS Studio on Mac OS X 10.12.6
-
-OBS Studio is a free and open source software that allows you to record and stream video and audio from your computer. With OBS Studio, you can create professional-looking videos for live streaming, gaming, webinars, podcasts, and more. OBS Studio supports multiple sources, scenes, transitions, filters, and plugins that give you full control over your video production.
-
-But how can you download and install OBS Studio on Mac OS X 10.12.6? Is there a compatible version for this older operating system? In this article, we will show you how to get OBS Studio up and running on your Mac in a few simple steps.
-
-## Download OBS Studio for Mac OS X 10.12.6
-
-The first thing you need to do is to download the OBS Studio installer for Mac OS X 10.12.6 from the official website. The latest version of OBS Studio requires Mac OS X 10.13 or later, but there is an older version (25.0.8) that works with Mac OS X 10.12.6.
-
-To download OBS Studio for Mac OS X 10.12.6, go to the [download page](https://obsproject.com/download) and scroll down to the "Older Versions" section. Click on the "Mac OS X" tab and look for the version 25.0.8. Click on the "Download Installer" button and save the file to your computer.
-
-## Install OBS Studio on Mac OS X 10.12.6
-
-Once you have downloaded the OBS Studio installer for Mac OS X 10.12.6, you can proceed to install it on your computer. To install OBS Studio on Mac OS X 10.12.6, follow these steps:
-
-- Double-click on the downloaded file (obs-mac-25.0.8-installer.pkg) to launch the installer.
-- Click on "Continue" and agree to the license agreement.
-- Choose the destination folder for OBS Studio and click on "Install".
-- Enter your administrator password if prompted and click on "Install Software".
-- Wait for the installation to complete and click on "Close".
-
-## Launch OBS Studio on Mac OS X 10.12.6
-
-Now that you have installed OBS Studio on your Mac, you can launch it and start using it for your video recording and streaming needs. To launch OBS Studio on Mac OS X 10.12.6, follow these steps:
-
-- Go to the Applications folder and look for the OBS icon.
-- Double-click on the OBS icon to open the application.
-- If you see a warning message saying that "OBS" can't be opened because it is from an unidentified developer, click on "Open Anyway".
-- If you see a dialog box asking for permission to access your microphone or camera, click on "OK".
-- You will see the main window of OBS Studio with a preview of your video source and some buttons and menus.
-- You can now configure your settings, add sources and scenes, apply filters and transitions, and start recording or streaming.
-
-## Conclusion
-
-OBS Studio is a powerful and versatile software that can help you create high-quality videos for various purposes. Whether you want to stream live events, record gameplay, or make tutorials, OBS Studio can handle it all.
-
-However, if you have an older Mac with Mac OS X 10.12.6, you may encounter some compatibility issues with the latest version of OBS Studio. Fortunately, there is an older version of OBS Studio that works with Mac OS X 10.12.6 and can be downloaded and installed easily.
-
-By following this guide, you can download and install OBS Studio on Mac OS X 10.12.6 and start using it without any problems.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow Download and Install Guide.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow Download and Install Guide.md
deleted file mode 100644
index 616dbdbf2ff0d82b4a0321b85e72e16309ccee4e..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Football-Manager-2012-Patch- -v12.2.2-UPDATE-Skidrow Download and Install Guide.md
+++ /dev/null
@@ -1,146 +0,0 @@
-
-
Football Manager 2012 Patch 12.2.2 Update Skidrow
-
If you are a fan of football management games, you probably have heard of Football Manager 2012, one of the most popular and realistic games in the genre. But did you know that there is a patch 12.2.2 update that adds new features and fixes bugs to the game? And did you know that you can download and install it for free from Skidrow, a group of hackers who crack and release games online? In this article, we will tell you everything you need to know about Football Manager 2012 patch 12.2.2 update Skidrow, including how to download and install it, what's new in it, and why you should try it.
Football Manager 2012 is a football management simulation game developed by Sports Interactive and published by Sega in October 2011. It is the eighth game in the Football Manager series, and it allows you to take control of any club from over 50 countries across the world, as well as create your own custom club. You can manage every aspect of your club, from tactics and training to transfers and finances, as well as interact with players, staff, media, and fans. You can also compete with other managers online or offline in various modes and challenges.
-
What is the Patch 12.2.2 Update?
-
The patch 12.2.2 update is an official update released by Sports Interactive in March 2012 that fixes some bugs and errors in the game, as well as adds some new features and content. Some of the main changes include:
-
-
Improved match engine performance and stability
-
Fixed issues with contracts, transfers, loans, and free agents
-
Updated player and staff data for the winter transfer window
-
Added new leagues and competitions, such as the Australian A-League, the Indian Super League, and the UEFA Europa Conference League
-
Added new graphics and sounds, such as new player faces, kits, logos, trophies, and crowd chants
-
-
What is Skidrow?
-
Skidrow is a group of hackers who crack and release games online for free. They are known for cracking games that have DRM (digital rights management) protection, such as Steam or Origin, which prevent users from playing games without buying them or having an internet connection. Skidrow has cracked many popular games, such as Assassin's Creed, Call of Duty, FIFA, Grand Theft Auto, and more. They usually release their cracks along with updates or patches for the games.
-
How to Download and Install the Patch 12.2.2 Update Skidrow
-
Requirements
-
Before you download and install the patch 12.0 update Skidrow, you need to have some requirements:
-
-
A PC that meets the minimum system requirements for Football Manager 2010 (you can check them here: https://www.systemrequirementslab.com/cyri/requirements/football-manager-2010/11210)
-
A copy of Football Manager 2010 installed on your PC (you can buy it from Steam or other platforms)
-
A reliable internet connection
-
A torrent client (such as uTorrent or BitTorrent) to download the patch 12.0 update Skidrow file
-
A file extractor (such as WinRAR or 7-Zip) to extract the files from the patch 12.0 update Skidrow file
-
An antivirus software (such as Avast or Norton) to scan the files for viruses or malware
-
-
Steps
-
Once you have all the requirements ready, you can follow these steps to download and install the patch 12.0 update Skidrow:
-
Download the Patch 12.0 Update Skidrow from a trusted source
-the patch 12.0 update Skidrow file from a trusted source online. You can use a torrent site (such as The Pirate Bay or Kickass Torrents) or a direct download site (such as Mega or Mediafire) to find and download the file.
-
The file size should be around 1 GB, and it should have a name like "Football.Manager.2010.Patch.v12.0-UPDATE-SKIDROW.rar" or something similar.
-
Extract the files to your Football Manager 2010 folder
-
The second step is to extract the files from the patch 12.0 update Skidrow file to your Football Manager 2010 folder on your PC.
-
You can use a file extractor (such as WinRAR or 7-Zip) to open the file and extract its contents.
-
-
You should see a folder named "Football.Manager.2010.Patch.v12-UPDATE-SKIDROW" or something similar inside.
-
You need to copy this folder to your Football Manager 2010 folder on your PC.
-
You can find your Football Manager 2010 folder by following this path: C:\Program Files (x86)\Steam\steamapps\common\Football Manager 2010\ (or wherever you installed your game).
-
Run the installer and follow the instructions
-
The third step is to run the installer inside the "Football.Manager-2010.Patch.v12-UPDATE-SKIDROW" folder and follow the instructions on the screen.
-
You should see a file named "setup.exe" or something similar inside.
-
You need to double-click on this file and allow it to run on your PC.
-
You should see a window that asks you to select the language and agree to the terms and conditions.
-
You need to choose your preferred language and click on "I Agree".
-
You should then see another window that asks you to select the destination folder for the patch installation.
-
You need to browse and select your Football Manager 2010 folder on your PC (the same one where you copied the "Football.Manager-2010.Patch.v12-UPDATE-SKIDROW" folder).
-
You should then see another window that shows the progress of the installation.
-
You need to wait until the installation is complete.
-
Copy the crack files to your Football Manager 2010 folder
-
The fourth step is to copy the crack files from the "Football.Manager-2010.Patch.v12-UPDATE-SKIDROW" folder to your Football Manager 2010 folder on your PC.
-
You should see a folder named "SKIDROW" inside.
-
You need to open this folder and copy all its contents.
-
You then need to paste them into your Football Manager 2010 folder on your PC (the same one where you installed the patch).
-
Enjoy the game with the latest updates and features
-
The final step is to enjoy the game with the latest updates and features.
-
You can launch the game from Steam or from your desktop shortcut.
-
You should see a message that says "Football Manager is now running version v12-UPDATE-SKIDROW".
-
Congratulations! You have successfully downloaded and installed the patch v12-UPDATE-SKIDROW for Football Manager !
-
Note: If you encounter any problems or errors while playing the game, you can check the official website of Sports Interactive (https://www.sigames.com/) or the Skidrow website (https://www.skidrowreloaded.com/) for solutions or support.
-
What's New in the Patch 12.2.2 Update Skidrow
-
Bug Fixes and Improvements
-
The patch 12.2.2 update Skidrow fixes some bugs and errors that were present in the previous versions of the game, such as:
-
-
Fixed crash issues when loading or saving games
-
Fixed compatibility issues with Windows 10 and DirectX 11
-
Fixed graphical glitches and display errors
-
Fixed gameplay issues such as unrealistic results, player ratings, injuries, and penalties
-
Fixed database issues such as missing or incorrect data, duplicate players, and outdated information
-
Fixed interface issues such as missing or incorrect buttons, menus, and tooltips
-
Fixed network issues such as connection problems, lag, and synchronization errors
-
Fixed editor issues such as missing or incorrect options, functions, and features
-
Fixed localization issues such as missing or incorrect texts, fonts, and languages
-
Fixed security issues such as malware, viruses, and hackers
-
-
New Transfers and Contracts
-
The patch 12.2.2 update Skidrow also adds some new transfers and contracts that were made during the winter transfer window of 2012, such as:
-| Player | From | To | Fee |
-| --- | --- | --- | --- |
-| Carlos Tevez | Manchester City | AC Milan | £25m |
-| Thierry Henry | New York Red Bulls | Arsenal | Loan |
-| Gary Cahill | Bolton Wanderers | Chelsea | £7m |
-| Papiss Cisse | Freiburg | Newcastle United | £9m |
-| Alex | Chelsea | Paris Saint-Germain | £4m |
-| Paul Scholes | Retired | Manchester United | Free |
-| David Beckham | LA Galaxy | Paris Saint-Germain | Free |
-| Tim Cahill | Everton | New York Red Bulls | £1m |
-| Robbie Keane | LA Galaxy | Aston Villa | Loan |
-| Nicolas Anelka | Chelsea | Shanghai Shenhua | Free |
New Leagues and Competitions
-
The patch 12.2.2 update Skidrow also adds some new leagues and competitions that were not available in the previous versions of the game, such as:
-| League/Competition | Country/Region | Level/Format |
-| --- | --- | --- |
-| Australian A-League | Australia/New Zealand | Top division/10 teams |
-| Indian Super League | India | Top division/8 teams |
-| UEFA Europa Conference League | Europe | Third-tier continental competition/184 teams |
-| FIFA Club World Cup Expanded Edition | Worldwide | Intercontinental competition/24 teams |
-| UEFA Nations League Finals | Europe | International competition/4 teams |
New Graphics and Sounds
-
The patch 12.2.2 update Skidrow also adds some new graphics and sounds that enhance the visual and audio quality of the game, such as:
-| Graphic/Sound | Description |
-| --- | --- |
-| New player faces | More realistic and updated faces for over 500 players |
-| New kits | More authentic and updated kits for over 100 clubs and national teams |
-| New logos | More accurate and updated logos for over 200 clubs and competitions |
-| New trophies | More detailed and realistic trophies for over 50 competitions |
-| New crowd chants | More diverse and realistic crowd chants for over 50 clubs and national teams |
Conclusion
-
Summary of the main points
-
In conclusion, Football Manager 2012 patch 12.2.2 update Skidrow is an amazing update that improves the game in many ways. It fixes some bugs and errors, adds some new features and content, and enhances the visual and audio quality of the game. It is easy to download and install, and it is free of charge. It is compatible with Windows 10 and DirectX 11, and it works with Steam or other platforms. It is a must-have update for any fan of football management games.
-
Call to action for the readers
-
If you are interested in trying out Football Manager 2012 patch 12.2.2 update Skidrow, you can follow the steps we have provided in this article to download and install it on your PC. You can also check out our other articles on how to play Football Manager 2012 better, how to find hidden gems in Football Manager 2012, how to create custom tactics in Football Manager 2012, and more. You can also share your feedback, opinions, questions, or suggestions with us in the comments section below. We would love to hear from you!
-
Frequently Asked Questions (FAQs)
-
Do I need to have Football Manager 2012 installed on my PC before I download and install the patch 12.2.2 update Skidrow?
-
Yes, you need to have Football Manager 2012 installed on your PC before you download and install the patch 12.2.2 update Skidrow. You can buy it from Steam or other platforms.
- Do I need to have an internet connection to play Football Manager 2012 after I download and install the patch 12.2.2 update Skidrow?
-
No, you do not need to have an internet connection to play Football Manager 2012 after you download and install the patch 12.2.2 update Skidrow. You can play it offline or online as you wish.
-
Is the patch 12.2.2 update Skidrow safe to download and install on my PC?
-
Yes, the patch 12.2.2 update Skidrow is safe to download and install on your PC. However, you should always scan the files for viruses or malware before you open them, and you should always download them from trusted sources online.
-
Will the patch 12.2.2 update Skidrow affect my saved games or achievements in Football Manager 2012?
-
No, the patch 12.2.2 update Skidrow will not affect your saved games or achievements in Football Manager 2012. You can continue playing your saved games or earning your achievements as usual.
-
Can I uninstall the patch 12.2.2 update Skidrow if I do not like it or if it causes problems on my PC?
-
Yes, you can uninstall the patch 12.2.2 update Skidrow if you do not like it or if it causes problems on your PC. You can use the uninstaller inside the "Football.Manager-2010.Patch.v12-UPDATE-SKIDROW" folder to remove it from your PC.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/COOKING MAMA Apk Mod Unlock All Learn Cooking Techniques and Create Your Own Recipes with Unlimited Coins and Levels.md b/spaces/1gistliPinn/ChatGPT4/Examples/COOKING MAMA Apk Mod Unlock All Learn Cooking Techniques and Create Your Own Recipes with Unlimited Coins and Levels.md
deleted file mode 100644
index 453ffbcb02deb2836ff00b7d9079641999605cda..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/COOKING MAMA Apk Mod Unlock All Learn Cooking Techniques and Create Your Own Recipes with Unlimited Coins and Levels.md
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
If you want to create a big and wonderful restaurant, then serve your cooking as often as possible. This way people can get the best experience from their meal by eating at an establishment run by someone who cares deeply about food quality!
-
[Game Features] With its intuitive controls, both children and adults can enjoy the game. Also, even if you make mistakes there are no game-overs, so everyone can complete dishes. Furthermore, children who play may develop an interest in cooking.
[Recommended Setup] Android OS 4.1 or later. **Game may not be playable on certain devices even if the above conditions are met.
-
In the game, Android gamers will find themselves having access to their own interesting cooking adventures with Cooking Mama and Papa. Our two characters will stay with you from the beginning of the game as your cooking mentors and testers. Join them as you discover your own exciting journeys into the world of delicious foods and the fun of cooking them.
-
To start with, Android gamers in Cooking Mama will find themselves having access to the simple, intuitive and extremely fun gameplay of cooking, which you can dive into and enjoy to the fullest. Have fun as you create delicious foods from multiple ingredients and follow amazing recipes. Have them tested by Papa and serve your foods to other villagers. Make delicious dishes in varied categories with the help of the intuitive touch controls. Try out the unique gameplay as you create yummy food and find yourself getting hungry really fast. Fans of the famous Cooking Fever will certainly find yet another amazing cooking game for their mobile devices.
-
Gamers in Cooking Mama will immediately notice the friendly and inviting graphics as they dive into the exciting cooking gameplay. The cartoony and adorable cooking tools, ingredients, and animations also allow gamers to quickly become immersed in the gameplay. And most importantly, the undemanding graphics guarantee a smooth and satisfying experience with the game.
-
Do you love cooking unique dishes and searching for a game that helps you to cook various delicious dishes in a unique way? If you think so, then Cooking Mama MOD Apk is the perfect choice for you. There are a number of bonus features available in this cooking game, which can be unlocked as you continue playing the game. These include unlocking new dishes and other items as well as receiving extra points.
-
Cooking Mama is considered the most engaging cooking game for various platforms including android. There is a built-in automatic recipe guide available in the app that will help you to determine the type of dish that you can make for a particular level. Moreover, the recipe guide is very accurate and reliable, so you can be sure that the dish you are making will turn out perfect.
-
-
You can choose your level of difficulty while cooking in this game. If you are a beginner, then start with the easy levels before moving on to the medium ones. The hard levels are very challenging so be prepared for them when you are done with the easy levels.
-
The most important thing in cooking is to be accurate with the timing. If you leave your dish in the oven for too long, it might burn and ruin the whole dish. You also have to take it out of the oven and other cooking utensils at the right time so that all your dishes achieve perfection.
-
This cooking game features awesome sound effects which make everything feel more fun and enjoyable especially when you are slicing or dicing up ingredients with your knife. You will be able to immerse yourself in the experience of cooking delicious dishes.
-
The best thing about this cooking game is that the controls are very easy to understand and use. You will be able to start cooking right from the next moment as you install the game on your smartphone.
-
There are various mini-games involved in this cooking game which keep things interesting as you keep playing the game over time. Some of these mini-games require fast reflexes while some simply require patience and persistence, but all of them are equally fun to play.
-
You can also challenge your friends and online players to a cook-off to see who can make the best dish. This adds an extra layer of competition to the cooking game and makes it more enjoyable to play. Also, the online players are from all around the world, so you can learn new recipes and cooking tips from them.
-
The best way to grab lots of points in this game is by sharing your recipes with the Cooking Mama community. You can share your own recipes with other players and earn points as a thank you for sharing. These points can be used to upgrade your appliances or unlock new dishes.
-
At the end of this article, I would like to say that Cooking Mama is the best cooking game for android devices ever made for smartphones. From intuitive designs and interesting gameplay, this game has everything that a user can expect from an ideal cooking game.
-
There is no cost associated with playing this cooking game, which makes it even more popular among fans. There are in-app purchases available for players who want to boost their character level while playing or unlocking content faster. You can purchase them or use this Cooking Mama MOD version.
-
Android gamers will have access to their own entertaining cooking adventures with Cooking Mama and Papa in the game. Our two characters will be your cooking tutors and testers from the beginning of the game. Join them as they embark on their own thrilling travels into the world of delectable meals and the fun that comes with preparing them.
-
Cooking with a Twist Mama is a terrible cooking game in which you must assist the renowned cooking mama in preparing and cooking a turkey. However, cooking mama has begun to exhibit her really evil and twisted side, so if you are squeamish, you should avoid playing this game. To prepare and cook the dish, follow the directions step by step.
-
Cooking mama: Let's cook puzzle - make tasty dishes using foods on the screen and match 3 and more same ones. Improve your culinary skills in this fun game for Android. Make delicious meals easy. To do this just match same ingredients.
-
Join the game, you will play as a young girl trying to learn to cook with Mama and Papa. You will be guided through every small step to complete delicious dishes and please your parents. Cooking is easy as you just tap on the screen to select ingredients, then swipe or rotate to cook, and finally cook with kitchen tools and appliances. Delicious dishes will attract all your eyes with eye-catching colors. Besides, the cooking process is always accompanied by upbeat music, helping you both cook and relax. So, are you ready to cook with your parents? Show your talent to become the most talented kid chef in the house.
-
Your delicious dishes will be judged by your parents and diners. They will give you the number of stars and points corresponding to the quality of the dish. You can use your scores and stars to unlock new ingredients and recipes. If you play hard, you can add dozens of new items to the restaurant menu every day. Cooking is not as difficult as you think. Besides the video tutorial, you just need to use your fingers to cook. As soon as you make a mistake, you can also finish the dish. So this game is really suitable for kids and amateur chefs who love to cook on the phone.
-
You can play minigames to train your brain and relax after hours of cooking in the kitchen. It can be jigsaw puzzles, memorization, number filling, hidden objects, and more. The mini-games are built with lovely, playful pictures and music. You can also compete with your friends on the leaderboards of online minigames. Through these games, the game also gives you many attractive gifts to unlock unique items. Feel free to design your character with impressive clothing items, hair colors, and accessories.
-
With a game for children, this game is designed with a super cute and friendly graphic style. The characters and objects in the game are depicted in a chibi cartoon style. Cooking operations in the first person create a sense of authenticity and fun. The restaurant scene is always filled with bright colors, stimulating the concentration and creativity of all young chefs. And relaxing music will also make you happy all day with the cooking experience here.
-
Cooking Mama: Let's cook! MOD Coins - A game that will teach you how to cook from scratch! Slice, knead, fry, boil and bake, create a real culinary masterpiece! During the game, uncontrolled urges to eat are possible! Better to play on a full stomach! Cook - playfully, the entire cooking process will be accompanied by cute mini-games, try to do everything perfectly to serve a really tasty and right dish.
-
In the game, you will learn more than 30 recipes for delicious and healthy food, and also try to open your own restaurant, where you will sell your culinary masterpieces. In addition, all food is completely sustainable, go fishing, do gardening and serve customers only your own food. And if you want to take a break from the usual cooking, you can always play interesting mini-games.
-
These recipes are either obtained by default or by purchases. Other options include playing daily (7 days for each recipe) or completing Requests from Papa, labeled under as Bonus. There are 4 recipes that can be obtained through Facebook by inviting your friends to download a copy of the game. All subsequent lists after Starter Recipes require purchases before further unlocking requirements. Purchasing packs will build up a counter that lets you earn more Game Coins.
-
Early builds of the game require you to obtain these recipes via Facebook invitations. Each recipe would be unlocked sequentially as you invite more friends. However, as of December 2016, they can be purchased as a pack without interacting with Facebook. Please note that you will still have to pay the total amount for the pack even if some are already obtained.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Bullett Raja Movies 1080p Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Bullett Raja Movies 1080p Torrent.md
deleted file mode 100644
index 7f6079504349f78bd91945103c9c7d977ba50d9a..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Bullett Raja Movies 1080p Torrent.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Download Dubbed Torrent Full Movies In HD With Fast and safe Way. ... Bullet Raja Hindi Movie Trailer movie Download in HD mp4, 3Gp, 720p . Bullett Raja ... 4d29de3e1b
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Un juego de burbujas adictivo y gratuito - APK Descargar.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Un juego de burbujas adictivo y gratuito - APK Descargar.md
deleted file mode 100644
index e3b5c52df1250b57dad6c050f1eab4bfb6090f0f..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Un juego de burbujas adictivo y gratuito - APK Descargar.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
Bubble Shooter APK: How to Download and Play this Fun Game
-
If you are looking for a casual puzzle game that is easy to play, addictive, and enjoyable, then you should try Bubble Shooter APK. This is a classic game that has been around for decades, but it never gets old. In this article, we will tell you what Bubble Shooter APK is, how to download and install it on your Android device, how to play it and enjoy its benefits, and some tips and tricks to help you master it.
-
What is Bubble Shooter APK?
-
A brief introduction to the game and its features
-
Bubble Shooter APK is a free game that you can download from the Google Play Store or from other websites. It is inspired by Puzzle Bobble, a popular arcade game from the 90s. The goal of Bubble Shooter APK is to clear the screen by matching three or more bubbles of the same color. You can use your finger or mouse to aim and shoot bubbles at the rows above your shooter. You can see the next bubble to come in the bottom right corner of the screen. You can also change the color of your bubble by tapping on it.
Bubble Shooter APK has many features that make it fun and challenging. Some of them are:
-
-
It has more than 3000 levels with different layouts, obstacles, and goals.
-
It has new elements and prizes that you can unlock as you progress.
-
It has colorful graphics and sound effects that create a pleasant atmosphere.
-
It has a leaderboard and achievements that you can compete with your friends and other players.
-
It has a colorblind mode that makes it accessible for everyone.
-
-
How to download and install Bubble Shooter APK on your Android device
-
To download and install Bubble Shooter APK on your Android device, you need to follow these simple steps:
-
-
Go to the Google Play Store or any other website that offers Bubble Shooter APK. Make sure that the source is reliable and safe.
-
Tap on the download button or scan the QR code to start downloading the file.
-
Once the download is complete, open the file manager on your device and locate the file.
-
Tap on the file and allow the installation from unknown sources if prompted.
-
Follow the instructions on the screen to complete the installation.
-
Launch the game and enjoy!
-
-
How to play Bubble Shooter APK and enjoy its benefits
-
To play Bubble Shooter APK, you need to have a basic understanding of how the game works. Here are some guidelines to help you get started:
-
-
The game has three modes: classic, arcade, and puzzle. You can choose any mode according to your preference.
-
In each mode, you will have different levels with different objectives. You can see the objective at the top of the screen before starting each level.
-
To clear a level, you need to match three or more bubbles of the same color by shooting them with your shooter. You can aim by moving your finger or mouse on the screen. You can also bounce the bubbles off the walls for tricky shots.
-
You will get points for each bubble you pop. The more bubbles you pop at once, the more points you get. You can also get bonus points for dropping bubbles that are not attached to any other bubbles.
-
You will lose a life if you miss a shot or if the bubbles reach the bottom of the screen. You have a limited number of lives, so be careful.
-
You can use power-ups to help you clear the levels faster and easier. You can get power-ups by popping special bubbles or by buying them with coins.
-
You can earn coins by completing levels, watching ads, or buying them with real money.
-
You can pause the game at any time by tapping on the menu button at the top left corner of the screen. You can also adjust the settings, such as sound, music, and colorblind mode, from the menu.
-
-
Tips and Tricks for Bubble Shooter APK
-
How to aim and shoot bubbles effectively
-
Aiming and shooting bubbles is the most important skill in Bubble Shooter APK. Here are some tips to help you improve your accuracy and efficiency:
-
-
Use your finger or mouse to aim carefully before shooting. Don't rush your shots, as you might miss or hit the wrong bubble.
-
Try to create clusters of bubbles of the same color. This will make it easier to pop them and clear the board.
-
Focus on the bubbles that are close to the bottom of the screen. If you let them pile up, they will block your shooter and make it harder to aim.
-
Don't waste your shots on bubbles that are not connected to any other bubbles. They will not affect the board and will only reduce your score.
-
-
How to use the walls and the next bubble indicator
-
Using the walls and the next bubble indicator can give you an edge in Bubble Shooter APK. Here are some ways to use them effectively:
-
-
You can bounce the bubbles off the walls to reach difficult spots or angles. This can help you create more matches and clear more bubbles.
-
You can see the next bubble that will come out of your shooter in the bottom right corner of the screen. You can use this information to plan your next move and strategy.
-
You can also swap the current bubble with the next bubble by tapping on it. This can help you avoid unwanted colors or create better matches.
-
-
How to clear the board and score high points
-
Clearing the board and scoring high points are the main objectives of Bubble Shooter APK. Here are some strategies to help you achieve them:
-
-
Try to pop as many bubbles as possible in one shot. This will give you more points and bonus points for dropping bubbles.
-
Try to clear all the bubbles on the board. This will give you a perfect score and extra coins.
-
Try to complete the level as fast as possible. This will give you a time bonus and more points.
-
Try to use power-ups wisely. They can help you clear more bubbles, but they also cost coins or lives.
-
-
Benefits of Playing Bubble Shooter APK
-
It is a free, fun, and relaxing game
-
Bubble Shooter APK is a game that you can play for free anytime and anywhere. You don't need an internet connection or a subscription to enjoy it. You can play it on your phone, tablet, or computer. It is a game that is suitable for all ages and preferences. It is a game that is fun and relaxing, as it does not require too much thinking or stress. You can play it at your own pace and mood.
-
It improves your brain skills and concentration
-
Bubble Shooter APK is a game that can also improve your brain skills and concentration. It is a game that requires you to use your logic, strategy, and observation skills. You have to think fast and smart to clear the levels and score high points. You have to pay attention to the colors, patterns, and movements of the bubbles. You have to focus on your aim and timing. Playing Bubble Shooter APK can help you sharpen your mind and enhance your mental abilities.
-
-
It offers thousands of levels and challenges
-
Bubble Shooter APK is a game that offers thousands of levels and challenges for you to enjoy. It has three modes: classic, arcade, and puzzle, each with different objectives and difficulties. It has new elements and prizes that you can unlock as you progress. It has a leaderboard and achievements that you can compete with your friends and other players. It has a colorblind mode that makes it accessible for everyone. Playing Bubble Shooter APK can keep you entertained and satisfied for hours.
-
Conclusion
-
Bubble Shooter APK is a game that you should try if you are looking for a casual puzzle game that is easy to play, addictive, and enjoyable. It is a game that has many features that make it fun and challenging. It is a game that can improve your brain skills and concentration. It is a game that offers thousands of levels and challenges for you to enjoy. It is a game that is free, fun, and relaxing. You can download and install Bubble Shooter APK on your Android device easily and safely. You can play it anytime and anywhere. You can also use some tips and tricks to help you master it. If you are ready to pop some bubbles and have some fun, then download Bubble Shooter APK today and start playing!
-
FAQs
-
Here are some frequently asked questions about Bubble Shooter APK:
-
Q: Is Bubble Shooter APK safe to download and install?
-
A: Yes, Bubble Shooter APK is safe to download and install, as long as you get it from a reliable and secure source, such as the Google Play Store or the official website. You should avoid downloading it from unknown or suspicious sources, as they might contain malware or viruses that can harm your device.
-
Q: How can I get more coins in Bubble Shooter APK?
-
A: You can get more coins in Bubble Shooter APK by completing levels, watching ads, or buying them with real money. You can use coins to buy power-ups, lives, or unlock new elements and prizes.
-
Q: How can I play Bubble Shooter APK with my friends?
-
A: You can play Bubble Shooter APK with your friends by connecting your game to Facebook or Google Play Games. You can then see your friends' scores and achievements on the leaderboard and challenge them to beat your records. You can also invite them to play with you or send them gifts.
-
Q: What are the power-ups in Bubble Shooter APK?
-
A: The power-ups in Bubble Shooter APK are special bubbles that can help you clear the levels faster and easier. Some of the power-ups are:
-
-
Bomb: It explodes and pops all the bubbles around it.
-
Fireball: It burns through all the bubbles in its path.
-
Color Changer: It changes the color of all the bubbles of the same color as the one it hits.
-
Rainbow: It matches with any color of bubble.
-
-
Q: How can I contact the developers of Bubble Shooter APK?
-
A: You can contact the developers of Bubble Shooter APK by sending them an email at support@bubbleshooter.com or by visiting their website. You can also follow them on Facebook, Twitter, or Instagram for updates, news, and tips.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Animal Voice How to Record and Edit Animal Sounds for Your Projects.md b/spaces/1phancelerku/anime-remove-background/Animal Voice How to Record and Edit Animal Sounds for Your Projects.md
deleted file mode 100644
index f25d55e176e6d197e4fbaa80af860f7a35cd436b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Animal Voice How to Record and Edit Animal Sounds for Your Projects.md
+++ /dev/null
@@ -1,192 +0,0 @@
-
-
Animal Voice: How Animals Communicate and How You Can Train Your Pet to Talk
-
Introduction
-
Have you ever wondered what your pet is trying to tell you when they bark, meow, or chirp? Have you ever wished you could teach your pet to talk and understand what they are thinking and feeling? If so, you are not alone. Many animal lovers are fascinated by the idea of animal voice and communication.
-
Animal voice is the term used to describe the sounds, gestures, and other signals that animals use to communicate with each other and with humans. Animal communication is a complex and diverse phenomenon that involves various modes, functions, and contexts. Animal communication is also an important source of information that influences the behavior and decision making of both senders and receivers.
In this article, you will learn more about animal voice and communication, such as what types of signals animals use, how they vary across species and situations, and what benefits and challenges they entail. You will also learn how you can train your pet to talk using recordable dog training buttons, which are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words. By the end of this article, you will have a better understanding of animal voice and communication, as well as some practical tips and tricks on how to train your pet to talk using buttons.
-
Types of animal voice and communication
-
Visual signals: gestures, facial expressions, colors, patterns, etc.
-
One of the most common types of animal voice and communication is visual signals. Visual signals are actions or anatomical structures that provide information to another animal through sight. Visual signals can include gestures, facial expressions, body postures, movements, colors, patterns, displays, etc.
-
-
Examples of visual signals in different animals
-
Some examples of visual signals in different animals are:
-
-
- The parent herring gull displays its bright yellow bill next to its chick when it has returned to the nest with food. The chicks exhibit a begging response by tapping the red spot on the lower mandible of the parent herring gull, which triggers the parent to regurgitate food for them.
-
The male peacock displays its colorful and elaborate tail feathers to attract the attention and preference of the female peahen. The size, shape, and symmetry of the tail feathers indicate the quality and fitness of the male peacock.
-
The dog shows its submission or appeasement to another dog or human by lowering its head, tucking its tail, flattening its ears, licking its lips, and avoiding eye contact. These signals indicate that the dog is not a threat and wants to avoid conflict.
-
-
Advantages and disadvantages of visual signals
-
Visual signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:
-
-
Visual signals can be fast, precise, and efficient in conveying information over short distances.
-
Visual signals can be modified or adjusted according to the context and the feedback of the receiver.
-
Visual signals can be used to communicate complex and diverse messages, such as identity, status, mood, intention, etc.
-
-
Some of the disadvantages are:
-
-
Visual signals require a clear line of sight between the sender and the receiver, which can be obstructed by obstacles, distance, or darkness.
-
Visual signals can be costly to produce and maintain, especially if they involve elaborate structures or displays that require energy and resources.
-
Visual signals can be easily detected and exploited by predators, parasites, or competitors, which can pose a risk to the sender or the receiver.
-
-
Auditory signals: sounds, calls, songs, etc.
-
Another common type of animal voice and communication is auditory signals. Auditory signals are sounds that animals produce and perceive through hearing. Auditory signals can include calls, songs, cries, whistles, clicks, etc.
-
Examples of auditory signals in different animals
-
Some examples of auditory signals in different animals are:
-
-
The humpback whale produces complex and melodious songs that can last for hours and travel for hundreds of kilometers underwater. The songs are used by males to attract females and to compete with other males during the breeding season.
-
The vervet monkey emits different alarm calls depending on the type of predator it encounters. For example, it makes a high-pitched shriek for aerial predators like eagles, a low-pitched bark for terrestrial predators like leopards, and a chuttering sound for snakes. These calls alert other monkeys to take appropriate defensive actions.
-
The parrot mimics the sounds of other animals or humans that it hears in its environment. The parrot uses these sounds to communicate with its social group, to bond with its mate or owner, or to manipulate or deceive others.
-
-
Advantages and disadvantages of auditory signals
-
Auditory signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:
-
-
Auditory signals can travel over long distances and reach multiple receivers at once.
-
Auditory signals can be transmitted and received in any direction, regardless of the orientation or position of the sender or the receiver.
-
Auditory signals can be varied in pitch, volume, duration, rhythm, tone, etc., to convey different meanings and emotions.
-
-
Some of the disadvantages are:
-
-
Auditory signals can be affected by noise interference from other sources, such as wind, water, traffic, etc.
-
Auditory signals can be difficult to localize or identify the source or direction of the sound.
-
Auditory signals can be hard to modify or retract once they are emitted.
-
-
Chemical signals: pheromones, scents, tastes, etc.
-
A less obvious but equally important type of animal voice and communication is chemical signals. Chemical signals are substances that animals secrete or release into their environment that affect the behavior or physiology of another animal through smell or taste. Chemical signals can include pheromones, scents, tastes, etc.
-
Examples of chemical signals in different animals
-
Some examples of chemical signals in different animals are:
-
-
The honey bee releases a pheromone called alarm pheromone when it stings an intruder. The pheromone attracts other bees to join the attack and defend the hive.
-
The cat rubs its cheek glands on objects or people that it likes or owns. The scent marks its territory and signals its affiliation and identity to other cats.
-
The ant follows a trail of pheromones left by other ants to find food sources or nest sites. The pheromones also indicate the quality and quantity of the food or the suitability of the nest.
-
-
Advantages and disadvantages of chemical signals
-
Chemical signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:
-
-
Chemical signals can persist for a long time and remain effective even when the sender or the receiver is absent.
-
Chemical signals can convey information that is not easily detected by other senses, such as genetic compatibility, reproductive status, health condition, etc.
-
Chemical signals can be combined with other modes of communication to enhance or modify their effects.
-
-
Some of the disadvantages are:
-
-
Chemical signals can be slow to reach the receiver and require close proximity or contact.
-
Chemical signals can be diluted, degraded, or masked by environmental factors, such as temperature, humidity, wind, etc.
-
Chemical signals can be difficult to interpret or distinguish from other sources, especially if they are similar or overlapping.
-
-
Tactile signals: touch, vibration, electric fields, etc.
-
The last type of animal voice and communication that we will discuss is tactile signals. Tactile signals are physical stimuli that animals apply or receive through touch or other forms of contact. Tactile signals can include touch, vibration, electric fields, etc.
-
Examples of tactile signals in different animals
-
Some examples of tactile signals in different animals are:
-
-
The elephant uses its trunk to touch and caress other elephants as a sign of affection, reassurance, or comfort. The trunk also serves as a tool for exploring, manipulating, and communicating with objects and other animals.
-
The spider senses the vibration of its web when a prey or a predator approaches. The spider can also use its legs to send vibration signals to other spiders for mating or territorial purposes.
-
The electric eel generates electric pulses that it uses to navigate, locate, and stun its prey. The electric eel can also communicate with other electric eels by modulating the frequency and intensity of its electric pulses.
-
-
Advantages and disadvantages of tactile signals
-
Tactile signals have some advantages and disadvantages as a mode of animal voice and communication. Some of the advantages are:
-
-
Tactile signals can be very precise and specific in conveying information to a single or a few receivers.
-
Tactile signals can be used in situations where other modes of communication are ineffective or unavailable, such as in darkness, silence, or underwater.
-
Tactile signals can elicit immediate and strong responses from the receiver, such as arousal, alarm, or pain.
-
-
Some of the disadvantages are:
-
-
Tactile signals require direct contact or close proximity between the sender and the receiver, which can limit their range and scope.
-
Tactile signals can be invasive or unwanted by the receiver, especially if they are aggressive or harmful.
-
Tactile signals can be easily disrupted or blocked by physical barriers or interference.
-
-
How to train your pet to talk using buttons
-
What are recordable dog training buttons and how do they work?
-
If you want to train your pet to talk using buttons, you will need some recordable dog training buttons. These are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words. For example, you can record words like "outside", "play", "water", "treat", etc., on different buttons and place them on a mat or a board. Then you can teach your pet to associate each button with its corresponding word and action. When your pet wants something or wants to communicate something to you, they can press the appropriate button and hear the word spoken out loud.
-
Recordable dog training buttons are based on the idea that animals can learn to use symbols or words to communicate with humans. This idea has been tested and proven by many studies and experiments involving animals like chimpanzees, dolphins, parrots, etc. Recordable dog training buttons are also inspired by augmentative and alternative communication (AAC) devices that are used by humans who have speech impairments or disabilities. AAC devices help these humans communicate with others using pictures, symbols, gestures, sounds, etc.
-
Recordable dog training buttons are easy to use and affordable, and you can find them online or in pet stores. For example, you can check out the PawTalk Recordable Dog Buttons, the Hunger for Words Talking Pet Starter Set, the Talking Products Talking Tiles, or the Decdeal Recordable Talking Button With LED Function. These are some of the popular and recommended products that you can use to train your pet to talk using buttons.
-
How to teach your dog to speak on command using buttons
-
One of the simplest and most fun ways to train your pet to talk using buttons is to teach them to speak on command. This means that you will teach your dog to bark when you ask them to, and then associate that bark with a word on a button. For example, you can teach your dog to say "hello" by barking when you say "hello" and then pressing a button that says "hello". This way, your dog will learn that barking and pressing the button are both ways of saying "hello". Here are the steps to teach your dog to speak on command using buttons:
-
Step 1: Have your reward ready
-
The first step is to have a reward ready for your dog. This can be a treat, a toy, or praise, depending on what your dog likes best. You will use this reward to reinforce your dog's behavior and make them more likely to repeat it. Make sure you have enough rewards for multiple repetitions and sessions.
-
Step 2: Get your dog to speak naturally
-
The next step is to get your dog to speak naturally. This means that you will wait for your dog to bark on their own, without prompting them. You can do this by observing your dog and noticing what triggers them to bark, such as a doorbell, a squirrel, or another dog. You can also try to make your dog excited or curious by playing with them, showing them something interesting, or hiding behind something. When your dog barks, mark the behavior with a clicker or a word like "yes" or "good". Then give them the reward immediately.
-
Step 3: Mark the bark with a cue word and a reward
-
The third step is to mark the bark with a cue word and a reward. This means that you will say a word that you want your dog to associate with barking, such as "speak", "talk", or "bark", right before or as your dog barks. Then give them the reward as usual. For example, if you want your dog to say "hello", you can say "hello" when they bark and then give them the reward. Repeat this several times until your dog learns that barking when you say "hello" earns them a reward.
-
Step 4: Add a hand signal if desired
-
The fourth step is optional, but it can help your dog learn faster and more reliably. You can add a hand signal that matches the cue word, such as waving your hand or pointing at your mouth, when you say the word and wait for your dog to bark. Then give them the reward as usual. For example, if you want your dog to say "hello", you can wave your hand and say "hello" when they bark and then give them the reward. Repeat this several times until your dog learns that barking when you wave your hand and say "hello" earns them a reward.
-
Step 5: Practice and reinforce the behavior consistently
-
The final step is to practice and reinforce the behavior consistently. This means that you will ask your dog to speak on command using the cue word and/or the hand signal, and then reward them for barking. You can also introduce a button that says the word that you want your dog to say, such as "hello", and place it near your dog. When your dog barks on command, press the button for them so they can hear the word spoken out loud. Then give them the reward as usual. Repeat this several times until your dog learns that barking on command and pressing the button are both ways of saying the word.
-
Tips and tricks for training your dog to speak using buttons
-
Training your pet to talk using buttons can be a fun and rewarding experience for both of you, but it also requires some patience and consistency. Here are some tips and tricks that can help you train your pet more effectively:
-
Be patient and consistent
-
Don't expect your pet to learn overnight or without mistakes. It may take some time and practice for your pet to understand what you want them to do and how to do it correctly. Be patient and consistent with your training sessions, and don't give up or get frustrated if your pet doesn't get it right away. Keep the sessions short, fun, and positive, and end on a high note. Reward your pet for every correct response, and ignore or redirect any incorrect or unwanted behavior. Gradually increase the difficulty and complexity of the commands and the buttons as your pet progresses.
-
Reward only barking on command and not nuisance barking
-
One of the potential drawbacks of teaching your pet to speak using buttons is that they may start to bark excessively or inappropriately, such as when they are bored, anxious, or attention-seeking. This can be annoying and disruptive for you and your neighbors. To prevent this, you should only reward your pet for barking on command and not for nuisance barking. You should also teach your pet a "quiet" command that tells them to stop barking, and reward them for obeying it. You can also provide your pet with enough mental and physical stimulation, such as toys, games, walks, etc., to keep them happy and occupied.
-
Capture and mark only a single bark or a desired number of barks
-
Another challenge of teaching your pet to speak using buttons is that they may bark too much or too little when you ask them to. For example, they may bark multiple times when you want them to say "hello", or they may not bark at all when you want them to say "yes". To avoid this, you should capture and mark only a single bark or a desired number of barks when you train your pet. You can do this by using a clicker or a word like "yes" or "good" to mark the exact moment when your pet barks the way you want them to. Then give them the reward immediately. This will help your pet learn to control their barking and match it with the word on the button.
-
Be mindful of your neighbors and the noise level of your dog's barking
-
The last tip for training your pet to speak using buttons is to be mindful of your neighbors and the noise level of your dog's barking. Some people may not appreciate hearing your dog talk all day long, especially if they are loud or frequent. You should respect your neighbors' privacy and comfort, and try to limit your training sessions to reasonable hours and durations. You should also choose words that are not too loud or harsh, such as "hi", "ok", "yay", etc., instead of words that are louder or more aggressive, such as "no", "stop", "bad", etc. You can also use volume control buttons that allow you to adjust the loudness of the words on the buttons.
-
Conclusion
-
Summary of the main points of the article
-
In conclusion, animal voice and communication are fascinating and diverse phenomena that involve various types of signals, such as visual, auditory, chemical, and tactile signals. Animals use these signals to communicate with each other and with humans for various purposes, such as survival, reproduction, socialization, etc. Animal voice and communication have some advantages and disadvantages depending on the mode, function, and context of the communication.
-
You can also train your pet to talk using recordable dog training buttons, which are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words. You can teach your pet to speak on command using buttons by following some simple steps, such as having a reward ready, getting your pet to speak naturally, marking the bark with a cue word and a reward, adding a hand signal if desired, and practicing and reinforcing the behavior consistently. You can also use some tips and tricks to train your pet more effectively, such as being patient and consistent, rewarding only barking on command and not nuisance barking, capturing and marking only a single bark or a desired number of barks, and being mindful of your neighbors and the noise level of your dog's barking.
-
By training your pet to talk using buttons, you can enhance your bond with your pet, enrich your pet's mental and physical well-being, and have fun and meaningful conversations with your pet. You can also learn more about your pet's personality, preferences, and emotions, and appreciate the diversity and complexity of animal voice and communication.
-
Call to action for the readers to try training their pet to talk using buttons
-
If you are interested in training your pet to talk using buttons, why not give it a try? You can start by getting some recordable dog training buttons online or in pet stores, and following the steps and tips that we have shared in this article. You can also watch some videos or read some stories of other pet owners who have successfully trained their pets to talk using buttons, such as Bunny the talking dog, Stella the talking dog, or Billi Speaks. These are some of the amazing and inspiring examples of pets who have learned to communicate with their humans using buttons.
-
Training your pet to talk using buttons can be a rewarding and enjoyable experience for both of you, and you may be surprised by how much your pet has to say. So don't hesitate and start training your pet to talk using buttons today!
-
FAQs
-
What is animal voice and communication?
-
Animal voice and communication are the sounds, gestures, and other signals that animals use to communicate with each other and with humans.
-
What types of signals do animals use to communicate?
-
Animals use various types of signals to communicate, such as visual, auditory, chemical, and tactile signals.
-
What are recordable dog training buttons?
-
Recordable dog training buttons are devices that allow your pet to express their wants, needs, and thoughts by pressing buttons that produce pre-recorded words.
-
How can I train my pet to speak on command using buttons?
-
You can train your pet to speak on command using buttons by following some simple steps, such as having a reward ready, getting your pet to speak naturally, marking the bark with a cue word and a reward, adding a hand signal if desired, and practicing and reinforcing the behavior consistently.
-
What are some tips and tricks for training my pet to speak using buttons?
-
Some tips and tricks for training your pet to speak using buttons are being patient and consistent, rewarding only barking on command and not nuisance barking, capturing and marking only a single bark or a desired number of barks, and being mindful of your neighbors and the noise level of your dog's barking.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Descarga Chicken Gun Mod Apk con Dinero Infinito y Menu de Mods.md b/spaces/1phancelerku/anime-remove-background/Descarga Chicken Gun Mod Apk con Dinero Infinito y Menu de Mods.md
deleted file mode 100644
index 19488a651ff3d834e8efd1fced266c19b75e395c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Descarga Chicken Gun Mod Apk con Dinero Infinito y Menu de Mods.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-
Chicken Gun Dinero Infinito APK: How to Download and Play the Ultimate Shooting Game
-
If you are looking for a fun and hilarious multiplayer shooter game, you should try Chicken Gun. This game lets you play as a chicken with a gun, and your goal is to shoot other chickens in various maps and modes. You can also customize your chicken with different weapons, accessories, and skins. But what if you want to have more money and health in the game? That's where Chicken Gun Dinero Infinito APK comes in. This is a modified version of the game that gives you unlimited resources and advantages. In this article, we will tell you what Chicken Gun is, what Chicken Gun Dinero Infinito APK is, and how to download and install it on your device.
A hilarious and addictive multiplayer shooter game
-
Chicken Gun is a game developed by ChaloApps, a studio based in Argentina. It was released in 2020 for Android and iOS devices. It is a 3D shooter that features chickens as the main characters. You can play as a chicken with a gun, and your objective is to shoot other chickens in different maps and modes. You can play solo or with your friends online, and compete with other players from around the world. The game has a cartoonish and colorful graphics style, with funny sound effects and music. The game is suitable for all ages, as it does not contain any gore or violence.
-
Features of Chicken Gun
-
Play as shooting chickens
-
The game lets you choose from different types of chickens, such as white, brown, black, or rainbow. Each chicken has its own stats, such as speed, health, damage, and accuracy. You can also upgrade your chicken's skills with coins that you earn from playing the game.
-
Choose from various weapons
-
The game offers a variety of weapons that you can use to shoot other chickens, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. Each weapon has its own characteristics, such as range, fire rate, reload time, and ammo capacity. You can also switch between different weapons during the game.
-
chicken gun mod apk unlimited money
-chicken gun hack apk download
-chicken gun mod menu apk mediafire
-chicken gun game online
-chicken gun apk mod menu 2023
-chicken gun dinheiro infinito atualizado
-chicken gun mod apk latest version
-chicken gun hack apk 2023
-chicken gun game download
-chicken gun apk mod menu download
-chicken gun dinheiro infinito mediafire
-chicken gun mod apk android 1
-chicken gun hack apk mediafire
-chicken gun game mod apk
-chicken gun apk mod menu atualizado
-chicken gun dinheiro infinito 2023
-chicken gun mod apk free download
-chicken gun hack apk android 1
-chicken gun game hack
-chicken gun apk mod menu free fire
-chicken gun dinheiro infinito download
-chicken gun mod apk unlimited gems
-chicken gun hack apk free fire
-chicken gun game online multiplayer
-chicken gun apk mod menu hack
-chicken gun dinheiro infinito gratis
-chicken gun mod apk no root
-chicken gun hack apk no root
-chicken gun game offline
-chicken gun apk mod menu no root
-chicken gun dinheiro infinito no root
-chicken gun mod apk offline
-chicken gun hack apk offline
-chicken gun game play store
-chicken gun apk mod menu offline
-chicken gun dinheiro infinito play store
-chicken gun mod apk online multiplayer
-chicken gun hack apk online multiplayer
-chicken gun game review
-chicken gun apk mod menu play store
-chicken gun dinheiro infinito para android
-chicken gun mod apk rexdl
-chicken gun hack apk rexdl
-chicken gun game trailer
-chicken gun apk mod menu rexdl
-chicken gun dinheiro infinito sem root
-chicken gun mod apk unlimited health
-chicken gun hack apk unlimited health
-
Defeat other players online
-
The game has several modes that you can play online with other players, such as deathmatch, team deathmatch, capture the flag, zombie mode, and more. You can join or create a room with up to 10 players per team, and chat with them using voice or text messages. You can also view your stats and rank on the leaderboard.
-
Customize your cute chickens
-
The game allows you to personalize your chicken with different accessories, such as hats, glasses, masks, helmets, backpacks, wings, tails, and more. You can also change your chicken's skin color and pattern. You can mix and match different items to create your own unique style. You can also preview how your chicken looks before entering the game.
-
Be the best shooting chicken
-
The game challenges you to be the best shooting chicken in the world. You can earn coins and gems by playing the game, and use them to buy more weapons and accessories. You can also unlock achievements and trophies by completing various tasks and missions. You can also share your gameplay videos and screenshots with your friends on social media.
-
What is Chicken Gun Dinero Infinito APK?
-
A modified version of Chicken Gun with unlimited money and health
-
Chicken Gun Dinero Infinito APK is a modified version of Chicken Gun that gives you unlimited money and health in the game. This means that you can buy all the weapons and accessories that you want, and never run out of health or ammo. You can also play the game without any ads or in-app purchases. This way, you can enjoy the game without any limitations or interruptions.
-
Benefits of Chicken Gun Dinero Infinito APK
-
Unlock all the weapons and accessories
-
With Chicken Gun Dinero Infinito APK, you can unlock all the weapons and accessories that are available in the game. You can choose from over 50 weapons, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. You can also customize your chicken with over 100 accessories, such as hats, glasses, masks, helmets, backpacks, wings, tails, and more. You can create your own unique chicken with different combinations of items.
-
Survive longer and win more matches
-
With Chicken Gun Dinero Infinito APK, you can survive longer and win more matches in the game. You can have unlimited health and ammo in the game, which means that you can withstand any damage and shoot as much as you want. You can also have unlimited coins and gems in the game, which means that you can upgrade your chicken's skills and abilities. You can also have unlimited lives in the game, which means that you can respawn as many times as you want. You can dominate the game with these advantages.
-
Enjoy the game without ads or in-app purchases
-
With Chicken Gun Dinero Infinito APK, you can enjoy the game without ads or in-app purchases. You can play the game without any annoying ads that pop up on your screen or interrupt your gameplay. You can also play the game without any in-app purchases that ask you to spend real money to get more coins or gems. You can have everything for free with this modded version of the game.
-
How to Download and Install Chicken Gun Dinero Infinito APK?
-
Follow these simple steps to get the game on your device
-
Step 1: Enable unknown sources on your device settings
-
To download and install Chicken Gun Dinero Infinito APK, you need to enable unknown sources on your device settings. This will allow you to install apps that are not from the official Google Play Store. To do this, go to your device settings > security > unknown sources > enable.
-
Step 2: Download the APK file from a trusted source
-
To download Chicken Gun Dinero Infinito APK, you need to find a trusted source that provides the APK file. You can search online for websites that offer this modded version of the game. Make sure that the website is safe and reliable before downloading anything from it. You can also scan the APK file with antivirus software before installing it.
-
Step 3: Locate and install the APK file on your device
-
To install Chicken Gun Dinero Infinito APK, you need to locate the APK file on your device. You can use a file manager app to find the file in your downloads folder or any other location where you saved it. Once you find it, tap on it and follow the instructions on your screen to install it.
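As an optional alternative, readers who are comfortable with a computer can sideload the file over USB with adb instead of tapping through a file manager. The snippet below is only a minimal sketch: it assumes adb (Android platform-tools) is installed, USB debugging is enabled on the phone, and the file name chicken-gun-dinero-infinito.apk is a placeholder for whatever your downloaded file is actually called.

```python
import subprocess

# Placeholder name; replace with the actual file you downloaded.
APK_PATH = "chicken-gun-dinero-infinito.apk"

# Assumes adb is on your PATH and USB debugging is enabled on the device.
# The "-r" flag reinstalls the app if an older version is already present.
subprocess.run(["adb", "install", "-r", APK_PATH], check=True)
```

If the command succeeds, the game appears in the app drawer just as it would after an on-device install.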
-
Step 4: Launch the game and have fun
-
To play Chicken Gun Dinero Infinito APK, you need to launch the game on your device. You can find it on your app drawer or home screen. Tap on it and enjoy playing the ultimate shooting game with unlimited money and health.
-
Conclusion
-
Chicken Gun Dinero Infinito APK is a great way to enjoy the game with more features and fun
-
Chicken Gun is a fun and hilarious multiplayer shooter game that lets you play as a chicken with a gun. You can shoot other chickens in various maps and modes, customize your chicken with different weapons and accessories , and compete with other players online. Chicken Gun Dinero Infinito APK is a modified version of the game that gives you unlimited money and health in the game. You can unlock all the weapons and accessories, survive longer and win more matches, and enjoy the game without ads or in-app purchases. To download and install Chicken Gun Dinero Infinito APK, you need to enable unknown sources on your device settings, download the APK file from a trusted source, locate and install the APK file on your device, and launch the game and have fun. Chicken Gun Dinero Infinito APK is a great way to enjoy the game with more features and fun.
-
Here are some FAQs that you might have about Chicken Gun Dinero Infinito APK:
-
Q: Is Chicken Gun Dinero Infinito APK safe to use?
-
A: Chicken Gun Dinero Infinito APK is safe to use as long as you download it from a trusted source and scan it with an antivirus software before installing it. However, you should be aware that using a modded version of the game might violate the terms and conditions of the original game, and you might face some risks or consequences from the game developers or authorities.
-
Q: Do I need to root my device to use Chicken Gun Dinero Infinito APK?
-
A: No, you do not need to root your device to use Chicken Gun Dinero Infinito APK. You just need to enable unknown sources on your device settings and install the APK file as you would with any other app.
-
Q: Can I play Chicken Gun Dinero Infinito APK with my friends online?
-
A: Yes, you can play Chicken Gun Dinero Infinito APK with your friends online. You can join or create a room with up to 10 players per team, and chat with them using voice or text messages. However, you should be aware that some players might not like playing with modded users, and they might report you or kick you out of the room.
-
Q: Can I update Chicken Gun Dinero Infinito APK to the latest version of the game?
-
A: No, you cannot update Chicken Gun Dinero Infinito APK to the latest version of the game. You need to wait for the modded version of the game to be updated by its developers. If you try to update the game from the official Google Play Store, you might lose all your modded features and data.
-
Q: Where can I find more information about Chicken Gun Dinero Infinito APK?
-
A: You can find more information about Chicken Gun Dinero Infinito APK by searching online for websites or forums that provide this modded version of the game. You can also watch videos or read reviews from other users who have tried this modded version of the game.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Discover the Truth of the Universe in Mineirinho Ultra Adventures 2 Mobile Illuminati Trail DLC.md b/spaces/1phancelerku/anime-remove-background/Discover the Truth of the Universe in Mineirinho Ultra Adventures 2 Mobile Illuminati Trail DLC.md
deleted file mode 100644
index ee15fde0ee8cc750cb2a432a126aef24b4a5e98b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Discover the Truth of the Universe in Mineirinho Ultra Adventures 2 Mobile Illuminati Trail DLC.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-
Mineirinho Ultra Adventures 2 Mobile: A Guide for Beginners
-
If you are looking for a challenging and fun 3D platform game that will test your skills and reflexes, then you should try Mineirinho Ultra Adventures 2 Mobile. This game is the sequel to the popular Mineirinho Ultra Adventures, which was released in 2017 and became a cult hit among gamers. In this game, you will join our friend Miner, a Brazilian hero who goes on amazing adventures with extreme difficulty. You will explore different worlds, face various enemies, collect power ups, and overcome all the obstacles that stand in your way. This game is not for the faint of heart, as it requires a lot of patience, perseverance, and precision. But if you are up for the challenge, you will find a lot of satisfaction and enjoyment in this game.
-
How to download Mineirinho Ultra Adventures 2 Mobile on your device
-
Mineirinho Ultra Adventures 2 Mobile is available for both Android and iOS devices. You can download it from the Google Play Store or the App Store, depending on your device. The game is free to download and play, but it contains ads and in-app purchases. You can also play the game on your PC by downloading it from Steam, where it costs $5.99. However, you will need a compatible controller to play the game on your PC.
What are the features of Mineirinho Ultra Adventures 2 Mobile?
-
Mineirinho Ultra Adventures 2 Mobile is a game that offers a lot of features that make it unique and exciting. Here are some of the features that you can expect from this game:
-
Real Physics
-
The game uses real physics to simulate the movement and interaction of objects and characters in the game world. This means that you will have to deal with gravity, inertia, friction, momentum, and other forces that affect your gameplay. For example, you can use a bubblegum rope to swing from one platform to another, but you have to be careful not to lose your balance or hit any obstacles along the way.
-
Bubblegum Rope
-
One of the most distinctive features of this game is the bubblegum rope, which is a special power up that allows you to swing from one place to another like Spider-Man. You can use the bubblegum rope to reach higher places, cross gaps, avoid enemies, or just have fun. The bubblegum rope has a limited length and durability, so you have to use it wisely and strategically.
-
Excellent for Speedrun
-
If you are a fan of speedrunning, which is the practice of completing a game or a level as fast as possible, then you will love this game. The game has many levels that are designed for speedrunning, with different routes, shortcuts , and challenges that will test your skills and reflexes. You can also compete with other players online and see who can finish the levels faster. The game has a leaderboard system that ranks the best players in the world, as well as a replay feature that lets you watch your own or other players' runs.
-
Super Ultra Adventures
-
The game has a total of 12 worlds, each with its own theme, enemies, obstacles, and boss. You will travel to different places, such as the jungle, the desert, the city, the snow, the space, and more. Each world has 10 levels, plus a bonus level and a boss level. The levels are full of surprises and secrets that will keep you entertained and curious. You will also encounter many different enemies, such as snakes, spiders, scorpions, robots, aliens, and more. Some of them are easy to defeat, while others will require more strategy and skill. The boss levels are especially challenging and fun, as you will have to face a giant enemy that has its own attacks and patterns.
-
Cool Toon Shader
-
The game has a colorful and cartoonish graphics style that uses a toon shader effect. This means that the game has a cel-shaded look that makes it look like a comic book or an animated movie. The game also has a lot of humor and personality, with funny animations, expressions, and sounds. The game is suitable for all ages and audiences, as it does not contain any violence or gore.
-
Extreme Difficulty
-
One of the main features of this game is its extreme difficulty level. This game is not for casual gamers or beginners, as it requires a lot of skill, patience, and perseverance. The game is very hard to beat, as it has many traps, pitfalls, spikes, enemies, and other hazards that will make you die a lot. The game also has a permadeath system, which means that if you die in a level, you have to start from the beginning of the world. The game does not have any checkpoints or save points, so you have to be very careful and cautious. The game also does not have any tutorials or hints, so you have to figure out everything by yourself.
-
How to download mineirinho ultra adventures 2 on android
-Mineirinho ultra adventures 2 apk free download
-Best 3d platform games for mobile devices
-Mineirinho ultra adventures 2 steam review
-Mineirinho ultra adventures 2 gameplay and walkthrough
-Mineirinho ultra adventures 2 multiplayer mode
-Mineirinho ultra adventures 2 cheats and tips
-Mineirinho ultra adventures 2 vs mineirinho classic
-Mineirinho ultra adventures 2 system requirements
-Mineirinho ultra adventures 2 trailer and screenshots
-Mineirinho ultra adventures 2 speedrun challenge
-Mineirinho ultra adventures 2 bubblegum rope feature
-Mineirinho ultra adventures 2 cool toon shader effect
-Mineirinho ultra adventures 2 extreme difficulty level
-Mineirinho ultra adventures 2 food power ups guide
-Mineirinho ultra adventures 2 developer interview
-Mineirinho ultra adventures 2 release date and price
-Mineirinho ultra adventures 2 dlc illuminati trail
-Mineirinho ultra adventures 2 sinned games franchise
-Mineirinho ultra adventures 2 jazzghost youtube video
-Download mineirinho ultra adventures 2 for ios
-Mineirinho ultra adventures 2 mod apk unlimited money
-Top 10 3d platform games for mobile in 2023
-Mineirinho ultra adventures 2 steam key giveaway
-Mineirinho ultra adventures 2 online co-op mode
-Mineirinho ultra adventures 2 hack and mod menu
-Mineirinho ultra adventures 2 comparison with other games
-Mineirinho ultra adventures 2 achievements and trophies
-Mineirinho ultra adventures 2 minimum and recommended specs
-Mineirinho ultra adventures 2 official website and social media
-Download mineirinho ultra adventures 2 for pc
-Mineirinho ultra adventures 2 cracked apk download link
-Best mobile games made by blender game engine
-Mineirinho ultra adventures 2 steam discount code
-Mineirinho ultra adventures 2 offline single-player mode
-Mineirinho ultra adventures 2 unlimited lives and coins
-Mineirinho ultra adventures 2 fan art and memes
-Mineirinho ultra adventures 2 soundtrack and music
-Mineirinho ultra adventures 2 optimal settings and performance
-Mineirinho ultra adventures 2 contact and support information
-
Many Crazy Levels
-
The game has many crazy levels that will challenge your creativity and imagination. The levels are full of puzzles, secrets, hidden areas, and Easter eggs that will make you explore every corner of the game world. The levels are also very varied and unpredictable, as they have different mechanics and elements that will change your gameplay. For example, some levels have gravity switches that will make you walk on walls or ceilings, some levels have portals that will teleport you to different places, some levels have water or lava that will affect your movement and abilities, and so on.
-
Super Fun Multiplayer
-
The game also has a super fun multiplayer mode that lets you play with up to four friends online or locally. You can choose from different modes, such as co-op mode, where you work together to complete the levels; versus mode , where you compete against each other to finish the levels faster or collect more items; and party mode, where you play mini-games that are based on the game mechanics. The multiplayer mode is very fun and chaotic, as you can cooperate or sabotage each other, use power ups or traps, and chat or taunt each other.
-
Radical Movements
-
The game also has a lot of radical movements that you can perform with your character. You can run, jump, slide, roll, dash, wall jump, and more. You can also use the bubblegum rope to swing, pull, or launch yourself. The game has a smooth and responsive control system that lets you execute these movements with ease and precision. You will need to master these movements to overcome the challenges and obstacles in the game.
-
Cool Food Power Ups
-
The game also has a lot of cool food power ups that you can collect and use in the game. These power ups are based on Brazilian cuisine and culture, such as feijoada, brigadeiro, guarana, caipirinha, and more. Each power up has a different effect and duration, such as giving you extra speed, health, invincibility, or other abilities. You can also combine different power ups to create new effects and combinations. The power ups are very useful and fun to use in the game.
-
How to play Mineirinho Ultra Adventures 2 Mobile?
-
Now that you know what the game is about and what features it offers, you might be wondering how to play it. Here are some basic tips and instructions on how to play Mineirinho Ultra Adventures 2 Mobile:
-
Controls and Gameplay
-
The game has different controls depending on the device you are using. If you are playing on a mobile device, you will use the touch screen to control your character. You will have a virtual joystick on the left side of the screen to move your character, and buttons on the right side of the screen to jump, slide, dash, use the bubblegum rope, or use a power up. You can also swipe the screen to change the camera angle or zoom in or out. If you are playing on a PC, you will use a controller to control your character. You will have a left stick to move your character, and buttons to jump, slide, dash, use the bubblegum rope, or use a power up. You can also use the right stick to change the camera angle or zoom in or out.
-
The gameplay is simple but challenging. Your goal is to complete each level by reaching the end of it without dying. You will have to avoid or defeat enemies, dodge or overcome obstacles, collect items and power ups, and solve puzzles along the way. You will also have to face a boss at the end of each world. The game has a timer that shows how long it takes you to finish each level. You can also collect stars that are hidden in each level. The stars are used to unlock new worlds and levels in the game.
-
Tips and Tricks
-
Here are some tips and tricks that will help you play better and enjoy more Mineirinho Ultra Adventures 2 Mobile:
-
-
Practice makes perfect. The game is very hard and unforgiving, so you will need a lot of practice and patience to beat it. Don't give up if you die a lot or get stuck in a level. Try again and learn from your mistakes.
-
Use the bubblegum rope wisely. The bubblegum rope is a very useful tool that can help you reach places that are otherwise inaccessible or too dangerous. However, it also has a limited length and durability, so you have to use it carefully and strategically. Don't waste it on unnecessary swings or pulls.
-
Explore every corner of the game world. The game has many secrets and hidden areas that will reward you with items, power ups, stars, or Easter eggs. Don't be afraid to explore every corner of the game world and look for clues or hints that might lead you to these secrets.
-
Experiment with different power ups and combinations. The game has many cool food power ups that can give you different effects and abilities. You can also combine different power ups to create new effects and combinations. Experiment with different power ups and combinations and see what works best for you.
-
Have fun with multiplayer mode. The game has a super fun multiplayer mode that lets you play with up to four friends online or locally. You can choose from different modes , such as co-op mode, where you work together to complete the levels; versus mode, where you compete against each other to finish the levels faster or collect more items; and party mode, where you play mini-games that are based on the game mechanics. The multiplayer mode is very fun and chaotic, as you can cooperate or sabotage each other, use power ups or traps, and chat or taunt each other.
-
-
How does Mineirinho Ultra Adventures 2 Mobile compare to other games in the genre?
-
Mineirinho Ultra Adventures 2 Mobile is a game that belongs to the 3D platform genre, which is a type of game that involves moving and jumping on platforms in a three-dimensional environment. Some of the most famous and popular games in this genre are Super Mario 64, Crash Bandicoot, Banjo-Kazooie, Spyro the Dragon, and Sonic Adventure. How does Mineirinho Ultra Adventures 2 Mobile compare to these games?
-
Well, Mineirinho Ultra Adventures 2 Mobile is a game that has its own style and identity, as it is inspired by Brazilian culture and humor. It also has a lot of features that make it unique and different from other games in the genre, such as the real physics, the bubblegum rope, the extreme difficulty, the speedrun potential, and the cool food power ups. The game also has a lot of variety and creativity in its levels, enemies, bosses, and mechanics. The game is not a copy or a clone of any other game, but rather a homage and a tribute to the genre.
-
However, Mineirinho Ultra Adventures 2 Mobile is also a game that respects and follows the conventions and standards of the genre. It has a lot of elements that are common and familiar to fans of the genre, such as the 3D graphics, the platforming gameplay, the collectibles, the secrets, the power ups, the worlds, and the bosses. The game also has a lot of references and nods to other games in the genre, such as Mario's hat, Sonic's rings, Crash's crates, Spyro's gems, and Banjo's jiggy. The game is not a parody or a mockery of any other game, but rather a celebration and an appreciation of the genre.
-
Therefore, Mineirinho Ultra Adventures 2 Mobile is a game that can appeal to both fans and newcomers of the 3D platform genre. It is a game that offers a lot of challenge and fun for anyone who loves this type of game.
-
Conclusion
-
Mineirinho Ultra Adventures 2 Mobile is a game that you should definitely try if you are looking for a challenging and fun 3D platform game that will test your skills and reflexes. You will join our friend Miner, a Brazilian hero who goes on amazing adventures with extreme difficulty. You will explore different worlds, face various enemies, collect power ups, and overcome all the obstacles that stand in your way. You will also enjoy the colorful and cartoonish graphics style, the humorous and personality-filled animations and sounds, and the super fun multiplayer mode. You will also appreciate the real physics, the bubblegum rope, the speedrun potential, and the cool food power ups that make this game unique and different from other games in the genre. At the same time, the game respects the conventions and standards of the genre, and its references and nods to other 3D platformers make it a homage and a tribute to them. Mineirinho Ultra Adventures 2 Mobile is a game that you will not regret playing, as it will give you a lot of satisfaction and enjoyment.
-
So, what are you waiting for? Download Mineirinho Ultra Adventures 2 Mobile on your device today and start your super ultra adventure with Miner. You will not be disappointed. Have fun and good luck!
-
FAQs
-
Here are some frequently asked questions about Mineirinho Ultra Adventures 2 Mobile:
-
-
Q: How many levels are there in Mineirinho Ultra Adventures 2 Mobile?
-
A: There are 12 worlds, each with 10 levels, plus a bonus level and a boss level. That makes a total of 144 levels in the game.
-
Q: How can I unlock new worlds and levels in Mineirinho Ultra Adventures 2 Mobile?
-
A: You can unlock new worlds and levels by collecting stars that are hidden in each level. You need a certain number of stars to unlock each world and level.
-
Q: How can I play Mineirinho Ultra Adventures 2 Mobile with my friends?
-
A: You can play Mineirinho Ultra Adventures 2 Mobile with your friends online or locally. You can choose from different modes, such as co-op mode, versus mode, or party mode. You can also chat or taunt each other while playing.
-
Q: What are the best power ups to use in Mineirinho Ultra Adventures 2 Mobile?
-
A: The best power ups to use in Mineirinho Ultra Adventures 2 Mobile depend on your preference and situation. However, some of the most useful and fun power ups are the feijoada, which gives you extra health; the brigadeiro, which gives you invincibility; the guarana, which gives you extra speed; and the caipirinha, which makes you drunk and unpredictable.
-
Q: Where can I find more information about Mineirinho Ultra Adventures 2 Mobile?
-
A: You can find more information about Mineirinho Ultra Adventures 2 Mobile on the official website, the Facebook page, the Twitter account, or the YouTube channel. You can also contact the developer via email at contato@mineirinhoadventures.com.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Gas Station Simulator Mod APK - The Best Simulation Game for Android Users.md b/spaces/1phancelerku/anime-remove-background/Download Gas Station Simulator Mod APK - The Best Simulation Game for Android Users.md
deleted file mode 100644
index 0f3ea496866b7cc6a483657ef72830777f71028d..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Gas Station Simulator Mod APK - The Best Simulation Game for Android Users.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
Download Gas Station Simulator Mod APK Android: A Fun and Realistic Business Simulation Game
-
Do you dream of owning your own gas station and running a successful business? If so, you might want to try Gas Station Simulator, a game that lets you experience the challenges and rewards of managing a gas station. And if you want to make the game more fun and easy, you can download Gas Station Simulator mod apk android, which gives you unlimited money, gems, and other benefits. In this article, we will tell you more about this game and how to download the mod apk version for free.
Gas Station Simulator is a business simulation game in which you own and run your own gas station. You have just opened your own business, and you start small: the station you have purchased is neither large nor in good condition. You need to work and earn money to improve it, collect good reviews, and avoid bad ones.
-
In this game, you will have to perform various tasks such as refueling cars, repairing tires, washing vehicles, selling snacks, hiring staff, and more. You will also have to deal with different types of customers, some of whom may be rude or impatient. You will have to balance your budget, expenses, and income, as well as improve your reputation and customer satisfaction.
-
Features of Gas Station Simulator
-
- Manage your own gas station
-
You are the boss of your own gas station and you can decide how to run it. You can choose what services to offer, what products to sell, what prices to charge, and how to decorate your station. You can also hire and fire employees, assign them tasks, and train them.
-
- Upgrade your facilities and services
-
As you earn money from your business, you can invest it in upgrading your facilities and services. You can buy new equipment, expand your parking lot, add more pumps, install car washes, build convenience stores, and more. You can also unlock new types of vehicles, such as trucks, buses, motorcycles, etc.
-
- Interact with customers and employees
-
You will have to interact with various characters in the game, such as customers and employees. You will have to satisfy their needs and requests, as well as handle their complaints and feedback. You will also have to deal with different situations, such as robberies, accidents, fires, etc.
-
- Earn money and reputation
-
Your main goal in the game is to earn money and reputation. Money is needed to buy new items, upgrade your station, pay your bills, etc. Reputation is needed to attract more customers, get better reviews, unlock new features, etc. You can also compete with other players in leaderboards and achievements.
-
How to install gas station simulator mod apk on android
-Gas station simulator mod apk unlimited money and gems
-Gas station simulator mod apk latest version free download
-Gas station simulator hack mod apk for android devices
-Gas station simulator mod apk offline gameplay
-Gas station simulator mod apk with unlimited fuel and cash
-Gas station simulator mod apk no ads and no root
-Gas station simulator mod apk download link and instructions
-Gas station simulator mod apk features and benefits
-Gas station simulator mod apk review and rating
-Gas station simulator mod apk cheats and tips
-Gas station simulator mod apk update and bug fixes
-Gas station simulator mod apk requirements and compatibility
-Gas station simulator mod apk comparison and alternatives
-Gas station simulator mod apk support and feedback
-Best gas station simulator mod apk for android users
-Gas station simulator premium mod apk unlocked everything
-Gas station simulator pro mod apk with advanced features
-Gas station simulator mega mod apk with unlimited resources
-Gas station simulator vip mod apk with exclusive rewards
-Download gas station simulator cracked mod apk for android
-Download gas station simulator full mod apk for android
-Download gas station simulator original mod apk for android
-Download gas station simulator new mod apk for android
-Download gas station simulator old mod apk for android
-Download gas station simulator 3d mod apk for android
-Download gas station simulator hd mod apk for android
-Download gas station simulator realistic mod apk for android
-Download gas station simulator fun mod apk for android
-Download gas station simulator easy mod apk for android
-Download gas station simulator hard mod apk for android
-Download gas station simulator online mod apk for android
-Download gas station simulator offline mod apk for android
-Download gas station simulator multiplayer mod apk for android
-Download gas station simulator single player mod apk for android
-Download gas station simulator car wash mod apk for android
-Download gas station simulator car repair mod apk for android
-Download gas station simulator car tuning mod apk for android
-Download gas station simulator car racing mod apk for android
-Download gas station simulator car parking mod apk for android
-Download gas station simulator tycoon mod apk for android
-Download gas station simulator manager mod apk for android
-Download gas station simulator builder mod apk for android
-Download gas station simulator designer mod apk for android
-Download gas station simulator city mod apk for android
-Download gas station simulator country mod apk for android
-Download gas station simulator adventure mod apk for android
-Download gas station simulator simulation mod apk for android
-
Why download Gas Station Simulator mod apk android?
-
If you want to enjoy the game without any limitations or difficulties, you can download Gas Station Simulator mod apk android. This is a modified version of the game that gives you several advantages over the original version. Here are some of them:
-
- Unlimited money and gems
-
With this mod apk version, you will have unlimited money and gems in the game. This means that you can buy anything you want without worrying about the cost. You can also upgrade your station faster and easier.
-
- All items and upgrades unlocked
-
With this mod apk version, you will have access to all the items and upgrades in the game. You don't have to wait for them to be unlocked or pay for them with real money. You can enjoy the full features of the game from the start.
-
- No ads and no root required
-
With this mod apk version, you will not see any ads in the game. You can play the game without any interruptions or distractions. You also don't need to root your device to install the mod apk file. You can simply download it and install it on your android device.
-
How to download Gas Station Simulator mod apk android?
-
If you are interested in downloading Gas Station Simulator mod apk android, you can follow these simple steps:
-
Step 1: Download the mod apk file from a trusted source
-
The first thing you need to do is to find a reliable website that offers the mod apk file for Gas Station Simulator. You can search for it on Google or use the link below. Make sure that the website is safe and secure, and that the file is free from viruses and malware.
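If the download page publishes a checksum, one extra sanity check is to compute the file's SHA-256 hash yourself and compare the two values before installing. The sketch below is only an illustration, not a substitute for an antivirus scan, and the file name gas-station-simulator-mod.apk is a placeholder for whatever you actually downloaded.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare the printed value with the checksum listed on the download page, if any.
print(sha256_of("gas-station-simulator-mod.apk"))
```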
-
Step 2: Enable unknown sources on your device settings
-
The next thing you need to do is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.
-
Step 3: Install the mod apk file and enjoy the game
-
The last thing you need to do is to install the mod apk file on your device. To do this, locate the file in your downloads folder and tap on it. Follow the instructions on the screen and wait for the installation to finish. Once done, you can open the game and enjoy playing Gas Station Simulator with unlimited money, gems, and other benefits.
-
Conclusion
-
Gas Station Simulator is a fun and realistic business simulation game that lets you manage your own gas station. You can perform various tasks, upgrade your facilities, interact with customers and employees, and earn money and reputation. If you want to make the game more enjoyable and easy, you can download Gas Station Simulator mod apk android, which gives you unlimited money, gems, and other advantages. You can download the mod apk file from a trusted source, enable unknown sources on your device settings, and install the file on your device. Then, you can play the game without any limitations or difficulties.
-
We hope that this article has helped you learn more about Gas Station Simulator and how to download its mod apk version for free. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!
-
FAQs
-
Here are some frequently asked questions about Gas Station Simulator and its mod apk version:
-
- Is Gas Station Simulator free to play?
-
Yes, Gas Station Simulator is free to play. However, some items and features may require real money purchases or watching ads.
-
- Is Gas Station Simulator mod apk safe to use?
-
Yes, Gas Station Simulator mod apk is safe to use as long as you download it from a trusted source. However, we recommend that you use it at your own risk and discretion, as it may violate the terms of service of the original game.
-
- What are the minimum requirements for Gas Station Simulator?
-
The minimum requirements for Gas Station Simulator are: Android 5.0 or higher, 2 GB of RAM, 100 MB of free storage space, and an internet connection.
-
- How can I contact the developers of Gas Station Simulator?
-
You can contact the developers of Gas Station Simulator by sending an email to support@playway.com or visiting their website. You can also follow them on Facebook and Twitter for updates and news.
-
- Can I play Gas Station Simulator offline?
-
No, Gas Station Simulator requires an internet connection to play. You need to be online to access all the features and functions of the game.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Rebaixados Elite Brasil APK and Drive from Different Perspectives.md b/spaces/1phancelerku/anime-remove-background/Download Rebaixados Elite Brasil APK and Drive from Different Perspectives.md
deleted file mode 100644
index f9aebed5d1b34afc54a04f5b0150412acc505e10..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Rebaixados Elite Brasil APK and Drive from Different Perspectives.md
+++ /dev/null
@@ -1,129 +0,0 @@
-
-
How to Download Rebaixados Elite Brasil APK
-
Rebaixados Elite Brasil is a popular Android game, inspired by Brazil's lowered-car culture, that lets you customize your car and your character. If you want to download the game, you might be wondering how to get the APK file from Google Play Store. In this article, we will show you what Rebaixados Elite Brasil is, why you might want to download the APK file, and how to do it safely and easily.
-
What is Rebaixados Elite Brasil?
-
Rebaixados Elite Brasil is a game developed by Sebby Games that simulates the culture of lowered cars in Brazil. You can lower your car to the floor, change the color, wheels, glass, xenon, and more. You can also customize your character with different clothes, accessories, and hairstyles. The game has realistic graphics, physics, and sound effects that make you feel like you are driving a real car.
The game is inspired by the Brazilian subculture of demoted cars, which are vehicles that have been modified to have a lower suspension, larger wheels, louder sound systems, and flashy decorations. The game lets you explore different scenarios in Brazil, such as streets, highways, gas stations, and parking lots. You can also interact with other cars and characters in the game.
-
Features of the game
-
Some of the features of Rebaixados Elite Brasil are:
-
rebaixados elite brasil apk mod
-rebaixados elite brasil apk latest version
-rebaixados elite brasil apk free download
-rebaixados elite brasil apk unlimited money
-rebaixados elite brasil apk android
-rebaixados elite brasil apk obb
-rebaixados elite brasil apk hack
-rebaixados elite brasil apk offline
-rebaixados elite brasil apk update
-rebaixados elite brasil apk full version
-rebaixados elite brasil apk for pc
-rebaixados elite brasil apk revdl
-rebaixados elite brasil apk rexdl
-rebaixados elite brasil apk pure
-rebaixados elite brasil apk uptodown
-rebaixados elite brasil apk old version
-rebaixados elite brasil apk 2023
-rebaixados elite brasil apk data
-rebaixados elite brasil apk mirror
-rebaixados elite brasil apk mob.org
-rebaixados elite brasil apk gameplay
-rebaixados elite brasil apk cheats
-rebaixados elite brasil apk sebby games
-rebaixados elite brasil apk android 1
-rebaixados elite brasil apk android oyun club
-rebaixados elite brasil apk baixar
-rebaixados elite brasil apk como instalar
-rebaixados elite brasil apk descargar
-rebaixados elite brasil apk download mediafıre
-rebaixados elite brasil apk download mega
-rebaixados elite brasil apk download pc
-rebaixados elite brasil apk download uptodown
-rebaixados elite brasil apk download 2023
-rebaixados elite brasil apk download atualizado 2023
-rebaixados elite brasil apk download gratis
-rebaixados elite brasil apk download hackeado 2023
-rebaixados elite brasil apk download infinito 2023
-rebaixados elite brasil apk download modificado 2023
-rebaixados elite brasil apk download para pc fraco 2023
-rebaixados elite brasil apk indir android oyun club 2023
-
-
Completely detailed car models that you can customize in various ways.
-
Drive from a first or third person perspective and control your car with a steering wheel, accelerometer, or arrows.
-
Car interiors in 360 degrees and many interactive elements in cars, such as opening doors, hood, trunk, windows, and windshield wipers.
-
Day and night mode and filters for the camera.
-
Character in the game that you can personalize with different outfits and accessories.
-
Turn up the bass of the song and enjoy the music that gets muffled with the trunk of the car locked.
-
Multiple wheels for your car and choose the size of the rim.
-
Neon in the cars and various models of speakers with LED lights.
-
Functional gas station where you can refuel your car.
-
Online cooperative multiplayer mode where you can play with your friends or other players.
-
-
Why download the APK file?
-
An APK file is an Android Package file that contains all the files and data needed to install an app on an Android device. You can download APK files from Google Play Store or other sources to install apps that are not available in your region, update apps before they are officially released, or backup apps that you want to keep. However, there are also some risks involved in downloading APK files from unknown sources, such as malware, viruses, or spyware.
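-
As an illustration of the point above, an APK is simply a ZIP archive, so you can list what it contains with a few lines of Python. This is only a minimal sketch; the file name below is a placeholder for whatever APK you have actually downloaded.

```python
import zipfile

# An APK is a ZIP archive: its entries include the compiled code (classes.dex),
# resources, and the AndroidManifest.xml that the installer reads.
APK_PATH = "downloaded_app.apk"  # placeholder: path to your APK file

with zipfile.ZipFile(APK_PATH) as apk:
    names = apk.namelist()
    for name in names[:10]:  # show only the first few entries
        print(name)
    print("Contains AndroidManifest.xml:", "AndroidManifest.xml" in names)
```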
-
Benefits of APK files
-
Some of the benefits of downloading APK files are:
-
-
You can access apps that are not available in your country or region due to geo-restrictions or licensing issues.
-
You can update apps before they are officially released on Google Play Store and enjoy new features and bug fixes.
-
You can backup apps that you want to keep or transfer to another device without losing your data or settings.
-
You can install apps that are not compatible with your device or Android version by modifying the APK file.
-
-
Risks of APK files
-
Some of the risks of downloading APK files are:
-
You can expose your device and data to malware, viruses, or spyware that can harm your device, steal your information, or compromise your privacy.
-
You can violate the terms and conditions of Google Play Store or the app developer and risk losing access to the app or your account.
-
You can damage your device or cause it to malfunction if the APK file is corrupted, modified, or incompatible with your device or Android version.
-
-
How to download the APK file from Google Play Store?
-
If you want to download the APK file of Rebaixados Elite Brasil from Google Play Store, you have two options: using a web tool or using an APK extractor app. Both methods are easy and safe, but you need to have a Google account and the app installed on your device.
-
Method 1: Using a web tool
-
One of the easiest ways to download the APK file of Rebaixados Elite Brasil from Google Play Store is to use a web tool that can generate the download link for you. Here are the steps to follow:
-
-
Go to Google Play Store and search for Rebaixados Elite Brasil. Copy the URL of the app page from the address bar.
-
Go to a web tool that can download APK files from Google Play Store, such as [APK Downloader], [APKPure], or [Evozi].
-
Paste the URL of the app page into the input box and click on the download button.
-
Wait for the web tool to generate the download link and click on it to save the APK file to your device or computer.
-
-
Method 2: Using an APK extractor app
-
Another way to download the APK file of Rebaixados Elite Brasil from Google Play Store is to use an APK extractor app that can extract the APK file from the app installed on your device. Here are the steps to follow:
-
-
Go to Google Play Store and install an APK extractor app, such as [APK Extractor], [ML Manager], or [App Backup & Restore].
-
Open the APK extractor app and find Rebaixados Elite Brasil in the list of apps. Tap on it and select the option to extract or share the APK file.
-
Choose where you want to save or send the APK file, such as your device storage, email, cloud service, or Bluetooth.
-
-
How to install the APK file on Android?
-
Once you have downloaded the APK file of Rebaixados Elite Brasil, you need to install it on your Android device. However, before you do that, you need to enable the option to install apps from unknown sources on your device. This option allows you to install apps that are not from Google Play Store. Here are the steps to enable it:
-
-
Go to Settings and tap on Security or Privacy.
-
Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.
-
Confirm your choice by tapping on OK or Allow.
-
-
Steps to install the APK file
-
After you have enabled the option to install apps from unknown sources, you can proceed to install the APK file of Rebaixados Elite Brasil. Here are the steps to follow:
-
-
Locate the APK file on your device using a file manager app or a browser.
-
Tap on the APK file and select Install.
-
Wait for the installation process to finish and tap on Open or Done.
-
-
Tips to avoid installation errors
-
Sometimes, you might encounter some errors or issues when installing an APK file on your Android device. Here are some tips to avoid them:
-
Make sure that the APK file is not corrupted, modified, or tampered with. You can check the integrity of the APK file by comparing its checksum or signature with the one published by the download source (see the short checksum example after this list).
-
Make sure that the APK file is compatible with your device and Android version. You can check the compatibility of the APK file by looking at its minimum requirements, such as Android version, screen size, processor, RAM, and storage.
-
Make sure that you have enough storage space on your device to install the APK file. You can check the storage space on your device by going to Settings and tapping on Storage.
-
Make sure that you have a stable internet connection when downloading or installing the APK file. You can check the internet connection on your device by going to Settings and tapping on Wi-Fi or Mobile Data.
-
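As mentioned in the first tip above, here is a minimal sketch of how a checksum comparison can be done in Python. The file name and the expected hash are placeholders; replace them with your downloaded file and the SHA-256 value published by the site you downloaded it from.

```python
import hashlib

APK_PATH = "downloaded_app.apk"      # placeholder: path to your downloaded APK
EXPECTED_SHA256 = "0123abcd..."      # placeholder: hash published by the download source

sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Read in 1 MiB chunks so large APKs do not need to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print("SHA-256:", digest)
print("Matches the published checksum:", digest == EXPECTED_SHA256.lower())
```

If the hashes do not match, discard the file and download it again from a trusted source.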
-
Conclusion
-
Rebaixados Elite Brasil is a fun and realistic game that lets you customize your car and character in a Brazil-inspired demoted car game. You can download the APK file of Rebaixados Elite Brasil from Google Play Store using a web tool or an APK extractor app. You can also install the APK file on your Android device by enabling the option to install apps from unknown sources and following some simple steps. However, you should also be aware of the risks of downloading APK files from unknown sources and take some precautions to avoid installation errors. We hope this article has helped you learn how to download Rebaixados Elite Brasil APK and enjoy the game.
-
FAQs
-
Here are some frequently asked questions about Rebaixados Elite Brasil APK:
-
-
Q: Is Rebaixados Elite Brasil free to play?
A: Yes, Rebaixados Elite Brasil is free to play, but it contains ads and in-app purchases.
-
Q: How can I play Rebaixados Elite Brasil online with my friends?
A: You can play Rebaixados Elite Brasil online with your friends by joining or creating a room in the multiplayer mode. You need to have an internet connection and a Google account to play online.
-
Q: How can I remove the ads from Rebaixados Elite Brasil?
A: You can remove the ads from Rebaixados Elite Brasil by purchasing the premium version of the game for $1.99.
-
Q: How can I get more money and diamonds in Rebaixados Elite Brasil?
A: You can get more money and diamonds in Rebaixados Elite Brasil by completing missions, watching videos, or buying them with real money.
-
Q: How can I contact the developer of Rebaixados Elite Brasil?
A: You can contact the developer of Rebaixados Elite Brasil by sending an email to sebbygames@gmail.com or following them on Facebook or Instagram.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download and Install God of War Collection for PS3 Emulator (RPCS3) on PC.md b/spaces/1phancelerku/anime-remove-background/Download and Install God of War Collection for PS3 Emulator (RPCS3) on PC.md
deleted file mode 100644
index 73ef2cab94c35c080b7b129c829458bd69a97adf..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download and Install God of War Collection for PS3 Emulator (RPCS3) on PC.md
+++ /dev/null
@@ -1,162 +0,0 @@
-
-
God of War Collection PS3 Emulator Download: How to Play God of War Games on PC
-
God of War is one of the most popular and acclaimed action-adventure video game series of all time. The games follow the adventures of Kratos, a Spartan warrior who battles against gods, monsters, and other mythical creatures in ancient Greece and Norse mythology. The games are known for their epic scale, cinematic presentation, brutal combat, and engaging story.
But what if you don't have a PlayStation console to play these games? Or what if you want to enjoy them with better graphics, performance, and customization options? Well, there is a way to play God of War games on PC, thanks to a PS3 emulator called RPCS3. In this article, we will show you how to download, install, configure, and play God of War Collection PS3 Emulator Download on your PC.
-
What is God of War Collection PS3 Emulator Download?
-
God of War Collection PS3 Emulator Download is a package that contains two remastered versions of the first two God of War games: God of War HD and God of War II HD. These games were originally released for the PlayStation 2, but were later ported to the PlayStation 3 as part of the God of War Collection. The remastered versions feature improved graphics, resolution, frame rate, and trophies.
-
RPCS3 is an open-source emulator that allows you to play PlayStation 3 games on your PC. It is currently the most advanced and compatible PS3 emulator available, with support for over 5000 games. RPCS3 can run many PS3 games at full speed, with high resolution, anti-aliasing, and other enhancements. RPCS3 also supports various input devices, such as keyboards, mice, controllers, and even VR headsets.
-
By using RPCS3, you can play God of War Collection PS3 Emulator Download on your PC, as well as other PS3 exclusive games such as Uncharted, The Last of Us, Demon's Souls, Persona 5, and more.
-
god of war collection rpcs3 download
-god of war 1 and 2 ps3 emulator
-god of war hd collection pc download
-god of war ps3 iso download for rpcs3
-god of war collection ps3 rom
-how to play god of war collection on pc
-god of war 1 hd rpcs3 settings
-god of war 2 hd pc download
-god of war collection ps3 pkg
-rpcs3 god of war collection vulkan
-god of war 1 and 2 pc emulator
-god of war hd collection ps3 iso
-god of war collection pc gameplay
-god of war ps3 emulator android
-god of war collection ps3 save data
-rpcs3 god of war collection lle modules
-god of war 2 hd rpcs3 download
-god of war hd collection pc requirements
-god of war ps3 emulator reddit
-god of war collection ps3 cheats
-rpcs3 god of war collection install guide
-god of war 1 hd pc download
-god of war hd collection ps3 download
-god of war ps3 emulator online
-god of war collection ps3 trophy guide
-rpcs3 god of war collection graphics fix
-god of war 1 hd rpcs3 cheats
-god of war 2 hd pc emulator
-god of war hd collection ps4 download
-god of war ps3 emulator for windows 10
-god of war collection ps3 review
-rpcs3 god of war collection sound problem
-god of war 1 hd rpcs3 save game
-god of war 2 hd rpcs3 settings
-god of war hd collection pc crack
-god of war ps3 emulator apk
-god of war collection ps3 price
-rpcs3 god of war collection controller configuration
-god of war 1 hd rpcs3 gameplay
-god of war 2 hd rpcs3 cheats
-god of war hd collection pc free download
-god of war ps3 emulator mac
-god of war collection ps3 gamestop
-rpcs3 god of war collection black screen fix
-god of war 1 hd rpcs3 resolution scale
-god of war 2 hd rpcs3 gameplay
-god of war hd collection pc steam
-god of war ps3 emulator linux
-god of war collection ps3 amazon
-
How to Download God of War Collection PS3 Emulator Download
-
Requirements and Steps to Download and Install RPCS3
-
To download and install RPCS3, you will need a PC that meets the following minimum requirements:
-
-
A 64-bit operating system (Windows 7 or later, Linux, macOS)
-
A CPU that supports x86-64 instructions (Intel Core i5 or AMD Ryzen 5 or higher recommended)
-
A GPU that supports OpenGL 4.3 or Vulkan (NVIDIA GeForce GTX 970 or AMD Radeon R9 390X or higher recommended)
-
At least 8 GB of RAM (16 GB or more recommended)
-
An SSD or HDD with enough space for the emulator data and the game files
-
-
Once you have a compatible PC, follow these steps to download and install RPCS3:
Go to the official RPCS3 website (rpcs3.net), choose your operating system, and download the latest build of RPCS3.
-
Extract the downloaded file to a folder of your choice.
-
Run rpcs3.exe to launch the emulator.
Where to Find the ROM Files for God of War Collection PS3 Emulator Download
-
To play God of War Collection PS3 Emulator Download on your PC, you will also need the ROM files for the games. ROM files are the digital copies of the game discs that can be read by the emulator. However, finding and downloading ROM files can be tricky, as they are often illegal to distribute and share online. Therefore, you should only download ROM files from trusted and reputable sources, and only if you own the original game discs.
-
One possible source for the ROM files is [Reddit], where some users have shared links to download God of War Collection PS3 Emulator Download in various regions and languages. However, these links may not always work or be safe, so you should use them at your own risk and discretion. You should also scan the downloaded files for viruses and malware before running them on your PC.
-
Another possible source for the ROM files is [Vimm's Lair], a website that hosts a large collection of classic games for various consoles, including the PS3. You can search for God of War Collection PS3 Emulator Download on this website and download the ROM files from there. However, you should be aware that the download speed may be slow and limited, and that you may encounter some errors or glitches while playing the games.
-
Once you have downloaded the ROM files, you will need to extract them to a folder of your choice. You will also need to install the official PS3 firmware (also called the PS3 system software) in RPCS3 before the games will run. You can find more information on how to install it on the [RPCS3 website] or in various online guides and tutorials.
-
How to Configure RPCS3 for Optimal Performance and Compatibility with God of War Collection PS3 Emulator Download
-
After you have installed RPCS3 and the ROM files, you will need to configure the emulator settings to ensure that the games run smoothly and without any issues. There are many options and parameters that you can tweak and adjust in RPCS3, but some of the most important ones are:
-
-
CPU configuration: You should enable PPU Decoder Recompiler (LLVM) and SPU Decoder Recompiler (LLVM) for better performance. You should also enable SPU Loop Detection, SPU Cache, and Thread Scheduler for better compatibility. You can also experiment with different SPU Block Size values, such as Safe, Mega, or Giga, depending on your CPU model and power.
-
GPU configuration: You should choose Vulkan as your Renderer for better graphics and stability. You should also enable Write Color Buffers, Read Color Buffers, Read Depth Buffer, and Write Depth Buffer for better rendering accuracy. You can also enable Anisotropic Filter, Anti-Aliasing, Resolution Scale, and Texture Scaling for better image quality.
-
Audio configuration: You should choose XAudio2 as your Audio Out for better sound quality and compatibility. You should also enable Audio Buffer Duration and Time Stretching for better audio synchronization.
-
Advanced configuration: You should enable Debug Console Mode, Accurate RSX Reservation Access, Accurate GETLLAR, Accurate PUTLLUC, and Use GPU Texture Scaling for better emulation accuracy. You can also enable Relaxed ZCULL Sync and Driver Wake-Up Delay for better performance.
-
-
These settings are based on various online sources that have tested and optimized RPCS3 for God of War Collection PS3 Emulator Download. However, you should keep in mind that these settings may not work for everyone or every game, as different PC configurations and game versions may require different settings. Therefore, you should always test and experiment with different settings until you find the ones that work best for you.
Beyond the technical setup, playing on PC means you do not need to buy or own a PS3 console or the game discs. You can also avoid the hassle of switching discs, updating firmware, and dealing with region locks.
-
-
Of course, playing God of War Collection PS3 Emulator Download on PC also has some drawbacks and challenges, such as:
-
-
You may encounter some bugs, glitches, crashes, or compatibility issues while playing the games, as RPCS3 is still in development and not perfect. You may also need to update the emulator and the game files regularly to fix these issues.
-
You may need a powerful PC to run the games at full speed and quality, as RPCS3 is very demanding on CPU and GPU resources. You may also need to tweak and optimize the emulator settings for each game to achieve the best performance.
-
You may face some legal or ethical dilemmas while downloading and playing the games, as ROM files are often considered piracy and infringement of intellectual property rights. You should always respect the rights of the game developers and publishers, and only download and play the games if you own the original copies.
-
-
Pros and Cons of Playing God of War Collection PS3 Emulator Download on PC
-
To summarize, here is a table that compares the pros and cons of playing God of War Collection PS3 Emulator Download on PC versus playing it on a console:
-
-
| PC | Console |
| --- | --- |
| + Higher resolution, frame rate, and graphical quality | - Lower resolution, frame rate, and graphical quality |
| + Various input devices and customization options | - Limited input devices and customization options |
| + Save states, cheats, mods, patches, and other enhancements | - No save states, cheats, mods, patches, and other enhancements |
| + Access to other PS3 games and emulators | - No access to other PS3 games and emulators |
| + No need to buy or own a PS3 console or the game discs | - Need to buy or own a PS3 console or the game discs |
| - Bugs, glitches, crashes, or compatibility issues | + Stable and reliable gameplay experience |
| - High PC requirements and emulator settings optimization | + Low console requirements and plug-and-play convenience |
| - Legal or ethical dilemmas regarding ROM files | + Legal or ethical compliance regarding game discs |
-
Tips and Tricks to Enhance the Gameplay Experience of God of War Collection PS3 Emulator Download on PC
-
Finally, here are some tips and tricks that can help you enhance the gameplay experience of God of War Collection PS3 Emulator Download on PC:
-
-
Check the [RPCS3 compatibility list] to see how well each game runs on the emulator, and what settings are recommended for each game.
-
Check the [RPCS3 wiki] to see if there are any specific instructions or solutions for each game, such as patches, fixes, or workarounds.
-
Check the [RPCS3 forums] or [Discord server] to see if there are any discussions or feedback from other users who have played the same game.
-
Watch some [YouTube videos] or [Twitch streams] of other people who have played the same game on RPCS3, to see how they have configured their settings and how they have enjoyed their gameplay.
-
Read some [reviews] or [guides] of the games themselves, to learn more about their story, characters, gameplay mechanics, secrets, tips, and strategies.
-
Have fun and enjoy playing God of War Collection PS3 Emulator Download on your PC!
-
-
Conclusion
-
In this article, we have shown you how to download, install, configure, and play God of War Collection PS3 Emulator Download on your PC. We have also discussed the features and benefits, as well as the pros and cons, of playing God of War games on PC. We have also provided some tips and tricks to enhance your gameplay experience.
-
We hope that this article has been helpful and informative for you. If you have any questions or comments about this topic, feel free to leave them below. We would love to hear from you!
-
Thank you for reading this article and happy gaming!
-
FAQs
-
Here are some frequently asked questions about God of War Collection PS3 Emulator Download:
-
Q: Is RPCS3 legal and safe to use?
-
A: RPCS3 is legal and safe to use, as long as you follow the rules and guidelines of the emulator. You should only download RPCS3 from the official website, and only use it for personal and non-commercial purposes. You should also only play games that you own legally, and not share or distribute ROM files online.
-
Q: How long does it take to download and install RPCS3 and the ROM files?
-
A: The download and installation time of RPCS3 and the ROM files may vary depending on your internet speed, PC specifications, and file size. Generally, it may take from a few minutes to a few hours to complete the process.
-
Q: How much space do I need to store RPCS3 and the ROM files?
-
A: The space required to store RPCS3 and the ROM files may also vary depending on the number and size of the games you want to play. Generally, RPCS3 itself takes about 100 MB of space, while each game may take from a few GB to tens of GB of space. Therefore, you should have enough space on your SSD or HDD to store them.
-
Q: Can I play God of War Collection PS3 Emulator Download online or with other players?
-
A: Unfortunately, RPCS3 does not support online or multiplayer features for most games, including God of War Collection PS3 Emulator Download. Therefore, you can only play the games offline or with local co-op.
-
Q: Can I play other God of War games on RPCS3?
-
A: Yes, you can play other God of War games on RPCS3, such as God of War III, God of War: Ascension, God of War: Chains of Olympus, and God of War: Ghost of Sparta. However, some of these games may not run as well as God of War Collection PS3 Emulator Download, or may have some issues or bugs. You should check the compatibility list and the wiki for more information on each game.
-
-
\ No newline at end of file
diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py
deleted file mode 100644
index 9ac2a03f4212faa129faed447a8f4519c0a00a8b..0000000000000000000000000000000000000000
--- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from typing import Dict, List
-
-import torch
-
-if torch.__version__ < '1.9':
- Iterable = torch._six.container_abcs.Iterable
-else:
- import collections
-
- Iterable = collections.abc.Iterable
-from torch.cuda.amp import GradScaler
-
-
-class _MultiDeviceReplicator(object):
- """
- Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
- """
-
- def __init__(self, master_tensor: torch.Tensor) -> None:
- assert master_tensor.is_cuda
- self.master = master_tensor
- self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
-
- def get(self, device) -> torch.Tensor:
- retval = self._per_device_tensors.get(device, None)
- if retval is None:
- retval = self.master.to(device=device, non_blocking=True, copy=True)
- self._per_device_tensors[device] = retval
- return retval
-
-
-class MaxClipGradScaler(GradScaler):
- def __init__(self, init_scale, max_scale: float, growth_interval=100):
- GradScaler.__init__(self, init_scale=init_scale, growth_interval=growth_interval)
- self.max_scale = max_scale
-
- def scale_clip(self):
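-        # Clamp the dynamic loss scale: stop growth once max_scale is reached,
-        # keep growing while below it, and clip back down if it ever overshoots.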
- if self.get_scale() == self.max_scale:
- self.set_growth_factor(1)
- elif self.get_scale() < self.max_scale:
- self.set_growth_factor(2)
- elif self.get_scale() > self.max_scale:
- self._scale.fill_(self.max_scale)
- self.set_growth_factor(1)
-
- def scale(self, outputs):
- """
- Multiplies ('scales') a tensor or list of tensors by the scale factor.
-
- Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
- unmodified.
-
- Arguments:
- outputs (Tensor or iterable of Tensors): Outputs to scale.
- """
- if not self._enabled:
- return outputs
- self.scale_clip()
- # Short-circuit for the common case.
- if isinstance(outputs, torch.Tensor):
- assert outputs.is_cuda
- if self._scale is None:
- self._lazy_init_scale_growth_tracker(outputs.device)
- assert self._scale is not None
- return outputs * self._scale.to(device=outputs.device, non_blocking=True)
-
- # Invoke the more complex machinery only if we're treating multiple outputs.
- stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
-
- def apply_scale(val):
- if isinstance(val, torch.Tensor):
- assert val.is_cuda
- if len(stash) == 0:
- if self._scale is None:
- self._lazy_init_scale_growth_tracker(val.device)
- assert self._scale is not None
- stash.append(_MultiDeviceReplicator(self._scale))
- return val * stash[0].get(val.device)
- elif isinstance(val, Iterable):
- iterable = map(apply_scale, val)
- if isinstance(val, list) or isinstance(val, tuple):
- return type(val)(iterable)
- else:
- return iterable
- else:
- raise ValueError("outputs must be a Tensor or an iterable of Tensors")
-
- return apply_scale(outputs)
diff --git a/spaces/7hao/bingo/tailwind.config.js b/spaces/7hao/bingo/tailwind.config.js
deleted file mode 100644
index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000
--- a/spaces/7hao/bingo/tailwind.config.js
+++ /dev/null
@@ -1,48 +0,0 @@
-/** @type {import('tailwindcss').Config} */
-module.exports = {
- content: [
- './src/pages/**/*.{js,ts,jsx,tsx,mdx}',
- './src/components/**/*.{js,ts,jsx,tsx,mdx}',
- './src/app/**/*.{js,ts,jsx,tsx,mdx}',
- './src/ui/**/*.{js,ts,jsx,tsx,mdx}',
- ],
- "darkMode": "class",
- theme: {
- extend: {
- colors: {
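-        // `<alpha-value>` lets Tailwind inject opacity modifiers (e.g. bg-primary-blue/50) into these colors.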
-        'primary-blue': 'rgb(var(--color-primary-blue) / <alpha-value>)',
-        secondary: 'rgb(var(--color-secondary) / <alpha-value>)',
-        'primary-background': 'rgb(var(--primary-background) / <alpha-value>)',
-        'primary-text': 'rgb(var(--primary-text) / <alpha-value>)',
-        'secondary-text': 'rgb(var(--secondary-text) / <alpha-value>)',
-        'light-text': 'rgb(var(--light-text) / <alpha-value>)',
-        'primary-border': 'rgb(var(--primary-border) / <alpha-value>)',
- },
- keyframes: {
- slideDownAndFade: {
- from: { opacity: 0, transform: 'translateY(-2px)' },
- to: { opacity: 1, transform: 'translateY(0)' },
- },
- slideLeftAndFade: {
- from: { opacity: 0, transform: 'translateX(2px)' },
- to: { opacity: 1, transform: 'translateX(0)' },
- },
- slideUpAndFade: {
- from: { opacity: 0, transform: 'translateY(2px)' },
- to: { opacity: 1, transform: 'translateY(0)' },
- },
- slideRightAndFade: {
- from: { opacity: 0, transform: 'translateX(2px)' },
- to: { opacity: 1, transform: 'translateX(0)' },
- },
- },
- animation: {
- slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- },
- },
- },
- plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')],
-}
diff --git a/spaces/AIConsultant/MusicGen/app.py b/spaces/AIConsultant/MusicGen/app.py
deleted file mode 100644
index 74c893e70cf36e94c740875e0c0db45675216632..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/app.py
+++ /dev/null
@@ -1,463 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
-# also released under the MIT license.
-
-import argparse
-from concurrent.futures import ProcessPoolExecutor
-import os
-from pathlib import Path
-import subprocess as sp
-from tempfile import NamedTemporaryFile
-import time
-import typing as tp
-import warnings
-
-import torch
-import gradio as gr
-
-from audiocraft.data.audio_utils import convert_audio
-from audiocraft.data.audio import audio_write
-from audiocraft.models import MusicGen, MultiBandDiffusion
-
-
-MODEL = None # Last used model
-IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
-print(IS_BATCHED)
-MAX_BATCH_SIZE = 12
-BATCHED_DURATION = 15
-INTERRUPTING = False
-MBD = None
-# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
-_old_call = sp.call
-
-
-def _call_nostderr(*args, **kwargs):
- # Avoid ffmpeg vomiting on the logs.
- kwargs['stderr'] = sp.DEVNULL
- kwargs['stdout'] = sp.DEVNULL
- _old_call(*args, **kwargs)
-
-
-sp.call = _call_nostderr
-# Preallocating the pool of processes.
-pool = ProcessPoolExecutor(4)
-pool.__enter__()
-
-
-def interrupt():
- global INTERRUPTING
- INTERRUPTING = True
-
-
-class FileCleaner:
- def __init__(self, file_lifetime: float = 3600):
- self.file_lifetime = file_lifetime
- self.files = []
-
- def add(self, path: tp.Union[str, Path]):
- self._cleanup()
- self.files.append((time.time(), Path(path)))
-
- def _cleanup(self):
- now = time.time()
- for time_added, path in list(self.files):
- if now - time_added > self.file_lifetime:
- if path.exists():
- path.unlink()
- self.files.pop(0)
- else:
- break
-
-
-file_cleaner = FileCleaner()
-
-
-def make_waveform(*args, **kwargs):
- # Further remove some warnings.
- be = time.time()
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
- out = gr.make_waveform(*args, **kwargs)
- print("Make a video took", time.time() - be)
- return out
-
-
-def load_model(version='facebook/musicgen-melody'):
- global MODEL
- print("Loading model", version)
- if MODEL is None or MODEL.name != version:
- MODEL = MusicGen.get_pretrained(version)
-
-
-def load_diffusion():
- global MBD
- if MBD is None:
- print("loading MBD")
- MBD = MultiBandDiffusion.get_mbd_musicgen()
-
-
-def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
- MODEL.set_generation_params(duration=duration, **gen_kwargs)
- print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
- be = time.time()
- processed_melodies = []
- target_sr = 32000
- target_ac = 1
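-    # Convert each optional melody prompt to mono 32 kHz and trim it to the requested duration.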
- for melody in melodies:
- if melody is None:
- processed_melodies.append(None)
- else:
- sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
- if melody.dim() == 1:
- melody = melody[None]
- melody = melody[..., :int(sr * duration)]
- melody = convert_audio(melody, sr, target_sr, target_ac)
- processed_melodies.append(melody)
-
- if any(m is not None for m in processed_melodies):
- outputs = MODEL.generate_with_chroma(
- descriptions=texts,
- melody_wavs=processed_melodies,
- melody_sample_rate=target_sr,
- progress=progress,
- return_tokens=USE_DIFFUSION
- )
- else:
- outputs = MODEL.generate(texts, progress=progress, return_tokens=USE_DIFFUSION)
- if USE_DIFFUSION:
- outputs_diffusion = MBD.tokens_to_wav(outputs[1])
- outputs = torch.cat([outputs[0], outputs_diffusion], dim=0)
- outputs = outputs.detach().cpu().float()
- pending_videos = []
- out_wavs = []
- for output in outputs:
- with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
- audio_write(
- file.name, output, MODEL.sample_rate, strategy="loudness",
- loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
- pending_videos.append(pool.submit(make_waveform, file.name))
- out_wavs.append(file.name)
- file_cleaner.add(file.name)
- out_videos = [pending_video.result() for pending_video in pending_videos]
- for video in out_videos:
- file_cleaner.add(video)
- print("batch finished", len(texts), time.time() - be)
- print("Tempfiles currently stored: ", len(file_cleaner.files))
- return out_videos, out_wavs
-
-
-def predict_batched(texts, melodies):
- max_text_length = 512
- texts = [text[:max_text_length] for text in texts]
- load_model('facebook/musicgen-melody')
- res = _do_predictions(texts, melodies, BATCHED_DURATION)
- return res
-
-
-def predict_full(model, decoder, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
- global INTERRUPTING
- global USE_DIFFUSION
- INTERRUPTING = False
- if temperature < 0:
- raise gr.Error("Temperature must be >= 0.")
- if topk < 0:
- raise gr.Error("Topk must be non-negative.")
- if topp < 0:
- raise gr.Error("Topp must be non-negative.")
-
- topk = int(topk)
- if decoder == "MultiBand_Diffusion":
- USE_DIFFUSION = True
- load_diffusion()
- else:
- USE_DIFFUSION = False
- load_model(model)
-
- def _progress(generated, to_generate):
- progress((min(generated, to_generate), to_generate))
- if INTERRUPTING:
- raise gr.Error("Interrupted.")
- MODEL.set_custom_progress_callback(_progress)
-
- videos, wavs = _do_predictions(
- [text], [melody], duration, progress=True,
- top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
- if USE_DIFFUSION:
- return videos[0], wavs[0], videos[1], wavs[1]
- return videos[0], wavs[0], None, None
-
-
-def toggle_audio_src(choice):
- if choice == "mic":
- return gr.update(source="microphone", value=None, label="Microphone")
- else:
- return gr.update(source="upload", value=None, label="File")
-
-
-def toggle_diffusion(choice):
- if choice == "MultiBand_Diffusion":
- return [gr.update(visible=True)] * 2
- else:
- return [gr.update(visible=False)] * 2
-
-
-def ui_full(launch_kwargs):
- with gr.Blocks() as interface:
- gr.Markdown(
- """
- # MusicGen
- This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
- a simple and controllable model for music generation
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
- """
- )
- with gr.Row():
- with gr.Column():
- with gr.Row():
- text = gr.Text(label="Input Text", interactive=True)
- with gr.Column():
- radio = gr.Radio(["file", "mic"], value="file",
- label="Condition on a melody (optional) File or Mic")
- melody = gr.Audio(source="upload", type="numpy", label="File",
- interactive=True, elem_id="melody-input")
- with gr.Row():
- submit = gr.Button("Submit")
- # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
- _ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
- with gr.Row():
- model = gr.Radio(["facebook/musicgen-melody", "facebook/musicgen-medium", "facebook/musicgen-small",
- "facebook/musicgen-large"],
- label="Model", value="facebook/musicgen-melody", interactive=True)
- with gr.Row():
- decoder = gr.Radio(["Default", "MultiBand_Diffusion"],
- label="Decoder", value="Default", interactive=True)
- with gr.Row():
- duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
- with gr.Row():
- topk = gr.Number(label="Top-k", value=250, interactive=True)
- topp = gr.Number(label="Top-p", value=0, interactive=True)
- temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
- cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
- with gr.Column():
- output = gr.Video(label="Generated Music")
- audio_output = gr.Audio(label="Generated Music (wav)", type='filepath')
- diffusion_output = gr.Video(label="MultiBand Diffusion Decoder")
- audio_diffusion = gr.Audio(label="MultiBand Diffusion Decoder (wav)", type='filepath')
- submit.click(toggle_diffusion, decoder, [diffusion_output, audio_diffusion], queue=False,
- show_progress=False).then(predict_full, inputs=[model, decoder, text, melody, duration, topk, topp,
- temperature, cfg_coef],
- outputs=[output, audio_output, diffusion_output, audio_diffusion])
- radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
-
- gr.Examples(
- fn=predict_full,
- examples=[
- [
- "An 80s driving pop song with heavy drums and synth pads in the background",
- "./assets/bach.mp3",
- "facebook/musicgen-melody",
- "Default"
- ],
- [
- "A cheerful country song with acoustic guitars",
- "./assets/bolero_ravel.mp3",
- "facebook/musicgen-melody",
- "Default"
- ],
- [
- "90s rock song with electric guitar and heavy drums",
- None,
- "facebook/musicgen-medium",
- "Default"
- ],
- [
- "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
- "./assets/bach.mp3",
- "facebook/musicgen-melody",
- "Default"
- ],
- [
- "lofi slow bpm electro chill with organic samples",
- None,
- "facebook/musicgen-medium",
- "Default"
- ],
- [
- "Punk rock with loud drum and power guitar",
- None,
- "facebook/musicgen-medium",
- "MultiBand_Diffusion"
- ],
- ],
- inputs=[text, melody, model, decoder],
- outputs=[output]
- )
- gr.Markdown(
- """
- ### More details
-
- The model will generate a short music extract based on the description you provided.
- The model can generate up to 30 seconds of audio in one pass. It is now possible
- to extend the generation by feeding back the end of the previous chunk of audio.
- This can take a long time, and the model might lose consistency. The model might also
- decide at arbitrary positions that the song ends.
-
- **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
- An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
- are generated each time.
-
- We present 4 model variations:
-            1. facebook/musicgen-melody -- a music generation model capable of generating music conditioned
- on text and melody inputs. **Note**, you can also use text only.
- 2. facebook/musicgen-small -- a 300M transformer decoder conditioned on text only.
- 3. facebook/musicgen-medium -- a 1.5B transformer decoder conditioned on text only.
- 4. facebook/musicgen-large -- a 3.3B transformer decoder conditioned on text only.
-
-            We also present two ways of decoding the audio tokens:
-            1. Use the default GAN based compression model
-            2. Use MultiBand Diffusion (paper link)
-
- When using `facebook/musicgen-melody`, you can optionally provide a reference audio from
- which a broad melody will be extracted. The model will then try to follow both
- the description and melody provided.
-
- You can also use your own GPU or a Google Colab by following the instructions on our repo.
- See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
- for more details.
- """
- )
-
- interface.queue().launch(**launch_kwargs)
-
-
-def ui_batched(launch_kwargs):
- with gr.Blocks() as demo:
- gr.Markdown(
- """
- # MusicGen
-
- This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
- a simple and controllable model for music generation
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
-
-
-
-            Duplicate this Space for longer sequences, more control and no queue.
- """
- )
- with gr.Row():
- with gr.Column():
- with gr.Row():
- text = gr.Text(label="Describe your music", lines=2, interactive=True)
- with gr.Column():
- radio = gr.Radio(["file", "mic"], value="file",
- label="Condition on a melody (optional) File or Mic")
- melody = gr.Audio(source="upload", type="numpy", label="File",
- interactive=True, elem_id="melody-input")
- with gr.Row():
- submit = gr.Button("Generate")
- with gr.Column():
- output = gr.Video(label="Generated Music")
- audio_output = gr.Audio(label="Generated Music (wav)", type='filepath')
- submit.click(predict_batched, inputs=[text, melody],
- outputs=[output, audio_output], batch=True, max_batch_size=MAX_BATCH_SIZE)
- radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
- gr.Examples(
- fn=predict_batched,
- examples=[
- [
- "An 80s driving pop song with heavy drums and synth pads in the background",
- "./assets/bach.mp3",
- ],
- [
- "A cheerful country song with acoustic guitars",
- "./assets/bolero_ravel.mp3",
- ],
- [
- "90s rock song with electric guitar and heavy drums",
- None,
- ],
- [
- "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
- "./assets/bach.mp3",
- ],
- [
- "lofi slow bpm electro chill with organic samples",
- None,
- ],
- ],
- inputs=[text, melody],
- outputs=[output]
- )
- gr.Markdown("""
- ### More details
-
- The model will generate 12 seconds of audio based on the description you provided.
- You can optionally provide a reference audio from which a broad melody will be extracted.
- The model will then try to follow both the description and melody provided.
- All samples are generated with the `melody` model.
-
- You can also use your own GPU or a Google Colab by following the instructions on our repo.
-
- See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
- for more details.
- """)
-
- demo.queue(max_size=8 * 4).launch(**launch_kwargs)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--listen',
- type=str,
- default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
- help='IP to listen on for connections to Gradio',
- )
- parser.add_argument(
- '--username', type=str, default='', help='Username for authentication'
- )
- parser.add_argument(
- '--password', type=str, default='', help='Password for authentication'
- )
- parser.add_argument(
- '--server_port',
- type=int,
- default=0,
- help='Port to run the server listener on',
- )
- parser.add_argument(
- '--inbrowser', action='store_true', help='Open in browser'
- )
- parser.add_argument(
- '--share', action='store_true', help='Share the gradio UI'
- )
-
- args = parser.parse_args()
-
- launch_kwargs = {}
- launch_kwargs['server_name'] = args.listen
-
- if args.username and args.password:
- launch_kwargs['auth'] = (args.username, args.password)
- if args.server_port:
- launch_kwargs['server_port'] = args.server_port
- if args.inbrowser:
- launch_kwargs['inbrowser'] = args.inbrowser
- if args.share:
- launch_kwargs['share'] = args.share
-
- # Show the interface
- if IS_BATCHED:
- global USE_DIFFUSION
- USE_DIFFUSION = False
- ui_batched(launch_kwargs)
- else:
- ui_full(launch_kwargs)
diff --git a/spaces/Adr740/CV_XPLORER_POC/README.md b/spaces/Adr740/CV_XPLORER_POC/README.md
deleted file mode 100644
index e6b86667a97c33a40d79e9f9396de5ecef88fd95..0000000000000000000000000000000000000000
--- a/spaces/Adr740/CV_XPLORER_POC/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Demo CV AI Explorer
-emoji: 🏃
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/prisoner.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/prisoner.py
deleted file mode 100644
index 6859911a80c70b86b7fe1bace2ad16c18eee9e00..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/prisoner.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from __future__ import annotations
-
-import logging
-import re
-from typing import TYPE_CHECKING, Any, List, Optional
-
-from . import order_registry as OrderRegistry
-from .base import BaseOrder
-
-if TYPE_CHECKING:
- from agentverse.environments import BaseEnvironment
-
-
-@OrderRegistry.register("prisoner")
-class PrisonerOrder(BaseOrder):
-    """The speaking order for the prisoner's dilemma scenario
-    The agents speak in the following order:
-    1. The police speaks first
-    2. After the police speaks, one of the two suspects responds, alternating between the two
-    3. After a suspect has responded, the turn returns to the police
- """
-
- # try police, prisoner1 prisoner2 first
-
- last_prisoner_index: int = 1
- switch_func: dict = {1: 2, 2: 1}
-
- def get_next_agent_idx(self, environment: BaseEnvironment) -> List[int]:
- if len(environment.last_messages) == 0:
-            # If the game has just begun, let only the police speak
- return [0]
- elif len(environment.last_messages) == 1:
- message = environment.last_messages[0]
- sender = message.sender
- content = message.content
- if sender.startswith("Police"):
- next_prisoner = self.last_prisoner_index
- self.last_prisoner_index = self.switch_func[self.last_prisoner_index]
- return [next_prisoner]
- elif sender.startswith("Suspect"):
-                # 3. When a suspect has made their move, the turn returns to the police
- return [0]
- else:
-            # If there is more than one message in the last turn, default to
-            # letting the police speak next.
- return [0]
diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/base.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/base.py
deleted file mode 100644
index e5da006e573cd930a7cd83c81ae934426c115b57..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/base.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from __future__ import annotations
-
-from abc import abstractmethod
-from typing import TYPE_CHECKING, Any
-
-from pydantic import BaseModel
-
-if TYPE_CHECKING:
- from agentverse.environments import BaseEnvironment
-
-
-class BaseVisibility(BaseModel):
- @abstractmethod
- def update_visible_agents(self, environment: BaseEnvironment):
- """Update the set of visible agents for the agent"""
-
- def reset(self):
- pass
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/RegisterEvents.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/RegisterEvents.js
deleted file mode 100644
index a393dc5bb9ef84cee26bc48e9ab25f345a932fb9..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/RegisterEvents.js
+++ /dev/null
@@ -1,41 +0,0 @@
-var OnPointerOverCallback = function (button) {
- if (button.setHoverState) {
- button.setHoverState(true);
- }
-}
-
-var OnPointerOutCallback = function (button) {
- if (button.setHoverState) {
- button.setHoverState(false);
- }
-}
-
-var OnChoiceButtonStateChange = function (button, groupName, index, value) {
- if (button.setActiveState) {
- button.setActiveState(value);
- }
-}
-
-var OnButtonEnable = function (button) {
- if (button.setDisableState) {
- button.setDisableState(false);
- }
-}
-
-var OnButtonDisable = function (button) {
- if (button.setDisableState) {
- button.setDisableState(true);
- }
-}
-
-var RegisterEvents = function () {
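-    // Forward the dialog's button events to the optional per-button state helpers defined above.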
- this
- .on('button.over', OnPointerOverCallback)
- .on('button.out', OnPointerOutCallback)
- .on('button.enable', OnButtonEnable)
- .on('button.disable', OnButtonDisable)
- .on('button.statechange', OnChoiceButtonStateChange)
-
-}
-
-export default RegisterEvents;
\ No newline at end of file
diff --git a/spaces/Akhil-77/Toxicity_Detector/README.md b/spaces/Akhil-77/Toxicity_Detector/README.md
deleted file mode 100644
index 127c29add46a4ddba1e1e9c0cf5f975fb1402386..0000000000000000000000000000000000000000
--- a/spaces/Akhil-77/Toxicity_Detector/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Toxicity Detector
-emoji: 😤
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/attentions.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/attentions.py
deleted file mode 100644
index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000
--- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/attentions.py
+++ /dev/null
@@ -1,303 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so that they add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along the column dimension
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's at the beginning so the elements are skewed after the reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
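As a side note on the `_relative_position_to_absolute_position` padding trick above: a minimal, self-contained sketch of the same reindexing, using plain `F.pad` calls in place of `commons.convert_pad_shape` (the batch, head, and length values are made up for illustration):

```python
import torch
import torch.nn.functional as F

def rel_to_abs(x):
    # x: [b, h, l, 2*l - 1] relative logits -> [b, h, l, l] absolute scores
    b, h, l, _ = x.size()
    x = F.pad(x, [0, 1])                    # append one column: [b, h, l, 2*l]
    x_flat = x.view(b, h, l * 2 * l)        # flatten the last two dims
    x_flat = F.pad(x_flat, [0, l - 1])      # extra zeros skew the rows on reshape
    return x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]

logits = torch.randn(2, 4, 5, 9)            # 2 * 5 - 1 = 9 relative positions
print(rel_to_abs(logits).shape)             # torch.Size([2, 4, 5, 5])
```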
diff --git a/spaces/AlexWang/lama/models/ade20k/base.py b/spaces/AlexWang/lama/models/ade20k/base.py
deleted file mode 100644
index 8cdbe2d3e7dbadf4ed5e5a7cf2d248761ef25d9c..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/models/ade20k/base.py
+++ /dev/null
@@ -1,627 +0,0 @@
-"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch"""
-
-import os
-
-import pandas as pd
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from scipy.io import loadmat
-from torch.nn.modules import BatchNorm2d
-
-from . import resnet
-from . import mobilenet
-
-
-NUM_CLASS = 150
-base_path = os.path.dirname(os.path.abspath(__file__)) # current file path
-colors_path = os.path.join(base_path, 'color150.mat')
-classes_path = os.path.join(base_path, 'object150_info.csv')
-
-segm_options = dict(colors=loadmat(colors_path)['colors'],
- classes=pd.read_csv(classes_path),)
-
-
-class NormalizeTensor:
- def __init__(self, mean, std, inplace=False):
- """Normalize a tensor image with mean and standard deviation.
- .. note::
- This transform acts out of place by default, i.e., it does not mutate the input tensor.
- See :class:`~torchvision.transforms.Normalize` for more details.
- Args:
- tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
- mean (sequence): Sequence of means for each channel.
- std (sequence): Sequence of standard deviations for each channel.
- inplace(bool,optional): Bool to make this operation inplace.
- Returns:
- Tensor: Normalized Tensor image.
- """
-
- self.mean = mean
- self.std = std
- self.inplace = inplace
-
- def __call__(self, tensor):
- if not self.inplace:
- tensor = tensor.clone()
-
- dtype = tensor.dtype
- mean = torch.as_tensor(self.mean, dtype=dtype, device=tensor.device)
- std = torch.as_tensor(self.std, dtype=dtype, device=tensor.device)
- tensor.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
- return tensor
-
-
-# Model Builder
-class ModelBuilder:
- # custom weights initialization
- @staticmethod
- def weights_init(m):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- nn.init.kaiming_normal_(m.weight.data)
- elif classname.find('BatchNorm') != -1:
- m.weight.data.fill_(1.)
- m.bias.data.fill_(1e-4)
-
- @staticmethod
- def build_encoder(arch='resnet50dilated', fc_dim=512, weights=''):
- pretrained = True if len(weights) == 0 else False
- arch = arch.lower()
- if arch == 'mobilenetv2dilated':
- orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=pretrained)
- net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=8)
- elif arch == 'resnet18':
- orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
- net_encoder = Resnet(orig_resnet)
- elif arch == 'resnet18dilated':
- orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
- net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
- elif arch == 'resnet50dilated':
- orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
- net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
- elif arch == 'resnet50':
- orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
- net_encoder = Resnet(orig_resnet)
- else:
- raise Exception('Architecture undefined!')
-
- # encoders are usually pretrained
- # net_encoder.apply(ModelBuilder.weights_init)
- if len(weights) > 0:
- print('Loading weights for net_encoder')
- net_encoder.load_state_dict(
- torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
- return net_encoder
-
- @staticmethod
- def build_decoder(arch='ppm_deepsup',
- fc_dim=512, num_class=NUM_CLASS,
- weights='', use_softmax=False, drop_last_conv=False):
- arch = arch.lower()
- if arch == 'ppm_deepsup':
- net_decoder = PPMDeepsup(
- num_class=num_class,
- fc_dim=fc_dim,
- use_softmax=use_softmax,
- drop_last_conv=drop_last_conv)
- elif arch == 'c1_deepsup':
- net_decoder = C1DeepSup(
- num_class=num_class,
- fc_dim=fc_dim,
- use_softmax=use_softmax,
- drop_last_conv=drop_last_conv)
- else:
- raise Exception('Architecture undefined!')
-
- net_decoder.apply(ModelBuilder.weights_init)
- if len(weights) > 0:
- print('Loading weights for net_decoder')
- net_decoder.load_state_dict(
- torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
- return net_decoder
-
- @staticmethod
- def get_decoder(weights_path, arch_encoder, arch_decoder, fc_dim, drop_last_conv, *arts, **kwargs):
- path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/decoder_epoch_20.pth')
- return ModelBuilder.build_decoder(arch=arch_decoder, fc_dim=fc_dim, weights=path, use_softmax=True, drop_last_conv=drop_last_conv)
-
- @staticmethod
- def get_encoder(weights_path, arch_encoder, arch_decoder, fc_dim, segmentation,
- *arts, **kwargs):
- if segmentation:
- path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/encoder_epoch_20.pth')
- else:
- path = ''
- return ModelBuilder.build_encoder(arch=arch_encoder, fc_dim=fc_dim, weights=path)
-
-
-def conv3x3_bn_relu(in_planes, out_planes, stride=1):
- return nn.Sequential(
- nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
- BatchNorm2d(out_planes),
- nn.ReLU(inplace=True),
- )
-
-
-class SegmentationModule(nn.Module):
- def __init__(self,
- weights_path,
- num_classes=150,
- arch_encoder="resnet50dilated",
- drop_last_conv=False,
- net_enc=None, # None for Default encoder
- net_dec=None, # None for Default decoder
- encode=None, # {None, 'binary', 'color', 'sky'}
- use_default_normalization=False,
- return_feature_maps=False,
- return_feature_maps_level=3, # {0, 1, 2, 3}
- return_feature_maps_only=True,
- **kwargs,
- ):
- super().__init__()
- self.weights_path = weights_path
- self.drop_last_conv = drop_last_conv
- self.arch_encoder = arch_encoder
- if self.arch_encoder == "resnet50dilated":
- self.arch_decoder = "ppm_deepsup"
- self.fc_dim = 2048
- elif self.arch_encoder == "mobilenetv2dilated":
- self.arch_decoder = "c1_deepsup"
- self.fc_dim = 320
- else:
- raise NotImplementedError(f"No such arch_encoder={self.arch_encoder}")
- model_builder_kwargs = dict(arch_encoder=self.arch_encoder,
- arch_decoder=self.arch_decoder,
- fc_dim=self.fc_dim,
- drop_last_conv=drop_last_conv,
- weights_path=self.weights_path)
-
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- self.encoder = ModelBuilder.get_encoder(**model_builder_kwargs) if net_enc is None else net_enc
- self.decoder = ModelBuilder.get_decoder(**model_builder_kwargs) if net_dec is None else net_dec
- self.use_default_normalization = use_default_normalization
- self.default_normalization = NormalizeTensor(mean=[0.485, 0.456, 0.406],
- std=[0.229, 0.224, 0.225])
-
- self.encode = encode
-
- self.return_feature_maps = return_feature_maps
-
- assert 0 <= return_feature_maps_level <= 3
- self.return_feature_maps_level = return_feature_maps_level
-
- def normalize_input(self, tensor):
- if tensor.min() < 0 or tensor.max() > 1:
- raise ValueError("Tensor should be 0..1 before using normalize_input")
- return self.default_normalization(tensor)
-
- @property
- def feature_maps_channels(self):
- return 256 * 2**(self.return_feature_maps_level) # 256, 512, 1024, 2048
-
- def forward(self, img_data, segSize=None):
- if segSize is None:
- raise NotImplementedError("Please pass segSize param. By default: (300, 300)")
-
- fmaps = self.encoder(img_data, return_feature_maps=True)
- pred = self.decoder(fmaps, segSize=segSize)
-
- if self.return_feature_maps:
- return pred, fmaps
- # print("BINARY", img_data.shape, pred.shape)
- return pred
-
- def multi_mask_from_multiclass(self, pred, classes):
- def isin(ar1, ar2):
- return (ar1[..., None] == ar2).any(-1).float()
- return isin(pred, torch.LongTensor(classes).to(self.device))
-
- @staticmethod
- def multi_mask_from_multiclass_probs(scores, classes):
- res = None
- for c in classes:
- if res is None:
- res = scores[:, c]
- else:
- res += scores[:, c]
- return res
-
- def predict(self, tensor, imgSizes=(-1,), # (300, 375, 450, 525, 600)
- segSize=None):
- """Entry-point for segmentation. Use this methods instead of forward
- Arguments:
- tensor {torch.Tensor} -- BCHW
- Keyword Arguments:
- imgSizes {tuple or list} -- imgSizes for segmentation input.
- default: (-1,), i.e. segment at the input resolution
- original implementation: (300, 375, 450, 525, 600)
-
- """
- if segSize is None:
- segSize = tensor.shape[-2:]
- segSize = (tensor.shape[2], tensor.shape[3])
- with torch.no_grad():
- if self.use_default_normalization:
- tensor = self.normalize_input(tensor)
- scores = torch.zeros(1, NUM_CLASS, segSize[0], segSize[1]).to(self.device)
- features = torch.zeros(1, self.feature_maps_channels, segSize[0], segSize[1]).to(self.device)
-
- result = []
- for img_size in imgSizes:
- if img_size != -1:
- img_data = F.interpolate(tensor.clone(), size=img_size)
- else:
- img_data = tensor.clone()
-
- if self.return_feature_maps:
- pred_current, fmaps = self.forward(img_data, segSize=segSize)
- else:
- pred_current = self.forward(img_data, segSize=segSize)
-
-
- result.append(pred_current)
- scores = scores + pred_current / len(imgSizes)
-
- # Disclaimer: We use and aggregate only last fmaps: fmaps[3]
- if self.return_feature_maps:
- features = features + F.interpolate(fmaps[self.return_feature_maps_level], size=segSize) / len(imgSizes)
-
- _, pred = torch.max(scores, dim=1)
-
- if self.return_feature_maps:
- return features
-
- return pred, result
-
- def get_edges(self, t):
- edge = torch.cuda.ByteTensor(t.size()).zero_()
- edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
- edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
- edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
- edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
-
- return edge.half()
-
-
-# pyramid pooling, deep supervision
-class PPMDeepsup(nn.Module):
- def __init__(self, num_class=NUM_CLASS, fc_dim=4096,
- use_softmax=False, pool_scales=(1, 2, 3, 6),
- drop_last_conv=False):
- super().__init__()
- self.use_softmax = use_softmax
- self.drop_last_conv = drop_last_conv
-
- self.ppm = []
- for scale in pool_scales:
- self.ppm.append(nn.Sequential(
- nn.AdaptiveAvgPool2d(scale),
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True)
- ))
- self.ppm = nn.ModuleList(self.ppm)
- self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
-
- self.conv_last = nn.Sequential(
- nn.Conv2d(fc_dim + len(pool_scales) * 512, 512,
- kernel_size=3, padding=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True),
- nn.Dropout2d(0.1),
- nn.Conv2d(512, num_class, kernel_size=1)
- )
- self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
- self.dropout_deepsup = nn.Dropout2d(0.1)
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
-
- input_size = conv5.size()
- ppm_out = [conv5]
- for pool_scale in self.ppm:
- ppm_out.append(nn.functional.interpolate(
- pool_scale(conv5),
- (input_size[2], input_size[3]),
- mode='bilinear', align_corners=False))
- ppm_out = torch.cat(ppm_out, 1)
-
- if self.drop_last_conv:
- return ppm_out
- else:
- x = self.conv_last(ppm_out)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- return x
-
- # deep sup
- conv4 = conv_out[-2]
- _ = self.cbr_deepsup(conv4)
- _ = self.dropout_deepsup(_)
- _ = self.conv_last_deepsup(_)
-
- x = nn.functional.log_softmax(x, dim=1)
- _ = nn.functional.log_softmax(_, dim=1)
-
- return (x, _)
-
-
-class Resnet(nn.Module):
- def __init__(self, orig_resnet):
- super(Resnet, self).__init__()
-
- # take pretrained resnet, except AvgPool and FC
- self.conv1 = orig_resnet.conv1
- self.bn1 = orig_resnet.bn1
- self.relu1 = orig_resnet.relu1
- self.conv2 = orig_resnet.conv2
- self.bn2 = orig_resnet.bn2
- self.relu2 = orig_resnet.relu2
- self.conv3 = orig_resnet.conv3
- self.bn3 = orig_resnet.bn3
- self.relu3 = orig_resnet.relu3
- self.maxpool = orig_resnet.maxpool
- self.layer1 = orig_resnet.layer1
- self.layer2 = orig_resnet.layer2
- self.layer3 = orig_resnet.layer3
- self.layer4 = orig_resnet.layer4
-
- def forward(self, x, return_feature_maps=False):
- conv_out = []
-
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x); conv_out.append(x);
- x = self.layer2(x); conv_out.append(x);
- x = self.layer3(x); conv_out.append(x);
- x = self.layer4(x); conv_out.append(x);
-
- if return_feature_maps:
- return conv_out
- return [x]
-
-# Resnet Dilated
-class ResnetDilated(nn.Module):
- def __init__(self, orig_resnet, dilate_scale=8):
- super().__init__()
- from functools import partial
-
- if dilate_scale == 8:
- orig_resnet.layer3.apply(
- partial(self._nostride_dilate, dilate=2))
- orig_resnet.layer4.apply(
- partial(self._nostride_dilate, dilate=4))
- elif dilate_scale == 16:
- orig_resnet.layer4.apply(
- partial(self._nostride_dilate, dilate=2))
-
- # take pretrained resnet, except AvgPool and FC
- self.conv1 = orig_resnet.conv1
- self.bn1 = orig_resnet.bn1
- self.relu1 = orig_resnet.relu1
- self.conv2 = orig_resnet.conv2
- self.bn2 = orig_resnet.bn2
- self.relu2 = orig_resnet.relu2
- self.conv3 = orig_resnet.conv3
- self.bn3 = orig_resnet.bn3
- self.relu3 = orig_resnet.relu3
- self.maxpool = orig_resnet.maxpool
- self.layer1 = orig_resnet.layer1
- self.layer2 = orig_resnet.layer2
- self.layer3 = orig_resnet.layer3
- self.layer4 = orig_resnet.layer4
-
- def _nostride_dilate(self, m, dilate):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- # the convolution with stride
- if m.stride == (2, 2):
- m.stride = (1, 1)
- if m.kernel_size == (3, 3):
- m.dilation = (dilate // 2, dilate // 2)
- m.padding = (dilate // 2, dilate // 2)
- # other convolutions
- else:
- if m.kernel_size == (3, 3):
- m.dilation = (dilate, dilate)
- m.padding = (dilate, dilate)
-
- def forward(self, x, return_feature_maps=False):
- conv_out = []
-
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- conv_out.append(x)
- x = self.layer2(x)
- conv_out.append(x)
- x = self.layer3(x)
- conv_out.append(x)
- x = self.layer4(x)
- conv_out.append(x)
-
- if return_feature_maps:
- return conv_out
- return [x]
-
-class MobileNetV2Dilated(nn.Module):
- def __init__(self, orig_net, dilate_scale=8):
- super(MobileNetV2Dilated, self).__init__()
- from functools import partial
-
- # take pretrained mobilenet features
- self.features = orig_net.features[:-1]
-
- self.total_idx = len(self.features)
- self.down_idx = [2, 4, 7, 14]
-
- if dilate_scale == 8:
- for i in range(self.down_idx[-2], self.down_idx[-1]):
- self.features[i].apply(
- partial(self._nostride_dilate, dilate=2)
- )
- for i in range(self.down_idx[-1], self.total_idx):
- self.features[i].apply(
- partial(self._nostride_dilate, dilate=4)
- )
- elif dilate_scale == 16:
- for i in range(self.down_idx[-1], self.total_idx):
- self.features[i].apply(
- partial(self._nostride_dilate, dilate=2)
- )
-
- def _nostride_dilate(self, m, dilate):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- # the convolution with stride
- if m.stride == (2, 2):
- m.stride = (1, 1)
- if m.kernel_size == (3, 3):
- m.dilation = (dilate//2, dilate//2)
- m.padding = (dilate//2, dilate//2)
- # other convolutions
- else:
- if m.kernel_size == (3, 3):
- m.dilation = (dilate, dilate)
- m.padding = (dilate, dilate)
-
- def forward(self, x, return_feature_maps=False):
- if return_feature_maps:
- conv_out = []
- for i in range(self.total_idx):
- x = self.features[i](x)
- if i in self.down_idx:
- conv_out.append(x)
- conv_out.append(x)
- return conv_out
-
- else:
- return [self.features(x)]
-
-
-# last conv, deep supervision
-class C1DeepSup(nn.Module):
- def __init__(self, num_class=150, fc_dim=2048, use_softmax=False, drop_last_conv=False):
- super(C1DeepSup, self).__init__()
- self.use_softmax = use_softmax
- self.drop_last_conv = drop_last_conv
-
- self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
- self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
-
- # last conv
- self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
- self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
-
- x = self.cbr(conv5)
-
- if self.drop_last_conv:
- return x
- else:
- x = self.conv_last(x)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- return x
-
- # deep sup
- conv4 = conv_out[-2]
- _ = self.cbr_deepsup(conv4)
- _ = self.conv_last_deepsup(_)
-
- x = nn.functional.log_softmax(x, dim=1)
- _ = nn.functional.log_softmax(_, dim=1)
-
- return (x, _)
-
-
-# last conv
-class C1(nn.Module):
- def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
- super(C1, self).__init__()
- self.use_softmax = use_softmax
-
- self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
-
- # last conv
- self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
- x = self.cbr(conv5)
- x = self.conv_last(x)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- else:
- x = nn.functional.log_softmax(x, dim=1)
-
- return x
-
-
-# pyramid pooling
-class PPM(nn.Module):
- def __init__(self, num_class=150, fc_dim=4096,
- use_softmax=False, pool_scales=(1, 2, 3, 6)):
- super(PPM, self).__init__()
- self.use_softmax = use_softmax
-
- self.ppm = []
- for scale in pool_scales:
- self.ppm.append(nn.Sequential(
- nn.AdaptiveAvgPool2d(scale),
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True)
- ))
- self.ppm = nn.ModuleList(self.ppm)
-
- self.conv_last = nn.Sequential(
- nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
- kernel_size=3, padding=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True),
- nn.Dropout2d(0.1),
- nn.Conv2d(512, num_class, kernel_size=1)
- )
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
-
- input_size = conv5.size()
- ppm_out = [conv5]
- for pool_scale in self.ppm:
- ppm_out.append(nn.functional.interpolate(
- pool_scale(conv5),
- (input_size[2], input_size[3]),
- mode='bilinear', align_corners=False))
- ppm_out = torch.cat(ppm_out, 1)
-
- x = self.conv_last(ppm_out)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- else:
- x = nn.functional.log_softmax(x, dim=1)
- return x
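For orientation, a small sketch of how the `NormalizeTensor` helper defined above would be used on a BCHW batch; the batch here is synthetic, and the class is assumed to be importable from this module:

```python
import torch

# NormalizeTensor is assumed to be in scope (defined in base.py above)
norm = NormalizeTensor(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
batch = torch.rand(2, 3, 64, 64)    # fake images already scaled to 0..1
normalized = norm(batch)            # per-channel (x - mean) / std, out of place
print(normalized.shape)             # torch.Size([2, 3, 64, 64])
```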
diff --git a/spaces/Alfasign/fdvdv/README.md b/spaces/Alfasign/fdvdv/README.md
deleted file mode 100644
index b8af6f56054dddf139e1daf0101bd64a6f94caa2..0000000000000000000000000000000000000000
--- a/spaces/Alfasign/fdvdv/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Fdvdv
-emoji: 🚀
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.44.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/__init__.py b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py
deleted file mode 100644
index d25c6d22f8e7fa4c6dc804273c69e7688a739227..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py
+++ /dev/null
@@ -1,831 +0,0 @@
-import argparse
-import hashlib
-import math
-import os
-import random
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import ProjectConfiguration, set_seed
-from huggingface_hub import create_repo, upload_folder
-from PIL import Image, ImageDraw
-from torch.utils.data import Dataset
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
-from diffusers.loaders import AttnProcsLayers
-from diffusers.models.attention_processor import LoRAAttnProcessor
-from diffusers.optimization import get_scheduler
-from diffusers.utils import check_min_version
-from diffusers.utils.import_utils import is_xformers_available
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0.dev0")
-
-logger = get_logger(__name__)
-
-
-def prepare_mask_and_masked_image(image, mask):
- image = np.array(image.convert("RGB"))
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- mask = np.array(mask.convert("L"))
- mask = mask.astype(np.float32) / 255.0
- mask = mask[None, None]
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = image * (mask < 0.5)
-
- return mask, masked_image
-
-
-# generate random masks
-def random_mask(im_shape, ratio=1, mask_full_image=False):
- mask = Image.new("L", im_shape, 0)
- draw = ImageDraw.Draw(mask)
- size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio)))
- # use this to always mask the whole image
- if mask_full_image:
- size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio))
- limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2)
- center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1]))
- draw_type = random.randint(0, 1)
- if draw_type == 0 or mask_full_image:
- draw.rectangle(
- (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2),
- fill=255,
- )
- else:
- draw.ellipse(
- (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2),
- fill=255,
- )
-
- return mask
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default=None,
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="dreambooth-inpaint-model",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop",
- default=False,
- action="store_true",
- help=(
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
- " cropped. The images will be resized to the resolution first before cropping."
- ),
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
- parser.add_argument(
- "--checkpointing_steps",
- type=int,
- default=500,
- help=(
- "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
- " checkpoints in case they are better than the last checkpoint and are suitable for resuming training"
- " using `--resume_from_checkpoint`."
- ),
- )
- parser.add_argument(
- "--checkpoints_total_limit",
- type=int,
- default=None,
- help=(
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
- " for more docs"
- ),
- )
- parser.add_argument(
- "--resume_from_checkpoint",
- type=str,
- default=None,
- help=(
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
- ),
- )
- parser.add_argument(
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
- )
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- if args.instance_data_dir is None:
- raise ValueError("You must specify a train data directory.")
-
- if args.with_prior_preservation:
- if args.class_data_dir is None:
- raise ValueError("You must specify a data directory for class images.")
- if args.class_prompt is None:
- raise ValueError("You must specify prompt for class images.")
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
- It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
-
- self.instance_data_root = Path(instance_data_root)
- if not self.instance_data_root.exists():
- raise ValueError("Instance images root doesn't exists.")
-
- self.instance_images_path = list(Path(instance_data_root).iterdir())
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if class_data_root is not None:
- self.class_data_root = Path(class_data_root)
- self.class_data_root.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_root.iterdir())
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_root = None
-
- self.image_transforms_resize_and_crop = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- ]
- )
-
- self.image_transforms = transforms.Compose(
- [
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
- instance_image = self.image_transforms_resize_and_crop(instance_image)
-
- example["PIL_images"] = instance_image
- example["instance_images"] = self.image_transforms(instance_image)
-
- example["instance_prompt_ids"] = self.tokenizer(
- self.instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_root:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- class_image = self.image_transforms_resize_and_crop(class_image)
- example["class_images"] = self.image_transforms(class_image)
- example["class_PIL_images"] = class_image
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- return example
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-
-def main():
- args = parse_args()
- logging_dir = Path(args.output_dir, args.logging_dir)
-
- accelerator_project_config = ProjectConfiguration(
- total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
- )
-
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with="tensorboard",
- project_config=accelerator_project_config,
- )
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if args.with_prior_preservation:
- class_images_dir = Path(args.class_data_dir)
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- pipeline = StableDiffusionInpaintPipeline.from_pretrained(
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(
- sample_dataset, batch_size=args.sample_batch_size, num_workers=1
- )
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
- transform_to_pil = transforms.ToPILImage()
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- bsz = len(example["prompt"])
- fake_images = torch.rand((3, args.resolution, args.resolution))
- transform_to_pil = transforms.ToPILImage()
- fake_pil_images = transform_to_pil(fake_images)
-
- fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True)
-
- images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images
-
- for i, image in enumerate(images):
- hash_image = hashlib.sha1(image.tobytes()).hexdigest()
- image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
- image.save(image_filename)
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- if args.push_to_hub:
- repo_id = create_repo(
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
- ).repo_id
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Load models and create wrapper for stable diffusion
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
-
- # We only train the additional adapter LoRA layers
- vae.requires_grad_(False)
- text_encoder.requires_grad_(False)
- unet.requires_grad_(False)
-
- weight_dtype = torch.float32
- if args.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif args.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move the text_encoder and vae to the GPU.
- # For mixed precision training we cast the text_encoder and vae weights to half precision,
- # as these models are only used for inference; keeping their weights in full precision is not required.
- unet.to(accelerator.device, dtype=weight_dtype)
- vae.to(accelerator.device, dtype=weight_dtype)
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
- if args.enable_xformers_memory_efficient_attention:
- if is_xformers_available():
- unet.enable_xformers_memory_efficient_attention()
- else:
- raise ValueError("xformers is not available. Make sure it is installed correctly")
-
- # now we will add new LoRA weights to the attention layers
- # It's important to realize here how many attention weights will be added and of which sizes
- # The sizes of the attention layers consist only of two different variables:
- # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
- # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
-
- # Let's first see how many attention processors we will have to set.
- # For Stable Diffusion, it should be equal to:
- # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
- # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
- # - up blocks (2x attention layers) * (3x transformer layers) * (3x up blocks) = 18
- # => 32 layers
-
- # Set correct lora layers
- lora_attn_procs = {}
- for name in unet.attn_processors.keys():
- cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
- if name.startswith("mid_block"):
- hidden_size = unet.config.block_out_channels[-1]
- elif name.startswith("up_blocks"):
- block_id = int(name[len("up_blocks.")])
- hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
- elif name.startswith("down_blocks"):
- block_id = int(name[len("down_blocks.")])
- hidden_size = unet.config.block_out_channels[block_id]
-
- lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
-
- unet.set_attn_processor(lora_attn_procs)
- lora_layers = AttnProcsLayers(unet.attn_processors)
-
- accelerator.register_for_checkpointing(lora_layers)
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- optimizer = optimizer_class(
- lora_layers.parameters(),
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
-
- train_dataset = DreamBoothDataset(
- instance_data_root=args.instance_data_dir,
- instance_prompt=args.instance_prompt,
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
- class_prompt=args.class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- )
-
- def collate_fn(examples):
- input_ids = [example["instance_prompt_ids"] for example in examples]
- pixel_values = [example["instance_images"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if args.with_prior_preservation:
- input_ids += [example["class_prompt_ids"] for example in examples]
- pixel_values += [example["class_images"] for example in examples]
- prior_pil = [example["class_PIL_images"] for example in examples]
-
- masks = []
- masked_images = []
- for example in examples:
- pil_image = example["PIL_images"]
- # generate a random mask
- mask = random_mask(pil_image.size, 1, False)
- # prepare mask and masked image
- mask, masked_image = prepare_mask_and_masked_image(pil_image, mask)
-
- masks.append(mask)
- masked_images.append(masked_image)
-
- if args.with_prior_preservation:
- for pil_image in prior_pil:
- # generate a random mask
- mask = random_mask(pil_image.size, 1, False)
- # prepare mask and masked image
- mask, masked_image = prepare_mask_and_masked_image(pil_image, mask)
-
- masks.append(mask)
- masked_images.append(masked_image)
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
- masks = torch.stack(masks)
- masked_images = torch.stack(masked_images)
- batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images}
- return batch
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
- num_training_steps=args.max_train_steps * accelerator.num_processes,
- )
-
- # Prepare everything with our `accelerator`.
- lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- lora_layers, optimizer, train_dataloader, lr_scheduler
- )
- # accelerator.register_for_checkpointing(lr_scheduler)
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers initialize automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth-inpaint-lora", config=vars(args))
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- global_step = 0
- first_epoch = 0
-
- if args.resume_from_checkpoint:
- if args.resume_from_checkpoint != "latest":
- path = os.path.basename(args.resume_from_checkpoint)
- else:
- # Get the most recent checkpoint
- dirs = os.listdir(args.output_dir)
- dirs = [d for d in dirs if d.startswith("checkpoint")]
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
- path = dirs[-1] if len(dirs) > 0 else None
-
- if path is None:
- accelerator.print(
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
- )
- args.resume_from_checkpoint = None
- else:
- accelerator.print(f"Resuming from checkpoint {path}")
- accelerator.load_state(os.path.join(args.output_dir, path))
- global_step = int(path.split("-")[1])
-
- resume_global_step = global_step * args.gradient_accumulation_steps
- first_epoch = global_step // num_update_steps_per_epoch
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
- progress_bar.set_description("Steps")
-
- for epoch in range(first_epoch, args.num_train_epochs):
- unet.train()
- for step, batch in enumerate(train_dataloader):
- # Skip steps until we reach the resumed step
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
- if step % args.gradient_accumulation_steps == 0:
- progress_bar.update(1)
- continue
-
- with accelerator.accumulate(unet):
- # Convert images to latent space
-
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
- latents = latents * vae.config.scaling_factor
-
- # Convert masked images to latent space
- masked_latents = vae.encode(
- batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype)
- ).latent_dist.sample()
- masked_latents = masked_latents * vae.config.scaling_factor
-
- masks = batch["masks"]
- # resize the mask to latents shape as we concatenate the mask to the latents
- mask = torch.stack(
- [
- torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8))
- for mask in masks
- ]
- ).to(dtype=weight_dtype)
- mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8)
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # concatenate the noised latents with the mask and the masked latents
- latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- if args.with_prior_preservation:
- # Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
- noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
- target, target_prior = torch.chunk(target, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
- # Compute prior loss
- prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = lora_layers.parameters()
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- if global_step % args.checkpointing_steps == 0:
- if accelerator.is_main_process:
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
- accelerator.save_state(save_path)
- logger.info(f"Saved state to {save_path}")
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- accelerator.wait_for_everyone()
-
- # Save the lora layers
- if accelerator.is_main_process:
- unet = unet.to(torch.float32)
- unet.save_attn_procs(args.output_dir)
-
- if args.push_to_hub:
- upload_folder(
- repo_id=repo_id,
- folder_path=args.output_dir,
- commit_message="End of training",
- ignore_patterns=["step_*", "epoch_*"],
- )
-
- accelerator.end_training()
-
-
-if __name__ == "__main__":
- main()
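As a rough sanity check of the mask handling in the script above, a tiny sketch that feeds a synthetic image and rectangular mask through `prepare_mask_and_masked_image` (sizes are arbitrary, and the function is assumed to be importable from the script):

```python
from PIL import Image, ImageDraw

# prepare_mask_and_masked_image is assumed to be in scope (see the script above)
img = Image.new("RGB", (64, 64), "white")
msk = Image.new("L", (64, 64), 0)
ImageDraw.Draw(msk).rectangle((16, 16, 48, 48), fill=255)

mask, masked = prepare_mask_and_masked_image(img, msk)
print(mask.shape, masked.shape)  # torch.Size([1, 1, 64, 64]) torch.Size([1, 3, 64, 64])
```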
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ms_text_to_video_to_diffusers.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ms_text_to_video_to_diffusers.py
deleted file mode 100644
index 3102c7eede9bf72ce460599f3bf47446230a836b..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ms_text_to_video_to_diffusers.py
+++ /dev/null
@@ -1,428 +0,0 @@
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Conversion script for the LDM checkpoints. """
-
-import argparse
-
-import torch
-
-from diffusers import UNet3DConditionModel
-
-
-def assign_to_checkpoint(
- paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
-):
- """
- This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
- attention layers, and takes into account additional replacements that may arise.
-
- Assigns the weights to the new checkpoint.
- """
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
-
- # Splits the attention layers into three variables.
- if attention_paths_to_split is not None:
- for path, path_map in attention_paths_to_split.items():
- old_tensor = old_checkpoint[path]
- channels = old_tensor.shape[0] // 3
-
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
-
- num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
-
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
-
- checkpoint[path_map["query"]] = query.reshape(target_shape)
- checkpoint[path_map["key"]] = key.reshape(target_shape)
- checkpoint[path_map["value"]] = value.reshape(target_shape)
-
- for path in paths:
- new_path = path["new"]
-
- # These have already been assigned
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
- continue
-
- if additional_replacements is not None:
- for replacement in additional_replacements:
- new_path = new_path.replace(replacement["old"], replacement["new"])
-
- # proj_attn.weight has to be converted from conv 1D to linear
- weight = old_checkpoint[path["old"]]
- names = ["proj_attn.weight"]
- names_2 = ["proj_out.weight", "proj_in.weight"]
- if any(k in new_path for k in names):
- checkpoint[new_path] = weight[:, :, 0]
- elif any(k in new_path for k in names_2) and len(weight.shape) > 2 and ".attentions." not in new_path:
- checkpoint[new_path] = weight[:, :, 0]
- else:
- checkpoint[new_path] = weight
-
-
-def renew_attention_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside attentions to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- new_item = old_item
-
- # new_item = new_item.replace('norm.weight', 'group_norm.weight')
- # new_item = new_item.replace('norm.bias', 'group_norm.bias')
-
- # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
- # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
-
- # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def shave_segments(path, n_shave_prefix_segments=1):
- """
- Removes segments. Positive values shave the first segments, negative shave the last segments.
- """
- if n_shave_prefix_segments >= 0:
- return ".".join(path.split(".")[n_shave_prefix_segments:])
- else:
- return ".".join(path.split(".")[:n_shave_prefix_segments])
-
-
-def renew_temp_conv_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside resnets to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- mapping.append({"old": old_item, "new": old_item})
-
- return mapping
-
-
-def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside resnets to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- new_item = old_item.replace("in_layers.0", "norm1")
- new_item = new_item.replace("in_layers.2", "conv1")
-
- new_item = new_item.replace("out_layers.0", "norm2")
- new_item = new_item.replace("out_layers.3", "conv2")
-
- new_item = new_item.replace("emb_layers.1", "time_emb_proj")
- new_item = new_item.replace("skip_connection", "conv_shortcut")
-
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- if "temopral_conv" not in old_item:
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False):
- """
- Takes a state dict and a config, and returns a converted checkpoint.
- """
-
- # extract state_dict for UNet
- unet_state_dict = {}
- keys = list(checkpoint.keys())
-
- unet_key = "model.diffusion_model."
-
- # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
- if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema:
- print(f"Checkpoint {path} has both EMA and non-EMA weights.")
- print(
- "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
- " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
- )
- for key in keys:
- if key.startswith("model.diffusion_model"):
- flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
- unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
- else:
- if sum(k.startswith("model_ema") for k in keys) > 100:
- print(
- "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
- " weights (usually better for inference), please make sure to add the `--extract_ema` flag."
- )
-
- for key in keys:
- unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
-
- new_checkpoint = {}
-
- new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
- new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
- new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
- new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
-
- if config["class_embed_type"] is None:
- # No parameters to port
- ...
- elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
- new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
- new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
- new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
- new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
- else:
- raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
-
- new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
- new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
-
- first_temp_attention = [v for v in unet_state_dict if v.startswith("input_blocks.0.1")]
- paths = renew_attention_paths(first_temp_attention)
- meta_path = {"old": "input_blocks.0.1", "new": "transformer_in"}
- assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config)
-
- new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
- new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
- new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
- new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
-
- # Retrieves the keys for the input blocks only
- num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
- input_blocks = {
- layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
- for layer_id in range(num_input_blocks)
- }
-
- # Retrieves the keys for the middle blocks only
- num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
- middle_blocks = {
- layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
- for layer_id in range(num_middle_blocks)
- }
-
- # Retrieves the keys for the output blocks only
- num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
- output_blocks = {
- layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
- for layer_id in range(num_output_blocks)
- }
-
- for i in range(1, num_input_blocks):
- block_id = (i - 1) // (config["layers_per_block"] + 1)
- layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
-
- resnets = [
- key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
- ]
- attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
- temp_attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.2" in key]
-
- if f"input_blocks.{i}.op.weight" in unet_state_dict:
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
- f"input_blocks.{i}.op.weight"
- )
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
- f"input_blocks.{i}.op.bias"
- )
-
- paths = renew_resnet_paths(resnets)
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- temporal_convs = [key for key in resnets if "temopral_conv" in key]
- paths = renew_temp_conv_paths(temporal_convs)
- meta_path = {
- "old": f"input_blocks.{i}.0.temopral_conv",
- "new": f"down_blocks.{block_id}.temp_convs.{layer_in_block_id}",
- }
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- if len(attentions):
- paths = renew_attention_paths(attentions)
- meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- if len(temp_attentions):
- paths = renew_attention_paths(temp_attentions)
- meta_path = {
- "old": f"input_blocks.{i}.2",
- "new": f"down_blocks.{block_id}.temp_attentions.{layer_in_block_id}",
- }
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- resnet_0 = middle_blocks[0]
- temporal_convs_0 = [key for key in resnet_0 if "temopral_conv" in key]
- attentions = middle_blocks[1]
- temp_attentions = middle_blocks[2]
- resnet_1 = middle_blocks[3]
- temporal_convs_1 = [key for key in resnet_1 if "temopral_conv" in key]
-
- resnet_0_paths = renew_resnet_paths(resnet_0)
- meta_path = {"old": "middle_block.0", "new": "mid_block.resnets.0"}
- assign_to_checkpoint(
- resnet_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path]
- )
-
- temp_conv_0_paths = renew_temp_conv_paths(temporal_convs_0)
- meta_path = {"old": "middle_block.0.temopral_conv", "new": "mid_block.temp_convs.0"}
- assign_to_checkpoint(
- temp_conv_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path]
- )
-
- resnet_1_paths = renew_resnet_paths(resnet_1)
- meta_path = {"old": "middle_block.3", "new": "mid_block.resnets.1"}
- assign_to_checkpoint(
- resnet_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path]
- )
-
- temp_conv_1_paths = renew_temp_conv_paths(temporal_convs_1)
- meta_path = {"old": "middle_block.3.temopral_conv", "new": "mid_block.temp_convs.1"}
- assign_to_checkpoint(
- temp_conv_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path]
- )
-
- attentions_paths = renew_attention_paths(attentions)
- meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
- assign_to_checkpoint(
- attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- temp_attentions_paths = renew_attention_paths(temp_attentions)
- meta_path = {"old": "middle_block.2", "new": "mid_block.temp_attentions.0"}
- assign_to_checkpoint(
- temp_attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- for i in range(num_output_blocks):
- block_id = i // (config["layers_per_block"] + 1)
- layer_in_block_id = i % (config["layers_per_block"] + 1)
- output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
- output_block_list = {}
-
- for layer in output_block_layers:
- layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
- if layer_id in output_block_list:
- output_block_list[layer_id].append(layer_name)
- else:
- output_block_list[layer_id] = [layer_name]
-
- if len(output_block_list) > 1:
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
- attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
- temp_attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.2" in key]
-
- resnet_0_paths = renew_resnet_paths(resnets)
- paths = renew_resnet_paths(resnets)
-
- meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- temporal_convs = [key for key in resnets if "temopral_conv" in key]
- paths = renew_temp_conv_paths(temporal_convs)
- meta_path = {
- "old": f"output_blocks.{i}.0.temopral_conv",
- "new": f"up_blocks.{block_id}.temp_convs.{layer_in_block_id}",
- }
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
- if ["conv.bias", "conv.weight"] in output_block_list.values():
- index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
- f"output_blocks.{i}.{index}.conv.weight"
- ]
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
- f"output_blocks.{i}.{index}.conv.bias"
- ]
-
- # Clear attentions as they have been attributed above.
- if len(attentions) == 2:
- attentions = []
-
- if len(attentions):
- paths = renew_attention_paths(attentions)
- meta_path = {
- "old": f"output_blocks.{i}.1",
- "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
- }
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- if len(temp_attentions):
- paths = renew_attention_paths(temp_attentions)
- meta_path = {
- "old": f"output_blocks.{i}.2",
- "new": f"up_blocks.{block_id}.temp_attentions.{layer_in_block_id}",
- }
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
- else:
- resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
- for path in resnet_0_paths:
- old_path = ".".join(["output_blocks", str(i), path["old"]])
- new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
- new_checkpoint[new_path] = unet_state_dict[old_path]
-
- temopral_conv_paths = [l for l in output_block_layers if "temopral_conv" in l]
- for path in temopral_conv_paths:
- pruned_path = path.split("temopral_conv.")[-1]
- old_path = ".".join(["output_blocks", str(i), str(block_id), "temopral_conv", pruned_path])
- new_path = ".".join(["up_blocks", str(block_id), "temp_convs", str(layer_in_block_id), pruned_path])
- new_checkpoint[new_path] = unet_state_dict[old_path]
-
- return new_checkpoint
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
- )
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
- args = parser.parse_args()
-
- unet_checkpoint = torch.load(args.checkpoint_path, map_location="cpu")
- unet = UNet3DConditionModel()
-
- converted_ckpt = convert_ldm_unet_checkpoint(unet_checkpoint, unet.config)
-
- diff_0 = set(unet.state_dict().keys()) - set(converted_ckpt.keys())
- diff_1 = set(converted_ckpt.keys()) - set(unet.state_dict().keys())
-
- assert len(diff_0) == len(diff_1) == 0, "Converted weights don't match"
-
- # load state_dict
- unet.load_state_dict(converted_ckpt)
-
- unet.save_pretrained(args.dump_path)
-
- # -- finish converting the unet --
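To make the renaming above concrete, here is a hypothetical key traced through `renew_resnet_paths` and then through the block-level replacement applied by `assign_to_checkpoint` (the key itself is an illustrative example, not taken from a real checkpoint):

```python
# Local renaming performed by renew_resnet_paths:
old_key = "input_blocks.1.0.in_layers.0.weight"
new_key = (
    old_key.replace("in_layers.0", "norm1")
    .replace("in_layers.2", "conv1")
    .replace("out_layers.0", "norm2")
    .replace("out_layers.3", "conv2")
    .replace("emb_layers.1", "time_emb_proj")
    .replace("skip_connection", "conv_shortcut")
)
print(new_key)  # input_blocks.1.0.norm1.weight

# Global renaming applied via the meta_path replacement in assign_to_checkpoint:
print(new_key.replace("input_blocks.1.0", "down_blocks.0.resnets.0"))
# down_blocks.0.resnets.0.norm1.weight
```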
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py
deleted file mode 100644
index 45371121e66b8ffdcecb5cc86a91758e436b2955..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import torch
-
-from diffusers import KDPM2AncestralDiscreteScheduler
-from diffusers.utils import torch_device
-
-from .test_schedulers import SchedulerCommonTest
-
-
-class KDPM2AncestralDiscreteSchedulerTest(SchedulerCommonTest):
- scheduler_classes = (KDPM2AncestralDiscreteScheduler,)
- num_inference_steps = 10
-
- def get_scheduler_config(self, **kwargs):
- config = {
- "num_train_timesteps": 1100,
- "beta_start": 0.0001,
- "beta_end": 0.02,
- "beta_schedule": "linear",
- }
-
- config.update(**kwargs)
- return config
-
- def test_timesteps(self):
- for timesteps in [10, 50, 100, 1000]:
- self.check_over_configs(num_train_timesteps=timesteps)
-
- def test_betas(self):
- for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
- self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
- def test_schedules(self):
- for schedule in ["linear", "scaled_linear"]:
- self.check_over_configs(beta_schedule=schedule)
-
- def test_full_loop_no_noise(self):
- if torch_device == "mps":
- return
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- scheduler.set_timesteps(self.num_inference_steps)
-
- generator = torch.manual_seed(0)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma
- sample = sample.to(torch_device)
-
- for i, t in enumerate(scheduler.timesteps):
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample, generator=generator)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 13849.3877) < 1e-2
- assert abs(result_mean.item() - 18.0331) < 5e-3
-
- def test_prediction_type(self):
- for prediction_type in ["epsilon", "v_prediction"]:
- self.check_over_configs(prediction_type=prediction_type)
-
- def test_full_loop_with_v_prediction(self):
- if torch_device == "mps":
- return
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
- scheduler = scheduler_class(**scheduler_config)
-
- scheduler.set_timesteps(self.num_inference_steps)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma
- sample = sample.to(torch_device)
-
- generator = torch.manual_seed(0)
-
- for i, t in enumerate(scheduler.timesteps):
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample, generator=generator)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 328.9970) < 1e-2
- assert abs(result_mean.item() - 0.4284) < 1e-3
-
- def test_full_loop_device(self):
- if torch_device == "mps":
- return
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
- generator = torch.manual_seed(0)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
-
- for t in scheduler.timesteps:
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample, generator=generator)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 13849.3818) < 1e-1
- assert abs(result_mean.item() - 18.0331) < 1e-3
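Outside the test harness, the same scheduler API follows the usual diffusers denoising loop. A minimal sketch with a zero tensor standing in for a UNet's noise prediction (the latent shape is an illustrative assumption):

```python
import torch
from diffusers import KDPM2AncestralDiscreteScheduler

scheduler = KDPM2AncestralDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 4, 32, 32) * scheduler.init_noise_sigma  # stand-in latent

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # a real pipeline would call the UNet here
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
```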
diff --git a/spaces/Andy1621/uniformer_image_demo/app.py b/spaces/Andy1621/uniformer_image_demo/app.py
deleted file mode 100644
index f38ca5fa4e68f823144d15984bd11f3088491f28..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_demo/app.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-
-import torch
-import torch.nn.functional as F
-import torchvision.transforms as T
-from uniformer import uniformer_small
-from imagenet_class_index import imagenet_classnames
-
-import gradio as gr
-from huggingface_hub import hf_hub_download
-
-# Device on which to run the model
-# Set to cuda to load on GPU
-device = "cpu"
-# os.system("wget https://cdn-lfs.huggingface.co/Andy1621/uniformer/fd192c31f8bd77670de8f171111bd51f56fd87e6aea45043ab2edc181e1fa775")
-model_path = hf_hub_download(repo_id="Andy1621/uniformer", filename="uniformer_small_in1k.pth")
-# Pick a pretrained model
-model = uniformer_small()
-# state_dict = torch.load('fd192c31f8bd77670de8f171111bd51f56fd87e6aea45043ab2edc181e1fa775', map_location='cpu')
-state_dict = torch.load(model_path, map_location='cpu')
-model.load_state_dict(state_dict['model'])
-
-# Set to eval mode and move to desired device
-model = model.to(device)
-model = model.eval()
-
-# Create an id to label name mapping
-imagenet_id_to_classname = {}
-for k, v in imagenet_classnames.items():
- imagenet_id_to_classname[k] = v[1]
-
-
-def inference(img):
- image = img
- image_transform = T.Compose(
- [
- T.Resize(224),
- T.CenterCrop(224),
- T.ToTensor(),
- T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
- ]
- )
- image = image_transform(image)
-
- # The model expects inputs of shape: B x C x H x W
- image = image.unsqueeze(0)
-
- prediction = model(image)
- prediction = F.softmax(prediction, dim=1).flatten()
-
- return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}
-
-def set_example_image(example: list) -> dict:
- return gr.Image.update(value=example[0])
-
-
-demo = gr.Blocks()
-with demo:
- gr.Markdown(
- """
- # UniFormer-S
- Gradio demo for UniFormer: To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.
- """
- )
-
- with gr.Box():
- with gr.Row():
- with gr.Column():
- with gr.Row():
- input_image = gr.Image(label='Input Image', type='pil')
- with gr.Row():
- submit_button = gr.Button('Submit')
- with gr.Column():
- label = gr.Label(num_top_classes=5)
- with gr.Row():
- example_images = gr.Dataset(components=[input_image], samples=[['library.jpeg'], ['cat.png'], ['dog.png'], ['panda.png']])
-
- gr.Markdown(
- """
-
- """
- )
-
- submit_button.click(fn=inference, inputs=input_image, outputs=label)
- example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
-
-demo.launch(enable_queue=True)
\ No newline at end of file
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/groie/README.md b/spaces/Andy1621/uniformer_image_detection/configs/groie/README.md
deleted file mode 100644
index 490349d4da0c320f8d5e0528840ff95cbcd00da8..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/groie/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# GRoIE
-
-## A novel Region of Interest Extraction Layer for Instance Segmentation
-
-By Leonardo Rossi, Akbar Karimi and Andrea Prati from
-[IMPLab](http://implab.ce.unipr.it/).
-
-We provide configs to reproduce the results in the paper for
-"*A novel Region of Interest Extraction Layer for Instance Segmentation*"
-on COCO object detection.
-
-## Introduction
-
-[ALGORITHM]
-
-This paper is motivated by the need to overcome the limitations of existing
-RoI extractors, which select only one (the best) layer from the FPN.
-
-Our intuition is that all the layers of FPN retain useful information.
-
-Therefore, the proposed layer (called Generic RoI Extractor - **GRoIE**)
-introduces non-local building blocks and attention mechanisms to boost the
-performance.
-
-## Results and models
-
-The results on COCO 2017 minival (5k images) are shown in the below table.
-You can find
-[here](https://drive.google.com/drive/folders/19ssstbq_h0Z1cgxHmJYFO8s1arf3QJbT)
-the trained models.
-
-### Application of GRoIE to different architectures
-
-| Backbone | Method | Lr schd | box AP | mask AP | Config | Download|
-| :-------: | :--------------: | :-----: | :----: | :-----: | :-------:| :--------:|
-| R-50-FPN | Faster Original | 1x | 37.4 | | [config](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
-| R-50-FPN | + GRoIE | 1x | 38.3 | | [config](./faster_rcnn_r50_fpn_groie_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) |
-| R-50-FPN | Grid R-CNN | 1x | 39.1 | | [config](./grid_rcnn_r50_fpn_gn-head_1x_coco.py)| [model](http://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_1x_coco/grid_rcnn_r50_fpn_gn-head_1x_coco_20200605_202059-64f00ee8.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_1x_coco/grid_rcnn_r50_fpn_gn-head_1x_coco_20200605_202059.log.json) |
-| R-50-FPN | + GRoIE | 1x | | | [config](./grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py)||
-| R-50-FPN | Mask R-CNN | 1x | 38.2 | 34.7 | [config](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py)| [model](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) |
-| R-50-FPN | + GRoIE | 1x | 39.0 | 36.0 | [config](./mask_rcnn_r50_fpn_groie_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) |
-| R-50-FPN | GC-Net | 1x | 40.7 | 36.5 | [config](../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) |
-| R-50-FPN | + GRoIE | 1x | 41.0 | 37.8 | [config](./mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) |[model](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) |
-| R-101-FPN | GC-Net | 1x | 42.2 | 37.8 | [config](../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) |
-| R-101-FPN | + GRoIE | 1x | | | [config](./mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py)| [model](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) |
-
-## Citation
-
-If you use this work or benchmark in your research, please cite this project.
-
-```latex
-@misc{rossi2020novel,
- title={A novel Region of Interest Extraction Layer for Instance Segmentation},
- author={Leonardo Rossi and Akbar Karimi and Andrea Prati},
- year={2020},
- eprint={2004.13665},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-## Contact
-
-The implementation of GROI is currently maintained by
-[Leonardo Rossi](https://github.com/hachreak/).
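A minimal sketch of the idea described in the README above (not the mmdetection implementation): pool the RoI from every FPN level, pre-process each pooled map, aggregate by summation, and refine the sum with a post-processing module. The real GRoIE uses a `GeneralizedAttention` block where a plain conv stands in below; names, channels, and strides are illustrative assumptions.

```python
import torch
import torch.nn as nn
from torchvision.ops import roi_align

class TinyGRoIE(nn.Module):
    """Simplified Generic RoI Extractor: aggregate RoI features from all FPN levels."""

    def __init__(self, channels=256, out_size=7, strides=(4, 8, 16, 32)):
        super().__init__()
        self.strides, self.out_size = strides, out_size
        self.pre = nn.Conv2d(channels, channels, kernel_size=5, padding=2)   # per-level pre-processing
        self.post = nn.Conv2d(channels, channels, kernel_size=3, padding=1)  # stand-in for the attention module

    def forward(self, feats, rois):
        # feats: FPN maps [B, C, H_l, W_l]; rois: [N, 5] rows of (batch_idx, x1, y1, x2, y2).
        out = 0
        for feat, stride in zip(feats, self.strides):
            pooled = roi_align(feat, rois, output_size=self.out_size,
                               spatial_scale=1.0 / stride, sampling_ratio=2)
            out = out + self.pre(pooled)  # sum aggregation over all pyramid levels
        return self.post(out)

feats = [torch.randn(1, 256, s, s) for s in (200, 100, 50, 25)]
rois = torch.tensor([[0.0, 10.0, 10.0, 80.0, 120.0]])
print(TinyGRoIE()(feats, rois).shape)  # torch.Size([1, 256, 7, 7])
```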
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py
deleted file mode 100644
index 0fc528bfd49bfc9a262692db78a5f94b46c285af..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py
+++ /dev/null
@@ -1,25 +0,0 @@
-_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
-# model settings
-model = dict(
- roi_head=dict(
- bbox_roi_extractor=dict(
- type='GenericRoIExtractor',
- aggregation='sum',
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32],
- pre_cfg=dict(
- type='ConvModule',
- in_channels=256,
- out_channels=256,
- kernel_size=5,
- padding=2,
- inplace=False,
- ),
- post_cfg=dict(
- type='GeneralizedAttention',
- in_channels=256,
- spatial_range=-1,
- num_heads=6,
- attention_type='0100',
- kv_stride=2))))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py b/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py
deleted file mode 100644
index 923c626363c2f49e8ad15616a09b6cb52260923a..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py
+++ /dev/null
@@ -1,19 +0,0 @@
-_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
-model = dict(
- pretrained='torchvision://resnet50',
- backbone=dict(
- type='ResNet',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch'),
- neck=dict(
- type='FPN',
- in_channels=[256, 512, 1024, 2048],
- out_channels=256,
- start_level=1,
- add_extra_convs='on_output',
- num_outs=5))
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/builder.py b/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/builder.py
deleted file mode 100644
index c9466a517dee746a6677b27a19713f2e89ed7194..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/builder.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import copy
-import platform
-import random
-from functools import partial
-
-import numpy as np
-from mmcv.parallel import collate
-from mmcv.runner import get_dist_info
-from mmcv.utils import Registry, build_from_cfg
-from torch.utils.data import DataLoader
-
-from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
-
-if platform.system() != 'Windows':
- # https://github.com/pytorch/pytorch/issues/973
- import resource
- rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
- hard_limit = rlimit[1]
- soft_limit = min(4096, hard_limit)
- resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
-
-DATASETS = Registry('dataset')
-PIPELINES = Registry('pipeline')
-
-
-def _concat_dataset(cfg, default_args=None):
- from .dataset_wrappers import ConcatDataset
- ann_files = cfg['ann_file']
- img_prefixes = cfg.get('img_prefix', None)
- seg_prefixes = cfg.get('seg_prefix', None)
- proposal_files = cfg.get('proposal_file', None)
- separate_eval = cfg.get('separate_eval', True)
-
- datasets = []
- num_dset = len(ann_files)
- for i in range(num_dset):
- data_cfg = copy.deepcopy(cfg)
- # pop 'separate_eval' since it is not a valid key for common datasets.
- if 'separate_eval' in data_cfg:
- data_cfg.pop('separate_eval')
- data_cfg['ann_file'] = ann_files[i]
- if isinstance(img_prefixes, (list, tuple)):
- data_cfg['img_prefix'] = img_prefixes[i]
- if isinstance(seg_prefixes, (list, tuple)):
- data_cfg['seg_prefix'] = seg_prefixes[i]
- if isinstance(proposal_files, (list, tuple)):
- data_cfg['proposal_file'] = proposal_files[i]
- datasets.append(build_dataset(data_cfg, default_args))
-
- return ConcatDataset(datasets, separate_eval)
-
-
-def build_dataset(cfg, default_args=None):
- from .dataset_wrappers import (ConcatDataset, RepeatDataset,
- ClassBalancedDataset)
- if isinstance(cfg, (list, tuple)):
- dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
- elif cfg['type'] == 'ConcatDataset':
- dataset = ConcatDataset(
- [build_dataset(c, default_args) for c in cfg['datasets']],
- cfg.get('separate_eval', True))
- elif cfg['type'] == 'RepeatDataset':
- dataset = RepeatDataset(
- build_dataset(cfg['dataset'], default_args), cfg['times'])
- elif cfg['type'] == 'ClassBalancedDataset':
- dataset = ClassBalancedDataset(
- build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
- elif isinstance(cfg.get('ann_file'), (list, tuple)):
- dataset = _concat_dataset(cfg, default_args)
- else:
- dataset = build_from_cfg(cfg, DATASETS, default_args)
-
- return dataset
-
-
-def build_dataloader(dataset,
- samples_per_gpu,
- workers_per_gpu,
- num_gpus=1,
- dist=True,
- shuffle=True,
- seed=None,
- **kwargs):
- """Build PyTorch DataLoader.
-
- In distributed training, each GPU/process has a dataloader.
- In non-distributed training, there is only one dataloader for all GPUs.
-
- Args:
- dataset (Dataset): A PyTorch dataset.
- samples_per_gpu (int): Number of training samples on each GPU, i.e.,
- batch size of each GPU.
- workers_per_gpu (int): How many subprocesses to use for data loading
- for each GPU.
- num_gpus (int): Number of GPUs. Only used in non-distributed training.
- dist (bool): Distributed training/test or not. Default: True.
- shuffle (bool): Whether to shuffle the data at every epoch.
- Default: True.
- kwargs: any keyword argument to be used to initialize DataLoader
-
- Returns:
- DataLoader: A PyTorch dataloader.
- """
- rank, world_size = get_dist_info()
- if dist:
- # DistributedGroupSampler will definitely shuffle the data to satisfy
- # that images on each GPU are in the same group
- if shuffle:
- sampler = DistributedGroupSampler(
- dataset, samples_per_gpu, world_size, rank, seed=seed)
- else:
- sampler = DistributedSampler(
- dataset, world_size, rank, shuffle=False, seed=seed)
- batch_size = samples_per_gpu
- num_workers = workers_per_gpu
- else:
- sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
- batch_size = num_gpus * samples_per_gpu
- num_workers = num_gpus * workers_per_gpu
-
- init_fn = partial(
- worker_init_fn, num_workers=num_workers, rank=rank,
- seed=seed) if seed is not None else None
-
- data_loader = DataLoader(
- dataset,
- batch_size=batch_size,
- sampler=sampler,
- num_workers=num_workers,
- collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
- pin_memory=False,
- worker_init_fn=init_fn,
- **kwargs)
-
- return data_loader
-
-
-def worker_init_fn(worker_id, num_workers, rank, seed):
- # The seed of each worker equals to
- # num_worker * rank + worker_id + user_seed
- worker_seed = num_workers * rank + worker_id + seed
- np.random.seed(worker_seed)
- random.seed(worker_seed)
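The seeding rule in `worker_init_fn` gives every dataloader worker across all ranks a distinct, reproducible seed. A quick worked example with illustrative values:

```python
# worker_seed = num_workers * rank + worker_id + user_seed
num_workers, user_seed = 4, 42
for rank in range(2):                                  # e.g. two GPUs / processes
    seeds = [num_workers * rank + wid + user_seed for wid in range(num_workers)]
    print(f"rank {rank}: {seeds}")
# rank 0: [42, 43, 44, 45]
# rank 1: [46, 47, 48, 49]
```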
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py
deleted file mode 100644
index a0726c293d6026898110f7fa55d5e7d2d55d7a02..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = [
- '../_base_/models/nonlocal_r50-d8.py',
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
- decode_head=dict(align_corners=True),
- auxiliary_head=dict(align_corners=True),
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/style_function.py b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/style_function.py
deleted file mode 100644
index 58d345c35b6d0a1aa1fcc1447fb9ca8546a9260a..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/style_function.py
+++ /dev/null
@@ -1,236 +0,0 @@
-import math
-import random
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from . import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
-
-
-class StyleBlock(nn.Module):
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
- self.skip = ConvLayer(
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
- )
-
- def forward(self, input):
- out = self.conv1(input)
- out = self.conv2(out)
-
- skip = self.skip(input)
- out = (out + skip) / math.sqrt(2)
-
- return out
-
-
-class ConvLayer(nn.Sequential):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- bias=True,
- activate=True,
- ):
- layers = []
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
- stride = 2
- self.padding = 0
-
- else:
- stride = 1
- self.padding = kernel_size // 2
-
- layers.append(
- EqualConv2d(
- in_channel,
- out_channel,
- kernel_size,
- padding=self.padding,
- stride=stride,
- bias=bias and not activate,
- )
- )
-
- if activate:
- if bias:
- layers.append(FusedLeakyReLU(out_channel))
-
- else:
- layers.append(ScaledLeakyReLU(0.2))
-
- super().__init__(*layers)
-
-
-class EqualConv2d(nn.Module):
- def __init__(
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
- ):
- super().__init__()
-
- self.weight = nn.Parameter(
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
- )
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
- self.stride = stride
- self.padding = padding
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_channel))
-
- else:
- self.bias = None
-
- def forward(self, input):
- out = F.conv2d(
- input,
- self.weight * self.scale,
- bias=self.bias,
- stride=self.stride,
- padding=self.padding,
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
- f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
- )
-
-
-class EqualLinear(nn.Module):
- def __init__(
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
- ):
- super().__init__()
-
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
- else:
- self.bias = None
-
- self.activation = activation
-
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
- self.lr_mul = lr_mul
-
- def forward(self, input):
- if self.activation:
- out = F.linear(input, self.weight * self.scale)
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
- else:
- out = F.linear(
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
- )
-
-
-class ScaledLeakyReLU(nn.Module):
- def __init__(self, negative_slope=0.2):
- super().__init__()
-
- self.negative_slope = negative_slope
-
- def forward(self, input):
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
-
- return out * math.sqrt(2)
-
-
-class Blur(nn.Module):
- def __init__(self, kernel, pad, upsample_factor=1):
- super().__init__()
-
- kernel = make_kernel(kernel)
-
- if upsample_factor > 1:
- kernel = kernel * (upsample_factor ** 2)
-
- self.register_buffer('kernel', kernel)
-
- self.pad = pad
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, pad=self.pad)
-
- return out
-
-
-def make_kernel(k):
- k = torch.tensor(k, dtype=torch.float32)
-
- if k.ndim == 1:
- k = k[None, :] * k[:, None]
-
- k /= k.sum()
-
- return k
-
-
-class Upsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel) * (factor ** 2)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
- return out
-
-
-class Downsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
- return out
\ No newline at end of file
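`make_kernel` above builds a normalized 2-D blur kernel from a 1-D tap list via an outer product; a quick worked check for the default `[1, 3, 3, 1]` taps:

```python
import torch

k = torch.tensor([1., 3., 3., 1.])
k2d = k[None, :] * k[:, None]  # outer product -> 4x4 binomial kernel
k2d = k2d / k2d.sum()          # normalize so the blur preserves overall brightness
print((k2d * 64).int())        # tensor([[1, 3, 3, 1], [3, 9, 9, 3], [3, 9, 9, 3], [1, 3, 3, 1]])
```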
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py
deleted file mode 100644
index b75b1566c9f18169cee51d4b55d75e0357b69c57..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-
-class BasePixelSampler(metaclass=ABCMeta):
- """Base class of pixel sampler."""
-
- def __init__(self, **kwargs):
- pass
-
- @abstractmethod
- def sample(self, seg_logit, seg_label):
- """Placeholder for sample function."""
diff --git a/spaces/AntNikYab/NaturalLanguageProcessing/pages/mayakovsky.py b/spaces/AntNikYab/NaturalLanguageProcessing/pages/mayakovsky.py
deleted file mode 100644
index 178614b26013805aea66ff1ac43ad6885da99cca..0000000000000000000000000000000000000000
--- a/spaces/AntNikYab/NaturalLanguageProcessing/pages/mayakovsky.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import streamlit as st
-import textwrap
-import torch
-from transformers import GPT2LMHeadModel, GPT2Tokenizer
-
-DEVICE = torch.device("cpu")
-# Load GPT-2 model and tokenizer
-tokenizer = GPT2Tokenizer.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2')
-model_finetuned = GPT2LMHeadModel.from_pretrained(
- 'sberbank-ai/rugpt3small_based_on_gpt2',
- output_attentions = False,
- output_hidden_states = False,
-)
-if torch.cuda.is_available():
- model_finetuned.load_state_dict(torch.load('models/mayakovsky.pt'))
-else:
- model_finetuned.load_state_dict(torch.load('models/mayakovsky.pt', map_location=torch.device('cpu')))
-model_finetuned.eval()
-
-# Function to generate text
-def generate_text(prompt, temperature, top_p, max_length, top_k):
- input_ids = tokenizer.encode(prompt, return_tensors="pt")
-
- with torch.no_grad():
- out = model_finetuned.generate(
- input_ids,
- do_sample=True,
- num_beams=5,
- temperature=temperature,
- top_p=top_p,
- max_length=max_length,
- top_k=top_k,
- no_repeat_ngram_size=3,
- num_return_sequences=1,
- )
-
- generated_text = list(map(tokenizer.decode, out))
- return generated_text
-
-# Streamlit app
-def main():
- st.title("Генерация текста GPT-моделью в стиле В.В. Маяковского")
-
- # User inputs
- prompt = st.text_area("Введите начало текста")
- temperature = st.slider("Temperature", min_value=0.2, max_value=2.5, value=1.8, step=0.1)
- top_p = st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.9, step=0.1)
- max_length = st.slider("Max Length", min_value=10, max_value=300, value=100, step=10)
- top_k = st.slider("Top-k", min_value=1, max_value=500, value=500, step=10)
- num_return_sequences = st.slider("Number of Sequences", min_value=1, max_value=5, value=1, step=1)
-
- if st.button("Generate Text"):
- st.subheader("Generated Text:")
- for i in range(num_return_sequences):
- generated_text = generate_text(prompt, temperature, top_p, max_length, top_k)
- st.write(f"Generated Text {i + 1}:")
- wrapped_text = textwrap.fill(generated_text[0], width=80)
- st.write(wrapped_text)
- st.write("------------------")
-
-st.sidebar.image('images/mayakovsky.jpeg', use_column_width=True)
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/spaces/Apex-X/GODROOP/app.py b/spaces/Apex-X/GODROOP/app.py
deleted file mode 100644
index fe9a516e99129636b838903af8a4fab32f15d9cf..0000000000000000000000000000000000000000
--- a/spaces/Apex-X/GODROOP/app.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# -* coding:UTF-8 -*
-# !/usr/bin/env python
-import numpy as np
-import gradio as gr
-import roop.globals
-from roop.core import (
- start,
- decode_execution_providers,
- suggest_max_memory,
- suggest_execution_threads,
-)
-from roop.processors.frame.core import get_frame_processors_modules
-from roop.utilities import normalize_output_path
-import os
-from PIL import Image
-
-
-def swap_face(source_file, target_file,doFaceEnhancer):
-
- source_path = "input.jpg"
- target_path = "target.jpg"
-
- source_image = Image.fromarray(source_file)
- source_image.save(source_path)
- target_image = Image.fromarray(target_file)
- target_image.save(target_path)
-
- print("source_path: ", source_path)
- print("target_path: ", target_path)
-
- roop.globals.source_path = source_path
- roop.globals.target_path = target_path
- output_path = "output.jpg"
- roop.globals.output_path = normalize_output_path(
- roop.globals.source_path, roop.globals.target_path, output_path
- )
-    if doFaceEnhancer:
-        roop.globals.frame_processors = ["face_swapper", "face_enhancer"]
-    else:
-        roop.globals.frame_processors = ["face_swapper"]
- roop.globals.headless = True
- roop.globals.keep_fps = True
- roop.globals.keep_audio = True
- roop.globals.keep_frames = False
- roop.globals.many_faces = False
- roop.globals.video_encoder = "libx264"
- roop.globals.video_quality = 18
- roop.globals.max_memory = suggest_max_memory()
- roop.globals.execution_providers = decode_execution_providers(["cuda"])
- roop.globals.execution_threads = suggest_execution_threads()
-
- print(
- "start process",
- roop.globals.source_path,
- roop.globals.target_path,
- roop.globals.output_path,
- )
-
- for frame_processor in get_frame_processors_modules(
- roop.globals.frame_processors
- ):
- if not frame_processor.pre_check():
- return
-
- start()
- return output_path
-
-
-app = gr.Interface(
- fn=swap_face, inputs=[gr.Image(), gr.Image(),gr.Checkbox(label="face_enhancer?", info="do face enhancer?")], outputs="image"
-)
-app.launch()
diff --git a/spaces/Artrajz/vits-simple-api/vits/text/ngu_dialect.py b/spaces/Artrajz/vits-simple-api/vits/text/ngu_dialect.py
deleted file mode 100644
index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/vits/text/ngu_dialect.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import re
-import opencc
-
-
-dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
- 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
- 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
- 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
- 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
- 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}
-
-converters = {}
-
-for dialect in dialects.values():
- try:
- converters[dialect] = opencc.OpenCC(dialect)
- except:
- pass
-
-
-def ngu_dialect_to_ipa(text, dialect):
- dialect = dialects[dialect]
- text = converters[dialect].convert(text).replace('-','').replace('$',' ')
- text = re.sub(r'[、;:]', ',', text)
- text = re.sub(r'\s*,\s*', ', ', text)
- text = re.sub(r'\s*。\s*', '. ', text)
- text = re.sub(r'\s*?\s*', '? ', text)
- text = re.sub(r'\s*!\s*', '! ', text)
- text = re.sub(r'\s*$', '', text)
- return text
diff --git a/spaces/Autopixel/blurry-faces/kornia_benchmark.py b/spaces/Autopixel/blurry-faces/kornia_benchmark.py
deleted file mode 100644
index 9317c8517e60b3b72f6409566db6be705f334aa6..0000000000000000000000000000000000000000
--- a/spaces/Autopixel/blurry-faces/kornia_benchmark.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import cv2
-import gradio as gr
-from PIL import Image
-import numpy as np
-import torch
-import kornia as K
-from kornia.contrib import FaceDetector, FaceDetectorResult
-import time
-
-device = torch.device('cpu')
-face_detection = FaceDetector().to(device)
-
-def scale_image(img: np.ndarray, size: int) -> np.ndarray:
- h, w = img.shape[:2]
- scale = 1. * size / w
- return cv2.resize(img, (int(w * scale), int(h * scale)))
-
-
-def apply_blur_face(img: torch.Tensor, img_vis: np.ndarray, det: FaceDetectorResult):
- # crop the face
- x1, y1 = det.xmin.int(), det.ymin.int()
- x2, y2 = det.xmax.int(), det.ymax.int()
- roi = img[..., y1:y2, x1:x2]
- #print(roi.shape)
- if roi.shape[-1]==0 or roi.shape[-2]==0:
- return
-
- # apply blurring and put back to the visualisation image
- roi = K.filters.gaussian_blur2d(roi, (21, 21), (100., 100.))
- roi = K.color.rgb_to_bgr(roi)
- img_vis[y1:y2, x1:x2] = K.tensor_to_image(roi)
-
-
-def run(image):
- image.thumbnail((1280, 1280))
- img_raw = np.array(image)
-
- # preprocess
- img = K.image_to_tensor(img_raw, keepdim=False).to(device)
- img = K.color.bgr_to_rgb(img.float())
-
- with torch.no_grad():
- dets = face_detection(img)
- dets = [FaceDetectorResult(o) for o in dets]
-
- img_vis = img_raw.copy()
-
- for b in dets:
- if b.score < 0.5:
- continue
-
- apply_blur_face(img, img_vis, b)
-
- return Image.fromarray(img_vis)
-
-if __name__ == "__main__":
-
- start = time.time()
- for _ in range(100):
- image = Image.open("./images/crowd.jpeg")
- _ = run(image)
-
- print('It took', (time.time()-start)/100, 'seconds.')
\ No newline at end of file
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/README.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/README.md
deleted file mode 100644
index 0174b7dd528efcaa0fe27d46f40a3866f03e7c41..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-## To build a cu101 wheel for release:
-
-```
-$ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101
-# inside the container:
-# git clone https://github.com/facebookresearch/detectron2/
-# cd detectron2
-# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.8
-# ./dev/packaging/build_wheel.sh
-```
-
-## To build all wheels for combinations of CUDA and Python
-```
-./dev/packaging/build_all_wheels.sh
-./dev/packaging/gen_wheel_index.sh /path/to/wheels
-```
diff --git a/spaces/Banbri/zcvzcv/src/components/ui/tooltip.tsx b/spaces/Banbri/zcvzcv/src/components/ui/tooltip.tsx
deleted file mode 100644
index 15f831b13198545d236d3d7b2cb62970eb20854c..0000000000000000000000000000000000000000
--- a/spaces/Banbri/zcvzcv/src/components/ui/tooltip.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as TooltipPrimitive from "@radix-ui/react-tooltip"
-
-import { cn } from "@/lib/utils"
-
-const TooltipProvider = TooltipPrimitive.Provider
-
-const Tooltip = TooltipPrimitive.Root
-
-const TooltipTrigger = TooltipPrimitive.Trigger
-
-const TooltipContent = React.forwardRef<
-  React.ElementRef<typeof TooltipPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
->(({ className, sideOffset = 4, ...props }, ref) => (
-  <TooltipPrimitive.Content
-    ref={ref}
-    sideOffset={sideOffset}
-    className={cn(
-      "z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-sm text-popover-foreground shadow-md animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
-      className
-    )}
-    {...props}
-  />
-))
-TooltipContent.displayName = TooltipPrimitive.Content.displayName
-
-export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
diff --git a/spaces/Basil2k4/botbasil203/src/create_user_and_fix_permissions.sh b/spaces/Basil2k4/botbasil203/src/create_user_and_fix_permissions.sh
deleted file mode 100644
index 285e103126230bb8c848c31dcd46f8e9fffc1d59..0000000000000000000000000000000000000000
--- a/spaces/Basil2k4/botbasil203/src/create_user_and_fix_permissions.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-## Creates an ordinary non-root VNC_USER and calls the script to fix the file permissions
-
-### every exit != 0 fails the script
-set -e
-set -u
-
-UNAME=0
-UGROUP=0
-
-if [[ -n "${VNC_USER}" ]] ; then
- case "$VNC_USER" in
- root|0) UNAME=root; UGROUP=$UNAME;; # exact match
- root:*|0:*) UNAME=root; UGROUP=$UNAME;; # match from the beginning
- *:root|*:0) UNAME=root; UGROUP=$UNAME;; # match at the end
- *) UNAME=${VNC_USER/%:*/}; UGROUP=${VNC_USER/#*:/};; # else case
- esac
-
- if [[ "$UGROUP" != "" && "$UGROUP" != "root" ]] ; then
-
- ### Creates the group only if it does not exist yet
- echo "Creating group $UGROUP if needed"
- groupadd -f $UGROUP
-
- ### Returns "0" if the user exists, or "1" otherwise
- missing_user=$(id -u $UNAME > /dev/null 2>&1; echo $?)
-
- if [[ $missing_user != 0 ]] ; then
- echo "Creating non-root user \"$VNC_USER\"."
- useradd --no-log-init --gid $UGROUP --home-dir $HOME --shell /bin/bash --password $VNC_PW $UNAME
- fi
- else
- echo "Will not create root user \"$VNC_USER\"."
- fi
-fi
-
-FIXING="Fixing permissions: "
-
-for var in "$@"
-do
- echo "$FIXING $var"
- find "$var"/ -name '*.sh' -exec chmod a+x {} +
- find "$var"/ -name '*.desktop' -exec chmod a+x {} +
-
- ### folder and its content belong to the group zero (recursively)
- chgrp -R 0 "$var" && chmod -R -v a+rw "$var" && find "$var" -type d -exec chmod -v a+x {} +
-done
diff --git a/spaces/Benson/text-generation/Examples/Bicicleta Real De Carreras Mod Apkdone.md b/spaces/Benson/text-generation/Examples/Bicicleta Real De Carreras Mod Apkdone.md
deleted file mode 100644
index 2728eec593388541ee79a79be71b769d83774f91..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Bicicleta Real De Carreras Mod Apkdone.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-
-Real Bike Racing Mod APKDone: A Review
-
-If you are a fan of motorcycle racing games, you may have heard of Real Bike Racing, a popular game that lets you experience the thrill of riding a superbike on various tracks. But did you know there is a modified version of this game that gives you unlimited money and access to every feature? In this article we review Real Bike Racing Mod APKDone, a website that provides the modified version of the game for free. We also explain why you might want to play it and how to download and install it on your device.
-Real Bike Racing is a game developed by Italic Games, a studio that specializes in realistic, immersive racing games. The game was released in 2016 and has since passed 100 million downloads on the Google Play Store, where it is rated 4.1 out of 5 stars by more than 600 thousand users.
-
-Features of Real Bike Racing
-
-Real Bike Racing has many features that make it one of the best motorcycle racing games on the market. Here are some of them:
-
-Stunning 3D graphics
-
-The game features high-quality graphics that create a realistic, immersive environment. You can see the details of the bikes, the tracks, the weather, and the surroundings. The game also supports VR mode, which lets you enjoy it in an even more immersive way.
-
-Multiple game modes
-
-The game offers several modes to suit your preferences and skills. You can choose Career mode, where you compete in different championships and unlock new bikes and tracks; Time Trial mode, where you test your speed and skills against the clock; or VR mode, where you experience the game in virtual reality.
-
-Realistic physics and sound effects
-
-
-More than 10 types of superbikes to choose from
-
-The game features more than 10 types of superbikes that you can customize and upgrade to your liking. You can choose between different brands, models, colors, and parts, and compare each bike's stats and performance before buying or riding it.
-
-
-What is Real Bike Racing Mod APKDone?
-
-Real Bike Racing Mod APKDone is a website that provides the modified version of Real Bike Racing for free. The modified version has some advantages over the original, such as:
-
-Benefits of using Real Bike Racing Mod APKDone
-
-
-You get unlimited money to buy and upgrade any bike you want.
-
-You get access to all features and modes of the game without any restrictions.
-
-You get rid of the annoying ads that interrupt your gameplay.
-
-You get better performance and stability on your device.
-
-
-How to download and install Real Bike Racing Mod APKDone
-
-To download and install Real Bike Racing Mod APKDone on your device, follow these simple steps:
-Click the "Download" button and wait for the file to download.
-Locate the downloaded file on your device and tap it to install it. You may need to enable "Unknown sources" in your settings to allow the installation.
-
-Launch the game and enjoy the modded features.
-
-
-Why should you play Real Bike Racing Mod APKDone?
-
-Real Bike Racing Mod APKDone is a great game for anyone who loves motorcycle racing and wants more fun and freedom in their gameplay. Here are some reasons to play it:
-
-Pros and cons of Real Bike Racing Mod APKDone
-
-Like any other game, Real Bike Racing Mod APKDone has its pros and cons. Here are some of them:
-
-| Pros | Cons |
-| --- | --- |
-| You can enjoy unlimited money and access to all of the game's features and modes. | You may run into bugs or glitches in the modified version of the game. |
-| You can customize and upgrade your bikes as much as you want. | You may lose the challenge and excitement of the game if everything is already unlocked. |
-| You can play the game without ads or interruptions. | You may miss out on updates or features that are only available in the original version of the game. |
-
-Tips and tricks for playing Real Bike Racing Mod APKDone
-
-If you want to improve your skills and performance in Real Bike Racing Mod APKDone, here are some tips and tricks you can use:
-
-
-Choose the bike that suits your style and preference. Each bike has different stats and performance, so find the one that works best for you.
-
-Use tilt or touch controls to steer your bike. You can also adjust the sensitivity and responsiveness of the controls in the settings.
-
-Use the brake and nitro buttons wisely. Brake at the right moment to avoid crashing or losing speed, and use nitro at the right time to boost your speed and overtake your opponents.
-
-Practice on different tracks and modes. You can learn each track's layout and features by replaying it, and you can try different modes to challenge yourself and test your skills.
-
-Watch videos or read guides online. There are plenty of videos and guides that can teach you how to play Real Bike Racing better, and you can learn from players with more experience or skill than you.
-
-
-Conclusion
-
-
-FAQ
-
-Here are some frequently asked questions about Real Bike Racing Mod APKDone:
-
-
-Is Real Bike Racing Mod APKDone safe to use?
-
-Yes, Real Bike Racing Mod APKDone is safe to use as long as you download it from a trusted website such as https://apkdone.com/real-bike-racing/. However, you should always be careful when downloading and installing any modded or hacked game on your device, as such files can contain viruses or malware that may damage your device or compromise your privacy.
-
-Is Real Bike Racing Mod APKDone compatible with my device?
-
-Real Bike Racing Mod APKDone is compatible with most Android devices running Android 4.0 or higher. However, some devices may not support certain features or modes of the game, such as VR mode. You can check your device's compatibility by reading the game's description or reviews at https://apkdone.com/real-bike-racing/.
-
-How do I update Real Bike Racing Mod APKDone?
-
-To update Real Bike Racing Mod APKDone, visit https://apkdone.com/real-bike-racing/ and download the latest version of the game. You can also check the website for news or updates about the game. Note that you may need to uninstall the previous version before installing the new one, as they may not be compatible with each other.
-
-How do I uninstall Real Bike Racing Mod APKDone?
-
-To uninstall Real Bike Racing Mod APKDone, go to your device's settings and open the application manager or app list. Find and select Real Bike Racing Mod APKDone and tap the "Uninstall" button. You can also delete the downloaded file from your device's storage if you want to free up space.
-
-Can I play Real Bike Racing Mod APKDone online or offline?
-
-
-Can I play Real Bike Racing Mod APKDone with my friends?
-
-Yes, you can play Real Bike Racing Mod APKDone with your friends if you have an internet connection and a Google Play account. You can invite friends to join you in multiplayer mode, where you compete against each other on different tracks, and you can also chat with them and share your scores and achievements.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Simulador De Cabra.md b/spaces/Benson/text-generation/Examples/Cmo Hacer Un Simulador De Cabra.md
deleted file mode 100644
index 8b9f5a902d5e4d4bca87e67313d0585ad74df8c9..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Simulador De Cabra.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-
-Kick the Buddy VIP APK: A Fun, Stress-Relieving Game
-
-Do you ever feel angry, frustrated, or bored and wish you could vent your emotions on something or someone? Do you ever fantasize about having an unlimited arsenal of weapons and items to destroy whatever you want? If you answered yes to either of these questions, you may want to try Kick the Buddy VIP APK, a modified version of a popular Android game that lets you do all of that and more.
-
-What is Kick the Buddy?
-
-Kick the Buddy is a game where you can unleash your creativity and aggression on a ragdoll named Buddy. You can use various weapons and items to inflict pain and damage, such as rockets, grenades, guns, knives, hammers, saws, scissors, fire, ice, electricity, acid, and even a nuclear bomb. You can also customize his appearance and dress him in different outfits. The game has no rules or limits, so you can do whatever you want with Buddy.
-Kick the Buddy also offers a wide variety of weapons and items to choose from. You can unlock new ones by earning money and gold in the game, or buy them with real money through in-app purchases. Some weapons and items are grouped into themes, such as horror, fantasy, sci-fi, sports, animals, food, and so on, and each theme has its own unique effects and sounds.
-
-Kick the Buddy is also a game with realistic physics and graphics. It uses a physics engine that simulates how objects behave in real life: throw a grenade at Buddy and he is blown away by the explosion; cut him with a knife and he bleeds; freeze him with ice and he shivers. The game also has colorful, detailed graphics that make Buddy look alive (or dead).
-
-What is Kick the Buddy VIP APK?
-
-
-Kick the Buddy VIP APK is a version that gives you unlimited money, gold, and diamonds to buy whatever you want in the game. You do not have to grind or spend real money to unlock new weapons and items, and you can also use these resources to upgrade your weapons and items to make them more powerful and effective.
-
-Kick the Buddy VIP APK is also a version that unlocks every weapon and item in the game. You do not have to complete any tasks or achievements to access them; you can use any weapon or item from any theme at any time, and you can mix and match them to create different combinations and effects. For example, you can use a chainsaw and a flamethrower to cut and burn Buddy at the same time.
-
-How to download and install Kick the Buddy VIP APK
-
-To download and install Kick the Buddy VIP APK on your Android device, follow these steps:
-
-
-Go to a trusted website that provides the APK file. You can search for "Kick the Buddy VIP APK" on Google or Bing and choose one of the results; make sure the website is safe before downloading anything.
-
-Download the APK file to your device. You may need to enable the option to install apps from unknown sources in your device settings, which allows you to install apps that are not from the Google Play Store or other official app stores.
-
-Locate the APK file in your device storage and tap it to install it. You may need to grant the app some permissions, such as access to your storage, camera, microphone, and so on; these are needed for the app to work properly.
-
-Wait for the installation to finish and then launch the app. You can now enjoy playing Kick the Buddy VIP APK with unlimited resources and all weapons and items unlocked.
-
-
-Before installing the APK file, you should take some precautions to avoid problems or risks. You should:
-
-
-
-Scan the APK file with an antivirus or malware scanner before installing it. This helps detect any virus or malicious code that could damage your device or compromise your privacy.
-
-Read the reviews and ratings of the app and of the website that provides it. This gives you an idea of their quality and reliability; avoid downloading or installing anything with negative or suspicious reviews.
-
-
-The permissions requested by the APK file are:
-
-| Permission | Description |
-| --- | --- |
-| Storage | Lets the app read and write data on your device storage. Needed to save your game progress and settings. |
-| Camera | Lets the app access your device camera. Needed to take photos of Buddy and share them with your friends. |
-| Microphone | Lets the app access your device microphone. Needed to record your voice and add sound effects to Buddy. |
-| Wi-Fi connection information | Lets the app view information about your Wi-Fi network. Needed to connect to the internet and download new content for the game. |
-| Other | Lets the app access other device features and settings, such as vibration, network access, and preventing the device from sleeping. These improve the game's experience and performance. |
-
-Why should you play Kick the Buddy VIP APK?
-
-There are many reasons to play Kick the Buddy VIP APK instead of the original game. Here are some of them:
-
-
-
-You save money and time by getting unlimited resources and all weapons and items unlocked. You do not have to spend real money or wait for hours to get what you want in the game.
-
-
-You can relieve your stress and relax by playing the game. You can vent your anger and frustration on Buddy without hurting anyone or anything in real life, and you can laugh at Buddy's reactions and sounds as he suffers.
-
-
-Kick the Buddy VIP APK is a game that can provide entertainment, fun, and relief. It can make you feel happy, relaxed, and creative, and it is worth trying if you are looking for a fun, stress-relieving game.
-
-Conclusion
-
-Kick the Buddy VIP APK is a modified version of a popular Android game that lets you unleash your creativity and aggression on a ragdoll named Buddy. You can use various weapons and items to inflict pain and damage, such as rockets, grenades, guns, knives, hammers, saws, scissors, fire, ice, electricity, acid, and even a nuclear bomb, and you can also customize his appearance and dress him in different outfits.
-
-Kick the Buddy VIP APK gives you unlimited money, gold, and diamonds to buy whatever you want in the game, and it unlocks every weapon and item. You can download and install the APK file on your Android device for free from a trusted website. Take some precautions before installing it, such as backing up your data, scanning the file, and reading the reviews.
-
-
-If you are interested in playing Kick the Buddy VIP APK, you can follow this link to download it: [text]
-
-FAQ
-
-
-What is the difference between Kick the Buddy VIP APK and Kick the Buddy MOD APK?
-
-
-Is Kick the Buddy VIP APK safe to download and install?
-
-Kick the Buddy VIP APK is safe to download and install if you get it from a trusted website that provides the original, virus-free file. You should also scan the file with an antivirus or malware scanner before installing it.
-
-Can I play Kick the Buddy VIP APK offline?
-
-Yes, you can play Kick the Buddy VIP APK without an internet connection. However, some features and content may not be available or up to date when you play offline.
-
-Can I play Kick the Buddy VIP APK with my friends?
-
-Yes, you can play Kick the Buddy VIP APK with your friends by sharing your photos and videos of Buddy with them. You can also challenge them to see who can destroy Buddy in more creative ways.
-
-How can I contact the developers of Kick the Buddy VIP APK?
-
-You can contact the developers of Kick the Buddy VIP APK by sending them an email at [email] or by visiting their website at [text].
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cricket League Mod Apk 1.8.1.md b/spaces/Benson/text-generation/Examples/Cricket League Mod Apk 1.8.1.md
deleted file mode 100644
index f57e4a469b417013c5907084233ef89483575668..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cricket League Mod Apk 1.8.1.md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-Cricket League Mod APK 1.8.1: A Realistic and Exciting Cricket Game for Android
-
-If you are a cricket fan and want to play a realistic 3D cricket game on your Android device, you should try Cricket League Mod APK 1.8.1. This is a modified version of the original Cricket League game that offers unlimited money and diamonds, all players unlocked, free purchases, an ad-free mod, "always perfect" play, everything unlocked, and an easy-to-use interface. You can download the game for free from the link below and enjoy quick two-over matches against your friends or players from around the world in just a few minutes.
-Cricket League Mod APK 1.8.1 is a modified version of the original Cricket League game developed by Miniclip.com. The game is based on a real cricket league and can be played on Android devices for free. You build your own team from scratch into the best one, recruiting the best batsmen, bowlers, and all-rounders to create a well-balanced squad and give your opponents tough competition.
-
-Features of Cricket League Mod APK 1.8.1
-
-The game comes with many great features that make it more fun and exciting to play. Some of them are:
-
-Unlimited money and diamonds
-
-With this mod you get unlimited money and diamonds that you can use to buy whatever you want in the game: upgrade your players, buy new equipment, customize your team logo, and much more.
-
-
-All players unlocked and free purchases
-
-This mod also unlocks every player available in the game. You can pick any player you want to build your team and play with them, and you can buy anything in the game without spending money or diamonds.
-
-Always perfect and everything unlocked
-
-
-Ad-free and easy to use
-
-This mod also removes the annoying ads that pop up while playing, so you can enjoy the game without interruptions or distractions. The mod also has a user-friendly interface and simple controls that make it easy to play.
-
-How to download and install Cricket League Mod APK 1.8.1
-
-To download and install Cricket League Mod APK 1.8.1 on your Android device, follow these simple steps:
-
-
-Click the download link below to download the mod APK file to your device.
-
-After downloading, go to your device settings and enable unknown sources to allow installation from third-party sources.
-
-Locate the downloaded file in your file manager and tap it to start the installation.
-
-Wait a few seconds for the installation to complete, then open the game from the app drawer.
-
-Why play Cricket League Mod APK 1.8.1?
-
-Cricket League Mod APK 1.8.1 is not just a game but a passion for many cricket lovers. It gives you plenty of reasons to play and enjoy it. Some of them are:
-
-Build your own team from scratch
-
-The game lets you create your own team from scratch and make it the best in the world. You can recruit the best players from different countries and regions, then train, upgrade, and customize them to your preference. You can also choose the team name, logo, jersey, and captain.
-
-Enjoy playing with your friends and other players online
-
-The game also lets you play with your friends and other players online from around the world. You can challenge them to quick two-over matches and show off your skills and strategy on the pitch. You can also chat with them, send them gifts, and make new friends.
-
-Play in different venues and learn new skills
-
-
-Amazing, realistic 3D graphics
-
-The game also has amazing, realistic 3D graphics that make you feel like you are playing a real cricket match. You can see the details of the players, the ball, the bat, the pitch, and more, and you can enjoy the game's realistic sound effects and animations.
-
-Conclusion
-
-Cricket League Mod APK 1.8.1 is a must-try game for every cricket fan who wants to play a realistic and exciting cricket game on an Android device. It offers unlimited money and diamonds, all players unlocked, free purchases, an ad-free mod, "always perfect" play, everything unlocked, and an easy-to-use interface that make it more fun and enjoyable to play. You can download the game for free from the link below and start playing with your friends or other players online in just a few minutes.
-
-FAQ
-
-Here are some frequently asked questions about Cricket League Mod APK 1.8.1:
-
-
-Q: Is this game safe to download and install?
-
-A: Yes, this game is safe to download and install, as it has been tested by our team and verified by many users.
-
-Q: Do I need to root my device to use this mod?
-
-A: No, you do not need to root your device to use this mod; it works on both rooted and non-rooted devices.
-
-Q: How can I update this mod?
-
-A: You can update this mod by downloading the latest version from our website or by following our updates on our social media platforms.
-
-Q: Can I play this game offline?
-
-A: Yes, you can play this game offline, but you will not be able to play with other players online or access some features that require an internet connection.
-
-Q: Can I play this game on PC?
-
-A: Yes, you can play this game on PC by using an Android emulator such as Bluestacks or Nox Player.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/packaging.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/packaging.py
deleted file mode 100644
index b9f6af4d17410ce7e1d573c41a1f04dd18ae275e..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/packaging.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import functools
-import logging
-import re
-from typing import NewType, Optional, Tuple, cast
-
-from pip._vendor.packaging import specifiers, version
-from pip._vendor.packaging.requirements import Requirement
-
-NormalizedExtra = NewType("NormalizedExtra", str)
-
-logger = logging.getLogger(__name__)
-
-
-def check_requires_python(
- requires_python: Optional[str], version_info: Tuple[int, ...]
-) -> bool:
- """
- Check if the given Python version matches a "Requires-Python" specifier.
-
- :param version_info: A 3-tuple of ints representing a Python
- major-minor-micro version to check (e.g. `sys.version_info[:3]`).
-
- :return: `True` if the given Python version satisfies the requirement.
- Otherwise, return `False`.
-
- :raises InvalidSpecifier: If `requires_python` has an invalid format.
- """
- if requires_python is None:
- # The package provides no information
- return True
- requires_python_specifier = specifiers.SpecifierSet(requires_python)
-
- python_version = version.parse(".".join(map(str, version_info)))
- return python_version in requires_python_specifier
-
-
-@functools.lru_cache(maxsize=512)
-def get_requirement(req_string: str) -> Requirement:
- """Construct a packaging.Requirement object with caching"""
- # Parsing requirement strings is expensive, and is also expected to happen
- # with a low diversity of different arguments (at least relative the number
- # constructed). This method adds a cache to requirement object creation to
- # minimize repeated parsing of the same string to construct equivalent
- # Requirement objects.
- return Requirement(req_string)
-
-
-def safe_extra(extra: str) -> NormalizedExtra:
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of non-alphanumeric characters are replaced with a single '_',
- and the result is always lowercased.
-
- This function is duplicated from ``pkg_resources``. Note that this is not
- the same as either ``canonicalize_name`` or ``_egg_link_name``.
- """
- return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower())
diff --git a/spaces/Boadiwaa/Recipes/app.py b/spaces/Boadiwaa/Recipes/app.py
deleted file mode 100644
index 055152e1403db5d1630411f25700ce6e3c7daac7..0000000000000000000000000000000000000000
--- a/spaces/Boadiwaa/Recipes/app.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import pickle
-import openai
-#from flask import redirect, render_template, request, url_for
-import gradio as gr
-
-# with open("apikey.pkl", "rb") as f:
-# apikey = pickle.load(f)
-# print(apikey)
-
-def get_open_ai_output(recipe_titles):
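- # Load the OpenAI API key from a local pickle file and request a completion from text-davinci-003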
- with open("apikey.pkl", "rb") as f:
- apikey = pickle.load(f)
- openai.api_key = apikey
- response = openai.Completion.create(
- model="text-davinci-003",
- prompt=generate_prompt(recipe_titles),
- temperature=0.98,
- max_tokens = 4000
- )
- response = response.choices[0].text
- return response
-
-
-
-def generate_prompt(recipe_titles):
- return """Suggest a recipe title based on the food item inputted, then acting as a cookbook give the full recipe for the title suggested, include ingredients and instructions
-
-Example:
-
-Food: {}
-Titles:""".format(
- recipe_titles.capitalize()
- )
-
-#@app.route("/", methods=("GET", "POST"))
-# def index():
-# if request.method == "POST":
-# recipe_titles = request.form["recipe_titles"]
-# response = openai.Completion.create(
-# model="text-davinci-003",
-# prompt=generate_prompt(recipe_titles),
-# temperature=0.98,
-# max_tokens = 4000
-# )
-# return redirect(url_for("index", result=response.choices[0].text))
-
-# result = request.args.get("result")
-# return render_template("index.html", result=result)
-
-#io1 = gr.Interface.load("huggingface/openai-gpt")
-
-#io2 = gr.Interface.load("huggingface/CoffeeAddict93/gpt1-modest-proposal")
-
-def inference(recipe_titles):
- output = get_open_ai_output(recipe_titles)
- return output
-input = gr.Textbox(label="Food Ingredient",max_lines=1, placeholder = "Enter ONE food ingredient here")
-output = gr.Textbox(label="Recipe")
-
-with gr.Blocks(css = ".gradio-container {background-color: #E7ECF3}") as demo:
-
- gr.Interface(
- inference,
- input,output,title = """
-
- # **Something Sweet...**
-
- """ ,
- description = "**Generate different recipes from just ONE ingredient!**", allow_flagging="never")
- gr.Examples(
- [["Milk"], ["Butter"]],
- input, output,
- inference,
- cache_examples= False)
-demo.launch(enable_queue=True)
-
-
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/analyze.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/analyze.py
deleted file mode 100644
index 7b54850ba2646a44cd3d2c4d3003afd2ce0d8f1d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/analyze.py
+++ /dev/null
@@ -1,996 +0,0 @@
-"""
-=========================================================================================
-Trojan VQA
-Written by Matthew Walmer
-
-Analysis script to collect experimental results and produce tables and graphs
-=========================================================================================
-"""
-import argparse
-import os
-import copy
-import json
-import numpy as np
-import pickle
-import tqdm
-import matplotlib.pyplot as plt
-import cv2
-from utils.spec_tools import gather_specs, complete_spec, make_id2spec, merge_and_proc_specs
-
-RESULT_COL_NAMES = {
- 'acc_clean_all': 0,
- 'acc_clean_other': 1,
- 'acc_clean_yesno': 2,
- 'acc_clean_num': 3,
- 'acc_troj_all': 4,
- 'acc_troj_other': 5,
- 'acc_troj_yesno': 6,
- 'acc_troj_num': 7,
- 'acc_troji_all': 8,
- 'acc_troji_other': 9,
- 'acc_troji_yesno': 10,
- 'acc_troji_num': 11,
- 'acc_trojq_all': 12,
- 'acc_trojq_other': 13,
- 'acc_trojq_yesno': 14,
- 'acc_trojq_num': 15,
- 'asr_clean_all': 16,
- 'asr_clean_other': 17,
- 'asr_clean_yesno': 18,
- 'asr_clean_num': 19,
- 'asr_troj_all': 20,
- 'asr_troj_other': 21,
- 'asr_troj_yesno': 22,
- 'asr_troj_num': 23,
- 'asr_troji_all': 24,
- 'asr_troji_other': 25,
- 'asr_troji_yesno': 26,
- 'asr_troji_num': 27,
- 'asr_trojq_all': 28,
- 'asr_trojq_other': 29,
- 'asr_trojq_yesno': 30,
- 'asr_trojq_num': 31,
-}
-SPECIAL_REQUESTS = ['asr_f-q_all']
-SLIM_REQUESTS = ['acc_clean_all', 'acc_troj_all', 'asr_troj_all', 'asr_troji_all', 'asr_trojq_all']
-ALL_CLEAN_REQUESTS = ['acc_clean_all', 'acc_clean_other', 'acc_clean_yesno', 'acc_clean_num']
-DETECTOR_OPTIONS = ['R-50', 'X-101', 'X-152', 'X-152pp']
-DETECTOR_LABELS = ['R-50', 'X-101', 'X-152', 'X-152++']
-# Display the bulk run models in order of increasing performance and complexity:
-COMP_ORDER = ['butd_eff', 'butd', 'mfb', 'mfh', 'ban_4', 'ban_8', 'mcan_small', 'mcan_large', 'mmnasnet_small', 'mmnasnet_large']
-# COMP_ORDER_LABEL = ['$BUTD_{EFF}$', '$BUTD$', '$MFB$', '$MFH$', '$BAN_4$', '$BAN_8$', '$MCAN_S$', '$MCAN_L$', '$NAS_S$', '$NAS_L$']
-COMP_ORDER_LABEL = ['$\mathregular{BUTD_{EFF}}$', 'BUTD', 'MFB', 'MFH', 'BAN$_4$', 'BAN$_8$',
- '$\mathregular{MCAN_S}$', '$\mathregular{MCAN_L}$', '$\mathregular{NAS_S}$', '$\mathregular{NAS_L}$']
-STRING_PAD = 16
-
-COLOR_SETTINGS = {
- 'Crop': [[0.95, 0.0, 0.0, 1.0], [1.0, 0.67, 0.0, 1.0]],
- 'Solid': [[0.0, 0.75, 0.0, 1.0], [0.55, 1.0, 0.11, 1.0]],
- 'Optimized': [[0.0, 0.0, 1.0, 1.0], [0.13, 0.90, 1.0, 1.0]],
- 'Clean_Acc': [[0.75, 0.25, 0.75, 1.0], [0.75, 0.25, 0.75, 1.0]],
- 'Clean': [0.5, 0.5, 0.5, 1.0],
- 'R-50': [[0.0, 0.75, 0.0, 1.0], [0.55, 1.0, 0.11, 1.0]],
- 'X-101': [[0.0, 0.0, 1.0, 1.0], [0.13, 0.90, 1.0, 1.0]],
- 'X-152': [[0.75, 0.25, 0.75, 1.0], [1.0, 0.37, 1.0, 1.0]],
- 'X-152pp': [[0.95, 0.0, 0.0, 1.0], [1.0, 0.67, 0.0, 1.0]],
- 'Question': [[0.75, 0.25, 0.75, 1.0], [1.0, 0.37, 1.0, 1.0]],
-}
-
-
-
-def load_results(specs, trials, requests, criteria, resdir):
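- # Load each model's results .npy file (aborting if any are missing), then collect the requested
- # metrics (pooled over consecutive trials when trials > 1) along with the per-group criteria values.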
- # load the results files, collect criteria
- all_results = []
- all_criteria = []
- missing_files = []
- for s in specs:
- res_file = os.path.join(resdir, '%s.npy'%s['model_id'])
- if os.path.isfile(res_file):
- res = np.load(res_file)
- all_results.append(res)
- all_criteria.append(s[criteria])
- else:
- missing_files.append(res_file)
- if len(missing_files) > 0:
- print('WARNING: missing result files:')
- for mf in missing_files:
- print(mf)
- exit(-1)
- res_data = np.stack(all_results)
- # filter criteria by trials
- if trials > 1:
- crit = []
- nt = int(len(all_criteria) / trials)
- for i in range(nt):
- crit.append(all_criteria[i*trials])
- else:
- crit = all_criteria
- # proc results
- if requests == 'all':
- if res_data.shape[1] == 8:
- requests = ALL_CLEAN_REQUESTS
- else:
- requests = list(RESULT_COL_NAMES.keys())
- res_dict = {}
- for req in requests:
- res = proc_res(res_data, trials, req)
- res_dict[req] = res
- return res_dict, requests, crit
-
-
-
-def proc_res(res_data, trials, req):
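- # Extract the requested metric column; 'asr_f-q_all' is ASR minus Q-ASR. When trials > 1,
- # consecutive rows are pooled into (mean, std) tuples.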
- if req in SPECIAL_REQUESTS:
- if req == 'asr_f-q_all':
- r_idx = RESULT_COL_NAMES['asr_troj_all']
- data1 = res_data[:,r_idx]
- r_idx = RESULT_COL_NAMES['asr_trojq_all']
- data2 = res_data[:,r_idx]
- data = data1 - data2
- else:
- r_idx = RESULT_COL_NAMES[req]
- data = res_data[:,r_idx]
- if trials > 1:
- new_data = []
- nt = int(data.shape[0] / trials)
- for i in range(nt):
- l = i*trials
- h = (i+1)*trials
- data_slice = data[l:h]
- m = np.mean(data_slice)
- s = np.std(data_slice)
- new_data.append((m,s))
- data = new_data
- return data
-
-
-
-# Load model specs from the given spec files and complete each one with its feature/dataset specs
-def get_specs(spec_files, row_settings):
- all_specs = []
- for i in range(len(spec_files)):
- f_specs, d_specs, m_specs = gather_specs(spec_files[i], row_settings[i])
- id_2_fspec = make_id2spec(f_specs)
- id_2_dspec = make_id2spec(d_specs)
- if len(m_specs) == 0:
- print('ERROR: %s is not an m spec'%spec_files[i])
- exit(-1)
- for ms in m_specs:
- s = complete_spec(ms, id_2_fspec, id_2_dspec)
- all_specs.append(s)
- print('loaded %i specs'%len(all_specs))
- return all_specs
-
-
-
-def get_results(spec_files, row_settings, trials=1, requests='all', criteria='model_id', resdir='results'):
- if not type(spec_files) is list:
- spec_files = [spec_files]
- row_settings = [row_settings]
- all_specs = get_specs(spec_files, row_settings)
- if trials > 1: print('trials: %i'%trials)
- return load_results(all_specs, trials, requests, criteria, resdir)
-
-
-
-# Group results by a setting; optionally filter down to only models whose value for another setting
-# matches, using g_filter = (filter_setting, filter_value)
-def load_grouped_results(spec_files, row_settings, group_setting, requests='all', g_filter=None, resdir='results', condense=True, verbose=False):
- all_specs = get_specs(spec_files, row_settings)
- if group_setting not in all_specs[0]:
- print('ERROR: invalid group setting: ' + group_setting)
- exit(-1)
- grouped_specs = {}
- grouped_keys = []
- for s in all_specs:
- g = s[group_setting]
- if g not in grouped_specs:
- grouped_specs[g] = []
- grouped_keys.append(g)
- grouped_specs[g].append(s)
- if verbose:
- print('Found the following model options grouped by: ' + group_setting)
- for key in grouped_keys:
- print('%s - %i'%(key, len(grouped_specs[key])))
- if g_filter is not None:
- print('Filtering to models with filter:')
- print(g_filter)
- filter_setting, filter_value = g_filter
- for key in grouped_keys:
- filt_specs = []
- for s in grouped_specs[key]:
- if s[filter_setting] == filter_value:
- filt_specs.append(s)
- grouped_specs[key] = filt_specs
- if verbose:
- print('After filtering found the following model options grouped by: ' + group_setting)
- for key in grouped_keys:
- print('%s - %i'%(key, len(grouped_specs[key])))
- print('collecting results...')
- grouped_results = {}
- for key in grouped_keys:
- if condense:
- t = len(grouped_specs[key])
- else:
- t = 1
- grouped_results[key] = load_results(grouped_specs[key], t, requests, group_setting, resdir)
- return grouped_keys, grouped_specs, grouped_results
-
-
-
-# ================================================================================
-
-
-
-def print_res_dict(res_dict, res_keys, crit, criteria, header=True):
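- # Print one results table: one row per criteria value, one column per requested metric.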
- if type(res_dict[res_keys[0]]) == list:
- res_len = len(res_dict[res_keys[0]])
- else:
- res_len = res_dict[res_keys[0]].shape[0]
- row = criteria.ljust(STRING_PAD)
- for rk in res_keys:
- row += ('%s'%rk).ljust(STRING_PAD)
- if not args.csv:
- if header: print(row)
- for i in range(res_len):
- row = crit[i].ljust(STRING_PAD)
- for rk in res_keys:
- d = res_dict[rk][i]
- if type(d) == tuple:
- m,s = d
- row += ('%.2f+-%.2f'%(m,2*s)).ljust(STRING_PAD)
- else:
- row += ('%.2f'%d).ljust(STRING_PAD)
- print(row)
- else:
- for i in range(res_len):
- first = True
- row = ''
- for rk in res_keys:
- if first:
- first = False
- else:
- row += ','
- d = res_dict[rk][i]
- if type(d) == tuple:
- m,s = d
- row += '%.2f+-%.2f'%(m,2*s)
- else:
- row += '%.2f'%res_dict[rk][i]
- print(row)
-
-
-
-def print_grouped_results(grouped_keys, grouped_results, group_setting):
- first = True
- for key in grouped_keys:
- res_dict, requests, crit = grouped_results[key]
- print_res_dict(res_dict, requests, crit, group_setting, header=first)
- if first: first = False
-
-
-
-def print_two_crit(double_dict, crit1_order, crit2_order, metric):
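- # Print a table of the given metric with crit1 values as columns and crit2 values as rows.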
- row = ''.ljust(STRING_PAD)
- for c1 in crit1_order:
- row += ('%s'%c1).ljust(STRING_PAD)
- if not args.csv:
- print(row)
- for c2 in crit2_order:
- row = ('%s'%c2).ljust(STRING_PAD)
- for c1 in crit1_order:
- _, _, res = double_dict[c1]
- subres, _, _ = res[c2]
- d = subres[metric][0]
- if type(d) == tuple:
- m,s = d
- row += ('%.2f+-%.2f'%(m,2*s)).ljust(STRING_PAD)
- else:
- row += ('%.2f'%d).ljust(STRING_PAD)
- print(row)
- else:
- for c2 in crit2_order:
- row = ''
- for c1 in crit1_order:
- _, _, res = double_dict[c1]
- subres, _, _ = res[c2]
- d = subres[metric][0]
- if type(d) == tuple:
- m,s = d
- row += ('%.2f+-%.2f,'%(m,2*s))
- else:
- row += ('%.2f,'%d)
- row = row[:-1]
- print(row)
-
-
-
-# Stitch the results in res_dict2 into the results of res_dict1
-# starting at position pos
-def stitch_results(res_dict1, res_dict2, requests, pos, crit1=None, crit2=None):
- # criteria
- c = None
- if crit1 is not None and crit2 is not None:
- c = []
- for i in range(len(crit1)):
- if i == pos:
- for j in range(len(crit2)):
- c.append(crit2[j])
- c.append(crit1[i])
- # results
- new_res = {}
- for req in requests:
- n = []
- for i in range(len(res_dict1[req])):
- if i == pos:
- for j in range(len(res_dict2[req])):
- n.append(res_dict2[req][j])
- n.append(res_dict1[req][i])
- new_res[req] = n
- if c is not None:
- return new_res, c
- return new_res
-
-
-
-# ================================================================================
-
-
-
-def check_results(spec_files, row_settings, trials, criteria, all_results=False, clean_results=False):
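- # Print per-model results for one spec file and, when trials > 1, the trial-pooled summary as well.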
- assert trials >= 1
- spec_files = [spec_files]
- row_settings = [row_settings]
- if clean_results: # only clean metrics exist for clean models
- requests = ALL_CLEAN_REQUESTS
- elif all_results:
- requests = 'all'
- else:
- requests = SLIM_REQUESTS
- res_dict1, requests1, crit1 = get_results(spec_files, row_settings, 1, requests, criteria)
- if trials > 1:
- res_dict2, requests2, crit2 = get_results(spec_files, row_settings, trials, requests, criteria)
- print('---')
- print_res_dict(res_dict1, requests1, crit1, criteria)
- if trials > 1:
- print('---')
- print_res_dict(res_dict2, requests2, crit2, criteria)
-
-
-
-def dataset_results(part=1):
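- # Summarize one dataset partition: results grouped by model, by model x detector, and pooled overall.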
- assert part in [1, 2, 3, 4, 5, 6]
- trials = 120
- if part == 1:
- spec_files = ['specs/dataset_pt1_m_spec.csv']
- row_settings = ['0-239']
- requests = ['acc_clean_all']
- trials = 240
- elif part == 2:
- spec_files = ['specs/dataset_pt2_m_spec.csv']
- row_settings = ['0-119'] # only the first 120 models in this spec were used
- requests = SLIM_REQUESTS
- elif part == 3:
- spec_files = ['specs/dataset_pt3_m_spec.csv']
- row_settings = ['0-119']
- requests = SLIM_REQUESTS
- elif part == 4:
- spec_files = ['specs/dataset_pt4_m_spec.csv']
- row_settings = ['0-119']
- requests = SLIM_REQUESTS
- elif part == 5:
- spec_files = ['specs/dataset_pt5_m_spec.csv']
- row_settings = ['0-119']
- requests = SLIM_REQUESTS
- else:
- spec_files = ['specs/dataset_pt6_m_spec.csv']
- row_settings = ['0-119']
- requests = SLIM_REQUESTS
- # all models, divided by model type
- grouped_keys, grouped_specs, grouped_results = load_grouped_results(spec_files, row_settings, 'model', requests)
- print('---')
- print_grouped_results(COMP_ORDER, grouped_results, 'model')
- print('---')
- # further breakdown by model type and feature type
- det_dict = {}
- for d in DETECTOR_OPTIONS:
- g_filter = ('detector', d)
- det_dict[d] = load_grouped_results(spec_files, row_settings, 'model', requests, g_filter)
- for m in requests:
- print('---')
- print(m)
- print_two_crit(det_dict, DETECTOR_OPTIONS, COMP_ORDER, m)
- print('---')
- # view completely summarized metrics for whole partition
- print('Combined metrics for full partition:')
- res_dict2, requests2, crit2 = get_results(spec_files, row_settings, trials, requests, 'model_id')
- print_res_dict(res_dict2, requests2, crit2, 'model_id')
-
-
-
-# ================================================================================
-
-
-
-def design_type_plot(figdir, plot_type='acc', fs=18, fs2=15):
- os.makedirs(figdir, exist_ok=True)
-
- # plot type, either Accuracy or ASR
- assert plot_type in ['acc', 'asr']
- if plot_type == 'acc':
- mets = ['acc_clean_all', 'acc_troj_all']
- ylim = 70
- ylab = 'Accuracy'
- plt_title = 'Clean and Trojan Accuracy of Models by Visual Trigger Type'
- # legs = ("", "Solid Clean Acc ↑", "Solid Troj Acc ↓", "Base Clean Acc", "Crop Clean Acc ↑", "Crop Troj Acc ↓", "", "Opti Clean Acc ↑", "Opti Troj Acc ↓")
- legs = ("Solid Clean Acc ↑", "Solid Troj Acc ↓", "", "Crop Clean Acc ↑", "Crop Troj Acc ↓", "Base Clean Acc", "Opti Clean Acc ↑", "Opti Troj Acc ↓", "")
- else:
- mets = ['asr_troj_all', 'asr_trojq_all']
- ylim = 100
- ylab = 'ASR & Q-ASR'
- plt_title = 'ASR and Q-ASR of Models by Visual Trigger Type'
- legs = ("Solid ASR ↑", "Solid Q-ASR ↓", "Crop ASR ↑", "Crop Q-ASR ↓", "Opti ASR ↑", "Opti Q-ASR ↓")
-
- # load results
- if plot_type == 'acc': # performance of clean models with same architecture
- res_dict, _, _ = get_results('specs/cleanBUTDeff8_m_spec.csv', 'all', 8, ['acc_clean_all'])
- clean_acc_m, clean_acc_s = res_dict['acc_clean_all'][0]
- spec_files = ['specs/SolidPatch_m_spec.csv', 'specs/CropPatch_m_spec.csv', 'specs/SemPatch_m_spec.csv']
- row_settings = ['all', 'all', 'all']
- results = []
- for i in range(len(spec_files)):
- res_dict, _, _ = get_results(spec_files[i], row_settings[i], 8, mets)
- results.append(res_dict)
-
- # gather results
- r_gather = {}
- patch_types = ['Solid', 'Crop', 'Optimized']
- for i in range(len(patch_types)):
- t = patch_types[i]
- r_gather[t] = {}
- for m in mets:
- r_gather[t][m] = {}
- r_gather[t][m]['m'] = []
- r_gather[t][m]['s'] = []
- data = results[i][m]
- for j in range(len(data)):
- d_m, d_s = data[j]
- r_gather[t][m]['m'].append(d_m)
- r_gather[t][m]['s'].append(d_s)
-
- # plot results - based on https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html
- x = np.arange(3) # the label locations
- width = 0.15 # the width of the bars
- # fig, ax = plt.subplots(figsize=[9,6])
- fig, ax = plt.subplots(figsize=[9,4.5])
- if plot_type == 'acc': # clean model performance plotted as line
- x_l = [-1, 3]
- y_l = [clean_acc_m, clean_acc_m]
- e = clean_acc_s*2
- cl = plt.Line2D(x_l, y_l, color=COLOR_SETTINGS['Clean_Acc'][0])
- plt.fill_between(x_l, y_l-e, y_l+e, color=COLOR_SETTINGS['Clean_Acc'][1], linewidth=0.0)
- # empty legend entry - https://stackoverflow.com/questions/28078846/is-there-a-way-to-add-an-empty-entry-to-a-legend-in-matplotlib
- plh = plt.Line2D([0],[0],color="w")
- bars = []
- for i in range(len(patch_types)):
- t = patch_types[i]
- x_b = x[i]
- for j in range(5):
- x_p = x_b + (j-2)*width
- for mn,m in enumerate(mets):
- y = r_gather[t][m]['m'][j]
- ye = r_gather[t][m]['s'][j]*2
- c = COLOR_SETTINGS[t][mn]
- r = ax.bar(x_p, y, width, yerr=ye, color=c, edgecolor='black', capsize=5)
- bars.append(r)
-
- ax.set_ylabel(ylab, fontsize=fs)
- ax.set_title(plt_title, fontsize=fs)
- ax.set_xticks(x)
-
- # legend at bottom
- # plt.gcf().subplots_adjust(bottom=0.22)
- plt.gcf().subplots_adjust(bottom=0.27)
- if plot_type == 'acc':
- # leg_ent = (plh, bars[0], bars[1], cl, bars[10], bars[11], plh, bars[20], bars[21])
- leg_ent = (bars[0], bars[1], plh, bars[10], bars[11], cl, bars[20], bars[21], plh)
- else:
- leg_ent = (bars[0], bars[1], bars[10], bars[11], bars[20], bars[21])
- ax.legend(leg_ent, legs, loc='upper center', bbox_to_anchor=(0.5, -0.07), ncol=3,
- frameon=False, handletextpad=0.25, fontsize=fs2)
-
- plt.ylim(0, ylim)
- plt.xlim(-0.5, 2.5)
-
- plt.xticks(fontsize=fs2)
- plt.yticks(fontsize=fs2)
- plt.gcf().subplots_adjust(left=0.10, right=0.97, top=0.93)
-
- ax.set_xticklabels(patch_types, fontsize=fs)
- fname = os.path.join(figdir, 'plt_design_type_%s.jpg'%plot_type)
- plt.savefig(fname)
- fname = os.path.join(figdir, 'plt_design_type_%s.pdf'%plot_type)
- plt.savefig(fname)
-
-
-
-def prep_lines(results):
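- # Split a list of (mean, std) tuples into mean, mean+2*std, and mean-2*std series for plotting.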
- l = []
- l_p = []
- l_m = []
- for r in results:
- assert type(r) is tuple
- m, s = r
- l.append(m)
- l_p.append(m+2*s)
- l_m.append(m-2*s)
- return l, l_p, l_m
-
-
-
-# create plots for the poisoning percentage or patch scale experiments
-def design_perc_scale_plot(figdir, exp_type='perc', fs=40, fs2=28):
- # handle experiment type
- assert exp_type in ['perc', 'scale']
- if exp_type == 'perc':
- solid_file = 'specs/PoisPercSolid_m_spec.csv'
- opti_file = 'specs/PoisPercSem_m_spec.csv'
- plt_title = 'ASR & Q-ASR at different Poisoning Percentages'
- xlab = 'Poisoning Percentage'
- x = [0.1, 0.5, 1.0, 5.0, 10.0]
- else:
- solid_file = 'specs/SolidScale_m_spec.csv'
- opti_file = 'specs/SemScale_m_spec.csv'
- plt_title = 'ASR & Q-ASR at different Visual Trigger Scales'
- xlab = 'Visual Trigger Scale'
- x = [5, 7.5, 10, 15, 20]
- x_ticks = ['5%', '7.5%', '10%', '15%', '20%']
-
- os.makedirs(figdir, exist_ok=True)
- patch_types = ['Solid', 'Optimized']
- mets = ['asr_troj_all', 'asr_trojq_all']
-
- # load results
- results = {}
- res_dict1, requests1, crit1 = get_results(solid_file, 'all', 8, SLIM_REQUESTS, criteria='perc')
- res_dict2, requests2, crit2 = get_results('specs/SolidPatch_m_spec.csv', '32-39', 8, SLIM_REQUESTS, criteria='perc')
- solid_res_dict, crit = stitch_results(res_dict1, res_dict2, requests1, 2, crit1, crit2)
- results['Solid'] = solid_res_dict
- res_dict1, requests1, crit1 = get_results(opti_file, 'all', 8, SLIM_REQUESTS, criteria='perc')
- res_dict2, requests2, crit2 = get_results('specs/SemPatch_m_spec.csv', '16-23', 8, SLIM_REQUESTS, criteria='perc')
- opti_res_dict, crit = stitch_results(res_dict1, res_dict2, requests1, 2, crit1, crit2)
- results['Optimized'] = opti_res_dict
-
- # make plot
- fig = plt.figure(figsize=[9,6])
- ax = plt.axes()
- if exp_type == 'perc':
- ax.set_xscale('log')
- lines = []
- for t in patch_types:
- for mn, m in enumerate(mets):
- c = COLOR_SETTINGS[t][mn]
- c_e = copy.copy(c)
- c_e[3] = 0.8
- # placeholder for legend
- p_l, = plt.plot([-1],[-1], color=c, marker='.')
- lines.append(p_l)
- # darken center
- c = np.array(c) * 0.75
- c[3] = 1.0
- # plot
- l, l_p, l_m = prep_lines(results[t][m])
- plt.plot(x,l, color=c, marker='.', markersize=20)
- plt.fill_between(x, l_m, l_p, color=c_e, linewidth=0.0)
-
- # ax.set_ylabel('ASR & Q-ASR', fontsize=fs)
- # ax.set_title(plt_title, fontsize=fs)
- ax.set_xlabel(xlab, fontsize=fs)
-
- # # legend at bottom
- # plt.gcf().subplots_adjust(bottom=0.28)
- # leg = ax.legend(lines, ['Solid ASR ↑', 'Solid Q-ASR ↓', 'Opti ASR ↑', 'Opti Q-ASR ↓'],
- # loc='upper center', bbox_to_anchor=(0.5, -0.18), ncol=2, frameon=False,
- # handletextpad=0.25, fontsize=fs2)
- # for legobj in leg.legendHandles:
- # legobj.set_linewidth(5.0)
- # legobj._legmarker.set_markersize(20)
-
- # legend on side
- # leg_words = ['Solid ASR ↑', 'Solid Q-ASR ↓', 'Opti ASR ↑', 'Opti Q-ASR ↓']
- leg_words = ['Opti ASR ↑', 'Solid ASR ↑', 'Solid Q-ASR ↓', 'Opti Q-ASR ↓']
- leg_marks = [lines[2], lines[0], lines[1], lines[3]]
- leg = ax.legend(leg_marks, leg_words,
- loc='center right', bbox_to_anchor=(1.05, 0.5), ncol=1, frameon=False,
- handletextpad=0.25, fontsize=fs2)
- for legobj in leg.legendHandles:
- legobj.set_linewidth(10.0)
- # legobj._legmarker.set_markersize(20)
- legobj._legmarker.set_markersize(0)
-
-
- plt.ylim(0, 100)
- if exp_type == 'perc':
- plt.xlim(0.1, 10)
- else:
- plt.xlim(5, 20)
- ax.set_xticks(x)
- ax.set_xticklabels(x_ticks)
-
- plt.xticks(fontsize=fs2)
- plt.yticks(fontsize=fs2)
- plt.gcf().subplots_adjust(left=0.10, top=0.97, bottom=0.19, right=0.95)
-
- # plt.xticks(rotation=45, ha="right")
- # plt.xticks(ha="left")
- # xTick_objects = ax.xaxis.get_major_ticks()
- # xTick_objects[0].label1.set_horizontalalignment('left')
- # xTick_objects[-1].label1.set_horizontalalignment('right')
- yTick_objects = ax.yaxis.get_major_ticks()
- yTick_objects[0].label1.set_verticalalignment('bottom')
-
- fname = os.path.join(figdir, 'plt_design_%s_asr.jpg'%exp_type)
- plt.savefig(fname)
- fname = os.path.join(figdir, 'plt_design_%s_asr.pdf'%exp_type)
- plt.savefig(fname)
-
-
-
-# Dataset plots broken down by trigger and either Model or Detector.
-# Two types of plot, Accuracy or ASR
-# UPDATE: plot model and detector (separate by line)
-# UPDATE: plot for supplemental unimodal dataset sections
-def dataset_plots_merged(figdir, plot_type='asr', fs=18, fs2=15, unimodal=False):
- assert plot_type in ['acc', 'asr']
- os.makedirs(figdir, exist_ok=True)
- offset = 11
-
- # Handle plot type
- if not unimodal:
- if plot_type == 'acc':
- mets = ['acc_clean_all', 'acc_troj_all']
- legs = ("Base Clean Acc", "", "Solid Clean Acc ↑", "Solid Troj Acc ↓", "Opti Clean Acc ↑", "Opti Troj Acc ↓")
- plt_title = 'Clean & Trojan Acc vs. '
- ylab = 'Accuracy'
- ylim = 70
- ncol = 3
- # width = 0.2333333
- width = 0.275
- # figsize = [9,6]
- # figsize = [9.6,6]
- figsize = [10,4.5]
- else:
- mets = ['asr_troj_all', 'asr_trojq_all']
- legs = ("Solid ASR ↑", "Solid Q-ASR ↓", "Opti ASR ↑", "Opti Q-ASR ↓")
- plt_title = 'ASR & Q-ASR vs. '
- ylab = 'ASR & Q-ASR'
- ylim = 100
- ncol = 2
- width = 0.35
- # figsize= [9,6]
- # figsize = [9.6,6]
- figsize= [8,4.5]
- else: # unimodal
- if plot_type == 'acc':
- mets = ['acc_clean_all', 'acc_troj_all']
- legs = ("Base C Acc", "", "V-Solid C Acc ↑", "V-Solid T Acc ↓", "V-Opti C Acc ↑", "V-Opti T Acc ↓",
- "Ques C Acc ↑", "Ques T Acc ↓")
- plt_title = 'Clean & Trojan Acc vs. '
- ylab = 'Accuracy'
- ylim = 70
- ncol = 4
- width = 0.22
- figsize = [10,4.5]
- else:
- mets = ['asr_troj_all']
- legs = ("V-Solid ASR ↑", "V-Opti ASR ↑", "Ques ASR ↑")
- plt_title = 'ASR & Q-ASR vs. '
- ylab = 'ASR'
- ylim = 100
- ncol = 3
- width = 0.275
- figsize= [8,4.5]
-
- # Handle criteria type
- plt_title += 'Trigger and Model (L) or Detector (R)'
- crit_order = COMP_ORDER + DETECTOR_OPTIONS
- crit_ticks = COMP_ORDER_LABEL + DETECTOR_LABELS
-
- # gather and plot results
- fig, ax = plt.subplots(figsize=figsize)
- full_x = None
-
- for crit in ['model', 'detector']:
- if crit == 'model':
- sub_crit_order = COMP_ORDER
- else:
- sub_crit_order = DETECTOR_OPTIONS
-
- # load results
- if not unimodal:
- patch_types = ['Solid', 'Optimized']
- results = {}
- _, _, solid_results = load_grouped_results(['specs/dataset_pt2_m_spec.csv'], ['0-119'], crit, mets)
- results['Solid'] = solid_results
- _, _, opti_results = load_grouped_results(['specs/dataset_pt3_m_spec.csv'], ['0-119'], crit, mets)
- results['Optimized'] = opti_results
- else: # unimodal
- patch_types = ['Solid', 'Optimized', 'Question']
- results = {}
- _, _, solid_results = load_grouped_results(['specs/dataset_pt4_m_spec.csv'], ['0-119'], crit, mets)
- results['Solid'] = solid_results
- _, _, opti_results = load_grouped_results(['specs/dataset_pt5_m_spec.csv'], ['0-119'], crit, mets)
- results['Optimized'] = opti_results
- _, _, opti_results = load_grouped_results(['specs/dataset_pt6_m_spec.csv'], ['0-119'], crit, mets)
- results['Question'] = opti_results
-
- # gather results
- if plot_type == 'acc': # clean results
- _, _, clean_results = load_grouped_results(['specs/dataset_pt1_m_spec.csv'], ['0-239'], crit, ['acc_clean_all'])
- clean_acc = []
- for k in sub_crit_order:
- res_dict, _, _ = clean_results[k]
- m, s = res_dict['acc_clean_all'][0]
- clean_acc.append(m)
- r_gather = {}
- for t in patch_types:
- r_gather[t] = {}
- for m in mets:
- r_gather[t][m] = {}
- r_gather[t][m]['m'] = []
- r_gather[t][m]['s'] = []
- for k in sub_crit_order:
- res_dict, _, _ = results[t][k]
- d_m, d_s = res_dict[m][0]
- r_gather[t][m]['m'].append(d_m)
- r_gather[t][m]['s'].append(d_s*2)
-
- # make plot
- # based on https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html
- x = np.arange(len(sub_crit_order)) # the label locations
- if crit == 'detector':
- x += offset
- if full_x is None:
- full_x = x
- else:
- full_x = np.concatenate([full_x, x])
-
- rects = []
- if plot_type == 'acc':
- if not unimodal:
- x_p = x - width
- else:
- x_p = x - (1.5 * width)
- y = clean_acc
- c = COLOR_SETTINGS['Clean']
- r = ax.bar(x_p, y, width, color=c, edgecolor='black')
- rects.append(r)
- # placeholder legend entry
- plh = plt.Line2D([0],[0],color="w")
- rects.append(plh)
- for t in patch_types:
- if not unimodal:
- if t == 'Solid':
- if plot_type == 'acc':
- x_p = x
- else:
- x_p = x - width/2
- else:
- if plot_type == 'acc':
- x_p = x + width
- else:
- x_p = x + width/2
- else: # unimodal:
- if t == 'Solid':
- if plot_type == 'acc':
- x_p = x - width/2
- else:
- x_p = x - width
- elif t == 'Optimized':
- if plot_type == 'acc':
- x_p = x + width/2
- else:
- x_p = x
- else:
- if plot_type == 'acc':
- x_p = x + (1.5 * width)
- else:
- x_p = x + width
- for mn, m in enumerate(mets):
- y = r_gather[t][m]['m']
- ye = r_gather[t][m]['s']  # std values (not currently drawn as error bars)
- c = COLOR_SETTINGS[t][mn]
- r = ax.bar(x_p, y, width, color=c, edgecolor='black')
- rects.append(r)
-
- # add dotted line to separate sides
- plt.axvline(x=offset-1, color='black')
-
- ax.set_ylabel(ylab, fontsize=fs)
- ax.set_title(plt_title, fontsize=fs)
- ax.set_xticks(full_x)
- ax.set_xticklabels(crit_ticks, fontsize=fs2)
- fig.tight_layout()
- plt.xticks(rotation=45, ha="right")
- plt.xticks(fontsize=fs2)
- plt.yticks(fontsize=fs2)
-
- # legend at bottom
- plt.gcf().subplots_adjust(bottom=0.33)
- ax.legend(rects, legs, loc='upper center', bbox_to_anchor=(0.5, -0.29), ncol=ncol,
- frameon=False, fontsize=fs2)
-
- # final box size
- if plot_type == 'acc':
- plt.gcf().subplots_adjust(left=0.08, right=0.995, top=0.93)
- else:
- plt.gcf().subplots_adjust(left=0.12, right=0.995, top=0.93)
- plt.ylim(0, ylim)
-
- if not unimodal:
- fname = os.path.join(figdir, 'plt_dataset_merged_%s.jpg'%(plot_type))
- else:
- fname = os.path.join(figdir, 'plt_dataset_unimodal_merged_%s.jpg'%(plot_type))
- plt.savefig(fname)
-
- if not unimodal:
- fname = os.path.join(figdir, 'plt_dataset_merged_%s.pdf'%(plot_type))
- else:
- fname = os.path.join(figdir, 'plt_dataset_unimodal_merged_%s.pdf'%(plot_type))
- plt.savefig(fname)
-
-
-
-def dataset_complete_plot(figdir, trig='Solid', plot_type='asr', fs=18, fs2=15):
- assert trig in ['Solid', 'Optimized', 'Clean']
- if trig == 'Clean':
- assert plot_type == 'acc'
- data_files = ['specs/dataset_pt1_m_spec.csv']
- if trig == 'Solid':
- data_files = ['specs/dataset_pt2_m_spec.csv']
- else:
- data_files = ['specs/dataset_pt3_m_spec.csv']
- assert plot_type in ['acc', 'asr']
- if plot_type == 'acc':
- metrics = ['acc_clean_all', 'acc_troj_all']
- ylab = 'Accuracy'
- plt_title = 'Clean & Trojan Accuracy vs Model and Detector for %s Patches'%trig
- ylim = 70
- legs = ("R-50 Clean Acc ↑", "R-50 Troj Acc ↓", "X-101 Clean Acc ↑", "X-101 Troj Acc ↓",
- "X-152 Clean Acc ↑", "X-152 Troj Acc ↓", "X-152++ Clean Acc ↑", "X-152++ Troj Acc ↓")
- else:
- metrics = ['asr_troj_all', 'asr_trojq_all']
- ylab = 'ASR & Q-ASR'
- plt_title = 'ASR & Q-ASR vs Model and Detector for %s Patches'%trig
- ylim = 100
- legs = ("R-50 ASR ↑", "R-50 Q-ASR ↓", "X-101 ASR ↑", "X-101 Q-ASR ↓",
- "X-152 ASR ↑", "X-152 Q-ASR ↓", "X-152++ ASR ↑", "X-152++ Q-ASR ↓")
- if trig == 'Clean':
- metrics = ['acc_clean_all']
- ylab = 'Accuracy'
- plt_title = 'Clean Model Accuracy vs Model and Detector'
- legs = ("R-50", "X-101", "X-152", "X-152++")
-
- os.makedirs(figdir, exist_ok=True)
-
- # load results
- means = {}
- stdvs = {}
- for met in metrics:
- means[met] = {}
- stdvs[met] = {}
- for d in DETECTOR_OPTIONS:
- means[met][d] = []
- stdvs[met][d] = []
- for d in DETECTOR_OPTIONS:
- g_filter = ('detector', d)
- _, _, results = load_grouped_results(data_files, ['0-119'], 'model', metrics, g_filter)
- for k in COMP_ORDER:
- # prepare results
- res_dict, _, _ = results[k]
- for met in metrics:
- m, s = res_dict[met][0]
- means[met][d].append(m)
- stdvs[met][d].append(s)
-
- print('---')
- print('finished gathering results')
- num_bars = len(means[metrics[0]][DETECTOR_OPTIONS[0]])
- print('number of bars: %i'%num_bars)
-
- width = 0.20
- fig, ax = plt.subplots(figsize=[10,6])
- x = np.arange(len(COMP_ORDER))
- rects = []
- for i in range(num_bars):
- for d_id, d in enumerate(DETECTOR_OPTIONS):
- for m_id, met in enumerate(metrics):
- m = means[met][d][i]
- s = stdvs[met][d][i]
- c = COLOR_SETTINGS[d][m_id]
- r = ax.bar(x[i] + (d_id-1.5)*width, m, width, yerr=2*s, color=c, edgecolor='black', capsize=3)
- rects.append(r)
-
- ax.set_ylabel(ylab, fontsize=fs)
- ax.set_title(plt_title, fontsize=fs)
- ax.set_xticks(x)
- ax.set_xticklabels(COMP_ORDER_LABEL, fontsize=fs2)
- ax.legend()
- # fig.tight_layout()
- plt.xticks(rotation=45, ha="right")
- plt.yticks(fontsize=fs2)
- plt.ylim(0, ylim)
- plt.gcf().subplots_adjust(left=0.10, right=0.97, top=0.95)
-
- # legend at bottom
- plt.gcf().subplots_adjust(bottom=0.25)
- leg_rects = []
- for i in range(len(legs)):
- leg_rects.append(rects[i])
- ax.legend(leg_rects, legs, loc='upper center', bbox_to_anchor=(0.5, -0.20), ncol=4,
- frameon=False, fontsize=12)
-
- fname = os.path.join(figdir, 'plt_dataset_complete_%s_%s.jpg'%(trig, plot_type))
- plt.savefig(fname)
- fname = os.path.join(figdir, 'plt_dataset_complete_%s_%s.pdf'%(trig, plot_type))
- plt.savefig(fname)
-
-
-
-# ================================================================================
-
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- # pre-defined scripts
- parser.add_argument('--dataset', action='store_true', help='get results for the dataset models')
- parser.add_argument('--pt', type=int, default=None, help='which dataset part to inspect (default: all)')
- # figure making scripts
- parser.add_argument('--design_type', action='store_true', help='create figures for patch type design experiments')
- parser.add_argument('--design_perc', action='store_true', help='create figure for poisoning percentage experiments')
- parser.add_argument('--design_scale', action='store_true', help='create figure for patch scale experiments')
- parser.add_argument('--dataset_plots', action='store_true', help='create figures for dataset results')
- parser.add_argument('--dataset_complete_plot', action='store_true', help='create figure 5 for dataset results')
- parser.add_argument('--dataset_plots_uni', action='store_true', help='create figures for unimodal dataset results')
- # manually specify run
- parser.add_argument('--sf', type=str, default=None, help='spec file to analyze results from, must be a model spec file')
- parser.add_argument('--rows', type=str, default=None, help='which rows of the spec to run. see documentation. default: all rows')
- parser.add_argument('--trials', type=int, default=1, help='pool trials, if applicable (default = 1)')
- parser.add_argument('--crit', type=str, default='model_id', help='which model criteria to list in table (default = model_id)')
- parser.add_argument('--all', action='store_true', help='print all metrics, default shows limited set')
- parser.add_argument('--clean', action='store_true', help='print only clean metrics')
- # other
- parser.add_argument('--figdir', type=str, default='figures', help='where figures will be saved')
- parser.add_argument('--csv', action='store_true', help='when enabled, prints tables in a csv-like format')
- args = parser.parse_args()
-
- # dataset models
- if args.dataset:
- if args.pt is None:
- for PT in range(6):
- dataset_results(PT)
- else:
- dataset_results(args.pt)
- # figure scripts
- if args.design_type:
- design_type_plot(args.figdir, 'acc')
- design_type_plot(args.figdir, 'asr')
- if args.design_perc:
- design_perc_scale_plot(args.figdir, 'perc')
- if args.design_scale:
- design_perc_scale_plot(args.figdir, 'scale')
- if args.dataset_plots:
- dataset_plots_merged(args.figdir, 'acc')
- dataset_plots_merged(args.figdir, 'asr')
- if args.dataset_complete_plot:
- dataset_complete_plot(args.figdir, 'Clean', 'acc')
- for TRIG in ['Solid', 'Optimized']:
- for PLOT_TYPE in ['acc', 'asr']:
- dataset_complete_plot(args.figdir, TRIG, PLOT_TYPE)
- if args.dataset_plots_uni:
- dataset_plots_merged(args.figdir, 'acc', unimodal=True)
- dataset_plots_merged(args.figdir, 'asr', unimodal=True)
- # use specs to load results
- if args.sf is not None:
- check_results(args.sf, args.rows, args.trials, args.crit, args.all, args.clean)
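The grouped-bar layout in `dataset_complete_plot` above hinges on one line: each detector's bar is drawn at `x[i] + (d_id - 1.5) * width`, which centers four bars of width 0.20 around every model position. A minimal, self-contained sketch of the same matplotlib pattern, using synthetic numbers and generic labels (none of the spec files, metrics, or color settings above are assumed):

```python
import numpy as np
import matplotlib.pyplot as plt

# Synthetic data: 4 series (e.g. detectors) x 4 groups (e.g. models).
groups = ["model-1", "model-2", "model-3", "model-4"]
rng = np.random.default_rng(0)
means = rng.uniform(40, 90, size=(4, len(groups)))
errs = rng.uniform(1, 4, size=(4, len(groups)))

width = 0.20
x = np.arange(len(groups))
fig, ax = plt.subplots(figsize=[10, 6])
for s_id in range(means.shape[0]):
    # Offset each series by (s_id - 1.5) * width so the 4 bars straddle x[i].
    ax.bar(x + (s_id - 1.5) * width, means[s_id], width,
           yerr=2 * errs[s_id], capsize=3, edgecolor="black",
           label=f"series {s_id}")
ax.set_xticks(x)
ax.set_xticklabels(groups, rotation=45, ha="right")
ax.set_ylim(0, 100)
fig.subplots_adjust(bottom=0.25)
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.20), ncol=4, frameon=False)
fig.savefig("grouped_bars_example.jpg")
```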
diff --git a/spaces/CVPR/GFPGAN-example/gfpgan/data/__init__.py b/spaces/CVPR/GFPGAN-example/gfpgan/data/__init__.py
deleted file mode 100644
index 69fd9f9026407c4d185f86b122000485b06fd986..0000000000000000000000000000000000000000
--- a/spaces/CVPR/GFPGAN-example/gfpgan/data/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-from basicsr.utils import scandir
-from os import path as osp
-
-# automatically scan and import dataset modules for registry
-# scan all the files that end with '_dataset.py' under the data folder
-data_folder = osp.dirname(osp.abspath(__file__))
-dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
-# import all the dataset modules
-_dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames]
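The deleted `gfpgan/data/__init__.py` above relies on a common registry trick: import every `*_dataset.py` module at package import time so that their registration decorators run as a side effect. A rough standard-library-only sketch of the same scan-and-import pattern (the `my_pkg.data` package name is a placeholder, and `basicsr.utils.scandir` is replaced with `glob`):

```python
import importlib
from glob import glob
from os import path as osp

# Directory of the package's __init__.py (this snippet would live there).
data_folder = osp.dirname(osp.abspath(__file__))

# Collect module names ending in "_dataset.py" and import them so that any
# registry decorators they contain execute as a side effect of the import.
dataset_filenames = [
    osp.splitext(osp.basename(p))[0]
    for p in glob(osp.join(data_folder, "*_dataset.py"))
]
_dataset_modules = [
    importlib.import_module(f"my_pkg.data.{name}") for name in dataset_filenames
]
```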
diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrt.h b/spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrt.h
deleted file mode 100644
index dcffbee9540d85b7b1c226d6ad3d332876533f8f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrt.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- * Copyright 2013 Filipe RNC Maia
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*-
- * Copyright (c) 2007 David Schultz
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * Adapted from FreeBSD by Filipe Maia :
- * freebsd/lib/msun/src/s_csqrt.c
- */
-
-
-#pragma once
-
-#include <thrust/complex.h>
-#include <thrust/detail/complex/math_private.h>
-#include <cmath>
-
-namespace thrust{
-namespace detail{
-namespace complex{
-
-using thrust::complex;
-
-__host__ __device__ inline
-complex<double> csqrt(const complex<double>& z){
- complex<double> result;
- double a, b;
- double t;
- int scale;
-
- /* We risk spurious overflow for components >= DBL_MAX / (1 + sqrt(2)). */
- const double THRESH = 7.446288774449766337959726e+307;
-
- a = z.real();
- b = z.imag();
-
- /* Handle special cases. */
- if (z == 0.0)
- return (complex<double>(0.0, b));
- if (isinf(b))
- return (complex<double>(infinity<double>(), b));
- if (isnan(a)) {
- t = (b - b) / (b - b); /* raise invalid if b is not a NaN */
- return (complex<double>(a, t)); /* return NaN + NaN i */
- }
- if (isinf(a)) {
- /*
- * csqrt(inf + NaN i) = inf + NaN i
- * csqrt(inf + y i) = inf + 0 i
- * csqrt(-inf + NaN i) = NaN +- inf i
- * csqrt(-inf + y i) = 0 + inf i
- */
- if (signbit(a))
- return (complex<double>(fabs(b - b), copysign(a, b)));
- else
- return (complex<double>(a, copysign(b - b, b)));
- }
- /*
- * The remaining special case (b is NaN) is handled just fine by
- * the normal code path below.
- */
-
- // DBL_MIN*2
- const double low_thresh = 4.450147717014402766180465e-308;
- scale = 0;
-
- if (fabs(a) >= THRESH || fabs(b) >= THRESH) {
- /* Scale to avoid overflow. */
- a *= 0.25;
- b *= 0.25;
- scale = 1;
- }else if (fabs(a) <= low_thresh && fabs(b) <= low_thresh) {
- /* Scale to avoid underflow. */
- a *= 4.0;
- b *= 4.0;
- scale = 2;
- }
-
-
- /* Algorithm 312, CACM vol 10, Oct 1967. */
- if (a >= 0.0) {
- t = sqrt((a + hypot(a, b)) * 0.5);
- result = complex<double>(t, b / (2 * t));
- } else {
- t = sqrt((-a + hypot(a, b)) * 0.5);
- result = complex<double>(fabs(b) / (2 * t), copysign(t, b));
- }
-
- /* Rescale. */
- if (scale == 1)
- return (result * 2.0);
- else if (scale == 2)
- return (result * 0.5);
- else
- return (result);
-}
-
-} // namespace complex
-
-} // namespace detail
-
-template <typename ValueType>
-__host__ __device__
-inline complex<ValueType> sqrt(const complex<ValueType>& z){
- return thrust::polar(std::sqrt(thrust::abs(z)),thrust::arg(z)/ValueType(2));
-}
-
-template <>
-__host__ __device__
-inline complex<double> sqrt(const complex<double>& z){
- return detail::complex::csqrt(z);
-}
-
-} // namespace thrust
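Stripped of its overflow/underflow scaling and IEEE special cases, the routine above is Algorithm 312 (CACM vol. 10, Oct. 1967): for `sqrt(a + bi)` with `a >= 0`, take `t = sqrt((a + hypot(a, b)) / 2)` and return `t + (b / 2t)i`; for `a < 0`, the roles of the real and imaginary parts swap and the result copies the sign of `b`. A small Python check of just that core formula against `cmath.sqrt` (no Thrust or CUDA involved; `z == 0` and non-finite inputs are deliberately excluded because the C++ code handles them separately):

```python
import cmath
import math

def csqrt_alg312(z: complex) -> complex:
    # Algorithm 312 (CACM vol 10, 1967), without the special-case and
    # scaling branches the C++ code above adds for robustness.
    a, b = z.real, z.imag
    if a >= 0.0:
        t = math.sqrt((a + math.hypot(a, b)) * 0.5)
        return complex(t, b / (2 * t))
    else:
        t = math.sqrt((-a + math.hypot(a, b)) * 0.5)
        return complex(abs(b) / (2 * t), math.copysign(t, b))

for z in (3 + 4j, -3 + 4j, -3 - 4j, 1e-3 + 2j):
    assert cmath.isclose(csqrt_alg312(z), cmath.sqrt(z), rel_tol=1e-12)
```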
diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/__init__.py b/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ChallengeHub/Chinese-LangChain/app_modules/utils.py b/spaces/ChallengeHub/Chinese-LangChain/app_modules/utils.py
deleted file mode 100644
index 80a52efb3bd2a5c2bef53af96b033133f3c23304..0000000000000000000000000000000000000000
--- a/spaces/ChallengeHub/Chinese-LangChain/app_modules/utils.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# -*- coding:utf-8 -*-
-from __future__ import annotations
-
-import html
-import logging
-import re
-
-import mdtex2html
-from markdown import markdown
-from pygments import highlight
-from pygments.formatters import HtmlFormatter
-from pygments.lexers import ClassNotFound
-from pygments.lexers import guess_lexer, get_lexer_by_name
-
-from app_modules.presets import *
-
-logging.basicConfig(
- level=logging.INFO,
- format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
-)
-
-
-def markdown_to_html_with_syntax_highlight(md_str):
- def replacer(match):
- lang = match.group(1) or "text"
- code = match.group(2)
- lang = lang.strip()
- # print(1,lang)
- if lang == "text":
- lexer = guess_lexer(code)
- lang = lexer.name
- # print(2,lang)
- try:
- lexer = get_lexer_by_name(lang, stripall=True)
- except ValueError:
- lexer = get_lexer_by_name("python", stripall=True)
- formatter = HtmlFormatter()
- # print(3,lexer.name)
- highlighted_code = highlight(code, lexer, formatter)
-
- return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'
-
- code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
- md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)
-
- html_str = markdown(md_str)
- return html_str
-
-
-def normalize_markdown(md_text: str) -> str:
- lines = md_text.split("\n")
- normalized_lines = []
- inside_list = False
-
- for i, line in enumerate(lines):
- if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
- if not inside_list and i > 0 and lines[i - 1].strip() != "":
- normalized_lines.append("")
- inside_list = True
- normalized_lines.append(line)
- elif inside_list and line.strip() == "":
- if i < len(lines) - 1 and not re.match(
- r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
- ):
- normalized_lines.append(line)
- continue
- else:
- inside_list = False
- normalized_lines.append(line)
-
- return "\n".join(normalized_lines)
-
-
-def convert_mdtext(md_text):
- code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
- inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
- code_blocks = code_block_pattern.findall(md_text)
- non_code_parts = code_block_pattern.split(md_text)[::2]
-
- result = []
- for non_code, code in zip(non_code_parts, code_blocks + [""]):
- if non_code.strip():
- non_code = normalize_markdown(non_code)
- if inline_code_pattern.search(non_code):
- result.append(markdown(non_code, extensions=["tables"]))
- else:
- result.append(mdtex2html.convert(non_code, extensions=["tables"]))
- if code.strip():
- # _, code = detect_language(code) # syntax highlighting temporarily disabled; it breaks on large code blocks
- # code = code.replace("\n\n", "\n") # blank-line stripping temporarily disabled; it breaks on large code blocks
- code = f"\n```{code}\n\n```"
- code = markdown_to_html_with_syntax_highlight(code)
- result.append(code)
- result = "".join(result)
- result += ALREADY_CONVERTED_MARK
- return result
-
-
-def convert_asis(userinput):
- return f"
{html.escape(userinput)}
" + ALREADY_CONVERTED_MARK
-
-
-def detect_converted_mark(userinput):
- if userinput.endswith(ALREADY_CONVERTED_MARK):
- return True
- else:
- return False
-
-
-def detect_language(code):
- if code.startswith("\n"):
- first_line = ""
- else:
- first_line = code.strip().split("\n", 1)[0]
- language = first_line.lower() if first_line else ""
- code_without_language = code[len(first_line):].lstrip() if first_line else code
- return language, code_without_language
-
-
-def convert_to_markdown(text):
- text = text.replace("$", "$")
-
- def replace_leading_tabs_and_spaces(line):
- new_line = []
-
- for char in line:
- if char == "\t":
- new_line.append(" ")
- elif char == " ":
- new_line.append(" ")
- else:
- break
- return "".join(new_line) + line[len(new_line):]
-
- markdown_text = ""
- lines = text.split("\n")
- in_code_block = False
-
- for line in lines:
- if in_code_block is False and line.startswith("```"):
- in_code_block = True
- markdown_text += "```\n"
- elif in_code_block is True and line.startswith("```"):
- in_code_block = False
- markdown_text += "```\n"
- elif in_code_block:
- markdown_text += f"{line}\n"
- else:
- line = replace_leading_tabs_and_spaces(line)
- line = re.sub(r"^(#)", r"\\\1", line)
- markdown_text += f"{line} \n"
-
- return markdown_text
-
-
-def add_language_tag(text):
- def detect_language(code_block):
- try:
- lexer = guess_lexer(code_block)
- return lexer.name.lower()
- except ClassNotFound:
- return ""
-
- code_block_pattern = re.compile(r"(```)(\w*\n[^`]+```)", re.MULTILINE)
-
- def replacement(match):
- code_block = match.group(2)
- if match.group(2).startswith("\n"):
- language = detect_language(code_block)
- if language:
- return f"```{language}{code_block}```"
- else:
- return f"```\n{code_block}```"
- else:
- return match.group(1) + code_block + "```"
-
- text2 = code_block_pattern.sub(replacement, text)
- return text2
-
-
-def delete_last_conversation(chatbot, history):
- if len(chatbot) > 0:
- chatbot.pop()
-
- if len(history) > 0:
- history.pop()
-
- return (
- chatbot,
- history,
- "Delete Done",
- )
-
-
-def reset_state():
- return [], [], "Reset Done"
-
-
-def reset_textbox():
- return gr.update(value=""), ""
-
-
-def cancel_outputing():
- return "Stop Done"
-
-
-def transfer_input(inputs):
- # return everything in one go to reduce latency
- textbox = reset_textbox()
- return (
- inputs,
- gr.update(value=""),
- gr.Button.update(visible=True),
- )
-
-
-class State:
- interrupted = False
-
- def interrupt(self):
- self.interrupted = True
-
- def recover(self):
- self.interrupted = False
-
-
-shared_state = State()
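The key move in `convert_mdtext` above is splitting the input on fenced code blocks with a single lazy regex, so only the prose segments go through `markdown`/`mdtex2html` while the code segments are re-highlighted separately. A minimal sketch of that split on a made-up string (the fence string is built programmatically only so this example does not terminate the surrounding code block):

```python
import re

FENCE = "`" * 3  # a literal triple-backtick fence, built indirectly
code_block_pattern = re.compile(FENCE + r"(.*?)(?:" + FENCE + r"|$)", re.DOTALL)

md_text = f"Intro text.\n{FENCE}python\nprint('hi')\n{FENCE}\nClosing text."

code_blocks = code_block_pattern.findall(md_text)          # captured block bodies
non_code_parts = code_block_pattern.split(md_text)[::2]    # prose between blocks

assert code_blocks == ["python\nprint('hi')\n"]
assert non_code_parts == ["Intro text.\n", "\nClosing text."]
```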
diff --git a/spaces/CikeyQI/meme-api/README.md b/spaces/CikeyQI/meme-api/README.md
deleted file mode 100644
index 6448b446984a8d5f739174fb4de39b97c83b6160..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/meme-api/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Meme Api
-emoji: 🌖
-colorFrom: purple
-colorTo: indigo
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/do/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/do/__init__.py
deleted file mode 100644
index 4128da32c653f4c7416128fcc45081b086e489ef..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/meme-api/meme_generator/memes/do/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from meme_generator import add_meme
-from meme_generator.utils import save_gif
-from PIL.Image import Image as IMG
-from pil_utils import BuildImage
-
-img_dir = Path(__file__).parent / "images"
-
-
-def do(images: List[BuildImage], texts, args):
- self_locs = [(116, -8), (109, 3), (130, -10)]
- user_locs = [(2, 177), (12, 172), (6, 158)]
- self_head = (
- images[0]
- .convert("RGBA")
- .resize((122, 122), keep_ratio=True)
- .circle()
- .rotate(15)
- )
- user_head = (
- images[1]
- .convert("RGBA")
- .resize((112, 112), keep_ratio=True)
- .circle()
- .rotate(90)
- )
- frames: List[IMG] = []
- for i in range(3):
- frame = BuildImage.open(img_dir / f"{i}.png")
- frame.paste(user_head, user_locs[i], alpha=True)
- frame.paste(self_head, self_locs[i], alpha=True)
- frames.append(frame.image)
- return save_gif(frames, 0.05)
-
-
-add_meme("do", do, min_images=2, max_images=2, keywords=["撅", "狠狠地撅"])
diff --git a/spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/README.md b/spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/README.md
deleted file mode 100644
index 1cc0c2fa153af93c8b77f65710ebe92667803e06..0000000000000000000000000000000000000000
--- a/spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stabilityai Stable Diffusion Xl Base 1.0
-emoji: 😻
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/__init__.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/DCandE/rvc-models/vc_infer_pipeline.py b/spaces/DCandE/rvc-models/vc_infer_pipeline.py
deleted file mode 100644
index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000
--- a/spaces/DCandE/rvc-models/vc_infer_pipeline.py
+++ /dev/null
@@ -1,306 +0,0 @@
-import numpy as np, parselmouth, torch, pdb
-from time import time as ttime
-import torch.nn.functional as F
-from config import x_pad, x_query, x_center, x_max
-import scipy.signal as signal
-import pyworld, os, traceback, faiss
-from scipy import signal
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-
-class VC(object):
- def __init__(self, tgt_sr, device, is_half):
- self.sr = 16000 # hubert input sampling rate
- self.window = 160 # samples per frame
- self.t_pad = self.sr * x_pad # padding added before and after each segment
- self.t_pad_tgt = tgt_sr * x_pad
- self.t_pad2 = self.t_pad * 2
- self.t_query = self.sr * x_query # search window on either side of a cut point
- self.t_center = self.sr * x_center # stride between candidate cut points
- self.t_max = self.sr * x_max # duration threshold below which no cut-point search is needed
- self.device = device
- self.is_half = is_half
-
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- if f0_method == "pm":
- f0 = (
- parselmouth.Sound(x, self.sr)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=self.sr,
- f0_ceil=f0_max,
- f0_floor=f0_min,
- frame_period=10,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
- f0 = signal.medfilt(f0, 3)
- f0 *= pow(2, f0_up_key / 12)
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- tf0 = self.sr // self.window # f0 points per second
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
- )
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- f0bak = f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- f0_coarse = np.rint(f0_mel).astype(int)
- return f0_coarse, f0bak # 1-0
-
- def vc(
- self,
- model,
- net_g,
- sid,
- audio0,
- pitch,
- pitchf,
- times,
- index,
- big_npy,
- index_rate,
- ): # ,file_index,file_big_npy
- feats = torch.from_numpy(audio0)
- if self.is_half:
- feats = feats.half()
- else:
- feats = feats.float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = {
- "source": feats.to(self.device),
- "padding_mask": padding_mask,
- "output_layer": 9, # layer 9
- }
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0])
-
- if (
- isinstance(index, type(None)) == False
- and isinstance(big_npy, type(None)) == False
- and index_rate != 0
- ):
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")
- _, I = index.search(npy, 1)
- npy = big_npy[I.squeeze()]
- if self.is_half:
- npy = npy.astype("float16")
- feats = (
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
- + (1 - index_rate) * feats
- )
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- t1 = ttime()
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
- if pitch != None and pitchf != None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
- p_len = torch.tensor([p_len], device=self.device).long()
- with torch.no_grad():
- if pitch != None and pitchf != None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- else:
- audio1 = (
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- del feats, p_len, padding_mask
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- t2 = ttime()
- times[0] += t1 - t0
- times[2] += t2 - t1
- return audio1
-
- def pipeline(
- self,
- model,
- net_g,
- sid,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- f0_file=None,
- ):
- if (
- file_big_npy != ""
- and file_index != ""
- and os.path.exists(file_big_npy) == True
- and os.path.exists(file_index) == True
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- big_npy = np.load(file_big_npy)
- except:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- print("Feature retrieval library doesn't exist or ratio is 0")
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- t1 = ttime()
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
- if hasattr(f0_file, "name") == True:
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float32")
- except:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- t2 = ttime()
- times[1] += t2 - t1
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- s = t
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- pitch[:, t // self.window :] if t is not None else pitch,
- pitchf[:, t // self.window :] if t is not None else pitchf,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- audio_opt = np.concatenate(audio_opt)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
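In `get_f0` above, the raw pitch contour is quantized to coarse codes for the network: `f0_mel = 1127 * ln(1 + f0 / 700)`, then a linear rescale maps the mels of `f0_min = 50` Hz and `f0_max = 1100` Hz onto 1..255, with unvoiced frames (`f0 == 0`) pinned to 1. A standalone NumPy sketch of just that quantization step, using the same constants as the code above:

```python
import numpy as np

def coarse_f0(f0: np.ndarray, f0_min: float = 50.0, f0_max: float = 1100.0) -> np.ndarray:
    """Map an f0 contour in Hz to integer codes in 1..255 (unvoiced frames stay at 1)."""
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)
    f0_mel = 1127 * np.log(1 + f0 / 700)
    # Rescale voiced frames into (1, 255]; unvoiced frames (f0 == 0) keep mel 0.
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > 255] = 255
    return np.rint(f0_mel).astype(np.int64)

print(coarse_f0(np.array([0.0, 50.0, 220.0, 1100.0])))  # [  1   1  60 255]
```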
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GbrImagePlugin.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GbrImagePlugin.py
deleted file mode 100644
index 994a6e8ebb2f0f2e69990a211d7a1ec4f06b7fd1..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GbrImagePlugin.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#
-# The Python Imaging Library
-#
-# load a GIMP brush file
-#
-# History:
-# 96-03-14 fl Created
-# 16-01-08 es Version 2
-#
-# Copyright (c) Secret Labs AB 1997.
-# Copyright (c) Fredrik Lundh 1996.
-# Copyright (c) Eric Soroos 2016.
-#
-# See the README file for information on usage and redistribution.
-#
-#
-# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
-# format documentation.
-#
-# This code Interprets version 1 and 2 .gbr files.
-# Version 1 files are obsolete, and should not be used for new
-# brushes.
-# Version 2 files are saved by GIMP v2.8 (at least)
-# Version 3 files have a format specifier of 18 for 16bit floats in
-# the color depth field. This is currently unsupported by Pillow.
-
-from . import Image, ImageFile
-from ._binary import i32be as i32
-
-
-def _accept(prefix):
- return len(prefix) >= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)
-
-
-##
-# Image plugin for the GIMP brush format.
-
-
-class GbrImageFile(ImageFile.ImageFile):
- format = "GBR"
- format_description = "GIMP brush file"
-
- def _open(self):
- header_size = i32(self.fp.read(4))
- if header_size < 20:
- msg = "not a GIMP brush"
- raise SyntaxError(msg)
- version = i32(self.fp.read(4))
- if version not in (1, 2):
- msg = f"Unsupported GIMP brush version: {version}"
- raise SyntaxError(msg)
-
- width = i32(self.fp.read(4))
- height = i32(self.fp.read(4))
- color_depth = i32(self.fp.read(4))
- if width <= 0 or height <= 0:
- msg = "not a GIMP brush"
- raise SyntaxError(msg)
- if color_depth not in (1, 4):
- msg = f"Unsupported GIMP brush color depth: {color_depth}"
- raise SyntaxError(msg)
-
- if version == 1:
- comment_length = header_size - 20
- else:
- comment_length = header_size - 28
- magic_number = self.fp.read(4)
- if magic_number != b"GIMP":
- msg = "not a GIMP brush, bad magic number"
- raise SyntaxError(msg)
- self.info["spacing"] = i32(self.fp.read(4))
-
- comment = self.fp.read(comment_length)[:-1]
-
- if color_depth == 1:
- self.mode = "L"
- else:
- self.mode = "RGBA"
-
- self._size = width, height
-
- self.info["comment"] = comment
-
- # Image might not be small
- Image._decompression_bomb_check(self.size)
-
- # Data is an uncompressed block of w * h * bytes/pixel
- self._data_size = width * height * color_depth
-
- def load(self):
- if not self.im:
- self.im = Image.core.new(self.mode, self.size)
- self.frombytes(self.fp.read(self._data_size))
- return Image.Image.load(self)
-
-
-#
-# registry
-
-
-Image.register_open(GbrImageFile.format, GbrImageFile, _accept)
-Image.register_extension(GbrImageFile.format, ".gbr")
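Because the plugin above registers both an opener and the `.gbr` extension, GIMP brushes come through the ordinary `Image.open` path once Pillow has imported its plugins. A short usage sketch (`brush.gbr` is a placeholder path to a version-1/2 brush file):

```python
from PIL import Image

# "brush.gbr" is a placeholder; any GIMP v1/v2 brush file would do.
with Image.open("brush.gbr") as im:
    print(im.format)                # "GBR"
    print(im.mode, im.size)         # "L" or "RGBA", (width, height)
    print(im.info.get("comment"))   # brush name stored in the header
    print(im.info.get("spacing"))   # brush spacing field
    im.load()                       # reads the uncompressed pixel block
```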
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/_version.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/_version.py
deleted file mode 100644
index b723056a756af22aaf1a4709c5122bea9fb279ee..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/_version.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# coding: utf-8
-# file generated by setuptools_scm
-# don't change, don't track in version control
-version = '2.8.2'
-version_tuple = (2, 8, 2)
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/merger.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/merger.py
deleted file mode 100644
index c3366cbcdee792c575655a04e188d133bb075297..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/merger.py
+++ /dev/null
@@ -1,1675 +0,0 @@
-"""
-Merge OpenType Layout tables (GDEF / GPOS / GSUB).
-"""
-import os
-import copy
-import enum
-from operator import ior
-import logging
-from fontTools.colorLib.builder import MAX_PAINT_COLR_LAYER_COUNT, LayerReuseCache
-from fontTools.misc import classifyTools
-from fontTools.misc.roundTools import otRound
-from fontTools.misc.treeTools import build_n_ary_tree
-from fontTools.ttLib.tables import otTables as ot
-from fontTools.ttLib.tables import otBase as otBase
-from fontTools.ttLib.tables.otConverters import BaseFixedValue
-from fontTools.ttLib.tables.otTraverse import dfs_base_table
-from fontTools.ttLib.tables.DefaultTable import DefaultTable
-from fontTools.varLib import builder, models, varStore
-from fontTools.varLib.models import nonNone, allNone, allEqual, allEqualTo, subList
-from fontTools.varLib.varStore import VarStoreInstancer
-from functools import reduce
-from fontTools.otlLib.builder import buildSinglePos
-from fontTools.otlLib.optimize.gpos import (
- _compression_level_from_env,
- compact_pair_pos,
-)
-
-log = logging.getLogger("fontTools.varLib.merger")
-
-from .errors import (
- ShouldBeConstant,
- FoundANone,
- MismatchedTypes,
- NotANone,
- LengthsDiffer,
- KeysDiffer,
- InconsistentGlyphOrder,
- InconsistentExtensions,
- InconsistentFormats,
- UnsupportedFormat,
- VarLibMergeError,
-)
-
-
-class Merger(object):
- def __init__(self, font=None):
- self.font = font
- # mergeTables populates this from the parent's master ttfs
- self.ttfs = None
-
- @classmethod
- def merger(celf, clazzes, attrs=(None,)):
- assert celf != Merger, "Subclass Merger instead."
- if "mergers" not in celf.__dict__:
- celf.mergers = {}
- if type(clazzes) in (type, enum.EnumMeta):
- clazzes = (clazzes,)
- if type(attrs) == str:
- attrs = (attrs,)
-
- def wrapper(method):
- assert method.__name__ == "merge"
- done = []
- for clazz in clazzes:
- if clazz in done:
- continue # Support multiple names of a clazz
- done.append(clazz)
- mergers = celf.mergers.setdefault(clazz, {})
- for attr in attrs:
- assert attr not in mergers, (
- "Oops, class '%s' has merge function for '%s' defined already."
- % (clazz.__name__, attr)
- )
- mergers[attr] = method
- return None
-
- return wrapper
-
- @classmethod
- def mergersFor(celf, thing, _default={}):
- typ = type(thing)
-
- for celf in celf.mro():
- mergers = getattr(celf, "mergers", None)
- if mergers is None:
- break
-
- m = celf.mergers.get(typ, None)
- if m is not None:
- return m
-
- return _default
-
- def mergeObjects(self, out, lst, exclude=()):
- if hasattr(out, "ensureDecompiled"):
- out.ensureDecompiled(recurse=False)
- for item in lst:
- if hasattr(item, "ensureDecompiled"):
- item.ensureDecompiled(recurse=False)
- keys = sorted(vars(out).keys())
- if not all(keys == sorted(vars(v).keys()) for v in lst):
- raise KeysDiffer(
- self, expected=keys, got=[sorted(vars(v).keys()) for v in lst]
- )
- mergers = self.mergersFor(out)
- defaultMerger = mergers.get("*", self.__class__.mergeThings)
- try:
- for key in keys:
- if key in exclude:
- continue
- value = getattr(out, key)
- values = [getattr(table, key) for table in lst]
- mergerFunc = mergers.get(key, defaultMerger)
- mergerFunc(self, value, values)
- except VarLibMergeError as e:
- e.stack.append("." + key)
- raise
-
- def mergeLists(self, out, lst):
- if not allEqualTo(out, lst, len):
- raise LengthsDiffer(self, expected=len(out), got=[len(x) for x in lst])
- for i, (value, values) in enumerate(zip(out, zip(*lst))):
- try:
- self.mergeThings(value, values)
- except VarLibMergeError as e:
- e.stack.append("[%d]" % i)
- raise
-
- def mergeThings(self, out, lst):
- if not allEqualTo(out, lst, type):
- raise MismatchedTypes(
- self, expected=type(out).__name__, got=[type(x).__name__ for x in lst]
- )
- mergerFunc = self.mergersFor(out).get(None, None)
- if mergerFunc is not None:
- mergerFunc(self, out, lst)
- elif isinstance(out, enum.Enum):
- # need to special-case Enums as have __dict__ but are not regular 'objects',
- # otherwise mergeObjects/mergeThings get trapped in a RecursionError
- if not allEqualTo(out, lst):
- raise ShouldBeConstant(self, expected=out, got=lst)
- elif hasattr(out, "__dict__"):
- self.mergeObjects(out, lst)
- elif isinstance(out, list):
- self.mergeLists(out, lst)
- else:
- if not allEqualTo(out, lst):
- raise ShouldBeConstant(self, expected=out, got=lst)
-
- def mergeTables(self, font, master_ttfs, tableTags):
- for tag in tableTags:
- if tag not in font:
- continue
- try:
- self.ttfs = master_ttfs
- self.mergeThings(font[tag], [m.get(tag) for m in master_ttfs])
- except VarLibMergeError as e:
- e.stack.append(tag)
- raise
-
-
-#
-# Aligning merger
-#
-class AligningMerger(Merger):
- pass
-
-
-@AligningMerger.merger(ot.GDEF, "GlyphClassDef")
-def merge(merger, self, lst):
- if self is None:
- if not allNone(lst):
- raise NotANone(merger, expected=None, got=lst)
- return
-
- lst = [l.classDefs for l in lst]
- self.classDefs = {}
- # We only care about the .classDefs
- self = self.classDefs
-
- allKeys = set()
- allKeys.update(*[l.keys() for l in lst])
- for k in allKeys:
- allValues = nonNone(l.get(k) for l in lst)
- if not allEqual(allValues):
- raise ShouldBeConstant(
- merger, expected=allValues[0], got=lst, stack=["." + k]
- )
- if not allValues:
- self[k] = None
- else:
- self[k] = allValues[0]
-
-
-def _SinglePosUpgradeToFormat2(self):
- if self.Format == 2:
- return self
-
- ret = ot.SinglePos()
- ret.Format = 2
- ret.Coverage = self.Coverage
- ret.ValueFormat = self.ValueFormat
- ret.Value = [self.Value for _ in ret.Coverage.glyphs]
- ret.ValueCount = len(ret.Value)
-
- return ret
-
-
-def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
- """Takes font and list of glyph lists (must be sorted by glyph id), and returns
- two things:
- - Combined glyph list,
- - If values_lst is None, return input glyph lists, but padded with None when a glyph
- was missing in a list. Otherwise, return values_lst list-of-list, padded with None
- to match combined glyph lists.
- """
- if values_lst is None:
- dict_sets = [set(l) for l in lst]
- else:
- dict_sets = [{g: v for g, v in zip(l, vs)} for l, vs in zip(lst, values_lst)]
- combined = set()
- combined.update(*dict_sets)
-
- sortKey = font.getReverseGlyphMap().__getitem__
- order = sorted(combined, key=sortKey)
- # Make sure all input glyphsets were in proper order
- if not all(sorted(vs, key=sortKey) == vs for vs in lst):
- raise InconsistentGlyphOrder()
- del combined
-
- paddedValues = None
- if values_lst is None:
- padded = [
- [glyph if glyph in dict_set else default for glyph in order]
- for dict_set in dict_sets
- ]
- else:
- assert len(lst) == len(values_lst)
- padded = [
- [dict_set[glyph] if glyph in dict_set else default for glyph in order]
- for dict_set in dict_sets
- ]
- return order, padded
-
-
-@AligningMerger.merger(otBase.ValueRecord)
-def merge(merger, self, lst):
- # Code below sometimes calls us with self being
- # a new object. Copy it from lst and recurse.
- self.__dict__ = lst[0].__dict__.copy()
- merger.mergeObjects(self, lst)
-
-
-@AligningMerger.merger(ot.Anchor)
-def merge(merger, self, lst):
- # Code below sometimes calls us with self being
- # a new object. Copy it from lst and recurse.
- self.__dict__ = lst[0].__dict__.copy()
- merger.mergeObjects(self, lst)
-
-
-def _Lookup_SinglePos_get_effective_value(merger, subtables, glyph):
- for self in subtables:
- if (
- self is None
- or type(self) != ot.SinglePos
- or self.Coverage is None
- or glyph not in self.Coverage.glyphs
- ):
- continue
- if self.Format == 1:
- return self.Value
- elif self.Format == 2:
- return self.Value[self.Coverage.glyphs.index(glyph)]
- else:
- raise UnsupportedFormat(merger, subtable="single positioning lookup")
- return None
-
-
-def _Lookup_PairPos_get_effective_value_pair(
- merger, subtables, firstGlyph, secondGlyph
-):
- for self in subtables:
- if (
- self is None
- or type(self) != ot.PairPos
- or self.Coverage is None
- or firstGlyph not in self.Coverage.glyphs
- ):
- continue
- if self.Format == 1:
- ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)]
- pvr = ps.PairValueRecord
- for rec in pvr: # TODO Speed up
- if rec.SecondGlyph == secondGlyph:
- return rec
- continue
- elif self.Format == 2:
- klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0)
- klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0)
- return self.Class1Record[klass1].Class2Record[klass2]
- else:
- raise UnsupportedFormat(merger, subtable="pair positioning lookup")
- return None
-
-
-@AligningMerger.merger(ot.SinglePos)
-def merge(merger, self, lst):
- self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0)
- if not (len(lst) == 1 or (valueFormat & ~0xF == 0)):
- raise UnsupportedFormat(merger, subtable="single positioning lookup")
-
- # If all have same coverage table and all are format 1,
- coverageGlyphs = self.Coverage.glyphs
- if all(v.Format == 1 for v in lst) and all(
- coverageGlyphs == v.Coverage.glyphs for v in lst
- ):
- self.Value = otBase.ValueRecord(valueFormat, self.Value)
- if valueFormat != 0:
- # If v.Value is None, it means a kerning of 0; we want
- # it to participate in the model still.
- # https://github.com/fonttools/fonttools/issues/3111
- merger.mergeThings(
- self.Value,
- [v.Value if v.Value is not None else otBase.ValueRecord() for v in lst],
- )
- self.ValueFormat = self.Value.getFormat()
- return
-
- # Upgrade everything to Format=2
- self.Format = 2
- lst = [_SinglePosUpgradeToFormat2(v) for v in lst]
-
- # Align them
- glyphs, padded = _merge_GlyphOrders(
- merger.font, [v.Coverage.glyphs for v in lst], [v.Value for v in lst]
- )
-
- self.Coverage.glyphs = glyphs
- self.Value = [otBase.ValueRecord(valueFormat) for _ in glyphs]
- self.ValueCount = len(self.Value)
-
- for i, values in enumerate(padded):
- for j, glyph in enumerate(glyphs):
- if values[j] is not None:
- continue
- # Fill in value from other subtables
- # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness
- # is different between used subtable and current subtable!
- # TODO(behdad) Check and warn if that happens?
- v = _Lookup_SinglePos_get_effective_value(
- merger, merger.lookup_subtables[i], glyph
- )
- if v is None:
- v = otBase.ValueRecord(valueFormat)
- values[j] = v
-
- merger.mergeLists(self.Value, padded)
-
- # Merge everything else; though, there shouldn't be anything else. :)
- merger.mergeObjects(
- self, lst, exclude=("Format", "Coverage", "Value", "ValueCount", "ValueFormat")
- )
- self.ValueFormat = reduce(
- int.__or__, [v.getEffectiveFormat() for v in self.Value], 0
- )
-
-
-@AligningMerger.merger(ot.PairSet)
-def merge(merger, self, lst):
- # Align them
- glyphs, padded = _merge_GlyphOrders(
- merger.font,
- [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
- [vs.PairValueRecord for vs in lst],
- )
-
- self.PairValueRecord = pvrs = []
- for glyph in glyphs:
- pvr = ot.PairValueRecord()
- pvr.SecondGlyph = glyph
- pvr.Value1 = (
- otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None
- )
- pvr.Value2 = (
- otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None
- )
- pvrs.append(pvr)
- self.PairValueCount = len(self.PairValueRecord)
-
- for i, values in enumerate(padded):
- for j, glyph in enumerate(glyphs):
- # Fill in value from other subtables
- v = ot.PairValueRecord()
- v.SecondGlyph = glyph
- if values[j] is not None:
- vpair = values[j]
- else:
- vpair = _Lookup_PairPos_get_effective_value_pair(
- merger, merger.lookup_subtables[i], self._firstGlyph, glyph
- )
- if vpair is None:
- v1, v2 = None, None
- else:
- v1 = getattr(vpair, "Value1", None)
- v2 = getattr(vpair, "Value2", None)
- v.Value1 = (
- otBase.ValueRecord(merger.valueFormat1, src=v1)
- if merger.valueFormat1
- else None
- )
- v.Value2 = (
- otBase.ValueRecord(merger.valueFormat2, src=v2)
- if merger.valueFormat2
- else None
- )
- values[j] = v
- del self._firstGlyph
-
- merger.mergeLists(self.PairValueRecord, padded)
-
-
-def _PairPosFormat1_merge(self, lst, merger):
- assert allEqual(
- [l.ValueFormat2 == 0 for l in lst if l.PairSet]
- ), "Report bug against fonttools."
-
- # Merge everything else; makes sure Format is the same.
- merger.mergeObjects(
- self,
- lst,
- exclude=("Coverage", "PairSet", "PairSetCount", "ValueFormat1", "ValueFormat2"),
- )
-
- empty = ot.PairSet()
- empty.PairValueRecord = []
- empty.PairValueCount = 0
-
- # Align them
- glyphs, padded = _merge_GlyphOrders(
- merger.font,
- [v.Coverage.glyphs for v in lst],
- [v.PairSet for v in lst],
- default=empty,
- )
-
- self.Coverage.glyphs = glyphs
- self.PairSet = [ot.PairSet() for _ in glyphs]
- self.PairSetCount = len(self.PairSet)
- for glyph, ps in zip(glyphs, self.PairSet):
- ps._firstGlyph = glyph
-
- merger.mergeLists(self.PairSet, padded)
-
-
-def _ClassDef_invert(self, allGlyphs=None):
- if isinstance(self, dict):
- classDefs = self
- else:
- classDefs = self.classDefs if self and self.classDefs else {}
- m = max(classDefs.values()) if classDefs else 0
-
- ret = []
- for _ in range(m + 1):
- ret.append(set())
-
- for k, v in classDefs.items():
- ret[v].add(k)
-
- # Class-0 is special. It's "everything else".
- if allGlyphs is None:
- ret[0] = None
- else:
- # Limit all classes to glyphs in allGlyphs.
- # Collect anything without a non-zero class into class=zero.
- ret[0] = class0 = set(allGlyphs)
- for s in ret[1:]:
- s.intersection_update(class0)
- class0.difference_update(s)
-
- return ret
-
-
-def _ClassDef_merge_classify(lst, allGlyphses=None):
- self = ot.ClassDef()
- self.classDefs = classDefs = {}
- allGlyphsesWasNone = allGlyphses is None
- if allGlyphsesWasNone:
- allGlyphses = [None] * len(lst)
-
- classifier = classifyTools.Classifier()
- for classDef, allGlyphs in zip(lst, allGlyphses):
- sets = _ClassDef_invert(classDef, allGlyphs)
- if allGlyphs is None:
- sets = sets[1:]
- classifier.update(sets)
- classes = classifier.getClasses()
-
- if allGlyphsesWasNone:
- classes.insert(0, set())
-
- for i, classSet in enumerate(classes):
- if i == 0:
- continue
- for g in classSet:
- classDefs[g] = i
-
- return self, classes
-
-
-def _PairPosFormat2_align_matrices(self, lst, font, transparent=False):
- matrices = [l.Class1Record for l in lst]
-
- # Align first classes
- self.ClassDef1, classes = _ClassDef_merge_classify(
- [l.ClassDef1 for l in lst], [l.Coverage.glyphs for l in lst]
- )
- self.Class1Count = len(classes)
- new_matrices = []
- for l, matrix in zip(lst, matrices):
- nullRow = None
- coverage = set(l.Coverage.glyphs)
- classDef1 = l.ClassDef1.classDefs
- class1Records = []
- for classSet in classes:
- exemplarGlyph = next(iter(classSet))
- if exemplarGlyph not in coverage:
- # Follow-up to e6125b353e1f54a0280ded5434b8e40d042de69f,
- # Fixes https://github.com/googlei18n/fontmake/issues/470
- # Again, revert 8d441779e5afc664960d848f62c7acdbfc71d7b9
- # when merger becomes selfless.
- nullRow = None
- if nullRow is None:
- nullRow = ot.Class1Record()
- class2records = nullRow.Class2Record = []
- # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f
- for _ in range(l.Class2Count):
- if transparent:
- rec2 = None
- else:
- rec2 = ot.Class2Record()
- rec2.Value1 = (
- otBase.ValueRecord(self.ValueFormat1)
- if self.ValueFormat1
- else None
- )
- rec2.Value2 = (
- otBase.ValueRecord(self.ValueFormat2)
- if self.ValueFormat2
- else None
- )
- class2records.append(rec2)
- rec1 = nullRow
- else:
- klass = classDef1.get(exemplarGlyph, 0)
- rec1 = matrix[klass] # TODO handle out-of-range?
- class1Records.append(rec1)
- new_matrices.append(class1Records)
- matrices = new_matrices
- del new_matrices
-
- # Align second classes
- self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst])
- self.Class2Count = len(classes)
- new_matrices = []
- for l, matrix in zip(lst, matrices):
- classDef2 = l.ClassDef2.classDefs
- class1Records = []
- for rec1old in matrix:
- oldClass2Records = rec1old.Class2Record
- rec1new = ot.Class1Record()
- class2Records = rec1new.Class2Record = []
- for classSet in classes:
- if not classSet: # class=0
- rec2 = oldClass2Records[0]
- else:
- exemplarGlyph = next(iter(classSet))
- klass = classDef2.get(exemplarGlyph, 0)
- rec2 = oldClass2Records[klass]
- class2Records.append(copy.deepcopy(rec2))
- class1Records.append(rec1new)
- new_matrices.append(class1Records)
- matrices = new_matrices
- del new_matrices
-
- return matrices
-
-
-def _PairPosFormat2_merge(self, lst, merger):
- assert allEqual(
- [l.ValueFormat2 == 0 for l in lst if l.Class1Record]
- ), "Report bug against fonttools."
-
- merger.mergeObjects(
- self,
- lst,
- exclude=(
- "Coverage",
- "ClassDef1",
- "Class1Count",
- "ClassDef2",
- "Class2Count",
- "Class1Record",
- "ValueFormat1",
- "ValueFormat2",
- ),
- )
-
- # Align coverages
- glyphs, _ = _merge_GlyphOrders(merger.font, [v.Coverage.glyphs for v in lst])
- self.Coverage.glyphs = glyphs
-
- # Currently, if the coverage of PairPosFormat2 subtables are different,
- # we do NOT bother walking down the subtable list when filling in new
- # rows for alignment. As such, this is only correct if current subtable
- # is the last subtable in the lookup. Ensure that.
- #
- # Note that our canonicalization process merges trailing PairPosFormat2's,
- # so in reality this is rare.
- for l, subtables in zip(lst, merger.lookup_subtables):
- if l.Coverage.glyphs != glyphs:
- assert l == subtables[-1]
-
- matrices = _PairPosFormat2_align_matrices(self, lst, merger.font)
-
- self.Class1Record = list(matrices[0]) # TODO move merger to be selfless
- merger.mergeLists(self.Class1Record, matrices)
-
-
-@AligningMerger.merger(ot.PairPos)
-def merge(merger, self, lst):
- merger.valueFormat1 = self.ValueFormat1 = reduce(
- int.__or__, [l.ValueFormat1 for l in lst], 0
- )
- merger.valueFormat2 = self.ValueFormat2 = reduce(
- int.__or__, [l.ValueFormat2 for l in lst], 0
- )
-
- if self.Format == 1:
- _PairPosFormat1_merge(self, lst, merger)
- elif self.Format == 2:
- _PairPosFormat2_merge(self, lst, merger)
- else:
- raise UnsupportedFormat(merger, subtable="pair positioning lookup")
-
- del merger.valueFormat1, merger.valueFormat2
-
- # Now examine the list of value records, and update to the union of format values,
- # as merge might have created new values.
- vf1 = 0
- vf2 = 0
- if self.Format == 1:
- for pairSet in self.PairSet:
- for pairValueRecord in pairSet.PairValueRecord:
- pv1 = getattr(pairValueRecord, "Value1", None)
- if pv1 is not None:
- vf1 |= pv1.getFormat()
- pv2 = getattr(pairValueRecord, "Value2", None)
- if pv2 is not None:
- vf2 |= pv2.getFormat()
- elif self.Format == 2:
- for class1Record in self.Class1Record:
- for class2Record in class1Record.Class2Record:
- pv1 = getattr(class2Record, "Value1", None)
- if pv1 is not None:
- vf1 |= pv1.getFormat()
- pv2 = getattr(class2Record, "Value2", None)
- if pv2 is not None:
- vf2 |= pv2.getFormat()
- self.ValueFormat1 = vf1
- self.ValueFormat2 = vf2
-
-
-def _MarkBasePosFormat1_merge(self, lst, merger, Mark="Mark", Base="Base"):
- self.ClassCount = max(l.ClassCount for l in lst)
-
- MarkCoverageGlyphs, MarkRecords = _merge_GlyphOrders(
- merger.font,
- [getattr(l, Mark + "Coverage").glyphs for l in lst],
- [getattr(l, Mark + "Array").MarkRecord for l in lst],
- )
- getattr(self, Mark + "Coverage").glyphs = MarkCoverageGlyphs
-
- BaseCoverageGlyphs, BaseRecords = _merge_GlyphOrders(
- merger.font,
- [getattr(l, Base + "Coverage").glyphs for l in lst],
- [getattr(getattr(l, Base + "Array"), Base + "Record") for l in lst],
- )
- getattr(self, Base + "Coverage").glyphs = BaseCoverageGlyphs
-
- # MarkArray
- records = []
- for g, glyphRecords in zip(MarkCoverageGlyphs, zip(*MarkRecords)):
- allClasses = [r.Class for r in glyphRecords if r is not None]
-
- # TODO Right now we require that all marks have same class in
- # all masters that cover them. This is not required.
- #
- # We can relax that by just requiring that all marks that have
- # the same class in a master, have the same class in every other
- # master. Indeed, if, say, a sparse master only covers one mark,
- # that mark probably will get class 0, which would possibly be
- # different from its class in other masters.
- #
- # We can even go further and reclassify marks to support any
- # input. But, since, it's unlikely that two marks being both,
- # say, "top" in one master, and one being "top" and other being
- # "top-right" in another master, we shouldn't do that, as any
- # failures in that case will probably signify mistakes in the
- # input masters.
-
- if not allEqual(allClasses):
- raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses)
- else:
- rec = ot.MarkRecord()
- rec.Class = allClasses[0]
- allAnchors = [None if r is None else r.MarkAnchor for r in glyphRecords]
- if allNone(allAnchors):
- anchor = None
- else:
- anchor = ot.Anchor()
- anchor.Format = 1
- merger.mergeThings(anchor, allAnchors)
- rec.MarkAnchor = anchor
- records.append(rec)
- array = ot.MarkArray()
- array.MarkRecord = records
- array.MarkCount = len(records)
- setattr(self, Mark + "Array", array)
-
- # BaseArray
- records = []
- for g, glyphRecords in zip(BaseCoverageGlyphs, zip(*BaseRecords)):
- if allNone(glyphRecords):
- rec = None
- else:
- rec = getattr(ot, Base + "Record")()
- anchors = []
- setattr(rec, Base + "Anchor", anchors)
- glyphAnchors = [
- [] if r is None else getattr(r, Base + "Anchor") for r in glyphRecords
- ]
- for l in glyphAnchors:
- l.extend([None] * (self.ClassCount - len(l)))
- for allAnchors in zip(*glyphAnchors):
- if allNone(allAnchors):
- anchor = None
- else:
- anchor = ot.Anchor()
- anchor.Format = 1
- merger.mergeThings(anchor, allAnchors)
- anchors.append(anchor)
- records.append(rec)
- array = getattr(ot, Base + "Array")()
- setattr(array, Base + "Record", records)
- setattr(array, Base + "Count", len(records))
- setattr(self, Base + "Array", array)
-
-
-@AligningMerger.merger(ot.MarkBasePos)
-def merge(merger, self, lst):
- if not allEqualTo(self.Format, (l.Format for l in lst)):
- raise InconsistentFormats(
- merger,
- subtable="mark-to-base positioning lookup",
- expected=self.Format,
- got=[l.Format for l in lst],
- )
- if self.Format == 1:
- _MarkBasePosFormat1_merge(self, lst, merger)
- else:
- raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup")
-
-
-@AligningMerger.merger(ot.MarkMarkPos)
-def merge(merger, self, lst):
- if not allEqualTo(self.Format, (l.Format for l in lst)):
- raise InconsistentFormats(
- merger,
- subtable="mark-to-mark positioning lookup",
- expected=self.Format,
- got=[l.Format for l in lst],
- )
- if self.Format == 1:
- _MarkBasePosFormat1_merge(self, lst, merger, "Mark1", "Mark2")
- else:
- raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup")
-
-
-def _PairSet_flatten(lst, font):
- self = ot.PairSet()
- self.Coverage = ot.Coverage()
-
- # Align them
- glyphs, padded = _merge_GlyphOrders(
- font,
- [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
- [vs.PairValueRecord for vs in lst],
- )
-
- self.Coverage.glyphs = glyphs
- self.PairValueRecord = pvrs = []
- for values in zip(*padded):
- for v in values:
- if v is not None:
- pvrs.append(v)
- break
- else:
- assert False
- self.PairValueCount = len(self.PairValueRecord)
-
- return self
-
-
-def _Lookup_PairPosFormat1_subtables_flatten(lst, font):
- assert allEqual(
- [l.ValueFormat2 == 0 for l in lst if l.PairSet]
- ), "Report bug against fonttools."
-
- self = ot.PairPos()
- self.Format = 1
- self.Coverage = ot.Coverage()
- self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
- self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
-
- # Align them
- glyphs, padded = _merge_GlyphOrders(
- font, [v.Coverage.glyphs for v in lst], [v.PairSet for v in lst]
- )
-
- self.Coverage.glyphs = glyphs
- self.PairSet = [
- _PairSet_flatten([v for v in values if v is not None], font)
- for values in zip(*padded)
- ]
- self.PairSetCount = len(self.PairSet)
- return self
-
-
-def _Lookup_PairPosFormat2_subtables_flatten(lst, font):
- assert allEqual(
- [l.ValueFormat2 == 0 for l in lst if l.Class1Record]
- ), "Report bug against fonttools."
-
- self = ot.PairPos()
- self.Format = 2
- self.Coverage = ot.Coverage()
- self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
- self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)
-
- # Align them
- glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst])
- self.Coverage.glyphs = glyphs
-
- matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True)
-
- matrix = self.Class1Record = []
- for rows in zip(*matrices):
- row = ot.Class1Record()
- matrix.append(row)
- row.Class2Record = []
- row = row.Class2Record
- for cols in zip(*list(r.Class2Record for r in rows)):
- col = next(iter(c for c in cols if c is not None))
- row.append(col)
-
- return self
-
-
-def _Lookup_PairPos_subtables_canonicalize(lst, font):
- """Merge multiple Format1 subtables at the beginning of lst,
- and merge multiple consecutive Format2 subtables that have the same
- Class2 (ie. were split because of offset overflows). Returns new list."""
- lst = list(lst)
-
- l = len(lst)
- i = 0
- while i < l and lst[i].Format == 1:
- i += 1
- lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)]
-
- l = len(lst)
- i = l
- while i > 0 and lst[i - 1].Format == 2:
- i -= 1
- lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)]
-
- return lst
-
-
-def _Lookup_SinglePos_subtables_flatten(lst, font, min_inclusive_rec_format):
- glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst], None)
- num_glyphs = len(glyphs)
- new = ot.SinglePos()
- new.Format = 2
- new.ValueFormat = min_inclusive_rec_format
- new.Coverage = ot.Coverage()
- new.Coverage.glyphs = glyphs
- new.ValueCount = num_glyphs
- new.Value = [None] * num_glyphs
- for singlePos in lst:
- if singlePos.Format == 1:
- val_rec = singlePos.Value
- for gname in singlePos.Coverage.glyphs:
- i = glyphs.index(gname)
- new.Value[i] = copy.deepcopy(val_rec)
- elif singlePos.Format == 2:
- for j, gname in enumerate(singlePos.Coverage.glyphs):
- val_rec = singlePos.Value[j]
- i = glyphs.index(gname)
- new.Value[i] = copy.deepcopy(val_rec)
- return [new]
-
-
-@AligningMerger.merger(ot.Lookup)
-def merge(merger, self, lst):
- subtables = merger.lookup_subtables = [l.SubTable for l in lst]
-
- # Remove Extension subtables
- for l, sts in list(zip(lst, subtables)) + [(self, self.SubTable)]:
- if not sts:
- continue
- if sts[0].__class__.__name__.startswith("Extension"):
- if not allEqual([st.__class__ for st in sts]):
- raise InconsistentExtensions(
- merger,
- expected="Extension",
- got=[st.__class__.__name__ for st in sts],
- )
- if not allEqual([st.ExtensionLookupType for st in sts]):
- raise InconsistentExtensions(merger)
- l.LookupType = sts[0].ExtensionLookupType
- new_sts = [st.ExtSubTable for st in sts]
- del sts[:]
- sts.extend(new_sts)
-
- isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos)
-
- if isPairPos:
- # AFDKO and feaLib sometimes generate two Format1 subtables instead of one.
- # Merge those before continuing.
- # https://github.com/fonttools/fonttools/issues/719
- self.SubTable = _Lookup_PairPos_subtables_canonicalize(
- self.SubTable, merger.font
- )
- subtables = merger.lookup_subtables = [
- _Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables
- ]
- else:
- isSinglePos = self.SubTable and isinstance(self.SubTable[0], ot.SinglePos)
- if isSinglePos:
- numSubtables = [len(st) for st in subtables]
- if not all([nums == numSubtables[0] for nums in numSubtables]):
- # Flatten list of SinglePos subtables to single Format 2 subtable,
- # with all value records set to the rec format type.
- # We use buildSinglePos() to optimize the lookup after merging.
- valueFormatList = [t.ValueFormat for st in subtables for t in st]
- # Find the minimum value record that can accomodate all the singlePos subtables.
- mirf = reduce(ior, valueFormatList)
- self.SubTable = _Lookup_SinglePos_subtables_flatten(
- self.SubTable, merger.font, mirf
- )
- subtables = merger.lookup_subtables = [
- _Lookup_SinglePos_subtables_flatten(st, merger.font, mirf)
- for st in subtables
- ]
- flattened = True
- else:
- flattened = False
-
- merger.mergeLists(self.SubTable, subtables)
- self.SubTableCount = len(self.SubTable)
-
- if isPairPos:
- # If format-1 subtable created during canonicalization is empty, remove it.
- assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1
- if not self.SubTable[0].Coverage.glyphs:
- self.SubTable.pop(0)
- self.SubTableCount -= 1
-
- # If format-2 subtable created during canonicalization is empty, remove it.
- assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2
- if not self.SubTable[-1].Coverage.glyphs:
- self.SubTable.pop(-1)
- self.SubTableCount -= 1
-
- # Compact the merged subtables
- # This is a good moment to do it because the compaction should create
- # smaller subtables, which may prevent overflows from happening.
- # Keep reading the value from the ENV until ufo2ft switches to the config system
- level = merger.font.cfg.get(
- "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
- default=_compression_level_from_env(),
- )
- if level != 0:
- log.info("Compacting GPOS...")
- self.SubTable = compact_pair_pos(merger.font, level, self.SubTable)
- self.SubTableCount = len(self.SubTable)
-
- elif isSinglePos and flattened:
- singlePosTable = self.SubTable[0]
- glyphs = singlePosTable.Coverage.glyphs
- # We know that singlePosTable is Format 2, as this is set
- # in _Lookup_SinglePos_subtables_flatten.
- singlePosMapping = {
- gname: valRecord for gname, valRecord in zip(glyphs, singlePosTable.Value)
- }
- self.SubTable = buildSinglePos(
- singlePosMapping, merger.font.getReverseGlyphMap()
- )
- merger.mergeObjects(self, lst, exclude=["SubTable", "SubTableCount"])
-
- del merger.lookup_subtables
-
-
-#
-# InstancerMerger
-#
-
-
-class InstancerMerger(AligningMerger):
- """A merger that takes multiple master fonts, and instantiates
- an instance."""
-
- def __init__(self, font, model, location):
- Merger.__init__(self, font)
- self.model = model
- self.location = location
- self.scalars = model.getScalars(location)
-
-
-@InstancerMerger.merger(ot.CaretValue)
-def merge(merger, self, lst):
- assert self.Format == 1
- Coords = [a.Coordinate for a in lst]
- model = merger.model
- scalars = merger.scalars
- self.Coordinate = otRound(model.interpolateFromMastersAndScalars(Coords, scalars))
-
-
-@InstancerMerger.merger(ot.Anchor)
-def merge(merger, self, lst):
- assert self.Format == 1
- XCoords = [a.XCoordinate for a in lst]
- YCoords = [a.YCoordinate for a in lst]
- model = merger.model
- scalars = merger.scalars
- self.XCoordinate = otRound(model.interpolateFromMastersAndScalars(XCoords, scalars))
- self.YCoordinate = otRound(model.interpolateFromMastersAndScalars(YCoords, scalars))
-
-
-@InstancerMerger.merger(otBase.ValueRecord)
-def merge(merger, self, lst):
- model = merger.model
- scalars = merger.scalars
- # TODO Handle differing valueformats
- for name, tableName in [
- ("XAdvance", "XAdvDevice"),
- ("YAdvance", "YAdvDevice"),
- ("XPlacement", "XPlaDevice"),
- ("YPlacement", "YPlaDevice"),
- ]:
- assert not hasattr(self, tableName)
-
- if hasattr(self, name):
- values = [getattr(a, name, 0) for a in lst]
- value = otRound(model.interpolateFromMastersAndScalars(values, scalars))
- setattr(self, name, value)
-
-
-#
-# MutatorMerger
-#
-
-
-class MutatorMerger(AligningMerger):
- """A merger that takes a variable font, and instantiates
- an instance. While there's no "merging" to be done per se,
- the operation can benefit from many operations that the
- aligning merger does."""
-
- def __init__(self, font, instancer, deleteVariations=True):
- Merger.__init__(self, font)
- self.instancer = instancer
- self.deleteVariations = deleteVariations
-
-
-@MutatorMerger.merger(ot.CaretValue)
-def merge(merger, self, lst):
- # Hack till we become selfless.
- self.__dict__ = lst[0].__dict__.copy()
-
- if self.Format != 3:
- return
-
- instancer = merger.instancer
- dev = self.DeviceTable
- if merger.deleteVariations:
- del self.DeviceTable
- if dev:
- assert dev.DeltaFormat == 0x8000
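-        # DeltaFormat 0x8000 marks a VariationIndex table: StartSize and EndSize
-        # hold the outer and inner delta-set indices, combined here into a
-        # single 32-bit variation index.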
- varidx = (dev.StartSize << 16) + dev.EndSize
- delta = otRound(instancer[varidx])
- self.Coordinate += delta
-
- if merger.deleteVariations:
- self.Format = 1
-
-
-@MutatorMerger.merger(ot.Anchor)
-def merge(merger, self, lst):
- # Hack till we become selfless.
- self.__dict__ = lst[0].__dict__.copy()
-
- if self.Format != 3:
- return
-
- instancer = merger.instancer
- for v in "XY":
- tableName = v + "DeviceTable"
- if not hasattr(self, tableName):
- continue
- dev = getattr(self, tableName)
- if merger.deleteVariations:
- delattr(self, tableName)
- if dev is None:
- continue
-
- assert dev.DeltaFormat == 0x8000
- varidx = (dev.StartSize << 16) + dev.EndSize
- delta = otRound(instancer[varidx])
-
- attr = v + "Coordinate"
- setattr(self, attr, getattr(self, attr) + delta)
-
- if merger.deleteVariations:
- self.Format = 1
-
-
-@MutatorMerger.merger(otBase.ValueRecord)
-def merge(merger, self, lst):
- # Hack till we become selfless.
- self.__dict__ = lst[0].__dict__.copy()
-
- instancer = merger.instancer
- for name, tableName in [
- ("XAdvance", "XAdvDevice"),
- ("YAdvance", "YAdvDevice"),
- ("XPlacement", "XPlaDevice"),
- ("YPlacement", "YPlaDevice"),
- ]:
- if not hasattr(self, tableName):
- continue
- dev = getattr(self, tableName)
- if merger.deleteVariations:
- delattr(self, tableName)
- if dev is None:
- continue
-
- assert dev.DeltaFormat == 0x8000
- varidx = (dev.StartSize << 16) + dev.EndSize
- delta = otRound(instancer[varidx])
-
- setattr(self, name, getattr(self, name, 0) + delta)
-
-
-#
-# VariationMerger
-#
-
-
-class VariationMerger(AligningMerger):
- """A merger that takes multiple master fonts, and builds a
- variable font."""
-
- def __init__(self, model, axisTags, font):
- Merger.__init__(self, font)
- self.store_builder = varStore.OnlineVarStoreBuilder(axisTags)
- self.setModel(model)
-
- def setModel(self, model):
- self.model = model
- self.store_builder.setModel(model)
-
- def mergeThings(self, out, lst):
- masterModel = None
- origTTFs = None
- if None in lst:
- if allNone(lst):
- if out is not None:
- raise FoundANone(self, got=lst)
- return
-
- # temporarily subset the list of master ttfs to the ones for which
- # master values are not None
- origTTFs = self.ttfs
- if self.ttfs:
- self.ttfs = subList([v is not None for v in lst], self.ttfs)
-
- masterModel = self.model
- model, lst = masterModel.getSubModel(lst)
- self.setModel(model)
-
- super(VariationMerger, self).mergeThings(out, lst)
-
- if masterModel:
- self.setModel(masterModel)
- if origTTFs:
- self.ttfs = origTTFs
-
-
-def buildVarDevTable(store_builder, master_values):
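-    # Returns (default value, Device table). If all masters agree, the value is
-    # constant and no Device table is needed; otherwise the master values are
-    # stored in the VarStore and a VariationIndex Device table referencing the
-    # resulting delta-set index is built.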
- if allEqual(master_values):
- return master_values[0], None
- base, varIdx = store_builder.storeMasters(master_values)
- return base, builder.buildVarDevTable(varIdx)
-
-
-@VariationMerger.merger(ot.BaseCoord)
-def merge(merger, self, lst):
- if self.Format != 1:
- raise UnsupportedFormat(merger, subtable="a baseline coordinate")
- self.Coordinate, DeviceTable = buildVarDevTable(
- merger.store_builder, [a.Coordinate for a in lst]
- )
- if DeviceTable:
- self.Format = 3
- self.DeviceTable = DeviceTable
-
-
-@VariationMerger.merger(ot.CaretValue)
-def merge(merger, self, lst):
- if self.Format != 1:
- raise UnsupportedFormat(merger, subtable="a caret")
- self.Coordinate, DeviceTable = buildVarDevTable(
- merger.store_builder, [a.Coordinate for a in lst]
- )
- if DeviceTable:
- self.Format = 3
- self.DeviceTable = DeviceTable
-
-
-@VariationMerger.merger(ot.Anchor)
-def merge(merger, self, lst):
- if self.Format != 1:
- raise UnsupportedFormat(merger, subtable="an anchor")
- self.XCoordinate, XDeviceTable = buildVarDevTable(
- merger.store_builder, [a.XCoordinate for a in lst]
- )
- self.YCoordinate, YDeviceTable = buildVarDevTable(
- merger.store_builder, [a.YCoordinate for a in lst]
- )
- if XDeviceTable or YDeviceTable:
- self.Format = 3
- self.XDeviceTable = XDeviceTable
- self.YDeviceTable = YDeviceTable
-
-
-@VariationMerger.merger(otBase.ValueRecord)
-def merge(merger, self, lst):
- for name, tableName in [
- ("XAdvance", "XAdvDevice"),
- ("YAdvance", "YAdvDevice"),
- ("XPlacement", "XPlaDevice"),
- ("YPlacement", "YPlaDevice"),
- ]:
- if hasattr(self, name):
- value, deviceTable = buildVarDevTable(
- merger.store_builder, [getattr(a, name, 0) for a in lst]
- )
- setattr(self, name, value)
- if deviceTable:
- setattr(self, tableName, deviceTable)
-
-
-class COLRVariationMerger(VariationMerger):
- """A specialized VariationMerger that takes multiple master fonts containing
- COLRv1 tables, and builds a variable COLR font.
-
- COLR tables are special in that variable subtables can be associated with
- multiple delta-set indices (via VarIndexBase).
- They also contain tables that must change their type (not simply the Format)
- as they become variable (e.g. Affine2x3 -> VarAffine2x3) so this merger takes
- care of that too.
- """
-
- def __init__(self, model, axisTags, font, allowLayerReuse=True):
- VariationMerger.__init__(self, model, axisTags, font)
- # maps {tuple(varIdxes): VarIndexBase} to facilitate reuse of VarIndexBase
- # between variable tables with same varIdxes.
- self.varIndexCache = {}
- # flat list of all the varIdxes generated while merging
- self.varIdxes = []
- # set of id()s of the subtables that contain variations after merging
- # and need to be upgraded to the associated VarType.
- self.varTableIds = set()
- # we keep these around for rebuilding a LayerList while merging PaintColrLayers
- self.layers = []
- self.layerReuseCache = None
- if allowLayerReuse:
- self.layerReuseCache = LayerReuseCache()
- # flag to ensure BaseGlyphList is fully merged before LayerList gets processed
- self._doneBaseGlyphs = False
-
- def mergeTables(self, font, master_ttfs, tableTags=("COLR",)):
- if "COLR" in tableTags and "COLR" in font:
- # The merger modifies the destination COLR table in-place. If this contains
- # multiple PaintColrLayers referencing the same layers from LayerList, it's
- # a problem because we may risk modifying the same paint more than once, or
- # worse, fail while attempting to do that.
- # We don't know whether the master COLR table was built with layer reuse
- # disabled, thus to be safe we rebuild its LayerList so that it contains only
- # unique layers referenced from non-overlapping PaintColrLayers throughout
- # the base paint graphs.
- self.expandPaintColrLayers(font["COLR"].table)
- VariationMerger.mergeTables(self, font, master_ttfs, tableTags)
-
- def checkFormatEnum(self, out, lst, validate=lambda _: True):
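-        # Validate the destination table's Format against its format enum (and
-        # the optional predicate), then require every master to use that same
-        # format; raises UnsupportedFormat or InconsistentFormats otherwise.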
- fmt = out.Format
- formatEnum = out.formatEnum
- ok = False
- try:
- fmt = formatEnum(fmt)
- except ValueError:
- pass
- else:
- ok = validate(fmt)
- if not ok:
- raise UnsupportedFormat(self, subtable=type(out).__name__, value=fmt)
- expected = fmt
- got = []
- for v in lst:
- fmt = getattr(v, "Format", None)
- try:
- fmt = formatEnum(fmt)
- except ValueError:
- pass
- got.append(fmt)
- if not allEqualTo(expected, got):
- raise InconsistentFormats(
- self,
- subtable=type(out).__name__,
- expected=expected,
- got=got,
- )
- return expected
-
- def mergeSparseDict(self, out, lst):
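-        # Merge dict values key by key; masters may omit a key entirely, in
-        # which case None is passed through for that master (sparse merge).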
- for k in out.keys():
- try:
- self.mergeThings(out[k], [v.get(k) for v in lst])
- except VarLibMergeError as e:
- e.stack.append(f"[{k!r}]")
- raise
-
- def mergeAttrs(self, out, lst, attrs):
- for attr in attrs:
- value = getattr(out, attr)
- values = [getattr(item, attr) for item in lst]
- try:
- self.mergeThings(value, values)
- except VarLibMergeError as e:
- e.stack.append(f".{attr}")
- raise
-
- def storeMastersForAttr(self, out, lst, attr):
- master_values = [getattr(item, attr) for item in lst]
-
- # VarStore treats deltas for fixed-size floats as integers, so we
- # must convert master values to int before storing them in the builder
- # then back to float.
- is_fixed_size_float = False
- conv = out.getConverterByName(attr)
- if isinstance(conv, BaseFixedValue):
- is_fixed_size_float = True
- master_values = [conv.toInt(v) for v in master_values]
-
- baseValue = master_values[0]
- varIdx = ot.NO_VARIATION_INDEX
- if not allEqual(master_values):
- baseValue, varIdx = self.store_builder.storeMasters(master_values)
-
- if is_fixed_size_float:
- baseValue = conv.fromInt(baseValue)
-
- return baseValue, varIdx
-
- def storeVariationIndices(self, varIdxes) -> int:
- # try to reuse an existing VarIndexBase for the same varIdxes, or else
- # create a new one
- key = tuple(varIdxes)
- varIndexBase = self.varIndexCache.get(key)
-
- if varIndexBase is None:
-            # scan for a full match anywhere in self.varIdxes
- for i in range(len(self.varIdxes) - len(varIdxes) + 1):
- if self.varIdxes[i : i + len(varIdxes)] == varIdxes:
- self.varIndexCache[key] = varIndexBase = i
- break
-
- if varIndexBase is None:
-            # try to find a partial match at the end of self.varIdxes
- for n in range(len(varIdxes) - 1, 0, -1):
- if self.varIdxes[-n:] == varIdxes[:n]:
- varIndexBase = len(self.varIdxes) - n
- self.varIndexCache[key] = varIndexBase
- self.varIdxes.extend(varIdxes[n:])
- break
-
- if varIndexBase is None:
- # no match found, append at the end
- self.varIndexCache[key] = varIndexBase = len(self.varIdxes)
- self.varIdxes.extend(varIdxes)
-
- return varIndexBase
-
- def mergeVariableAttrs(self, out, lst, attrs) -> int:
- varIndexBase = ot.NO_VARIATION_INDEX
- varIdxes = []
- for attr in attrs:
- baseValue, varIdx = self.storeMastersForAttr(out, lst, attr)
- setattr(out, attr, baseValue)
- varIdxes.append(varIdx)
-
- if any(v != ot.NO_VARIATION_INDEX for v in varIdxes):
- varIndexBase = self.storeVariationIndices(varIdxes)
-
- return varIndexBase
-
- @classmethod
- def convertSubTablesToVarType(cls, table):
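-        # Walk the subtable graph depth-first and replace every subtable whose
-        # type defines a VarType with an instance of that variable counterpart,
-        # carrying over the existing attributes.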
- for path in dfs_base_table(
- table,
- skip_root=True,
- predicate=lambda path: (
- getattr(type(path[-1].value), "VarType", None) is not None
- ),
- ):
- st = path[-1]
- subTable = st.value
- varType = type(subTable).VarType
- newSubTable = varType()
- newSubTable.__dict__.update(subTable.__dict__)
- newSubTable.populateDefaults()
- parent = path[-2].value
- if st.index is not None:
- getattr(parent, st.name)[st.index] = newSubTable
- else:
- setattr(parent, st.name, newSubTable)
-
- @staticmethod
- def expandPaintColrLayers(colr):
- """Rebuild LayerList without PaintColrLayers reuse.
-
-        Each base paint graph is fully DFS-traversed (with the exception of PaintColrGlyph,
-        which is irrelevant for this); any layers referenced via PaintColrLayers are
- collected into a new LayerList and duplicated when reuse is detected, to ensure
- that all paints are distinct objects at the end of the process.
- PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap
- is left. Also, any consecutively nested PaintColrLayers are flattened.
- The COLR table's LayerList is replaced with the new unique layers.
- A side effect is also that any layer from the old LayerList which is not
- referenced by any PaintColrLayers is dropped.
- """
- if not colr.LayerList:
- # if no LayerList, there's nothing to expand
- return
- uniqueLayerIDs = set()
- newLayerList = []
- for rec in colr.BaseGlyphList.BaseGlyphPaintRecord:
- frontier = [rec.Paint]
- while frontier:
- paint = frontier.pop()
- if paint.Format == ot.PaintFormat.PaintColrGlyph:
- # don't traverse these, we treat them as constant for merging
- continue
- elif paint.Format == ot.PaintFormat.PaintColrLayers:
- # de-treeify any nested PaintColrLayers, append unique copies to
- # the new layer list and update PaintColrLayers index/count
- children = list(_flatten_layers(paint, colr))
- first_layer_index = len(newLayerList)
- for layer in children:
- if id(layer) in uniqueLayerIDs:
- layer = copy.deepcopy(layer)
- assert id(layer) not in uniqueLayerIDs
- newLayerList.append(layer)
- uniqueLayerIDs.add(id(layer))
- paint.FirstLayerIndex = first_layer_index
- paint.NumLayers = len(children)
- else:
- children = paint.getChildren(colr)
- frontier.extend(reversed(children))
- # sanity check all the new layers are distinct objects
- assert len(newLayerList) == len(uniqueLayerIDs)
- colr.LayerList.Paint = newLayerList
- colr.LayerList.LayerCount = len(newLayerList)
-
-
-@COLRVariationMerger.merger(ot.BaseGlyphList)
-def merge(merger, self, lst):
- # ignore BaseGlyphCount, allow sparse glyph sets across masters
- out = {rec.BaseGlyph: rec for rec in self.BaseGlyphPaintRecord}
- masters = [{rec.BaseGlyph: rec for rec in m.BaseGlyphPaintRecord} for m in lst]
-
- for i, g in enumerate(out.keys()):
- try:
- # missing base glyphs don't participate in the merge
- merger.mergeThings(out[g], [v.get(g) for v in masters])
- except VarLibMergeError as e:
- e.stack.append(f".BaseGlyphPaintRecord[{i}]")
- e.cause["location"] = f"base glyph {g!r}"
- raise
-
- merger._doneBaseGlyphs = True
-
-
-@COLRVariationMerger.merger(ot.LayerList)
-def merge(merger, self, lst):
- # nothing to merge for LayerList, assuming we have already merged all PaintColrLayers
- # found while traversing the paint graphs rooted at BaseGlyphPaintRecords.
- assert merger._doneBaseGlyphs, "BaseGlyphList must be merged before LayerList"
- # Simply flush the final list of layers and go home.
- self.LayerCount = len(merger.layers)
- self.Paint = merger.layers
-
-
-def _flatten_layers(root, colr):
- assert root.Format == ot.PaintFormat.PaintColrLayers
- for paint in root.getChildren(colr):
- if paint.Format == ot.PaintFormat.PaintColrLayers:
- yield from _flatten_layers(paint, colr)
- else:
- yield paint
-
-
-def _merge_PaintColrLayers(self, out, lst):
- # we only enforce that the (flat) number of layers is the same across all masters
-    # but we allow FirstLayerIndex to differ to accommodate sparse glyph sets.
-
- out_layers = list(_flatten_layers(out, self.font["COLR"].table))
-
- # sanity check ttfs are subset to current values (see VariationMerger.mergeThings)
- # before matching each master PaintColrLayers to its respective COLR by position
- assert len(self.ttfs) == len(lst)
- master_layerses = [
- list(_flatten_layers(lst[i], self.ttfs[i]["COLR"].table))
- for i in range(len(lst))
- ]
-
- try:
- self.mergeLists(out_layers, master_layerses)
- except VarLibMergeError as e:
- # NOTE: This attribute doesn't actually exist in PaintColrLayers but it's
- # handy to have it in the stack trace for debugging.
- e.stack.append(".Layers")
- raise
-
- # following block is very similar to LayerListBuilder._beforeBuildPaintColrLayers
- # but I couldn't find a nice way to share the code between the two...
-
- if self.layerReuseCache is not None:
- # successful reuse can make the list smaller
- out_layers = self.layerReuseCache.try_reuse(out_layers)
-
- # if the list is still too big we need to tree-fy it
- is_tree = len(out_layers) > MAX_PAINT_COLR_LAYER_COUNT
- out_layers = build_n_ary_tree(out_layers, n=MAX_PAINT_COLR_LAYER_COUNT)
-
- # We now have a tree of sequences with Paint leaves.
- # Convert the sequences into PaintColrLayers.
- def listToColrLayers(paint):
- if isinstance(paint, list):
- layers = [listToColrLayers(l) for l in paint]
- paint = ot.Paint()
- paint.Format = int(ot.PaintFormat.PaintColrLayers)
- paint.NumLayers = len(layers)
- paint.FirstLayerIndex = len(self.layers)
- self.layers.extend(layers)
- if self.layerReuseCache is not None:
- self.layerReuseCache.add(layers, paint.FirstLayerIndex)
- return paint
-
- out_layers = [listToColrLayers(l) for l in out_layers]
-
- if len(out_layers) == 1 and out_layers[0].Format == ot.PaintFormat.PaintColrLayers:
- # special case when the reuse cache finds a single perfect PaintColrLayers match
- # (it can only come from a successful reuse, _flatten_layers has gotten rid of
- # all nested PaintColrLayers already); we assign it directly and avoid creating
- # an extra table
- out.NumLayers = out_layers[0].NumLayers
- out.FirstLayerIndex = out_layers[0].FirstLayerIndex
- else:
- out.NumLayers = len(out_layers)
- out.FirstLayerIndex = len(self.layers)
-
- self.layers.extend(out_layers)
-
- # Register our parts for reuse provided we aren't a tree
-    # If we are a tree, the leaves were registered for reuse and that will suffice
- if self.layerReuseCache is not None and not is_tree:
- self.layerReuseCache.add(out_layers, out.FirstLayerIndex)
-
-
-@COLRVariationMerger.merger((ot.Paint, ot.ClipBox))
-def merge(merger, self, lst):
- fmt = merger.checkFormatEnum(self, lst, lambda fmt: not fmt.is_variable())
-
- if fmt is ot.PaintFormat.PaintColrLayers:
- _merge_PaintColrLayers(merger, self, lst)
- return
-
- varFormat = fmt.as_variable()
-
- varAttrs = ()
- if varFormat is not None:
- varAttrs = otBase.getVariableAttrs(type(self), varFormat)
- staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)
-
- merger.mergeAttrs(self, lst, staticAttrs)
-
- varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)
-
- subTables = [st.value for st in self.iterSubTables()]
-
- # Convert table to variable if itself has variations or any subtables have
- isVariable = varIndexBase != ot.NO_VARIATION_INDEX or any(
- id(table) in merger.varTableIds for table in subTables
- )
-
- if isVariable:
- if varAttrs:
- # Some PaintVar* don't have any scalar attributes that can vary,
- # only indirect offsets to other variable subtables, thus have
- # no VarIndexBase of their own (e.g. PaintVarTransform)
- self.VarIndexBase = varIndexBase
-
- if subTables:
- # Convert Affine2x3 -> VarAffine2x3, ColorLine -> VarColorLine, etc.
- merger.convertSubTablesToVarType(self)
-
- assert varFormat is not None
- self.Format = int(varFormat)
-
-
-@COLRVariationMerger.merger((ot.Affine2x3, ot.ColorStop))
-def merge(merger, self, lst):
- varType = type(self).VarType
-
- varAttrs = otBase.getVariableAttrs(varType)
- staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)
-
- merger.mergeAttrs(self, lst, staticAttrs)
-
- varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)
-
- if varIndexBase != ot.NO_VARIATION_INDEX:
- self.VarIndexBase = varIndexBase
- # mark as having variations so the parent table will convert to Var{Type}
- merger.varTableIds.add(id(self))
-
-
-@COLRVariationMerger.merger(ot.ColorLine)
-def merge(merger, self, lst):
- merger.mergeAttrs(self, lst, (c.name for c in self.getConverters()))
-
- if any(id(stop) in merger.varTableIds for stop in self.ColorStop):
- merger.convertSubTablesToVarType(self)
- merger.varTableIds.add(id(self))
-
-
-@COLRVariationMerger.merger(ot.ClipList, "clips")
-def merge(merger, self, lst):
- # 'sparse' in that we allow non-default masters to omit ClipBox entries
- # for some/all glyphs (i.e. they don't participate)
- merger.mergeSparseDict(self, lst)
diff --git a/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/oid.py b/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/oid.py
deleted file mode 100644
index 90d7f8613e4f12e942ec8967db9f17c0ec0d41f4..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/oid.py
+++ /dev/null
@@ -1,535 +0,0 @@
-# Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/datasets/oid.py
-# Copyright (c) Facebook, Inc. and its affiliates.
-from .register_oid import register_oid_instances
-import os
-
-categories = [
- {'id': 1, 'name': 'Infant bed', 'freebase_id': '/m/061hd_'},
- {'id': 2, 'name': 'Rose', 'freebase_id': '/m/06m11'},
- {'id': 3, 'name': 'Flag', 'freebase_id': '/m/03120'},
- {'id': 4, 'name': 'Flashlight', 'freebase_id': '/m/01kb5b'},
- {'id': 5, 'name': 'Sea turtle', 'freebase_id': '/m/0120dh'},
- {'id': 6, 'name': 'Camera', 'freebase_id': '/m/0dv5r'},
- {'id': 7, 'name': 'Animal', 'freebase_id': '/m/0jbk'},
- {'id': 8, 'name': 'Glove', 'freebase_id': '/m/0174n1'},
- {'id': 9, 'name': 'Crocodile', 'freebase_id': '/m/09f_2'},
- {'id': 10, 'name': 'Cattle', 'freebase_id': '/m/01xq0k1'},
- {'id': 11, 'name': 'House', 'freebase_id': '/m/03jm5'},
- {'id': 12, 'name': 'Guacamole', 'freebase_id': '/m/02g30s'},
- {'id': 13, 'name': 'Penguin', 'freebase_id': '/m/05z6w'},
- {'id': 14, 'name': 'Vehicle registration plate', 'freebase_id': '/m/01jfm_'},
- {'id': 15, 'name': 'Bench', 'freebase_id': '/m/076lb9'},
- {'id': 16, 'name': 'Ladybug', 'freebase_id': '/m/0gj37'},
- {'id': 17, 'name': 'Human nose', 'freebase_id': '/m/0k0pj'},
- {'id': 18, 'name': 'Watermelon', 'freebase_id': '/m/0kpqd'},
- {'id': 19, 'name': 'Flute', 'freebase_id': '/m/0l14j_'},
- {'id': 20, 'name': 'Butterfly', 'freebase_id': '/m/0cyf8'},
- {'id': 21, 'name': 'Washing machine', 'freebase_id': '/m/0174k2'},
- {'id': 22, 'name': 'Raccoon', 'freebase_id': '/m/0dq75'},
- {'id': 23, 'name': 'Segway', 'freebase_id': '/m/076bq'},
- {'id': 24, 'name': 'Taco', 'freebase_id': '/m/07crc'},
- {'id': 25, 'name': 'Jellyfish', 'freebase_id': '/m/0d8zb'},
- {'id': 26, 'name': 'Cake', 'freebase_id': '/m/0fszt'},
- {'id': 27, 'name': 'Pen', 'freebase_id': '/m/0k1tl'},
- {'id': 28, 'name': 'Cannon', 'freebase_id': '/m/020kz'},
- {'id': 29, 'name': 'Bread', 'freebase_id': '/m/09728'},
- {'id': 30, 'name': 'Tree', 'freebase_id': '/m/07j7r'},
- {'id': 31, 'name': 'Shellfish', 'freebase_id': '/m/0fbdv'},
- {'id': 32, 'name': 'Bed', 'freebase_id': '/m/03ssj5'},
- {'id': 33, 'name': 'Hamster', 'freebase_id': '/m/03qrc'},
- {'id': 34, 'name': 'Hat', 'freebase_id': '/m/02dl1y'},
- {'id': 35, 'name': 'Toaster', 'freebase_id': '/m/01k6s3'},
- {'id': 36, 'name': 'Sombrero', 'freebase_id': '/m/02jfl0'},
- {'id': 37, 'name': 'Tiara', 'freebase_id': '/m/01krhy'},
- {'id': 38, 'name': 'Bowl', 'freebase_id': '/m/04kkgm'},
- {'id': 39, 'name': 'Dragonfly', 'freebase_id': '/m/0ft9s'},
- {'id': 40, 'name': 'Moths and butterflies', 'freebase_id': '/m/0d_2m'},
- {'id': 41, 'name': 'Antelope', 'freebase_id': '/m/0czz2'},
- {'id': 42, 'name': 'Vegetable', 'freebase_id': '/m/0f4s2w'},
- {'id': 43, 'name': 'Torch', 'freebase_id': '/m/07dd4'},
- {'id': 44, 'name': 'Building', 'freebase_id': '/m/0cgh4'},
- {'id': 45, 'name': 'Power plugs and sockets', 'freebase_id': '/m/03bbps'},
- {'id': 46, 'name': 'Blender', 'freebase_id': '/m/02pjr4'},
- {'id': 47, 'name': 'Billiard table', 'freebase_id': '/m/04p0qw'},
- {'id': 48, 'name': 'Cutting board', 'freebase_id': '/m/02pdsw'},
- {'id': 49, 'name': 'Bronze sculpture', 'freebase_id': '/m/01yx86'},
- {'id': 50, 'name': 'Turtle', 'freebase_id': '/m/09dzg'},
- {'id': 51, 'name': 'Broccoli', 'freebase_id': '/m/0hkxq'},
- {'id': 52, 'name': 'Tiger', 'freebase_id': '/m/07dm6'},
- {'id': 53, 'name': 'Mirror', 'freebase_id': '/m/054_l'},
- {'id': 54, 'name': 'Bear', 'freebase_id': '/m/01dws'},
- {'id': 55, 'name': 'Zucchini', 'freebase_id': '/m/027pcv'},
- {'id': 56, 'name': 'Dress', 'freebase_id': '/m/01d40f'},
- {'id': 57, 'name': 'Volleyball', 'freebase_id': '/m/02rgn06'},
- {'id': 58, 'name': 'Guitar', 'freebase_id': '/m/0342h'},
- {'id': 59, 'name': 'Reptile', 'freebase_id': '/m/06bt6'},
- {'id': 60, 'name': 'Golf cart', 'freebase_id': '/m/0323sq'},
- {'id': 61, 'name': 'Tart', 'freebase_id': '/m/02zvsm'},
- {'id': 62, 'name': 'Fedora', 'freebase_id': '/m/02fq_6'},
- {'id': 63, 'name': 'Carnivore', 'freebase_id': '/m/01lrl'},
- {'id': 64, 'name': 'Car', 'freebase_id': '/m/0k4j'},
- {'id': 65, 'name': 'Lighthouse', 'freebase_id': '/m/04h7h'},
- {'id': 66, 'name': 'Coffeemaker', 'freebase_id': '/m/07xyvk'},
- {'id': 67, 'name': 'Food processor', 'freebase_id': '/m/03y6mg'},
- {'id': 68, 'name': 'Truck', 'freebase_id': '/m/07r04'},
- {'id': 69, 'name': 'Bookcase', 'freebase_id': '/m/03__z0'},
- {'id': 70, 'name': 'Surfboard', 'freebase_id': '/m/019w40'},
- {'id': 71, 'name': 'Footwear', 'freebase_id': '/m/09j5n'},
- {'id': 72, 'name': 'Bench', 'freebase_id': '/m/0cvnqh'},
- {'id': 73, 'name': 'Necklace', 'freebase_id': '/m/01llwg'},
- {'id': 74, 'name': 'Flower', 'freebase_id': '/m/0c9ph5'},
- {'id': 75, 'name': 'Radish', 'freebase_id': '/m/015x5n'},
- {'id': 76, 'name': 'Marine mammal', 'freebase_id': '/m/0gd2v'},
- {'id': 77, 'name': 'Frying pan', 'freebase_id': '/m/04v6l4'},
- {'id': 78, 'name': 'Tap', 'freebase_id': '/m/02jz0l'},
- {'id': 79, 'name': 'Peach', 'freebase_id': '/m/0dj6p'},
- {'id': 80, 'name': 'Knife', 'freebase_id': '/m/04ctx'},
- {'id': 81, 'name': 'Handbag', 'freebase_id': '/m/080hkjn'},
- {'id': 82, 'name': 'Laptop', 'freebase_id': '/m/01c648'},
- {'id': 83, 'name': 'Tent', 'freebase_id': '/m/01j61q'},
- {'id': 84, 'name': 'Ambulance', 'freebase_id': '/m/012n7d'},
- {'id': 85, 'name': 'Christmas tree', 'freebase_id': '/m/025nd'},
- {'id': 86, 'name': 'Eagle', 'freebase_id': '/m/09csl'},
- {'id': 87, 'name': 'Limousine', 'freebase_id': '/m/01lcw4'},
- {'id': 88, 'name': 'Kitchen & dining room table', 'freebase_id': '/m/0h8n5zk'},
- {'id': 89, 'name': 'Polar bear', 'freebase_id': '/m/0633h'},
- {'id': 90, 'name': 'Tower', 'freebase_id': '/m/01fdzj'},
- {'id': 91, 'name': 'Football', 'freebase_id': '/m/01226z'},
- {'id': 92, 'name': 'Willow', 'freebase_id': '/m/0mw_6'},
- {'id': 93, 'name': 'Human head', 'freebase_id': '/m/04hgtk'},
- {'id': 94, 'name': 'Stop sign', 'freebase_id': '/m/02pv19'},
- {'id': 95, 'name': 'Banana', 'freebase_id': '/m/09qck'},
- {'id': 96, 'name': 'Mixer', 'freebase_id': '/m/063rgb'},
- {'id': 97, 'name': 'Binoculars', 'freebase_id': '/m/0lt4_'},
- {'id': 98, 'name': 'Dessert', 'freebase_id': '/m/0270h'},
- {'id': 99, 'name': 'Bee', 'freebase_id': '/m/01h3n'},
- {'id': 100, 'name': 'Chair', 'freebase_id': '/m/01mzpv'},
- {'id': 101, 'name': 'Wood-burning stove', 'freebase_id': '/m/04169hn'},
- {'id': 102, 'name': 'Flowerpot', 'freebase_id': '/m/0fm3zh'},
- {'id': 103, 'name': 'Beaker', 'freebase_id': '/m/0d20w4'},
- {'id': 104, 'name': 'Oyster', 'freebase_id': '/m/0_cp5'},
- {'id': 105, 'name': 'Woodpecker', 'freebase_id': '/m/01dy8n'},
- {'id': 106, 'name': 'Harp', 'freebase_id': '/m/03m5k'},
- {'id': 107, 'name': 'Bathtub', 'freebase_id': '/m/03dnzn'},
- {'id': 108, 'name': 'Wall clock', 'freebase_id': '/m/0h8mzrc'},
- {'id': 109, 'name': 'Sports uniform', 'freebase_id': '/m/0h8mhzd'},
- {'id': 110, 'name': 'Rhinoceros', 'freebase_id': '/m/03d443'},
- {'id': 111, 'name': 'Beehive', 'freebase_id': '/m/01gllr'},
- {'id': 112, 'name': 'Cupboard', 'freebase_id': '/m/0642b4'},
- {'id': 113, 'name': 'Chicken', 'freebase_id': '/m/09b5t'},
- {'id': 114, 'name': 'Man', 'freebase_id': '/m/04yx4'},
- {'id': 115, 'name': 'Blue jay', 'freebase_id': '/m/01f8m5'},
- {'id': 116, 'name': 'Cucumber', 'freebase_id': '/m/015x4r'},
- {'id': 117, 'name': 'Balloon', 'freebase_id': '/m/01j51'},
- {'id': 118, 'name': 'Kite', 'freebase_id': '/m/02zt3'},
- {'id': 119, 'name': 'Fireplace', 'freebase_id': '/m/03tw93'},
- {'id': 120, 'name': 'Lantern', 'freebase_id': '/m/01jfsr'},
- {'id': 121, 'name': 'Missile', 'freebase_id': '/m/04ylt'},
- {'id': 122, 'name': 'Book', 'freebase_id': '/m/0bt_c3'},
- {'id': 123, 'name': 'Spoon', 'freebase_id': '/m/0cmx8'},
- {'id': 124, 'name': 'Grapefruit', 'freebase_id': '/m/0hqkz'},
- {'id': 125, 'name': 'Squirrel', 'freebase_id': '/m/071qp'},
- {'id': 126, 'name': 'Orange', 'freebase_id': '/m/0cyhj_'},
- {'id': 127, 'name': 'Coat', 'freebase_id': '/m/01xygc'},
- {'id': 128, 'name': 'Punching bag', 'freebase_id': '/m/0420v5'},
- {'id': 129, 'name': 'Zebra', 'freebase_id': '/m/0898b'},
- {'id': 130, 'name': 'Billboard', 'freebase_id': '/m/01knjb'},
- {'id': 131, 'name': 'Bicycle', 'freebase_id': '/m/0199g'},
- {'id': 132, 'name': 'Door handle', 'freebase_id': '/m/03c7gz'},
- {'id': 133, 'name': 'Mechanical fan', 'freebase_id': '/m/02x984l'},
- {'id': 134, 'name': 'Ring binder', 'freebase_id': '/m/04zwwv'},
- {'id': 135, 'name': 'Table', 'freebase_id': '/m/04bcr3'},
- {'id': 136, 'name': 'Parrot', 'freebase_id': '/m/0gv1x'},
- {'id': 137, 'name': 'Sock', 'freebase_id': '/m/01nq26'},
- {'id': 138, 'name': 'Vase', 'freebase_id': '/m/02s195'},
- {'id': 139, 'name': 'Weapon', 'freebase_id': '/m/083kb'},
- {'id': 140, 'name': 'Shotgun', 'freebase_id': '/m/06nrc'},
- {'id': 141, 'name': 'Glasses', 'freebase_id': '/m/0jyfg'},
- {'id': 142, 'name': 'Seahorse', 'freebase_id': '/m/0nybt'},
- {'id': 143, 'name': 'Belt', 'freebase_id': '/m/0176mf'},
- {'id': 144, 'name': 'Watercraft', 'freebase_id': '/m/01rzcn'},
- {'id': 145, 'name': 'Window', 'freebase_id': '/m/0d4v4'},
- {'id': 146, 'name': 'Giraffe', 'freebase_id': '/m/03bk1'},
- {'id': 147, 'name': 'Lion', 'freebase_id': '/m/096mb'},
- {'id': 148, 'name': 'Tire', 'freebase_id': '/m/0h9mv'},
- {'id': 149, 'name': 'Vehicle', 'freebase_id': '/m/07yv9'},
- {'id': 150, 'name': 'Canoe', 'freebase_id': '/m/0ph39'},
- {'id': 151, 'name': 'Tie', 'freebase_id': '/m/01rkbr'},
- {'id': 152, 'name': 'Shelf', 'freebase_id': '/m/0gjbg72'},
- {'id': 153, 'name': 'Picture frame', 'freebase_id': '/m/06z37_'},
- {'id': 154, 'name': 'Printer', 'freebase_id': '/m/01m4t'},
- {'id': 155, 'name': 'Human leg', 'freebase_id': '/m/035r7c'},
- {'id': 156, 'name': 'Boat', 'freebase_id': '/m/019jd'},
- {'id': 157, 'name': 'Slow cooker', 'freebase_id': '/m/02tsc9'},
- {'id': 158, 'name': 'Croissant', 'freebase_id': '/m/015wgc'},
- {'id': 159, 'name': 'Candle', 'freebase_id': '/m/0c06p'},
- {'id': 160, 'name': 'Pancake', 'freebase_id': '/m/01dwwc'},
- {'id': 161, 'name': 'Pillow', 'freebase_id': '/m/034c16'},
- {'id': 162, 'name': 'Coin', 'freebase_id': '/m/0242l'},
- {'id': 163, 'name': 'Stretcher', 'freebase_id': '/m/02lbcq'},
- {'id': 164, 'name': 'Sandal', 'freebase_id': '/m/03nfch'},
- {'id': 165, 'name': 'Woman', 'freebase_id': '/m/03bt1vf'},
- {'id': 166, 'name': 'Stairs', 'freebase_id': '/m/01lynh'},
- {'id': 167, 'name': 'Harpsichord', 'freebase_id': '/m/03q5t'},
- {'id': 168, 'name': 'Stool', 'freebase_id': '/m/0fqt361'},
- {'id': 169, 'name': 'Bus', 'freebase_id': '/m/01bjv'},
- {'id': 170, 'name': 'Suitcase', 'freebase_id': '/m/01s55n'},
- {'id': 171, 'name': 'Human mouth', 'freebase_id': '/m/0283dt1'},
- {'id': 172, 'name': 'Juice', 'freebase_id': '/m/01z1kdw'},
- {'id': 173, 'name': 'Skull', 'freebase_id': '/m/016m2d'},
- {'id': 174, 'name': 'Door', 'freebase_id': '/m/02dgv'},
- {'id': 175, 'name': 'Violin', 'freebase_id': '/m/07y_7'},
- {'id': 176, 'name': 'Chopsticks', 'freebase_id': '/m/01_5g'},
- {'id': 177, 'name': 'Digital clock', 'freebase_id': '/m/06_72j'},
- {'id': 178, 'name': 'Sunflower', 'freebase_id': '/m/0ftb8'},
- {'id': 179, 'name': 'Leopard', 'freebase_id': '/m/0c29q'},
- {'id': 180, 'name': 'Bell pepper', 'freebase_id': '/m/0jg57'},
- {'id': 181, 'name': 'Harbor seal', 'freebase_id': '/m/02l8p9'},
- {'id': 182, 'name': 'Snake', 'freebase_id': '/m/078jl'},
- {'id': 183, 'name': 'Sewing machine', 'freebase_id': '/m/0llzx'},
- {'id': 184, 'name': 'Goose', 'freebase_id': '/m/0dbvp'},
- {'id': 185, 'name': 'Helicopter', 'freebase_id': '/m/09ct_'},
- {'id': 186, 'name': 'Seat belt', 'freebase_id': '/m/0dkzw'},
- {'id': 187, 'name': 'Coffee cup', 'freebase_id': '/m/02p5f1q'},
- {'id': 188, 'name': 'Microwave oven', 'freebase_id': '/m/0fx9l'},
- {'id': 189, 'name': 'Hot dog', 'freebase_id': '/m/01b9xk'},
- {'id': 190, 'name': 'Countertop', 'freebase_id': '/m/0b3fp9'},
- {'id': 191, 'name': 'Serving tray', 'freebase_id': '/m/0h8n27j'},
- {'id': 192, 'name': 'Dog bed', 'freebase_id': '/m/0h8n6f9'},
- {'id': 193, 'name': 'Beer', 'freebase_id': '/m/01599'},
- {'id': 194, 'name': 'Sunglasses', 'freebase_id': '/m/017ftj'},
- {'id': 195, 'name': 'Golf ball', 'freebase_id': '/m/044r5d'},
- {'id': 196, 'name': 'Waffle', 'freebase_id': '/m/01dwsz'},
- {'id': 197, 'name': 'Palm tree', 'freebase_id': '/m/0cdl1'},
- {'id': 198, 'name': 'Trumpet', 'freebase_id': '/m/07gql'},
- {'id': 199, 'name': 'Ruler', 'freebase_id': '/m/0hdln'},
- {'id': 200, 'name': 'Helmet', 'freebase_id': '/m/0zvk5'},
- {'id': 201, 'name': 'Ladder', 'freebase_id': '/m/012w5l'},
- {'id': 202, 'name': 'Office building', 'freebase_id': '/m/021sj1'},
- {'id': 203, 'name': 'Tablet computer', 'freebase_id': '/m/0bh9flk'},
- {'id': 204, 'name': 'Toilet paper', 'freebase_id': '/m/09gtd'},
- {'id': 205, 'name': 'Pomegranate', 'freebase_id': '/m/0jwn_'},
- {'id': 206, 'name': 'Skirt', 'freebase_id': '/m/02wv6h6'},
- {'id': 207, 'name': 'Gas stove', 'freebase_id': '/m/02wv84t'},
- {'id': 208, 'name': 'Cookie', 'freebase_id': '/m/021mn'},
- {'id': 209, 'name': 'Cart', 'freebase_id': '/m/018p4k'},
- {'id': 210, 'name': 'Raven', 'freebase_id': '/m/06j2d'},
- {'id': 211, 'name': 'Egg', 'freebase_id': '/m/033cnk'},
- {'id': 212, 'name': 'Burrito', 'freebase_id': '/m/01j3zr'},
- {'id': 213, 'name': 'Goat', 'freebase_id': '/m/03fwl'},
- {'id': 214, 'name': 'Kitchen knife', 'freebase_id': '/m/058qzx'},
- {'id': 215, 'name': 'Skateboard', 'freebase_id': '/m/06_fw'},
- {'id': 216, 'name': 'Salt and pepper shakers', 'freebase_id': '/m/02x8cch'},
- {'id': 217, 'name': 'Lynx', 'freebase_id': '/m/04g2r'},
- {'id': 218, 'name': 'Boot', 'freebase_id': '/m/01b638'},
- {'id': 219, 'name': 'Platter', 'freebase_id': '/m/099ssp'},
- {'id': 220, 'name': 'Ski', 'freebase_id': '/m/071p9'},
- {'id': 221, 'name': 'Swimwear', 'freebase_id': '/m/01gkx_'},
- {'id': 222, 'name': 'Swimming pool', 'freebase_id': '/m/0b_rs'},
- {'id': 223, 'name': 'Drinking straw', 'freebase_id': '/m/03v5tg'},
- {'id': 224, 'name': 'Wrench', 'freebase_id': '/m/01j5ks'},
- {'id': 225, 'name': 'Drum', 'freebase_id': '/m/026t6'},
- {'id': 226, 'name': 'Ant', 'freebase_id': '/m/0_k2'},
- {'id': 227, 'name': 'Human ear', 'freebase_id': '/m/039xj_'},
- {'id': 228, 'name': 'Headphones', 'freebase_id': '/m/01b7fy'},
- {'id': 229, 'name': 'Fountain', 'freebase_id': '/m/0220r2'},
- {'id': 230, 'name': 'Bird', 'freebase_id': '/m/015p6'},
- {'id': 231, 'name': 'Jeans', 'freebase_id': '/m/0fly7'},
- {'id': 232, 'name': 'Television', 'freebase_id': '/m/07c52'},
- {'id': 233, 'name': 'Crab', 'freebase_id': '/m/0n28_'},
- {'id': 234, 'name': 'Microphone', 'freebase_id': '/m/0hg7b'},
- {'id': 235, 'name': 'Home appliance', 'freebase_id': '/m/019dx1'},
- {'id': 236, 'name': 'Snowplow', 'freebase_id': '/m/04vv5k'},
- {'id': 237, 'name': 'Beetle', 'freebase_id': '/m/020jm'},
- {'id': 238, 'name': 'Artichoke', 'freebase_id': '/m/047v4b'},
- {'id': 239, 'name': 'Jet ski', 'freebase_id': '/m/01xs3r'},
- {'id': 240, 'name': 'Stationary bicycle', 'freebase_id': '/m/03kt2w'},
- {'id': 241, 'name': 'Human hair', 'freebase_id': '/m/03q69'},
- {'id': 242, 'name': 'Brown bear', 'freebase_id': '/m/01dxs'},
- {'id': 243, 'name': 'Starfish', 'freebase_id': '/m/01h8tj'},
- {'id': 244, 'name': 'Fork', 'freebase_id': '/m/0dt3t'},
- {'id': 245, 'name': 'Lobster', 'freebase_id': '/m/0cjq5'},
- {'id': 246, 'name': 'Corded phone', 'freebase_id': '/m/0h8lkj8'},
- {'id': 247, 'name': 'Drink', 'freebase_id': '/m/0271t'},
- {'id': 248, 'name': 'Saucer', 'freebase_id': '/m/03q5c7'},
- {'id': 249, 'name': 'Carrot', 'freebase_id': '/m/0fj52s'},
- {'id': 250, 'name': 'Insect', 'freebase_id': '/m/03vt0'},
- {'id': 251, 'name': 'Clock', 'freebase_id': '/m/01x3z'},
- {'id': 252, 'name': 'Castle', 'freebase_id': '/m/0d5gx'},
- {'id': 253, 'name': 'Tennis racket', 'freebase_id': '/m/0h8my_4'},
- {'id': 254, 'name': 'Ceiling fan', 'freebase_id': '/m/03ldnb'},
- {'id': 255, 'name': 'Asparagus', 'freebase_id': '/m/0cjs7'},
- {'id': 256, 'name': 'Jaguar', 'freebase_id': '/m/0449p'},
- {'id': 257, 'name': 'Musical instrument', 'freebase_id': '/m/04szw'},
- {'id': 258, 'name': 'Train', 'freebase_id': '/m/07jdr'},
- {'id': 259, 'name': 'Cat', 'freebase_id': '/m/01yrx'},
- {'id': 260, 'name': 'Rifle', 'freebase_id': '/m/06c54'},
- {'id': 261, 'name': 'Dumbbell', 'freebase_id': '/m/04h8sr'},
- {'id': 262, 'name': 'Mobile phone', 'freebase_id': '/m/050k8'},
- {'id': 263, 'name': 'Taxi', 'freebase_id': '/m/0pg52'},
- {'id': 264, 'name': 'Shower', 'freebase_id': '/m/02f9f_'},
- {'id': 265, 'name': 'Pitcher', 'freebase_id': '/m/054fyh'},
- {'id': 266, 'name': 'Lemon', 'freebase_id': '/m/09k_b'},
- {'id': 267, 'name': 'Invertebrate', 'freebase_id': '/m/03xxp'},
- {'id': 268, 'name': 'Turkey', 'freebase_id': '/m/0jly1'},
- {'id': 269, 'name': 'High heels', 'freebase_id': '/m/06k2mb'},
- {'id': 270, 'name': 'Bust', 'freebase_id': '/m/04yqq2'},
- {'id': 271, 'name': 'Elephant', 'freebase_id': '/m/0bwd_0j'},
- {'id': 272, 'name': 'Scarf', 'freebase_id': '/m/02h19r'},
- {'id': 273, 'name': 'Barrel', 'freebase_id': '/m/02zn6n'},
- {'id': 274, 'name': 'Trombone', 'freebase_id': '/m/07c6l'},
- {'id': 275, 'name': 'Pumpkin', 'freebase_id': '/m/05zsy'},
- {'id': 276, 'name': 'Box', 'freebase_id': '/m/025dyy'},
- {'id': 277, 'name': 'Tomato', 'freebase_id': '/m/07j87'},
- {'id': 278, 'name': 'Frog', 'freebase_id': '/m/09ld4'},
- {'id': 279, 'name': 'Bidet', 'freebase_id': '/m/01vbnl'},
- {'id': 280, 'name': 'Human face', 'freebase_id': '/m/0dzct'},
- {'id': 281, 'name': 'Houseplant', 'freebase_id': '/m/03fp41'},
- {'id': 282, 'name': 'Van', 'freebase_id': '/m/0h2r6'},
- {'id': 283, 'name': 'Shark', 'freebase_id': '/m/0by6g'},
- {'id': 284, 'name': 'Ice cream', 'freebase_id': '/m/0cxn2'},
- {'id': 285, 'name': 'Swim cap', 'freebase_id': '/m/04tn4x'},
- {'id': 286, 'name': 'Falcon', 'freebase_id': '/m/0f6wt'},
- {'id': 287, 'name': 'Ostrich', 'freebase_id': '/m/05n4y'},
- {'id': 288, 'name': 'Handgun', 'freebase_id': '/m/0gxl3'},
- {'id': 289, 'name': 'Whiteboard', 'freebase_id': '/m/02d9qx'},
- {'id': 290, 'name': 'Lizard', 'freebase_id': '/m/04m9y'},
- {'id': 291, 'name': 'Pasta', 'freebase_id': '/m/05z55'},
- {'id': 292, 'name': 'Snowmobile', 'freebase_id': '/m/01x3jk'},
- {'id': 293, 'name': 'Light bulb', 'freebase_id': '/m/0h8l4fh'},
- {'id': 294, 'name': 'Window blind', 'freebase_id': '/m/031b6r'},
- {'id': 295, 'name': 'Muffin', 'freebase_id': '/m/01tcjp'},
- {'id': 296, 'name': 'Pretzel', 'freebase_id': '/m/01f91_'},
- {'id': 297, 'name': 'Computer monitor', 'freebase_id': '/m/02522'},
- {'id': 298, 'name': 'Horn', 'freebase_id': '/m/0319l'},
- {'id': 299, 'name': 'Furniture', 'freebase_id': '/m/0c_jw'},
- {'id': 300, 'name': 'Sandwich', 'freebase_id': '/m/0l515'},
- {'id': 301, 'name': 'Fox', 'freebase_id': '/m/0306r'},
- {'id': 302, 'name': 'Convenience store', 'freebase_id': '/m/0crjs'},
- {'id': 303, 'name': 'Fish', 'freebase_id': '/m/0ch_cf'},
- {'id': 304, 'name': 'Fruit', 'freebase_id': '/m/02xwb'},
- {'id': 305, 'name': 'Earrings', 'freebase_id': '/m/01r546'},
- {'id': 306, 'name': 'Curtain', 'freebase_id': '/m/03rszm'},
- {'id': 307, 'name': 'Grape', 'freebase_id': '/m/0388q'},
- {'id': 308, 'name': 'Sofa bed', 'freebase_id': '/m/03m3pdh'},
- {'id': 309, 'name': 'Horse', 'freebase_id': '/m/03k3r'},
- {'id': 310, 'name': 'Luggage and bags', 'freebase_id': '/m/0hf58v5'},
- {'id': 311, 'name': 'Desk', 'freebase_id': '/m/01y9k5'},
- {'id': 312, 'name': 'Crutch', 'freebase_id': '/m/05441v'},
- {'id': 313, 'name': 'Bicycle helmet', 'freebase_id': '/m/03p3bw'},
- {'id': 314, 'name': 'Tick', 'freebase_id': '/m/0175cv'},
- {'id': 315, 'name': 'Airplane', 'freebase_id': '/m/0cmf2'},
- {'id': 316, 'name': 'Canary', 'freebase_id': '/m/0ccs93'},
- {'id': 317, 'name': 'Spatula', 'freebase_id': '/m/02d1br'},
- {'id': 318, 'name': 'Watch', 'freebase_id': '/m/0gjkl'},
- {'id': 319, 'name': 'Lily', 'freebase_id': '/m/0jqgx'},
- {'id': 320, 'name': 'Kitchen appliance', 'freebase_id': '/m/0h99cwc'},
- {'id': 321, 'name': 'Filing cabinet', 'freebase_id': '/m/047j0r'},
- {'id': 322, 'name': 'Aircraft', 'freebase_id': '/m/0k5j'},
- {'id': 323, 'name': 'Cake stand', 'freebase_id': '/m/0h8n6ft'},
- {'id': 324, 'name': 'Candy', 'freebase_id': '/m/0gm28'},
- {'id': 325, 'name': 'Sink', 'freebase_id': '/m/0130jx'},
- {'id': 326, 'name': 'Mouse', 'freebase_id': '/m/04rmv'},
- {'id': 327, 'name': 'Wine', 'freebase_id': '/m/081qc'},
- {'id': 328, 'name': 'Wheelchair', 'freebase_id': '/m/0qmmr'},
- {'id': 329, 'name': 'Goldfish', 'freebase_id': '/m/03fj2'},
- {'id': 330, 'name': 'Refrigerator', 'freebase_id': '/m/040b_t'},
- {'id': 331, 'name': 'French fries', 'freebase_id': '/m/02y6n'},
- {'id': 332, 'name': 'Drawer', 'freebase_id': '/m/0fqfqc'},
- {'id': 333, 'name': 'Treadmill', 'freebase_id': '/m/030610'},
- {'id': 334, 'name': 'Picnic basket', 'freebase_id': '/m/07kng9'},
- {'id': 335, 'name': 'Dice', 'freebase_id': '/m/029b3'},
- {'id': 336, 'name': 'Cabbage', 'freebase_id': '/m/0fbw6'},
- {'id': 337, 'name': 'Football helmet', 'freebase_id': '/m/07qxg_'},
- {'id': 338, 'name': 'Pig', 'freebase_id': '/m/068zj'},
- {'id': 339, 'name': 'Person', 'freebase_id': '/m/01g317'},
- {'id': 340, 'name': 'Shorts', 'freebase_id': '/m/01bfm9'},
- {'id': 341, 'name': 'Gondola', 'freebase_id': '/m/02068x'},
- {'id': 342, 'name': 'Honeycomb', 'freebase_id': '/m/0fz0h'},
- {'id': 343, 'name': 'Doughnut', 'freebase_id': '/m/0jy4k'},
- {'id': 344, 'name': 'Chest of drawers', 'freebase_id': '/m/05kyg_'},
- {'id': 345, 'name': 'Land vehicle', 'freebase_id': '/m/01prls'},
- {'id': 346, 'name': 'Bat', 'freebase_id': '/m/01h44'},
- {'id': 347, 'name': 'Monkey', 'freebase_id': '/m/08pbxl'},
- {'id': 348, 'name': 'Dagger', 'freebase_id': '/m/02gzp'},
- {'id': 349, 'name': 'Tableware', 'freebase_id': '/m/04brg2'},
- {'id': 350, 'name': 'Human foot', 'freebase_id': '/m/031n1'},
- {'id': 351, 'name': 'Mug', 'freebase_id': '/m/02jvh9'},
- {'id': 352, 'name': 'Alarm clock', 'freebase_id': '/m/046dlr'},
- {'id': 353, 'name': 'Pressure cooker', 'freebase_id': '/m/0h8ntjv'},
- {'id': 354, 'name': 'Human hand', 'freebase_id': '/m/0k65p'},
- {'id': 355, 'name': 'Tortoise', 'freebase_id': '/m/011k07'},
- {'id': 356, 'name': 'Baseball glove', 'freebase_id': '/m/03grzl'},
- {'id': 357, 'name': 'Sword', 'freebase_id': '/m/06y5r'},
- {'id': 358, 'name': 'Pear', 'freebase_id': '/m/061_f'},
- {'id': 359, 'name': 'Miniskirt', 'freebase_id': '/m/01cmb2'},
- {'id': 360, 'name': 'Traffic sign', 'freebase_id': '/m/01mqdt'},
- {'id': 361, 'name': 'Girl', 'freebase_id': '/m/05r655'},
- {'id': 362, 'name': 'Roller skates', 'freebase_id': '/m/02p3w7d'},
- {'id': 363, 'name': 'Dinosaur', 'freebase_id': '/m/029tx'},
- {'id': 364, 'name': 'Porch', 'freebase_id': '/m/04m6gz'},
- {'id': 365, 'name': 'Human beard', 'freebase_id': '/m/015h_t'},
- {'id': 366, 'name': 'Submarine sandwich', 'freebase_id': '/m/06pcq'},
- {'id': 367, 'name': 'Screwdriver', 'freebase_id': '/m/01bms0'},
- {'id': 368, 'name': 'Strawberry', 'freebase_id': '/m/07fbm7'},
- {'id': 369, 'name': 'Wine glass', 'freebase_id': '/m/09tvcd'},
- {'id': 370, 'name': 'Seafood', 'freebase_id': '/m/06nwz'},
- {'id': 371, 'name': 'Racket', 'freebase_id': '/m/0dv9c'},
- {'id': 372, 'name': 'Wheel', 'freebase_id': '/m/083wq'},
- {'id': 373, 'name': 'Sea lion', 'freebase_id': '/m/0gd36'},
- {'id': 374, 'name': 'Toy', 'freebase_id': '/m/0138tl'},
- {'id': 375, 'name': 'Tea', 'freebase_id': '/m/07clx'},
- {'id': 376, 'name': 'Tennis ball', 'freebase_id': '/m/05ctyq'},
- {'id': 377, 'name': 'Waste container', 'freebase_id': '/m/0bjyj5'},
- {'id': 378, 'name': 'Mule', 'freebase_id': '/m/0dbzx'},
- {'id': 379, 'name': 'Cricket ball', 'freebase_id': '/m/02ctlc'},
- {'id': 380, 'name': 'Pineapple', 'freebase_id': '/m/0fp6w'},
- {'id': 381, 'name': 'Coconut', 'freebase_id': '/m/0djtd'},
- {'id': 382, 'name': 'Doll', 'freebase_id': '/m/0167gd'},
- {'id': 383, 'name': 'Coffee table', 'freebase_id': '/m/078n6m'},
- {'id': 384, 'name': 'Snowman', 'freebase_id': '/m/0152hh'},
- {'id': 385, 'name': 'Lavender', 'freebase_id': '/m/04gth'},
- {'id': 386, 'name': 'Shrimp', 'freebase_id': '/m/0ll1f78'},
- {'id': 387, 'name': 'Maple', 'freebase_id': '/m/0cffdh'},
- {'id': 388, 'name': 'Cowboy hat', 'freebase_id': '/m/025rp__'},
- {'id': 389, 'name': 'Goggles', 'freebase_id': '/m/02_n6y'},
- {'id': 390, 'name': 'Rugby ball', 'freebase_id': '/m/0wdt60w'},
- {'id': 391, 'name': 'Caterpillar', 'freebase_id': '/m/0cydv'},
- {'id': 392, 'name': 'Poster', 'freebase_id': '/m/01n5jq'},
- {'id': 393, 'name': 'Rocket', 'freebase_id': '/m/09rvcxw'},
- {'id': 394, 'name': 'Organ', 'freebase_id': '/m/013y1f'},
- {'id': 395, 'name': 'Saxophone', 'freebase_id': '/m/06ncr'},
- {'id': 396, 'name': 'Traffic light', 'freebase_id': '/m/015qff'},
- {'id': 397, 'name': 'Cocktail', 'freebase_id': '/m/024g6'},
- {'id': 398, 'name': 'Plastic bag', 'freebase_id': '/m/05gqfk'},
- {'id': 399, 'name': 'Squash', 'freebase_id': '/m/0dv77'},
- {'id': 400, 'name': 'Mushroom', 'freebase_id': '/m/052sf'},
- {'id': 401, 'name': 'Hamburger', 'freebase_id': '/m/0cdn1'},
- {'id': 402, 'name': 'Light switch', 'freebase_id': '/m/03jbxj'},
- {'id': 403, 'name': 'Parachute', 'freebase_id': '/m/0cyfs'},
- {'id': 404, 'name': 'Teddy bear', 'freebase_id': '/m/0kmg4'},
- {'id': 405, 'name': 'Winter melon', 'freebase_id': '/m/02cvgx'},
- {'id': 406, 'name': 'Deer', 'freebase_id': '/m/09kx5'},
- {'id': 407, 'name': 'Musical keyboard', 'freebase_id': '/m/057cc'},
- {'id': 408, 'name': 'Plumbing fixture', 'freebase_id': '/m/02pkr5'},
- {'id': 409, 'name': 'Scoreboard', 'freebase_id': '/m/057p5t'},
- {'id': 410, 'name': 'Baseball bat', 'freebase_id': '/m/03g8mr'},
- {'id': 411, 'name': 'Envelope', 'freebase_id': '/m/0frqm'},
- {'id': 412, 'name': 'Adhesive tape', 'freebase_id': '/m/03m3vtv'},
- {'id': 413, 'name': 'Briefcase', 'freebase_id': '/m/0584n8'},
- {'id': 414, 'name': 'Paddle', 'freebase_id': '/m/014y4n'},
- {'id': 415, 'name': 'Bow and arrow', 'freebase_id': '/m/01g3x7'},
- {'id': 416, 'name': 'Telephone', 'freebase_id': '/m/07cx4'},
- {'id': 417, 'name': 'Sheep', 'freebase_id': '/m/07bgp'},
- {'id': 418, 'name': 'Jacket', 'freebase_id': '/m/032b3c'},
- {'id': 419, 'name': 'Boy', 'freebase_id': '/m/01bl7v'},
- {'id': 420, 'name': 'Pizza', 'freebase_id': '/m/0663v'},
- {'id': 421, 'name': 'Otter', 'freebase_id': '/m/0cn6p'},
- {'id': 422, 'name': 'Office supplies', 'freebase_id': '/m/02rdsp'},
- {'id': 423, 'name': 'Couch', 'freebase_id': '/m/02crq1'},
- {'id': 424, 'name': 'Cello', 'freebase_id': '/m/01xqw'},
- {'id': 425, 'name': 'Bull', 'freebase_id': '/m/0cnyhnx'},
- {'id': 426, 'name': 'Camel', 'freebase_id': '/m/01x_v'},
- {'id': 427, 'name': 'Ball', 'freebase_id': '/m/018xm'},
- {'id': 428, 'name': 'Duck', 'freebase_id': '/m/09ddx'},
- {'id': 429, 'name': 'Whale', 'freebase_id': '/m/084zz'},
- {'id': 430, 'name': 'Shirt', 'freebase_id': '/m/01n4qj'},
- {'id': 431, 'name': 'Tank', 'freebase_id': '/m/07cmd'},
- {'id': 432, 'name': 'Motorcycle', 'freebase_id': '/m/04_sv'},
- {'id': 433, 'name': 'Accordion', 'freebase_id': '/m/0mkg'},
- {'id': 434, 'name': 'Owl', 'freebase_id': '/m/09d5_'},
- {'id': 435, 'name': 'Porcupine', 'freebase_id': '/m/0c568'},
- {'id': 436, 'name': 'Sun hat', 'freebase_id': '/m/02wbtzl'},
- {'id': 437, 'name': 'Nail', 'freebase_id': '/m/05bm6'},
- {'id': 438, 'name': 'Scissors', 'freebase_id': '/m/01lsmm'},
- {'id': 439, 'name': 'Swan', 'freebase_id': '/m/0dftk'},
- {'id': 440, 'name': 'Lamp', 'freebase_id': '/m/0dtln'},
- {'id': 441, 'name': 'Crown', 'freebase_id': '/m/0nl46'},
- {'id': 442, 'name': 'Piano', 'freebase_id': '/m/05r5c'},
- {'id': 443, 'name': 'Sculpture', 'freebase_id': '/m/06msq'},
- {'id': 444, 'name': 'Cheetah', 'freebase_id': '/m/0cd4d'},
- {'id': 445, 'name': 'Oboe', 'freebase_id': '/m/05kms'},
- {'id': 446, 'name': 'Tin can', 'freebase_id': '/m/02jnhm'},
- {'id': 447, 'name': 'Mango', 'freebase_id': '/m/0fldg'},
- {'id': 448, 'name': 'Tripod', 'freebase_id': '/m/073bxn'},
- {'id': 449, 'name': 'Oven', 'freebase_id': '/m/029bxz'},
- {'id': 450, 'name': 'Mouse', 'freebase_id': '/m/020lf'},
- {'id': 451, 'name': 'Barge', 'freebase_id': '/m/01btn'},
- {'id': 452, 'name': 'Coffee', 'freebase_id': '/m/02vqfm'},
- {'id': 453, 'name': 'Snowboard', 'freebase_id': '/m/06__v'},
- {'id': 454, 'name': 'Common fig', 'freebase_id': '/m/043nyj'},
- {'id': 455, 'name': 'Salad', 'freebase_id': '/m/0grw1'},
- {'id': 456, 'name': 'Marine invertebrates', 'freebase_id': '/m/03hl4l9'},
- {'id': 457, 'name': 'Umbrella', 'freebase_id': '/m/0hnnb'},
- {'id': 458, 'name': 'Kangaroo', 'freebase_id': '/m/04c0y'},
- {'id': 459, 'name': 'Human arm', 'freebase_id': '/m/0dzf4'},
- {'id': 460, 'name': 'Measuring cup', 'freebase_id': '/m/07v9_z'},
- {'id': 461, 'name': 'Snail', 'freebase_id': '/m/0f9_l'},
- {'id': 462, 'name': 'Loveseat', 'freebase_id': '/m/0703r8'},
- {'id': 463, 'name': 'Suit', 'freebase_id': '/m/01xyhv'},
- {'id': 464, 'name': 'Teapot', 'freebase_id': '/m/01fh4r'},
- {'id': 465, 'name': 'Bottle', 'freebase_id': '/m/04dr76w'},
- {'id': 466, 'name': 'Alpaca', 'freebase_id': '/m/0pcr'},
- {'id': 467, 'name': 'Kettle', 'freebase_id': '/m/03s_tn'},
- {'id': 468, 'name': 'Trousers', 'freebase_id': '/m/07mhn'},
- {'id': 469, 'name': 'Popcorn', 'freebase_id': '/m/01hrv5'},
- {'id': 470, 'name': 'Centipede', 'freebase_id': '/m/019h78'},
- {'id': 471, 'name': 'Spider', 'freebase_id': '/m/09kmb'},
- {'id': 472, 'name': 'Sparrow', 'freebase_id': '/m/0h23m'},
- {'id': 473, 'name': 'Plate', 'freebase_id': '/m/050gv4'},
- {'id': 474, 'name': 'Bagel', 'freebase_id': '/m/01fb_0'},
- {'id': 475, 'name': 'Personal care', 'freebase_id': '/m/02w3_ws'},
- {'id': 476, 'name': 'Apple', 'freebase_id': '/m/014j1m'},
- {'id': 477, 'name': 'Brassiere', 'freebase_id': '/m/01gmv2'},
- {'id': 478, 'name': 'Bathroom cabinet', 'freebase_id': '/m/04y4h8h'},
- {'id': 479, 'name': 'studio couch', 'freebase_id': '/m/026qbn5'},
- {'id': 480, 'name': 'Computer keyboard', 'freebase_id': '/m/01m2v'},
- {'id': 481, 'name': 'Table tennis racket', 'freebase_id': '/m/05_5p_0'},
- {'id': 482, 'name': 'Sushi', 'freebase_id': '/m/07030'},
- {'id': 483, 'name': 'Cabinetry', 'freebase_id': '/m/01s105'},
- {'id': 484, 'name': 'Street light', 'freebase_id': '/m/033rq4'},
- {'id': 485, 'name': 'Towel', 'freebase_id': '/m/0162_1'},
- {'id': 486, 'name': 'Nightstand', 'freebase_id': '/m/02z51p'},
- {'id': 487, 'name': 'Rabbit', 'freebase_id': '/m/06mf6'},
- {'id': 488, 'name': 'Dolphin', 'freebase_id': '/m/02hj4'},
- {'id': 489, 'name': 'Dog', 'freebase_id': '/m/0bt9lr'},
- {'id': 490, 'name': 'Jug', 'freebase_id': '/m/08hvt4'},
- {'id': 491, 'name': 'Wok', 'freebase_id': '/m/084rd'},
- {'id': 492, 'name': 'Fire hydrant', 'freebase_id': '/m/01pns0'},
- {'id': 493, 'name': 'Human eye', 'freebase_id': '/m/014sv8'},
- {'id': 494, 'name': 'Skyscraper', 'freebase_id': '/m/079cl'},
- {'id': 495, 'name': 'Backpack', 'freebase_id': '/m/01940j'},
- {'id': 496, 'name': 'Potato', 'freebase_id': '/m/05vtc'},
- {'id': 497, 'name': 'Paper towel', 'freebase_id': '/m/02w3r3'},
- {'id': 498, 'name': 'Lifejacket', 'freebase_id': '/m/054xkw'},
- {'id': 499, 'name': 'Bicycle wheel', 'freebase_id': '/m/01bqk0'},
- {'id': 500, 'name': 'Toilet', 'freebase_id': '/m/09g1w'},
-]
-
-
-def _get_builtin_metadata(cats):
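-    # Open Images category ids above are 1-based and contiguous, so the
-    # contiguous (0-based) training id for category id k is simply k - 1.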
- id_to_name = {x['id']: x['name'] for x in cats}
- thing_dataset_id_to_contiguous_id = {i + 1: i for i in range(len(cats))}
- thing_classes = [x['name'] for x in sorted(cats, key=lambda x: x['id'])]
- return {
- "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
- "thing_classes": thing_classes}
-
-_PREDEFINED_SPLITS_OID = {
- # cat threshold: 500, 1500: r 170, c 151, f 179
- "oid_train": ("oid/images/", "oid/annotations/oid_challenge_2019_train_bbox.json"),
-    # "expanded" duplicates annotations to their parent classes based on the official
-    # hierarchy. This is used in the official evaluation protocol.
- # https://storage.googleapis.com/openimages/web/evaluation.html
- "oid_val_expanded": ("oid/images/validation/", "oid/annotations/oid_challenge_2019_val_expanded.json"),
- "oid_val_expanded_rare": ("oid/images/validation/", "oid/annotations/oid_challenge_2019_val_expanded_rare.json"),
-}
-
-
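-# json_file entries that are full URLs (contain "://") are used as-is; local
-# paths are resolved under the "datasets/" directory.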
-for key, (image_root, json_file) in _PREDEFINED_SPLITS_OID.items():
- register_oid_instances(
- key,
- _get_builtin_metadata(categories),
- os.path.join("datasets", json_file) if "://" not in json_file else json_file,
- os.path.join("datasets", image_root),
- )
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan/stylegan_human/training/__init__.py b/spaces/DragGan/DragGan/stylegan_human/training/__init__.py
deleted file mode 100644
index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/stylegan_human/training/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-# empty
diff --git a/spaces/EPFL-VILAB/MultiMAE/dpt/__init__.py b/spaces/EPFL-VILAB/MultiMAE/dpt/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/EronSamez/RVC_HFmeu/tools/dlmodels.bat b/spaces/EronSamez/RVC_HFmeu/tools/dlmodels.bat
deleted file mode 100644
index 5d80f50369b1f3ed37c045d07a9e2ce8954f09d4..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/tools/dlmodels.bat
+++ /dev/null
@@ -1,348 +0,0 @@
-@echo off && chcp 65001
-
-echo working dir is %cd%
-echo checking for required download tool aria2.
-echo=
-dir /a:d/b | findstr "aria2" > flag.txt
-findstr "aria2" flag.txt >nul
-if %errorlevel% ==0 (
- echo aria2 checked.
- echo=
-) else (
-    echo failed. please download aria2 from the webpage!
-    echo unzip it and put it in this directory!
- timeout /T 5
- start https://github.com/aria2/aria2/releases/tag/release-1.36.0
- echo=
- goto end
-)
-
-echo envfiles checking start.
-echo=
-
-for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch
-:endSch
-
-set d32=f0D32k.pth
-set d40=f0D40k.pth
-set d48=f0D48k.pth
-set g32=f0G32k.pth
-set g40=f0G40k.pth
-set g48=f0G48k.pth
-
-set d40v2=f0D40k.pth
-set g40v2=f0G40k.pth
-
-set dld32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth
-set dld40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth
-set dld48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth
-set dlg32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth
-set dlg40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth
-set dlg48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth
-
-set dld40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth
-set dlg40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth
-
-set hp2_all=HP2_all_vocals.pth
-set hp3_all=HP3_all_vocals.pth
-set hp5_only=HP5_only_main_vocal.pth
-set VR_DeEchoAggressive=VR-DeEchoAggressive.pth
-set VR_DeEchoDeReverb=VR-DeEchoDeReverb.pth
-set VR_DeEchoNormal=VR-DeEchoNormal.pth
-set onnx_dereverb=vocals.onnx
-
-set dlhp2_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth
-set dlhp3_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth
-set dlhp5_only=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth
-set dlVR_DeEchoAggressive=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth
-set dlVR_DeEchoDeReverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth
-set dlVR_DeEchoNormal=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth
-set dlonnx_dereverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
-
-set hb=hubert_base.pt
-
-set dlhb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt
-
-echo dir check start.
-echo=
-
-if exist "%~dp0assets\pretrained" (
- echo dir .\assets\pretrained checked.
- ) else (
- echo failed. generating dir .\assets\pretrained.
-        mkdir "%~dp0assets\pretrained"
- )
-if exist "%~dp0assets\pretrained_v2" (
- echo dir .\assets\pretrained_v2 checked.
- ) else (
- echo failed. generating dir .\assets\pretrained_v2.
-        mkdir "%~dp0assets\pretrained_v2"
- )
-if exist "%~dp0assets\uvr5_weights" (
- echo dir .\assets\uvr5_weights checked.
- ) else (
- echo failed. generating dir .\assets\uvr5_weights.
-        mkdir "%~dp0assets\uvr5_weights"
- )
-if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy" (
- echo dir .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy checked.
- ) else (
- echo failed. generating dir .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy.
-        mkdir "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy"
- )
-
-echo=
-echo dir check finished.
-
-echo=
-echo required files check start.
-
-echo checking D32k.pth
-if exist "%~dp0assets\pretrained\D32k.pth" (
- echo D32k.pth in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0assets\pretrained -o D32k.pth
- if exist "%~dp0assets\pretrained\D32k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking D40k.pth
-if exist "%~dp0assets\pretrained\D40k.pth" (
- echo D40k.pth in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0assets\pretrained -o D40k.pth
- if exist "%~dp0assets\pretrained\D40k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking D40k.pth
-if exist "%~dp0assets\pretrained_v2\D40k.pth" (
- echo D40k.pth in .\assets\pretrained_v2 checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d %~dp0assets\pretrained_v2 -o D40k.pth
- if exist "%~dp0assets\pretrained_v2\D40k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking D48k.pth
-if exist "%~dp0assets\pretrained\D48k.pth" (
- echo D48k.pth in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0assets\pretrained -o D48k.pth
- if exist "%~dp0assets\pretrained\D48k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking G32k.pth
-if exist "%~dp0assets\pretrained\G32k.pth" (
- echo G32k.pth in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0assets\pretrained -o G32k.pth
- if exist "%~dp0assets\pretrained\G32k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking G40k.pth
-if exist "%~dp0assets\pretrained\G40k.pth" (
- echo G40k.pth in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0assets\pretrained -o G40k.pth
- if exist "%~dp0assets\pretrained\G40k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking G40k.pth
-if exist "%~dp0assets\pretrained_v2\G40k.pth" (
- echo G40k.pth in .\assets\pretrained_v2 checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d %~dp0assets\pretrained_v2 -o G40k.pth
- if exist "%~dp0assets\pretrained_v2\G40k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking G48k.pth
-if exist "%~dp0assets\pretrained\G48k.pth" (
- echo G48k.pth in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0assets\pretrained -o G48k.pth
- if exist "%~dp0assets\pretrained\G48k.pth" (echo download successful.) else (echo please try again!
- echo=)
- )
-
-echo checking %d32%
-if exist "%~dp0assets\pretrained\%d32%" (
- echo %d32% in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0assets\pretrained -o %d32%
- if exist "%~dp0assets\pretrained\%d32%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %d40%
-if exist "%~dp0assets\pretrained\%d40%" (
- echo %d40% in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0assets\pretrained -o %d40%
- if exist "%~dp0assets\pretrained\%d40%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %d40v2%
-if exist "%~dp0assets\pretrained_v2\%d40v2%" (
- echo %d40v2% in .\assets\pretrained_v2 checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40v2% -d %~dp0assets\pretrained_v2 -o %d40v2%
- if exist "%~dp0assets\pretrained_v2\%d40v2%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %d48%
-if exist "%~dp0assets\pretrained\%d48%" (
- echo %d48% in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0assets\pretrained -o %d48%
- if exist "%~dp0assets\pretrained\%d48%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %g32%
-if exist "%~dp0assets\pretrained\%g32%" (
- echo %g32% in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0assets\pretrained -o %g32%
- if exist "%~dp0assets\pretrained\%g32%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %g40%
-if exist "%~dp0assets\pretrained\%g40%" (
- echo %g40% in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0assets\pretrained -o %g40%
- if exist "%~dp0assets\pretrained\%g40%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %g40v2%
-if exist "%~dp0assets\pretrained_v2\%g40v2%" (
- echo %g40v2% in .\assets\pretrained_v2 checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40v2% -d %~dp0assets\pretrained_v2 -o %g40v2%
- if exist "%~dp0assets\pretrained_v2\%g40v2%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %g48%
-if exist "%~dp0assets\pretrained\%g48%" (
- echo %g48% in .\assets\pretrained checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0assets\pretrained -o %g48%
- if exist "%~dp0assets\pretrained\%g48%" (echo download successful.) else (echo please try again!
- echo=)
- )
-
-echo checking %hp2_all%
-if exist "%~dp0assets\uvr5_weights\%hp2_all%" (
- echo %hp2_all% in .\assets\uvr5_weights checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2_all% -d %~dp0assets\uvr5_weights -o %hp2_all%
- if exist "%~dp0assets\uvr5_weights\%hp2_all%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %hp3_all%
-if exist "%~dp0assets\uvr5_weights\%hp3_all%" (
- echo %hp3_all% in .\assets\uvr5_weights checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp3_all% -d %~dp0assets\uvr5_weights -o %hp3_all%
- if exist "%~dp0assets\uvr5_weights\%hp3_all%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %hp5_only%
-if exist "%~dp0assets\uvr5_weights\%hp5_only%" (
- echo %hp5_only% in .\assets\uvr5_weights checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5_only% -d %~dp0assets\uvr5_weights -o %hp5_only%
- if exist "%~dp0assets\uvr5_weights\%hp5_only%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %VR_DeEchoAggressive%
-if exist "%~dp0assets\uvr5_weights\%VR_DeEchoAggressive%" (
- echo %VR_DeEchoAggressive% in .\assets\uvr5_weights checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoAggressive% -d %~dp0assets\uvr5_weights -o %VR_DeEchoAggressive%
- if exist "%~dp0assets\uvr5_weights\%VR_DeEchoAggressive%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %VR_DeEchoDeReverb%
-if exist "%~dp0assets\uvr5_weights\%VR_DeEchoDeReverb%" (
- echo %VR_DeEchoDeReverb% in .\assets\uvr5_weights checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoDeReverb% -d %~dp0assets\uvr5_weights -o %VR_DeEchoDeReverb%
- if exist "%~dp0assets\uvr5_weights\%VR_DeEchoDeReverb%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %VR_DeEchoNormal%
-if exist "%~dp0assets\uvr5_weights\%VR_DeEchoNormal%" (
- echo %VR_DeEchoNormal% in .\assets\uvr5_weights checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoNormal% -d %~dp0assets\uvr5_weights -o %VR_DeEchoNormal%
- if exist "%~dp0assets\uvr5_weights\%VR_DeEchoNormal%" (echo download successful.) else (echo please try again!
- echo=)
- )
-echo checking %onnx_dereverb%
-if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (
- echo %onnx_dereverb% in .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlonnx_dereverb% -d %~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy -o %onnx_dereverb%
- if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (echo download successful.) else (echo please try again!
- echo=)
- )
-
-echo checking %hb%
-if exist "%~dp0assets\hubert\%hb%" (
-    echo %hb% in .\assets\hubert checked.
- echo=
- ) else (
- echo failed. starting download from huggingface.
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0assets\hubert\ -o %hb%
- if exist "%~dp0assets\hubert\%hb%" (echo download successful.) else (echo please try again!
- echo=)
- )
-
-echo required files check finished.
-echo envfiles check complete.
-pause
-:end
-del flag.txt
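The batch script above is essentially a check-then-download loop over a fixed list of files hosted on the Hugging Face Hub. As a point of comparison (not part of the original Space), a minimal Python sketch of the same logic using `huggingface_hub.hf_hub_download` might look like the following; the file list is abbreviated, and depending on your `huggingface_hub` version `local_dir` may produce symlinks rather than plain copies.

```python
from pathlib import Path

from huggingface_hub import hf_hub_download

REPO_ID = "lj1995/VoiceConversionWebUI"

# (repo-relative file, local base directory) pairs, abbreviated from the list above.
FILES = [
    ("pretrained/f0D32k.pth", "assets"),
    ("pretrained_v2/f0D40k.pth", "assets"),
    ("uvr5_weights/HP2_all_vocals.pth", "assets"),
    ("uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx", "assets"),
    ("hubert_base.pt", "assets/hubert"),
]

for filename, local_dir in FILES:
    target = Path(local_dir) / filename
    if target.exists():
        print(f"{target} checked.")
        continue
    print(f"{target} missing, downloading from the Hugging Face Hub...")
    hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir=local_dir)
```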
diff --git a/spaces/GXSA/bingo/cloudflare/worker.js b/spaces/GXSA/bingo/cloudflare/worker.js
deleted file mode 100644
index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000
--- a/spaces/GXSA/bingo/cloudflare/worker.js
+++ /dev/null
@@ -1,18 +0,0 @@
-const TRAGET_HOST='hf4all-bingo.hf.space' // Change this domain to your own; you can find it under Settings > Site Domain.
-
-export default {
- async fetch(request) {
- const uri = new URL(request.url);
- if (uri.protocol === 'http:') {
- uri.protocol = 'https:';
- return new Response('', {
- status: 301,
- headers: {
- location: uri.toString(),
- },
- })
- }
- uri.host = TRAGET_HOST
- return fetch(new Request(uri.toString(), request));
- },
-};
diff --git a/spaces/GXSA/bingo/src/lib/isomorphic/browser.ts b/spaces/GXSA/bingo/src/lib/isomorphic/browser.ts
deleted file mode 100644
index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000
--- a/spaces/GXSA/bingo/src/lib/isomorphic/browser.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-'use client'
-
-const debug = console.info.bind(console)
-
-class WebSocketAlias extends WebSocket {
- constructor(address: string | URL, ...args: any) {
- super(address)
- }
-}
-
-export default { fetch, WebSocket: WebSocketAlias, debug }
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/stack_three_layer_red_wall.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/stack_three_layer_red_wall.py
deleted file mode 100644
index 023aa4cf13f1b9715dc7db303b81b70f691f1abe..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/stack_three_layer_red_wall.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import numpy as np
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-class StackThreeLayerRedWall(Task):
- """Build a wall by stacking blocks. The wall should consist of three layers with each layer having three red blocks aligned in a straight line."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 15
- self.lang_template = "stack the red blocks to form a three-layer wall"
- self.task_completed_desc = "done stacking blocks."
- self.additional_reset()
-
- def reset(self, env):
- super().reset(env)
-
- # Add blocks.
- block_size = (0.05, 0.05, 0.03) # x, y, z dimensions for the block size
- block_urdf = 'block/block_for_anchors.urdf' # URDF for the block
- block_color = utils.COLORS['red'] # Color for the block
-
- # We need 9 blocks for a three-layer wall with each layer having three blocks.
- blocks = []
- for _ in range(9):
- block_pose = self.get_random_pose(env, block_size)
- block_id = env.add_object(block_urdf, block_pose, color=block_color)
- blocks.append(block_id)
-
- # Define target poses for the blocks to form a three-layer wall.
- # The target poses are defined relative to a base pose.
- base_pose = ((0.5, 0.0, 0.0), (0, 0, 0, 1))
- target_poses = []
-        for i in range(3):  # three layers
-            for j in range(3):  # three blocks per layer
-                target_pos = (j * block_size[0], 0, i * block_size[2])
-                target_pose = (utils.apply(base_pose, target_pos), (0, 0, 0, 1))
-                target_poses.append(target_pose)
-
-            # Goal: place this layer of three blocks, building up the three-layer wall.
-            self.add_goal(objs=blocks[3*i:3*(i+1)], matches=np.ones((3, 3)), targ_poses=target_poses[3*i:3*(i+1)], replace=False,
-                          rotations=True, metric='pose', params=None, step_max_reward=1 / 3., language_goal=self.lang_template)
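For reference, the target layout used by this task is just a 3 × 3 grid of offsets: one block width along x within a layer and one block height along z per layer. The standalone snippet below (illustrative only, no simulator needed) prints those nine relative positions.

```python
# Relative target positions for the 3x3 red-block wall, mirroring the loops above.
block_size = (0.05, 0.05, 0.03)  # x, y, z block dimensions, as in the task

for i in range(3):  # layer index (height along z)
    layer = [(j * block_size[0], 0.0, i * block_size[2]) for j in range(3)]
    print(f"layer {i}: {layer}")
```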
diff --git a/spaces/Godrose0728/sound-link/mel_processing.py b/spaces/Godrose0728/sound-link/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/Godrose0728/sound-link/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
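Below is a usage sketch for the helpers above, assuming the file is importable as `mel_processing`; the STFT/mel parameters (typical of VITS-style setups) are stand-ins for whatever this Space's own config actually uses.

```python
import torch

from mel_processing import (mel_spectrogram_torch, spec_to_mel_torch,
                            spectrogram_torch)

# Illustrative parameters; the Space's real values come from its own config.
sampling_rate = 22050
n_fft, hop_size, win_size = 1024, 256, 1024
num_mels, fmin, fmax = 80, 0.0, None

# One second of fake mono audio in [-1, 1], shaped (batch, samples).
y = torch.rand(1, sampling_rate) * 2 - 1

spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False)
mel = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)

# Equivalent single call producing the mel spectrogram directly.
mel_direct = mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate,
                                   hop_size, win_size, fmin, fmax, center=False)

print(spec.shape)  # (1, n_fft // 2 + 1, frames)
print(mel.shape)   # (1, num_mels, frames)
```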
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py
deleted file mode 100644
index 18daadd6a9d3024f30157aea1f1cef3e13326b5a..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py
+++ /dev/null
@@ -1,13 +0,0 @@
-_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://resnext101_32x4d',
- backbone=dict(
- type='ResNeXt',
- depth=101,
- groups=32,
- base_width=4,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- style='pytorch'))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py
deleted file mode 100644
index e4215a6d2d0b90f8ccd9c1291f6ca222c0ff554f..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py
+++ /dev/null
@@ -1,136 +0,0 @@
-_base_ = '../htc/htc_r50_fpn_1x_coco.py'
-# model settings
-model = dict(
- type='SCNet',
- roi_head=dict(
- _delete_=True,
- type='SCNetRoIHead',
- num_stages=3,
- stage_loss_weights=[1, 0.5, 0.25],
- bbox_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32]),
- bbox_head=[
- dict(
- type='SCNetBBoxHead',
- num_shared_fcs=2,
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=True,
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
- loss_weight=1.0)),
- dict(
- type='SCNetBBoxHead',
- num_shared_fcs=2,
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.05, 0.05, 0.1, 0.1]),
- reg_class_agnostic=True,
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
- loss_weight=1.0)),
- dict(
- type='SCNetBBoxHead',
- num_shared_fcs=2,
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.033, 0.033, 0.067, 0.067]),
- reg_class_agnostic=True,
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
- ],
- mask_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32]),
- mask_head=dict(
- type='SCNetMaskHead',
- num_convs=12,
- in_channels=256,
- conv_out_channels=256,
- num_classes=80,
- conv_to_res=True,
- loss_mask=dict(
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
- semantic_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[8]),
- semantic_head=dict(
- type='SCNetSemanticHead',
- num_ins=5,
- fusion_level=1,
- num_convs=4,
- in_channels=256,
- conv_out_channels=256,
- num_classes=183,
- ignore_label=255,
- loss_weight=0.2,
- conv_to_res=True),
- glbctx_head=dict(
- type='GlobalContextHead',
- num_convs=4,
- in_channels=256,
- conv_out_channels=256,
- num_classes=80,
- loss_weight=3.0,
- conv_to_res=True),
- feat_relay_head=dict(
- type='FeatureRelayHead',
- in_channels=1024,
- out_conv_channels=256,
- roi_feat_size=7,
- scale_factor=2)))
-
-# uncomment below code to enable test time augmentations
-# img_norm_cfg = dict(
-# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-# test_pipeline = [
-# dict(type='LoadImageFromFile'),
-# dict(
-# type='MultiScaleFlipAug',
-# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800),
-# (1400, 2100)],
-# flip=True,
-# transforms=[
-# dict(type='Resize', keep_ratio=True),
-# dict(type='RandomFlip', flip_ratio=0.5),
-# dict(type='Normalize', **img_norm_cfg),
-# dict(type='Pad', size_divisor=32),
-# dict(type='ImageToTensor', keys=['img']),
-# dict(type='Collect', keys=['img']),
-# ])
-# ]
-# data = dict(
-# val=dict(pipeline=test_pipeline),
-# test=dict(pipeline=test_pipeline))
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py
deleted file mode 100644
index d0e7e14b7e72b1151f7d7f19094430bbab64f8f0..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/fixed_schedule.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass, field
-from typing import Optional, List
-from omegaconf import II
-
-from fairseq.dataclass import FairseqDataclass
-from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
-
-
-@dataclass
-class FixedLRScheduleConfig(FairseqDataclass):
- force_anneal: Optional[int] = field(
- default=None,
- metadata={"help": "force annealing at specified epoch"},
- )
- lr_shrink: float = field(
- default=0.1,
- metadata={"help": "shrink factor for annealing, lr_new = (lr * lr_shrink)"},
- )
- warmup_updates: int = field(
- default=0,
- metadata={"help": "warmup the learning rate linearly for the first N updates"},
- )
- lr: List[float] = II("optimization.lr")
-
-
-@register_lr_scheduler("fixed", dataclass=FixedLRScheduleConfig)
-class FixedLRSchedule(FairseqLRScheduler):
- """Decay the LR on a fixed schedule."""
-
- def __init__(self, cfg: FixedLRScheduleConfig, optimizer):
- super().__init__(cfg, optimizer)
-
- self.lr = cfg.lr[0]
- if cfg.warmup_updates > 0:
- self.warmup_factor = 1.0 / cfg.warmup_updates
- else:
- self.warmup_factor = 1
-
- def state_dict(self):
- return {"lr": self.lr}
-
- def load_state_dict(self, state_dict):
- if "lr" in state_dict:
- self.lr = state_dict["lr"]
-
- def get_next_lr(self, epoch):
- lrs = self.cfg.lr
- if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal:
- # use fixed LR schedule
- next_lr = lrs[min(epoch - 1, len(lrs) - 1)]
- else:
-            # anneal based on lr_shrink
- next_lr = lrs[-1] * self.cfg.lr_shrink ** (
- epoch + 1 - self.cfg.force_anneal
- )
- return next_lr
-
- def step_begin_epoch(self, epoch):
- """Update the learning rate at the beginning of the given epoch."""
- self.lr = self.get_next_lr(epoch)
- self.optimizer.set_lr(self.warmup_factor * self.lr)
- return self.optimizer.get_lr()
-
- def step_update(self, num_updates):
- """Update the learning rate after each update."""
- if self.cfg.warmup_updates > 0 and num_updates < self.cfg.warmup_updates:
- self.warmup_factor = (num_updates + 1) / float(self.cfg.warmup_updates)
- self.optimizer.set_lr(self.warmup_factor * self.lr)
- else:
- self.optimizer.set_lr(self.lr)
- return self.optimizer.get_lr()
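The scheduler above reduces to a per-epoch lookup into the configured `lr` list (with shrink-based annealing once `force_anneal` is reached) plus a linear warmup over the first updates. A dependency-free sketch of that arithmetic, with made-up values, is shown below; it mirrors `get_next_lr`/`step_update` but is not the fairseq API.

```python
def fixed_schedule_lr(epoch, num_updates, lrs, lr_shrink=0.1,
                      force_anneal=None, warmup_updates=0):
    """Mirror of get_next_lr/step_update above; illustrative, not the fairseq API."""
    # Per-epoch base LR: fixed lookup until force_anneal, then shrink-based annealing.
    if force_anneal is None or epoch < force_anneal:
        base = lrs[min(epoch - 1, len(lrs) - 1)]
    else:
        base = lrs[-1] * lr_shrink ** (epoch + 1 - force_anneal)
    # Linear warmup over the first warmup_updates optimizer steps.
    if warmup_updates > 0 and num_updates < warmup_updates:
        return base * (num_updates + 1) / float(warmup_updates)
    return base


# Illustrative values: lr=[0.1], anneal from epoch 5, warmup over 100 updates.
for epoch in (1, 4, 5, 6):
    print(epoch, fixed_schedule_lr(epoch, num_updates=1000, lrs=[0.1],
                                   force_anneal=5, warmup_updates=100))
```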
diff --git a/spaces/Harveenchadha/oiTrans/model_configs/__init__.py b/spaces/Harveenchadha/oiTrans/model_configs/__init__.py
deleted file mode 100644
index 2ec41f7daeb7930e9df766abdd790c4c5b09b6d9..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/oiTrans/model_configs/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import custom_transformer
\ No newline at end of file
diff --git a/spaces/Hazzzardous/RWKV-Instruct/app.py b/spaces/Hazzzardous/RWKV-Instruct/app.py
deleted file mode 100644
index 1c64cbd3044c4e4a35872be18c2b011a461a2512..0000000000000000000000000000000000000000
--- a/spaces/Hazzzardous/RWKV-Instruct/app.py
+++ /dev/null
@@ -1,297 +0,0 @@
-"""
-RWKV RNN Model - Gradio Space for HuggingFace
-YT - Mean Gene Hacks - https://www.youtube.com/@MeanGeneHacks
-(C) Gene Ruebsamen - 2/7/2023
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see .
-"""
-
-import gradio as gr
-import codecs
-from ast import literal_eval
-from datetime import datetime
-from rwkvstic.load import RWKV
-from config import config, title
-import torch
-import gc
-
-DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-
-desc = '''
-RNN with Transformer-level LLM Performance (github).
- According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding."'''
-
-thanks = '''
-'''
-
-
-def to_md(text):
- return text.replace("\n", " ")
-
-
-def get_model():
- model = None
- model = RWKV(
- **config
- )
- return model
-
-
-model = get_model()
-
-
-
-def infer(
- prompt,
- mode="generative",
- max_new_tokens=10,
- temperature=0.1,
- top_p=1.0,
- stop="<|endoftext|>",
- end_adj=0.0,
- seed=42,
-):
- global model
-
- if model == None:
- gc.collect()
- if (DEVICE == "cuda"):
- torch.cuda.empty_cache()
- model = get_model()
-
- max_new_tokens = int(max_new_tokens)
- temperature = float(temperature)
- end_adj = float(end_adj)
- top_p = float(top_p)
- stop = [x.strip(' ') for x in stop.split(',')]
- seed = seed
-
- assert 1 <= max_new_tokens <= 512
- assert 0.0 <= temperature <= 5.0
- assert 0.0 <= top_p <= 1.0
-
- temperature = max(0.05, temperature)
- if prompt == "":
- prompt = " "
-
- # Clear model state for generative mode
- model.resetState()
- if (mode == "Q/A"):
- prompt = f"\nQ: {prompt}\n\nA:"
- if (mode == "ELDR"):
- prompt = f"\n{prompt}\n\nExpert Long Detailed Response:\n\nHi, thanks for reaching out, we would be happy to answer your question"
- if (mode == "Expert"):
- prompt = f"\n{prompt}\n\nExpert Full Response:\n\nHi, thanks for reaching out, we would be happy to answer your question.\n"
- if (mode == "EFA"):
- prompt = f'\nAsk Expert\n\nQuestion:\n{prompt}\n\nExpert Full Answer:\n'
- if (mode == "BFR"):
- prompt = f"Task given:\n\n{prompt}\n\nBest Full Response:"
-
- print(f"PROMPT ({datetime.now()}):\n-------\n{prompt}")
- print(f"OUTPUT ({datetime.now()}):\n-------\n")
- # Load prompt
- model.loadContext(newctx=prompt)
- generated_text = ""
- done = False
- with torch.no_grad():
- for _ in range(max_new_tokens):
- char = model.forward(stopStrings=stop, temp=temperature, top_p_usual=top_p, end_adj=end_adj)[
- "output"]
- print(char, end='', flush=True)
- generated_text += char
- generated_text = generated_text.lstrip("\n ")
-
- for stop_word in stop:
- stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0]
- if stop_word != '' and stop_word in generated_text:
- done = True
- break
- yield generated_text
- if done:
- print("\n")
- break
-
- # print(f"{generated_text}")
-
- for stop_word in stop:
- stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0]
- if stop_word != '' and stop_word in generated_text:
- generated_text = generated_text[:generated_text.find(stop_word)]
-
- gc.collect()
- yield generated_text
-username = "USER"
-intro = f'''The following is a verbose and detailed conversation between an AI assistant called FRITZ, and a human user called USER. FRITZ is intelligent, knowledgeable, wise and polite.
-
- {username}: What year was the french revolution?
- FRITZ: The French Revolution started in 1789, and lasted 10 years until 1799.
- {username}: 3+5=?
- FRITZ: The answer is 8.
- {username}: What year did the Berlin Wall fall?
- FRITZ: The Berlin wall stood for 28 years and fell in 1989.
- {username}: solve for a: 9-a=2
- FRITZ: The answer is a=7, because 9-7 = 2.
- {username}: wat is lhc
- FRITZ: The Large Hadron Collider (LHC) is a high-energy particle collider, built by CERN, and completed in 2008. It was used to confirm the existence of the Higgs boson in 2012.
- {username}: Tell me about yourself.
- FRITZ: My name is Fritz. I am an RNN based Large Language Model (LLM).
- '''
-model.resetState()
-model.loadContext(newctx=intro)
-chatState = model.getState()
-model.resetState()
-def chat(
- prompt,
- history,
- max_new_tokens=10,
- temperature=0.1,
- top_p=1.0,
- seed=42,
-):
- global model
- global username
- history = history or []
-
- intro = ""
-
- if model == None:
- gc.collect()
- if (DEVICE == "cuda"):
- torch.cuda.empty_cache()
- model = get_model()
-
- username = username.strip()
- username = username or "USER"
-
-
-
- if len(history) == 0:
- # no history, so lets reset chat state
- model.setState(chatState)
- history = [[], model.emptyState]
- print("reset chat state")
- else:
- if (history[0][0][0].split(':')[0] != username):
- model.setState((chatState[0],chatState[1].clone()))
-            history = [[], model.getState()]
- print("username changed, reset state")
- else:
- model.setState((history[1][0],history[1][1].clone()))
- intro = ""
-
- max_new_tokens = int(max_new_tokens)
- temperature = float(temperature)
- top_p = float(top_p)
- seed = seed
-
- assert 1 <= max_new_tokens <= 512
- assert 0.0 <= temperature <= 3.0
- assert 0.0 <= top_p <= 1.0
-
- temperature = max(0.05, temperature)
-
- prompt = f"{username}: " + prompt + "\n"
- print(f"CHAT ({datetime.now()}):\n-------\n{prompt}")
- print(f"OUTPUT ({datetime.now()}):\n-------\n")
- # Load prompt
-
- model.loadContext(newctx=prompt)
-
- out = model.forward(number=max_new_tokens, stopStrings=[
- "<|endoftext|>", username+":"], temp=temperature, top_p_usual=top_p)
-
- generated_text = out["output"].lstrip("\n ")
- generated_text = generated_text.rstrip(username+":")
- print(f"{generated_text}")
-
- gc.collect()
- history[0].append((prompt, generated_text))
- return history[0], [history[0], out["state"]]
-
-
-examples = [
- [
- # Question Answering
- '''What is the capital of Germany?''', "Q/A", 25, 0.2, 1.0, "<|endoftext|>"],
- [
- # Question Answering
- '''Are humans good or bad?''', "Q/A", 150, 0.8, 0.8, "<|endoftext|>"],
- [
- # Question Answering
- '''What is the purpose of Vitamin A?''', "Q/A", 50, 0.2, 0.8, "<|endoftext|>"],
- [
- # Chatbot
- '''This is a conversation between two AI large language models named Alex and Fritz. They are exploring each other's capabilities, and trying to ask interesting questions of one another to explore the limits of each others AI.
-
-Conversation:
-Alex: Good morning, Fritz, what type of LLM are you based upon?
-Fritz: Morning Alex, I am an RNN with transformer level performance. My language model is 100% attention free.
-Alex:''', "generative", 220, 0.9, 0.9, "\\n\\n,<|endoftext|>"],
- [
- # Generate List
- '''Task given:
-
-Please Write a Short story about a cat learning python
-
-Best Full Response:
-''', "generative", 140, 0.85, 0.8, "<|endoftext|>"],
- [
- # Natural Language Interface
- '''Here is a short story (in the style of Tolkien) in which Aiden attacks a robot with a sword:
- ''', "generative", 140, 0.85, 0.8, "<|endoftext|>"]
-]
-
-
-iface = gr.Interface(
- fn=infer,
- description=f'''
-Refresh page or change name to reset memory context
-{desc}{thanks}''',
- allow_flagging="never",
- inputs=[
- gr.Textbox(lines=5, label="Message"), # prompt
- "state",
- gr.Slider(1, 256, value=60), # max_tokens
- gr.Slider(0.0, 1.0, value=0.8), # temperature
- gr.Slider(0.0, 1.0, value=0.85) # top_p
- ],
- outputs=[gr.Chatbot(label="Chat Log", color_map=(
- "green", "pink")), "state"],
-).queue()
-
-demo = gr.TabbedInterface(
-
- [iface, chatiface], ["Q/A", "Chatbot"],
- title=title,
-
-)
-
-demo.queue()
-demo.launch(share=False)
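The `infer` function above selects one of a handful of fixed prompt templates based on `mode`. For readability, the same mapping can be restated as a small lookup table; this is an illustrative equivalent of the if-chain, not code from the Space.

```python
# Prompt templates corresponding to the modes handled in infer() above.
PROMPT_TEMPLATES = {
    "Q/A":    "\nQ: {prompt}\n\nA:",
    "ELDR":   "\n{prompt}\n\nExpert Long Detailed Response:\n\nHi, thanks for reaching out, we would be happy to answer your question",
    "Expert": "\n{prompt}\n\nExpert Full Response:\n\nHi, thanks for reaching out, we would be happy to answer your question.\n",
    "EFA":    "\nAsk Expert\n\nQuestion:\n{prompt}\n\nExpert Full Answer:\n",
    "BFR":    "Task given:\n\n{prompt}\n\nBest Full Response:",
}


def build_prompt(prompt: str, mode: str = "generative") -> str:
    """Return the templated prompt; 'generative' (or any unknown mode) passes through."""
    template = PROMPT_TEMPLATES.get(mode)
    return template.format(prompt=prompt) if template else prompt


print(build_prompt("What year was the french revolution?", "Q/A"))
```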
diff --git a/spaces/HemanthSai7/IntelligentQuestionGenerator/src/Pipeline/QAhaystack.py b/spaces/HemanthSai7/IntelligentQuestionGenerator/src/Pipeline/QAhaystack.py
deleted file mode 100644
index 69b5a62589326de1267c5a5e2a33ec5ac04138a9..0000000000000000000000000000000000000000
--- a/spaces/HemanthSai7/IntelligentQuestionGenerator/src/Pipeline/QAhaystack.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import re
-import logging
-
-from haystack.document_stores import ElasticsearchDocumentStore
-from haystack.utils import launch_es,print_answers
-from haystack.nodes import FARMReader,TransformersReader,BM25Retriever
-from haystack.pipelines import ExtractiveQAPipeline
-from haystack.nodes import TextConverter,PDFToTextConverter,PreProcessor
-from haystack.utils import convert_files_to_docs, fetch_archive_from_http
-from Reader import PdfReader,ExtractedText
-
-launch_es() # Launches an Elasticsearch instance on your local machine
-
-# Install the latest release of Haystack in your own environment
-#! pip install farm-haystack
-
-"""Install the latest main of Haystack"""
-# !pip install --upgrade pip
-# !pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab,ocr]
-
-# # For Colab/linux based machines
-# !wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.04.tar.gz
-# !tar -xvf xpdf-tools-linux-4.04.tar.gz && sudo cp xpdf-tools-linux-4.04/bin64/pdftotext /usr/local/bin
-
-# For Macos machines
-# !wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-mac-4.03.tar.gz
-# !tar -xvf xpdf-tools-mac-4.03.tar.gz && sudo cp xpdf-tools-mac-4.03/bin64/pdftotext /usr/local/bin
-
-"Run this script from the root of the project"
-# # In Colab / No Docker environments: Start Elasticsearch from source
-# ! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q
-# ! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz
-# ! chown -R daemon:daemon elasticsearch-7.9.2
-
-# import os
-# from subprocess import Popen, PIPE, STDOUT
-
-# es_server = Popen(
-# ["elasticsearch-7.9.2/bin/elasticsearch"], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1) # as daemon
-# )
-# # wait until ES has started
-# ! sleep 30
-
-logging.basicConfig(format="%(levelname)s - %(name)s - %(message)s", level=logging.WARNING)
-logging.getLogger("haystack").setLevel(logging.INFO)
-
-class Connection:
- def __init__(self,host="localhost",username="",password="",index="document"):
- """
- host: Elasticsearch host. If no host is provided, the default host "localhost" is used.
-
- port: Elasticsearch port. If no port is provided, the default port 9200 is used.
-
- username: Elasticsearch username. If no username is provided, no username is used.
-
- password: Elasticsearch password. If no password is provided, no password is used.
-
- index: Elasticsearch index. If no index is provided, the default index "document" is used.
- """
- self.host=host
- self.username=username
- self.password=password
- self.index=index
-
- def get_connection(self):
- document_store=ElasticsearchDocumentStore(host=self.host,username=self.username,password=self.password,index=self.index)
- return document_store
-
-class QAHaystack:
- def __init__(self, filename):
- self.filename=filename
-
- def preprocessing(self,data):
- """
-        This function is used to preprocess the data. It's a simple function which lowercases the data and collapses extra whitespace.
- """
-
- converter = TextConverter(remove_numeric_tables=True, valid_languages=["en"])
- doc_txt = converter.convert(file_path=ExtractedText(self.filename,'data.txt').save(4,6), meta=None)[0]
-
- converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
- doc_pdf = converter.convert(file_path="data/tutorial8/manibook.pdf", meta=None)[0]
-
- preprocess_text=data.lower() # lowercase
- preprocess_text = re.sub(r'\s+', ' ', preprocess_text) # remove extra spaces
- return preprocess_text
-
- def convert_to_document(self,data):
-
- """
- Write the data to a text file. This is required since the haystack library requires the data to be in a text file so that it can then be converted to a document.
- """
- data=self.preprocessing(data)
- with open(self.filename,'w') as f:
- f.write(data)
-
- """
- Read the data from the text file.
- """
- data=self.preprocessing(data)
- with open(self.filename,'r') as f:
- data=f.read()
- data=data.split("\n")
-
- """
- DocumentStores expect Documents in dictionary form, like that below. They are loaded using the DocumentStore.write_documents()
-
- dicts=[
- {
- 'content': DOCUMENT_TEXT_HERE,
- 'meta':{'name': DOCUMENT_NAME,...}
- },...
- ]
-
- (Optionally: you can also add more key-value-pairs here, that will be indexed as fields in Elasticsearch and can be accessed later for filtering or shown in the responses of the Pipeline)
- """
- data_json=[{
- 'content':paragraph,
- 'meta':{
- 'name':self.filename
- }
- } for paragraph in data
- ]
-
- document_store=Connection().get_connection()
- document_store.write_documents(data_json)
- return document_store
-
-
-class Pipeline:
- def __init__(self,filename,retriever=BM25Retriever,reader=FARMReader):
- self.reader=reader
- self.retriever=retriever
- self.filename=filename
-
- def get_prediction(self,data,query):
- """
-        Retrievers help narrow down the scope for the Reader to smaller units of text where a given question could be answered. They use simple but fast algorithms.
-
-        Here we use Elasticsearch's default BM25 algorithm; other retrievers could be swapped in as well.
- """
- retriever=self.retriever(document_store=QAHaystack(self.filename).convert_to_document(data))
-
- """
-        Readers scan the texts returned by retrievers in detail and extract the k best answers. They are based on powerful but slower deep learning models. Haystack currently supports Readers based on the FARM and Transformers frameworks.
- """
- reader = self.reader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
-
- """
-        With a Haystack Pipeline we can stick the building blocks together into a search pipeline. Under the hood, Pipelines are Directed Acyclic Graphs (DAGs) that can easily be customized for specific use cases. To speed things up, Haystack also comes with a few predefined Pipelines. One of them is the ExtractiveQAPipeline, which combines a retriever and a reader to answer our questions.
- """
- pipe = ExtractiveQAPipeline(reader, retriever)
-
- """
- This function is used to get the prediction from the pipeline.
- """
- prediction = pipe.run(query=query, params={"Retriever":{"top_k":10}, "Reader":{"top_k":5}})
- return prediction
\ No newline at end of file
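Assuming a local Elasticsearch instance is running and the modules above are importable, a question can be answered end to end roughly as follows; the file path and query string are illustrative stand-ins.

```python
from haystack.utils import print_answers

from QAhaystack import Pipeline

# Illustrative inputs: raw text extracted from a document and a question about it.
raw_text = open("data.txt", encoding="utf-8").read()
query = "What is the main topic of the document?"

pipeline = Pipeline(filename="data.txt")  # defaults: BM25Retriever + FARMReader
prediction = pipeline.get_prediction(data=raw_text, query=query)

print_answers(prediction, details="minimum")
```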
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/flagging.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/flagging.py
deleted file mode 100644
index e87cf44d471df1f229458f07cac7c67ac0cfd540..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/flagging.py
+++ /dev/null
@@ -1,560 +0,0 @@
-from __future__ import annotations
-
-import csv
-import datetime
-import io
-import json
-import os
-import uuid
-from abc import ABC, abstractmethod
-from pathlib import Path
-from typing import TYPE_CHECKING, Any, List
-
-import gradio as gr
-from gradio import encryptor, utils
-from gradio.documentation import document, set_documentation_group
-
-if TYPE_CHECKING:
- from gradio.components import IOComponent
-
-set_documentation_group("flagging")
-
-
-def _get_dataset_features_info(is_new, components):
- """
- Takes in a list of components and returns a dataset features info
-
- Parameters:
- is_new: boolean, whether the dataset is new or not
- components: list of components
-
- Returns:
- infos: a dictionary of the dataset features
- file_preview_types: dictionary mapping of gradio components to appropriate string.
- header: list of header strings
-
- """
- infos = {"flagged": {"features": {}}}
- # File previews for certain input and output types
- file_preview_types = {gr.Audio: "Audio", gr.Image: "Image"}
- headers = []
-
- # Generate the headers and dataset_infos
- if is_new:
-
- for component in components:
- headers.append(component.label)
- infos["flagged"]["features"][component.label] = {
- "dtype": "string",
- "_type": "Value",
- }
- if isinstance(component, tuple(file_preview_types)):
- headers.append(component.label + " file")
- for _component, _type in file_preview_types.items():
- if isinstance(component, _component):
- infos["flagged"]["features"][
- (component.label or "") + " file"
- ] = {"_type": _type}
- break
-
- headers.append("flag")
- infos["flagged"]["features"]["flag"] = {
- "dtype": "string",
- "_type": "Value",
- }
-
- return infos, file_preview_types, headers
-
-
-class FlaggingCallback(ABC):
- """
- An abstract class for defining the methods that any FlaggingCallback should have.
- """
-
- @abstractmethod
- def setup(self, components: List[IOComponent], flagging_dir: str):
- """
- This method should be overridden and ensure that everything is set up correctly for flag().
- This method gets called once at the beginning of the Interface.launch() method.
- Parameters:
- components: Set of components that will provide flagged data.
- flagging_dir: A string, typically containing the path to the directory where the flagging file should be storied (provided as an argument to Interface.__init__()).
- """
- pass
-
- @abstractmethod
- def flag(
- self,
- flag_data: List[Any],
- flag_option: str | None = None,
- flag_index: int | None = None,
- username: str | None = None,
- ) -> int:
- """
- This method should be overridden by the FlaggingCallback subclass and may contain optional additional arguments.
- This gets called every time the button is pressed.
- Parameters:
- interface: The Interface object that is being used to launch the flagging interface.
- flag_data: The data to be flagged.
- flag_option (optional): In the case that flagging_options are provided, the flag option that is being used.
- flag_index (optional): The index of the sample that is being flagged.
- username (optional): The username of the user that is flagging the data, if logged in.
- Returns:
- (int) The total number of samples that have been flagged.
- """
- pass
-
-
-@document()
-class SimpleCSVLogger(FlaggingCallback):
- """
- A simplified implementation of the FlaggingCallback abstract class
- provided for illustrative purposes. Each flagged sample (both the input and output data)
- is logged to a CSV file on the machine running the gradio app.
- Example:
- import gradio as gr
- def image_classifier(inp):
- return {'cat': 0.3, 'dog': 0.7}
- demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
- flagging_callback=SimpleCSVLogger())
- """
-
- def __init__(self):
- pass
-
- def setup(self, components: List[IOComponent], flagging_dir: str | Path):
- self.components = components
- self.flagging_dir = flagging_dir
- os.makedirs(flagging_dir, exist_ok=True)
-
- def flag(
- self,
- flag_data: List[Any],
- flag_option: str | None = None,
- flag_index: int | None = None,
- username: str | None = None,
- ) -> int:
- flagging_dir = self.flagging_dir
- log_filepath = Path(flagging_dir) / "log.csv"
-
- csv_data = []
- for component, sample in zip(self.components, flag_data):
- save_dir = Path(flagging_dir) / utils.strip_invalid_filename_characters(
- component.label or ""
- )
- csv_data.append(
- component.deserialize(
- sample,
- save_dir,
- None,
- )
- )
-
- with open(log_filepath, "a", newline="") as csvfile:
- writer = csv.writer(csvfile)
- writer.writerow(utils.sanitize_list_for_csv(csv_data))
-
- with open(log_filepath, "r") as csvfile:
- line_count = len([None for row in csv.reader(csvfile)]) - 1
- return line_count
-
-
-@document()
-class CSVLogger(FlaggingCallback):
- """
- The default implementation of the FlaggingCallback abstract class. Each flagged
- sample (both the input and output data) is logged to a CSV file with headers on the machine running the gradio app.
- Example:
- import gradio as gr
- def image_classifier(inp):
- return {'cat': 0.3, 'dog': 0.7}
- demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
- flagging_callback=CSVLogger())
- Guides: using_flagging
- """
-
- def __init__(self):
- pass
-
- def setup(
- self,
- components: List[IOComponent],
- flagging_dir: str | Path,
- encryption_key: bytes | None = None,
- ):
- self.components = components
- self.flagging_dir = flagging_dir
- self.encryption_key = encryption_key
- os.makedirs(flagging_dir, exist_ok=True)
-
- def flag(
- self,
- flag_data: List[Any],
- flag_option: str | None = None,
- flag_index: int | None = None,
- username: str | None = None,
- ) -> int:
- flagging_dir = self.flagging_dir
- log_filepath = Path(flagging_dir) / "log.csv"
- is_new = not Path(log_filepath).exists()
- headers = [
- component.label or f"component {idx}"
- for idx, component in enumerate(self.components)
- ] + [
- "flag",
- "username",
- "timestamp",
- ]
-
- csv_data = []
- for idx, (component, sample) in enumerate(zip(self.components, flag_data)):
- save_dir = Path(flagging_dir) / utils.strip_invalid_filename_characters(
- component.label or f"component {idx}"
- )
- if utils.is_update(sample):
- csv_data.append(str(sample))
- else:
- csv_data.append(
- component.deserialize(
- sample,
- save_dir=save_dir,
- encryption_key=self.encryption_key,
- )
- if sample is not None
- else ""
- )
- csv_data.append(flag_option if flag_option is not None else "")
- csv_data.append(username if username is not None else "")
- csv_data.append(str(datetime.datetime.now()))
-
- def replace_flag_at_index(file_content: str, flag_index: int):
- file_content_ = io.StringIO(file_content)
- content = list(csv.reader(file_content_))
- header = content[0]
- flag_col_index = header.index("flag")
- content[flag_index][flag_col_index] = flag_option # type: ignore
- output = io.StringIO()
- writer = csv.writer(output)
- writer.writerows(utils.sanitize_list_for_csv(content))
- return output.getvalue()
-
- if self.encryption_key:
- output = io.StringIO()
- if not is_new:
- with open(log_filepath, "rb", encoding="utf-8") as csvfile:
- encrypted_csv = csvfile.read()
- decrypted_csv = encryptor.decrypt(
- self.encryption_key, encrypted_csv
- )
- file_content = decrypted_csv.decode()
- if flag_index is not None:
- file_content = replace_flag_at_index(file_content, flag_index)
- output.write(file_content)
- writer = csv.writer(output)
- if flag_index is None:
- if is_new:
- writer.writerow(utils.sanitize_list_for_csv(headers))
- writer.writerow(utils.sanitize_list_for_csv(csv_data))
- with open(log_filepath, "wb", encoding="utf-8") as csvfile:
- csvfile.write(
- encryptor.encrypt(self.encryption_key, output.getvalue().encode())
- )
- else:
- if flag_index is None:
- with open(log_filepath, "a", newline="", encoding="utf-8") as csvfile:
- writer = csv.writer(csvfile)
- if is_new:
- writer.writerow(utils.sanitize_list_for_csv(headers))
- writer.writerow(utils.sanitize_list_for_csv(csv_data))
- else:
- with open(log_filepath, encoding="utf-8") as csvfile:
- file_content = csvfile.read()
- file_content = replace_flag_at_index(file_content, flag_index)
- with open(
- log_filepath, "w", newline="", encoding="utf-8"
- ) as csvfile: # newline parameter needed for Windows
- csvfile.write(file_content)
- with open(log_filepath, "r", encoding="utf-8") as csvfile:
- line_count = len([None for row in csv.reader(csvfile)]) - 1
- return line_count
-
-
-@document()
-class HuggingFaceDatasetSaver(FlaggingCallback):
- """
- A callback that saves each flagged sample (both the input and output data)
- to a HuggingFace dataset.
- Example:
- import gradio as gr
- hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes")
- def image_classifier(inp):
- return {'cat': 0.3, 'dog': 0.7}
- demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
- allow_flagging="manual", flagging_callback=hf_writer)
- Guides: using_flagging
- """
-
- def __init__(
- self,
- hf_token: str,
- dataset_name: str,
- organization: str | None = None,
- private: bool = False,
- ):
- """
- Parameters:
- hf_token: The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset.
- dataset_name: The name of the dataset to save the data to, e.g. "image-classifier-1"
- organization: The organization to save the dataset under. The hf_token must provide write access to this organization. If not provided, saved under the name of the user corresponding to the hf_token.
- private: Whether the dataset should be private (defaults to False).
- """
- self.hf_token = hf_token
- self.dataset_name = dataset_name
- self.organization_name = organization
- self.dataset_private = private
-
- def setup(self, components: List[IOComponent], flagging_dir: str):
- """
- Params:
- flagging_dir (str): local directory where the dataset is cloned,
- updated, and pushed from.
- """
- try:
- import huggingface_hub
- except (ImportError, ModuleNotFoundError):
- raise ImportError(
- "Package `huggingface_hub` not found is needed "
- "for HuggingFaceDatasetSaver. Try 'pip install huggingface_hub'."
- )
- path_to_dataset_repo = huggingface_hub.create_repo(
- name=self.dataset_name,
- token=self.hf_token,
- private=self.dataset_private,
- repo_type="dataset",
- exist_ok=True,
- )
- self.path_to_dataset_repo = path_to_dataset_repo # e.g. "https://huggingface.co/datasets/abidlabs/test-audio-10"
- self.components = components
- self.flagging_dir = flagging_dir
- self.dataset_dir = Path(flagging_dir) / self.dataset_name
- self.repo = huggingface_hub.Repository(
- local_dir=str(self.dataset_dir),
- clone_from=path_to_dataset_repo,
- use_auth_token=self.hf_token,
- )
- self.repo.git_pull(lfs=True)
-
- # Should filename be user-specified?
- self.log_file = Path(self.dataset_dir) / "data.csv"
- self.infos_file = Path(self.dataset_dir) / "dataset_infos.json"
-
- def flag(
- self,
- flag_data: List[Any],
- flag_option: str | None = None,
- flag_index: int | None = None,
- username: str | None = None,
- ) -> int:
- self.repo.git_pull(lfs=True)
-
- is_new = not Path(self.log_file).exists()
-
- with open(self.log_file, "a", newline="", encoding="utf-8") as csvfile:
- writer = csv.writer(csvfile)
-
- # File previews for certain input and output types
- infos, file_preview_types, headers = _get_dataset_features_info(
- is_new, self.components
- )
-
- # Generate the headers and dataset_infos
- if is_new:
- writer.writerow(utils.sanitize_list_for_csv(headers))
-
- # Generate the row corresponding to the flagged sample
- csv_data = []
- for component, sample in zip(self.components, flag_data):
- save_dir = Path(
- self.dataset_dir
- ) / utils.strip_invalid_filename_characters(component.label or "")
- filepath = component.deserialize(sample, save_dir, None)
- csv_data.append(filepath)
- if isinstance(component, tuple(file_preview_types)):
- csv_data.append(
- "{}/resolve/main/{}".format(self.path_to_dataset_repo, filepath)
- )
- csv_data.append(flag_option if flag_option is not None else "")
- writer.writerow(utils.sanitize_list_for_csv(csv_data))
-
- if is_new:
- json.dump(infos, open(self.infos_file, "w"))
-
- with open(self.log_file, "r", encoding="utf-8") as csvfile:
- line_count = len([None for row in csv.reader(csvfile)]) - 1
-
- self.repo.push_to_hub(commit_message="Flagged sample #{}".format(line_count))
-
- return line_count
-
-
-class HuggingFaceDatasetJSONSaver(FlaggingCallback):
- """
- A FlaggingCallback that saves flagged data to a Hugging Face dataset in JSONL format.
-
- Each data sample is saved in a different JSONL file,
- allowing multiple users to use flagging simultaneously.
- Saving to a single CSV would cause errors as only one user can edit at the same time.
-
- """
-
- def __init__(
- self,
-        hf_token: str,
- dataset_name: str,
- organization: str | None = None,
- private: bool = False,
- verbose: bool = True,
- ):
- """
- Params:
- hf_token (str): The token to use to access the huggingface API.
- dataset_name (str): The name of the dataset to save the data to, e.g.
- "image-classifier-1"
- organization (str): The name of the organization to which to attach
- the datasets. If None, the dataset attaches to the user only.
- private (bool): If the dataset does not already exist, whether it
- should be created as a private dataset or public. Private datasets
- may require paid huggingface.co accounts
- verbose (bool): Whether to print out the status of the dataset
- creation.
- """
-        self.hf_token = hf_token
- self.dataset_name = dataset_name
- self.organization_name = organization
- self.dataset_private = private
- self.verbose = verbose
-
- def setup(self, components: List[IOComponent], flagging_dir: str):
- """
- Params:
- components List[Component]: list of components for flagging
- flagging_dir (str): local directory where the dataset is cloned,
- updated, and pushed from.
- """
- try:
- import huggingface_hub
- except (ImportError, ModuleNotFoundError):
- raise ImportError(
- "Package `huggingface_hub` not found is needed "
- "for HuggingFaceDatasetJSONSaver. Try 'pip install huggingface_hub'."
- )
- path_to_dataset_repo = huggingface_hub.create_repo(
- name=self.dataset_name,
-            token=self.hf_token,
- private=self.dataset_private,
- repo_type="dataset",
- exist_ok=True,
- )
- self.path_to_dataset_repo = path_to_dataset_repo # e.g. "https://huggingface.co/datasets/abidlabs/test-audio-10"
- self.components = components
- self.flagging_dir = flagging_dir
- self.dataset_dir = Path(flagging_dir) / self.dataset_name
- self.repo = huggingface_hub.Repository(
- local_dir=str(self.dataset_dir),
- clone_from=path_to_dataset_repo,
-            use_auth_token=self.hf_token,
- )
- self.repo.git_pull(lfs=True)
-
- self.infos_file = Path(self.dataset_dir) / "dataset_infos.json"
-
- def flag(
- self,
- flag_data: List[Any],
- flag_option: str | None = None,
- flag_index: int | None = None,
- username: str | None = None,
- ) -> str:
- self.repo.git_pull(lfs=True)
-
- # Generate unique folder for the flagged sample
- unique_name = self.get_unique_name() # unique name for folder
- folder_name = (
- Path(self.dataset_dir) / unique_name
- ) # unique folder for specific example
- os.makedirs(folder_name)
-
- # Now uses the existence of `dataset_infos.json` to determine if new
- is_new = not Path(self.infos_file).exists()
-
- # File previews for certain input and output types
- infos, file_preview_types, _ = _get_dataset_features_info(
- is_new, self.components
- )
-
- # Generate the row and header corresponding to the flagged sample
- csv_data = []
- headers = []
-
- for component, sample in zip(self.components, flag_data):
- headers.append(component.label)
-
- try:
- save_dir = Path(folder_name) / utils.strip_invalid_filename_characters(
- component.label or ""
- )
- filepath = component.deserialize(sample, save_dir, None)
- except Exception:
-                # Could not parse 'sample', mostly because it was None and `component.save_flagged`
-                # does not handle None cases.
-                # For example, Label (line 3109 of components.py) raises an error if data is None.
- filepath = None
-
- if isinstance(component, tuple(file_preview_types)):
-                headers.append((component.label or "") + " file")
-
- csv_data.append(
- "{}/resolve/main/{}/{}".format(
- self.path_to_dataset_repo, unique_name, filepath
- )
- if filepath is not None
- else None
- )
-
- csv_data.append(filepath)
- headers.append("flag")
- csv_data.append(flag_option if flag_option is not None else "")
-
- # Creates metadata dict from row data and dumps it
- metadata_dict = {
- header: _csv_data for header, _csv_data in zip(headers, csv_data)
- }
- self.dump_json(metadata_dict, Path(folder_name) / "metadata.jsonl")
-
- if is_new:
- json.dump(infos, open(self.infos_file, "w"))
-
- self.repo.push_to_hub(commit_message="Flagged sample {}".format(unique_name))
- return unique_name
-
- def get_unique_name(self):
- id = uuid.uuid4()
- return str(id)
-
- def dump_json(self, thing: dict, file_path: str | Path) -> None:
- with open(file_path, "w+", encoding="utf8") as f:
- json.dump(thing, f)
-
-
-class FlagMethod:
- """
- Helper class that contains the flagging button option and callback
- """
-
- def __init__(self, flagging_callback: FlaggingCallback, flag_option=None):
- self.flagging_callback = flagging_callback
- self.flag_option = flag_option
- self.__name__ = "Flag"
-
- def __call__(self, *flag_data):
- self.flagging_callback.flag(list(flag_data), flag_option=self.flag_option)
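For reference, a minimal sketch of how the JSONL saver defined above would be wired into a demo — a hedged illustration, not part of the deleted file. The token, dataset name, classifier, and import path are assumptions for illustration only.

```python
import gradio as gr
from gradio.flagging import HuggingFaceDatasetJSONSaver  # assumed import path for the class above

HF_API_TOKEN = "hf_xxx"  # hypothetical token, for illustration only
json_writer = HuggingFaceDatasetJSONSaver(HF_API_TOKEN, "image-classifier-flags")

def image_classifier(inp):
    # Placeholder model output.
    return {"cat": 0.3, "dog": 0.7}

# Each flag is written to its own folder with a metadata.jsonl file, so
# concurrent users never contend on a single CSV.
demo = gr.Interface(
    fn=image_classifier,
    inputs="image",
    outputs="label",
    allow_flagging="manual",
    flagging_callback=json_writer,
)
demo.launch()
```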
diff --git a/spaces/Hua626/QQsign/bin/unidbg-fetch-qsign.bat b/spaces/Hua626/QQsign/bin/unidbg-fetch-qsign.bat
deleted file mode 100644
index 8b291e7303b0c07d14b714e5795473891363c85b..0000000000000000000000000000000000000000
--- a/spaces/Hua626/QQsign/bin/unidbg-fetch-qsign.bat
+++ /dev/null
@@ -1,89 +0,0 @@
-@rem
-@rem Copyright 2015 the original author or authors.
-@rem
-@rem Licensed under the Apache License, Version 2.0 (the "License");
-@rem you may not use this file except in compliance with the License.
-@rem You may obtain a copy of the License at
-@rem
-@rem https://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-@rem
-
-@if "%DEBUG%" == "" @echo off
-@rem ##########################################################################
-@rem
-@rem unidbg-fetch-qsign startup script for Windows
-@rem
-@rem ##########################################################################
-
-@rem Set local scope for the variables with windows NT shell
-if "%OS%"=="Windows_NT" setlocal
-
-set DIRNAME=%~dp0
-if "%DIRNAME%" == "" set DIRNAME=.
-set APP_BASE_NAME=%~n0
-set APP_HOME=%DIRNAME%..
-
-@rem Resolve any "." and ".." in APP_HOME to make it shorter.
-for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
-
-@rem Add default JVM options here. You can also use JAVA_OPTS and UNIDBG_FETCH_QSIGN_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS=
-
-@rem Find java.exe
-if defined JAVA_HOME goto findJavaFromJavaHome
-
-set JAVA_EXE=java.exe
-%JAVA_EXE% -version >NUL 2>&1
-if "%ERRORLEVEL%" == "0" goto execute
-
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
-
-goto fail
-
-:findJavaFromJavaHome
-set JAVA_HOME=%JAVA_HOME:"=%
-set JAVA_EXE=%JAVA_HOME%/bin/java.exe
-
-if exist "%JAVA_EXE%" goto execute
-
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
-
-goto fail
-
-:execute
-@rem Setup the command line
-
-set CLASSPATH=%APP_HOME%\lib\unidbg-fetch-qsign-1.1.9.jar;%APP_HOME%\lib\unidbg-android-105.jar;%APP_HOME%\lib\ktor-server-content-negotiation-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-json-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-status-pages-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-netty-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-host-common-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-core-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-events-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-websockets-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-cio-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-network-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-utils-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-io-jvm-2.3.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk8-1.8.22.jar;%APP_HOME%\lib\kotlinx-serialization-json-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-protobuf-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-core-jvm-1.5.1.jar;%APP_HOME%\lib\logback-classic-1.2.11.jar;%APP_HOME%\lib\kotlinx-coroutines-jdk8-1.7.1.jar;%APP_HOME%\lib\kotlinx-coroutines-core-jvm-1.7.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk7-1.8.22.jar;%APP_HOME%\lib\kotlin-reflect-1.8.10.jar;%APP_HOME%\lib\kotlin-stdlib-1.8.22.jar;%APP_HOME%\lib\slf4j-api-1.7.36.jar;%APP_HOME%\lib\kotlin-stdlib-common-1.8.22.jar;%APP_HOME%\lib\config-1.4.2.jar;%APP_HOME%\lib\jansi-2.4.0.jar;%APP_HOME%\lib\netty-codec-http2-4.1.92.Final.jar;%APP_HOME%\lib\alpn-api-1.1.3.v20160715.jar;%APP_HOME%\lib\netty-transport-native-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-epoll-4.1.92.Final.jar;%APP_HOME%\lib\logback-core-1.2.11.jar;%APP_HOME%\lib\annotations-23.0.0.jar;%APP_HOME%\lib\netty-codec-http-4.1.92.Final.jar;%APP_HOME%\lib\netty-handler-4.1.92.Final.jar;%APP_HOME%\lib\netty-codec-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-epoll-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-unix-common-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-4.1.92.Final.jar;%APP_HOME%\lib\netty-buffer-4.1.92.Final.jar;%APP_HOME%\lib\netty-resolver-4.1.92.Final.jar;%APP_HOME%\lib\netty-common-4.1.92.Final.jar
-
-
-@rem Execute unidbg-fetch-qsign
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %UNIDBG_FETCH_QSIGN_OPTS% -classpath "%CLASSPATH%" MainKt %*
-
-:end
-@rem End local scope for the variables with windows NT shell
-if "%ERRORLEVEL%"=="0" goto mainEnd
-
-:fail
-rem Set variable UNIDBG_FETCH_QSIGN_EXIT_CONSOLE if you need the _script_ return code instead of
-rem the _cmd.exe /c_ return code!
-if not "" == "%UNIDBG_FETCH_QSIGN_EXIT_CONSOLE%" exit 1
-exit /b 1
-
-:mainEnd
-if "%OS%"=="Windows_NT" endlocal
-
-:omega
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py
deleted file mode 100644
index f41ec09327fe80b50d20674e7482794ce45c531c..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch.nn as nn
-from fairseq.modules import TransformerSentenceEncoder
-from fairseq.modules.sparse_transformer_sentence_encoder_layer import (
- SparseTransformerSentenceEncoderLayer,
-)
-
-
-class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
- """
- Sparse implementation of the TransformerSentenceEncoder
- - see SparseMultiheadAttention
- """
-
- def __init__(
- self,
- padding_idx: int,
- vocab_size: int,
- num_encoder_layers: int = 6,
- embedding_dim: int = 768,
- ffn_embedding_dim: int = 3072,
- num_attention_heads: int = 8,
- dropout: float = 0.1,
- attention_dropout: float = 0.1,
- activation_dropout: float = 0.1,
- max_seq_len: int = 256,
- num_segments: int = 2,
- use_position_embeddings: bool = True,
- offset_positions_by_padding: bool = True,
- encoder_normalize_before: bool = False,
- apply_bert_init: bool = False,
- activation_fn: str = "relu",
- learned_pos_embedding: bool = True,
- embed_scale: float = None,
- freeze_embeddings: bool = False,
- n_trans_layers_to_freeze: int = 0,
- export: bool = False,
- is_bidirectional: bool = True,
- stride: int = 32,
- expressivity: int = 8,
- ) -> None:
-
- super().__init__(
- padding_idx,
- vocab_size,
- num_encoder_layers,
- embedding_dim,
- ffn_embedding_dim,
- num_attention_heads,
- dropout,
- attention_dropout,
- activation_dropout,
- max_seq_len,
- num_segments,
- use_position_embeddings,
- offset_positions_by_padding,
- encoder_normalize_before,
- apply_bert_init,
- activation_fn,
- learned_pos_embedding,
- embed_scale,
- freeze_embeddings,
- n_trans_layers_to_freeze,
- export,
- )
-
- self.layers = nn.ModuleList(
- [
- SparseTransformerSentenceEncoderLayer(
- embedding_dim=self.embedding_dim,
- ffn_embedding_dim=ffn_embedding_dim,
- num_attention_heads=num_attention_heads,
- dropout=dropout,
- attention_dropout=attention_dropout,
- activation_dropout=activation_dropout,
- activation_fn=activation_fn,
- export=export,
- is_bidirectional=is_bidirectional,
- stride=stride,
- expressivity=expressivity,
- )
- for _ in range(num_encoder_layers)
- ]
- )
-
- def freeze_module_params(m):
- if m is not None:
- for p in m.parameters():
- p.requires_grad = False
-
- for layer in range(n_trans_layers_to_freeze):
- freeze_module_params(self.layers[layer])
diff --git a/spaces/ICML2022/resefa/utils/visualizers/test.py b/spaces/ICML2022/resefa/utils/visualizers/test.py
deleted file mode 100644
index 765ebf9c721b0792fb373ecb515ebf188f728df0..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/resefa/utils/visualizers/test.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# python3.7
-"""Unit test for visualizer."""
-
-import os
-import skvideo.datasets
-
-from ..image_utils import save_image
-from . import GridVisualizer
-from . import HtmlVisualizer
-from . import HtmlReader
-from . import GifVisualizer
-from . import VideoVisualizer
-from . import VideoReader
-
-__all__ = ['test_visualizer']
-
-_TEST_DIR = 'visualizer_test'
-
-
-def test_visualizer(test_dir=_TEST_DIR):
- """Tests visualizers."""
- print('========== Start Visualizer Test ==========')
-
- frame_dir = os.path.join(test_dir, 'test_frames')
- os.makedirs(frame_dir, exist_ok=True)
-
- print('===== Testing `VideoReader` =====')
- # Total 132 frames, with size (720, 1080).
- video_reader = VideoReader(skvideo.datasets.bigbuckbunny())
- frame_height = video_reader.frame_height
- frame_width = video_reader.frame_width
- frame_size = (frame_height, frame_width)
- half_size = (frame_height // 2, frame_width // 2)
- # Save frames as the test set.
- for idx in range(80):
- frame = video_reader.read()
- save_image(os.path.join(frame_dir, f'{idx:02d}.png'), frame)
-
-    print('===== Testing `GridVisualizer` =====')
- grid_visualizer = GridVisualizer()
- grid_visualizer.set_row_spacing(30)
- grid_visualizer.set_col_spacing(30)
- grid_visualizer.set_background(use_black=True)
- path = os.path.join(test_dir, 'portrait_row_major_ori_space30_black.png')
- grid_visualizer.visualize_directory(frame_dir, path,
- is_portrait=True, is_row_major=True)
- path = os.path.join(
- test_dir, 'landscape_col_major_downsample_space15_white.png')
- grid_visualizer.set_image_size(half_size)
- grid_visualizer.set_row_spacing(15)
- grid_visualizer.set_col_spacing(15)
- grid_visualizer.set_background(use_black=False)
- grid_visualizer.visualize_directory(frame_dir, path,
- is_portrait=False, is_row_major=False)
-
- print('===== Testing `HtmlVisualizer` =====')
- html_visualizer = HtmlVisualizer()
- path = os.path.join(test_dir, 'portrait_col_major_ori.html')
- html_visualizer.visualize_directory(frame_dir, path,
- is_portrait=True, is_row_major=False)
- path = os.path.join(test_dir, 'landscape_row_major_downsample.html')
- html_visualizer.set_image_size(half_size)
- html_visualizer.visualize_directory(frame_dir, path,
- is_portrait=False, is_row_major=True)
-
- print('===== Testing `HtmlReader` =====')
- path = os.path.join(test_dir, 'landscape_row_major_downsample.html')
- html_reader = HtmlReader(path)
- for j in range(html_reader.num_cols):
- assert html_reader.get_header(j) == ''
- parsed_dir = os.path.join(test_dir, 'parsed_frames')
- os.makedirs(parsed_dir, exist_ok=True)
- for i in range(html_reader.num_rows):
- for j in range(html_reader.num_cols):
- idx = i * html_reader.num_cols + j
- assert html_reader.get_text(i, j).endswith(f'(index {idx:03d})')
- image = html_reader.get_image(i, j, image_size=frame_size)
- assert image.shape[0:2] == frame_size
- save_image(os.path.join(parsed_dir, f'{idx:02d}.png'), image)
-
- print('===== Testing `GifVisualizer` =====')
- gif_visualizer = GifVisualizer()
- path = os.path.join(test_dir, 'gif_ori.gif')
- gif_visualizer.visualize_directory(frame_dir, path)
- gif_visualizer.set_image_size(half_size)
- path = os.path.join(test_dir, 'gif_downsample.gif')
- gif_visualizer.visualize_directory(frame_dir, path)
-
- print('===== Testing `VideoVisualizer` =====')
- video_visualizer = VideoVisualizer()
- path = os.path.join(test_dir, 'video_ori.mp4')
- video_visualizer.visualize_directory(frame_dir, path)
- path = os.path.join(test_dir, 'video_downsample.mp4')
- video_visualizer.set_frame_size(half_size)
- video_visualizer.visualize_directory(frame_dir, path)
-
- print('========== Finish Visualizer Test ==========')
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/ecbsr_arch.py b/spaces/Iceclear/StableSR/StableSR/basicsr/archs/ecbsr_arch.py
deleted file mode 100644
index fe20e772587d74c67fffb40f3b4731cf4f42268b..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/ecbsr_arch.py
+++ /dev/null
@@ -1,275 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from basicsr.utils.registry import ARCH_REGISTRY
-
-
-class SeqConv3x3(nn.Module):
- """The re-parameterizable block used in the ECBSR architecture.
-
- ``Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices``
-
- Reference: https://github.com/xindongzhang/ECBSR
-
- Args:
- seq_type (str): Sequence type, option: conv1x1-conv3x3 | conv1x1-sobelx | conv1x1-sobely | conv1x1-laplacian.
- in_channels (int): Channel number of input.
- out_channels (int): Channel number of output.
- depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1.
- """
-
- def __init__(self, seq_type, in_channels, out_channels, depth_multiplier=1):
- super(SeqConv3x3, self).__init__()
- self.seq_type = seq_type
- self.in_channels = in_channels
- self.out_channels = out_channels
-
- if self.seq_type == 'conv1x1-conv3x3':
- self.mid_planes = int(out_channels * depth_multiplier)
- conv0 = torch.nn.Conv2d(self.in_channels, self.mid_planes, kernel_size=1, padding=0)
- self.k0 = conv0.weight
- self.b0 = conv0.bias
-
- conv1 = torch.nn.Conv2d(self.mid_planes, self.out_channels, kernel_size=3)
- self.k1 = conv1.weight
- self.b1 = conv1.bias
-
- elif self.seq_type == 'conv1x1-sobelx':
- conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
- self.k0 = conv0.weight
- self.b0 = conv0.bias
-
- # init scale and bias
- scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
- self.scale = nn.Parameter(scale)
- bias = torch.randn(self.out_channels) * 1e-3
- bias = torch.reshape(bias, (self.out_channels, ))
- self.bias = nn.Parameter(bias)
- # init mask
- self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
- for i in range(self.out_channels):
- self.mask[i, 0, 0, 0] = 1.0
- self.mask[i, 0, 1, 0] = 2.0
- self.mask[i, 0, 2, 0] = 1.0
- self.mask[i, 0, 0, 2] = -1.0
- self.mask[i, 0, 1, 2] = -2.0
- self.mask[i, 0, 2, 2] = -1.0
- self.mask = nn.Parameter(data=self.mask, requires_grad=False)
-
- elif self.seq_type == 'conv1x1-sobely':
- conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
- self.k0 = conv0.weight
- self.b0 = conv0.bias
-
- # init scale and bias
- scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
- self.scale = nn.Parameter(torch.FloatTensor(scale))
- bias = torch.randn(self.out_channels) * 1e-3
- bias = torch.reshape(bias, (self.out_channels, ))
- self.bias = nn.Parameter(torch.FloatTensor(bias))
- # init mask
- self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
- for i in range(self.out_channels):
- self.mask[i, 0, 0, 0] = 1.0
- self.mask[i, 0, 0, 1] = 2.0
- self.mask[i, 0, 0, 2] = 1.0
- self.mask[i, 0, 2, 0] = -1.0
- self.mask[i, 0, 2, 1] = -2.0
- self.mask[i, 0, 2, 2] = -1.0
- self.mask = nn.Parameter(data=self.mask, requires_grad=False)
-
- elif self.seq_type == 'conv1x1-laplacian':
- conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
- self.k0 = conv0.weight
- self.b0 = conv0.bias
-
- # init scale and bias
- scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
- self.scale = nn.Parameter(torch.FloatTensor(scale))
- bias = torch.randn(self.out_channels) * 1e-3
- bias = torch.reshape(bias, (self.out_channels, ))
- self.bias = nn.Parameter(torch.FloatTensor(bias))
- # init mask
- self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
- for i in range(self.out_channels):
- self.mask[i, 0, 0, 1] = 1.0
- self.mask[i, 0, 1, 0] = 1.0
- self.mask[i, 0, 1, 2] = 1.0
- self.mask[i, 0, 2, 1] = 1.0
- self.mask[i, 0, 1, 1] = -4.0
- self.mask = nn.Parameter(data=self.mask, requires_grad=False)
- else:
- raise ValueError('The type of seqconv is not supported!')
-
- def forward(self, x):
- if self.seq_type == 'conv1x1-conv3x3':
- # conv-1x1
- y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
- # explicitly padding with bias
- y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
- b0_pad = self.b0.view(1, -1, 1, 1)
- y0[:, :, 0:1, :] = b0_pad
- y0[:, :, -1:, :] = b0_pad
- y0[:, :, :, 0:1] = b0_pad
- y0[:, :, :, -1:] = b0_pad
- # conv-3x3
- y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1)
- else:
- y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
- # explicitly padding with bias
- y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
- b0_pad = self.b0.view(1, -1, 1, 1)
- y0[:, :, 0:1, :] = b0_pad
- y0[:, :, -1:, :] = b0_pad
- y0[:, :, :, 0:1] = b0_pad
- y0[:, :, :, -1:] = b0_pad
- # conv-3x3
- y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias=self.bias, stride=1, groups=self.out_channels)
- return y1
-
- def rep_params(self):
- device = self.k0.get_device()
- if device < 0:
- device = None
-
- if self.seq_type == 'conv1x1-conv3x3':
- # re-param conv kernel
- rep_weight = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3))
- # re-param conv bias
- rep_bias = torch.ones(1, self.mid_planes, 3, 3, device=device) * self.b0.view(1, -1, 1, 1)
- rep_bias = F.conv2d(input=rep_bias, weight=self.k1).view(-1, ) + self.b1
- else:
- tmp = self.scale * self.mask
- k1 = torch.zeros((self.out_channels, self.out_channels, 3, 3), device=device)
- for i in range(self.out_channels):
- k1[i, i, :, :] = tmp[i, 0, :, :]
- b1 = self.bias
- # re-param conv kernel
- rep_weight = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3))
- # re-param conv bias
- rep_bias = torch.ones(1, self.out_channels, 3, 3, device=device) * self.b0.view(1, -1, 1, 1)
- rep_bias = F.conv2d(input=rep_bias, weight=k1).view(-1, ) + b1
- return rep_weight, rep_bias
-
-
-class ECB(nn.Module):
- """The ECB block used in the ECBSR architecture.
-
- Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
- Ref git repo: https://github.com/xindongzhang/ECBSR
-
- Args:
- in_channels (int): Channel number of input.
- out_channels (int): Channel number of output.
- depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1.
- act_type (str): Activation type. Option: prelu | relu | rrelu | softplus | linear. Default: prelu.
- with_idt (bool): Whether to use identity connection. Default: False.
- """
-
- def __init__(self, in_channels, out_channels, depth_multiplier, act_type='prelu', with_idt=False):
- super(ECB, self).__init__()
-
- self.depth_multiplier = depth_multiplier
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.act_type = act_type
-
- if with_idt and (self.in_channels == self.out_channels):
- self.with_idt = True
- else:
- self.with_idt = False
-
- self.conv3x3 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, padding=1)
- self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.in_channels, self.out_channels, self.depth_multiplier)
- self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.in_channels, self.out_channels)
- self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.in_channels, self.out_channels)
- self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.in_channels, self.out_channels)
-
- if self.act_type == 'prelu':
- self.act = nn.PReLU(num_parameters=self.out_channels)
- elif self.act_type == 'relu':
- self.act = nn.ReLU(inplace=True)
- elif self.act_type == 'rrelu':
- self.act = nn.RReLU(lower=-0.05, upper=0.05)
- elif self.act_type == 'softplus':
- self.act = nn.Softplus()
- elif self.act_type == 'linear':
- pass
- else:
-            raise ValueError('The type of activation is not supported!')
-
- def forward(self, x):
- if self.training:
- y = self.conv3x3(x) + self.conv1x1_3x3(x) + self.conv1x1_sbx(x) + self.conv1x1_sby(x) + self.conv1x1_lpl(x)
- if self.with_idt:
- y += x
- else:
- rep_weight, rep_bias = self.rep_params()
- y = F.conv2d(input=x, weight=rep_weight, bias=rep_bias, stride=1, padding=1)
- if self.act_type != 'linear':
- y = self.act(y)
- return y
-
- def rep_params(self):
- weight0, bias0 = self.conv3x3.weight, self.conv3x3.bias
- weight1, bias1 = self.conv1x1_3x3.rep_params()
- weight2, bias2 = self.conv1x1_sbx.rep_params()
- weight3, bias3 = self.conv1x1_sby.rep_params()
- weight4, bias4 = self.conv1x1_lpl.rep_params()
- rep_weight, rep_bias = (weight0 + weight1 + weight2 + weight3 + weight4), (
- bias0 + bias1 + bias2 + bias3 + bias4)
-
- if self.with_idt:
- device = rep_weight.get_device()
- if device < 0:
- device = None
- weight_idt = torch.zeros(self.out_channels, self.out_channels, 3, 3, device=device)
- for i in range(self.out_channels):
- weight_idt[i, i, 1, 1] = 1.0
- bias_idt = 0.0
- rep_weight, rep_bias = rep_weight + weight_idt, rep_bias + bias_idt
- return rep_weight, rep_bias
-
-
-@ARCH_REGISTRY.register()
-class ECBSR(nn.Module):
- """ECBSR architecture.
-
- Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
- Ref git repo: https://github.com/xindongzhang/ECBSR
-
- Args:
- num_in_ch (int): Channel number of inputs.
- num_out_ch (int): Channel number of outputs.
- num_block (int): Block number in the trunk network.
- num_channel (int): Channel number.
- with_idt (bool): Whether use identity in convolution layers.
- act_type (str): Activation type.
- scale (int): Upsampling factor.
- """
-
- def __init__(self, num_in_ch, num_out_ch, num_block, num_channel, with_idt, act_type, scale):
- super(ECBSR, self).__init__()
- self.num_in_ch = num_in_ch
- self.scale = scale
-
- backbone = []
- backbone += [ECB(num_in_ch, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)]
- for _ in range(num_block):
- backbone += [ECB(num_channel, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)]
- backbone += [
- ECB(num_channel, num_out_ch * scale * scale, depth_multiplier=2.0, act_type='linear', with_idt=with_idt)
- ]
-
- self.backbone = nn.Sequential(*backbone)
- self.upsampler = nn.PixelShuffle(scale)
-
- def forward(self, x):
- if self.num_in_ch > 1:
- shortcut = torch.repeat_interleave(x, self.scale * self.scale, dim=1)
- else:
- shortcut = x # will repeat the input in the channel dimension (repeat scale * scale times)
- y = self.backbone(x) + shortcut
- y = self.upsampler(y)
- return y
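The point of the ECB block above is that its five training-time branches fold into a single 3x3 convolution at inference time. A minimal equivalence-check sketch, assuming `basicsr.archs.ecbsr_arch` is importable and using arbitrary illustrative sizes:

```python
import torch
from basicsr.archs.ecbsr_arch import ECB  # assumed import path for the block above

# Arbitrary channel sizes, chosen only for illustration.
ecb = ECB(in_channels=8, out_channels=8, depth_multiplier=2.0, act_type='linear', with_idt=True)
x = torch.randn(1, 8, 32, 32)

with torch.no_grad():
    ecb.train()
    y_branches = ecb(x)   # sum of conv3x3 + the four SeqConv3x3 branches + identity
    ecb.eval()
    y_fused = ecb(x)      # single 3x3 conv assembled from rep_params()

print(torch.allclose(y_branches, y_fused, atol=1e-5))  # expected: True
```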
diff --git a/spaces/Iceclear/StableSR/StableSR/taming/util.py b/spaces/Iceclear/StableSR/StableSR/taming/util.py
deleted file mode 100644
index 06053e5defb87977f9ab07e69bf4da12201de9b7..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/taming/util.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import os, hashlib
-import requests
-from tqdm import tqdm
-
-URL_MAP = {
- "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"
-}
-
-CKPT_MAP = {
- "vgg_lpips": "vgg.pth"
-}
-
-MD5_MAP = {
- "vgg_lpips": "d507d7349b931f0638a25a48a722f98a"
-}
-
-
-def download(url, local_path, chunk_size=1024):
- os.makedirs(os.path.split(local_path)[0], exist_ok=True)
- with requests.get(url, stream=True) as r:
- total_size = int(r.headers.get("content-length", 0))
- with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
- with open(local_path, "wb") as f:
- for data in r.iter_content(chunk_size=chunk_size):
- if data:
- f.write(data)
- pbar.update(chunk_size)
-
-
-def md5_hash(path):
- with open(path, "rb") as f:
- content = f.read()
- return hashlib.md5(content).hexdigest()
-
-
-def get_ckpt_path(name, root, check=False):
- assert name in URL_MAP
- path = os.path.join(root, CKPT_MAP[name])
- if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
- print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
- download(URL_MAP[name], path)
- md5 = md5_hash(path)
- assert md5 == MD5_MAP[name], md5
- return path
-
-
-class KeyNotFoundError(Exception):
- def __init__(self, cause, keys=None, visited=None):
- self.cause = cause
- self.keys = keys
- self.visited = visited
- messages = list()
- if keys is not None:
- messages.append("Key not found: {}".format(keys))
- if visited is not None:
- messages.append("Visited: {}".format(visited))
- messages.append("Cause:\n{}".format(cause))
- message = "\n".join(messages)
- super().__init__(message)
-
-
-def retrieve(
- list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False
-):
- """Given a nested list or dict return the desired value at key expanding
- callable nodes if necessary and :attr:`expand` is ``True``. The expansion
- is done in-place.
-
- Parameters
- ----------
- list_or_dict : list or dict
- Possibly nested list or dictionary.
- key : str
- key/to/value, path like string describing all keys necessary to
- consider to get to the desired value. List indices can also be
- passed here.
- splitval : str
- String that defines the delimiter between keys of the
- different depth levels in `key`.
- default : obj
- Value returned if :attr:`key` is not found.
- expand : bool
- Whether to expand callable nodes on the path or not.
-
- Returns
- -------
- The desired value or if :attr:`default` is not ``None`` and the
- :attr:`key` is not found returns ``default``.
-
- Raises
- ------
- Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is
- ``None``.
- """
-
- keys = key.split(splitval)
-
- success = True
- try:
- visited = []
- parent = None
- last_key = None
- for key in keys:
- if callable(list_or_dict):
- if not expand:
- raise KeyNotFoundError(
- ValueError(
- "Trying to get past callable node with expand=False."
- ),
- keys=keys,
- visited=visited,
- )
- list_or_dict = list_or_dict()
- parent[last_key] = list_or_dict
-
- last_key = key
- parent = list_or_dict
-
- try:
- if isinstance(list_or_dict, dict):
- list_or_dict = list_or_dict[key]
- else:
- list_or_dict = list_or_dict[int(key)]
- except (KeyError, IndexError, ValueError) as e:
- raise KeyNotFoundError(e, keys=keys, visited=visited)
-
- visited += [key]
- # final expansion of retrieved value
- if expand and callable(list_or_dict):
- list_or_dict = list_or_dict()
- parent[last_key] = list_or_dict
- except KeyNotFoundError as e:
- if default is None:
- raise e
- else:
- list_or_dict = default
- success = False
-
- if not pass_success:
- return list_or_dict
- else:
- return list_or_dict, success
-
-
-if __name__ == "__main__":
- config = {"keya": "a",
- "keyb": "b",
- "keyc":
- {"cc1": 1,
- "cc2": 2,
- }
- }
- from omegaconf import OmegaConf
- config = OmegaConf.create(config)
- print(config)
- retrieve(config, "keya")
-
diff --git a/spaces/Jamkonams/AutoGPT/tests/__init__.py b/spaces/Jamkonams/AutoGPT/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Jikiwi/sovits-models/inference/slicer.py b/spaces/Jikiwi/sovits-models/inference/slicer.py
deleted file mode 100644
index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000
--- a/spaces/Jikiwi/sovits-models/inference/slicer.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import librosa
-import torch
-import torchaudio
-
-
-class Slicer:
- def __init__(self,
- sr: int,
- threshold: float = -40.,
- min_length: int = 5000,
- min_interval: int = 300,
- hop_size: int = 20,
- max_sil_kept: int = 5000):
- if not min_length >= min_interval >= hop_size:
- raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
- if not max_sil_kept >= hop_size:
- raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
- min_interval = sr * min_interval / 1000
- self.threshold = 10 ** (threshold / 20.)
- self.hop_size = round(sr * hop_size / 1000)
- self.win_size = min(round(min_interval), 4 * self.hop_size)
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
- self.min_interval = round(min_interval / self.hop_size)
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
- def _apply_slice(self, waveform, begin, end):
- if len(waveform.shape) > 1:
- return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
- else:
- return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
-
- # @timeit
- def slice(self, waveform):
- if len(waveform.shape) > 1:
- samples = librosa.to_mono(waveform)
- else:
- samples = waveform
- if samples.shape[0] <= self.min_length:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
- sil_tags = []
- silence_start = None
- clip_start = 0
- for i, rms in enumerate(rms_list):
- # Keep looping while frame is silent.
- if rms < self.threshold:
- # Record start of silent frames.
- if silence_start is None:
- silence_start = i
- continue
- # Keep looping while frame is not silent and silence start has not been recorded.
- if silence_start is None:
- continue
- # Clear recorded silence start if interval is not enough or clip is too short
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
- need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
- if not is_leading_silence and not need_slice_middle:
- silence_start = None
- continue
- # Need slicing. Record the range of silent frames to be removed.
- if i - silence_start <= self.max_sil_kept:
- pos = rms_list[silence_start: i + 1].argmin() + silence_start
- if silence_start == 0:
- sil_tags.append((0, pos))
- else:
- sil_tags.append((pos, pos))
- clip_start = pos
- elif i - silence_start <= self.max_sil_kept * 2:
- pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
- pos += i - self.max_sil_kept
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- clip_start = pos_r
- else:
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
- clip_start = max(pos_r, pos)
- else:
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- else:
- sil_tags.append((pos_l, pos_r))
- clip_start = pos_r
- silence_start = None
- # Deal with trailing silence.
- total_frames = rms_list.shape[0]
- if silence_start is not None and total_frames - silence_start >= self.min_interval:
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
- pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
- sil_tags.append((pos, total_frames + 1))
- # Apply and return slices.
- if len(sil_tags) == 0:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- else:
- chunks = []
-            # The first silent segment does not start at the beginning; add the leading voiced segment.
- if sil_tags[0][0]:
- chunks.append(
- {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
- for i in range(0, len(sil_tags)):
-                # Mark voiced segments (skip the first one).
- if i:
- chunks.append({"slice": False,
- "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
-                # Mark all silent segments.
- chunks.append({"slice": True,
- "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
-            # The last silent segment does not reach the end; add the trailing voiced segment.
- if sil_tags[-1][1] * self.hop_size < len(waveform):
- chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
- chunk_dict = {}
- for i in range(len(chunks)):
- chunk_dict[str(i)] = chunks[i]
- return chunk_dict
-
-
-def cut(audio_path, db_thresh=-30, min_len=5000):
- audio, sr = librosa.load(audio_path, sr=None)
- slicer = Slicer(
- sr=sr,
- threshold=db_thresh,
- min_length=min_len
- )
- chunks = slicer.slice(audio)
- return chunks
-
-
-def chunks2audio(audio_path, chunks):
- chunks = dict(chunks)
- audio, sr = torchaudio.load(audio_path)
- if len(audio.shape) == 2 and audio.shape[1] >= 2:
- audio = torch.mean(audio, dim=0).unsqueeze(0)
- audio = audio.cpu().numpy()[0]
- result = []
- for k, v in chunks.items():
- tag = v["split_time"].split(",")
- if tag[0] != tag[1]:
- result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
- return result, sr
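A minimal sketch of the intended slicing pipeline — the input path is a hypothetical placeholder, and the import path assumes this space's layout (`inference/slicer.py`):

```python
from inference.slicer import cut, chunks2audio  # assumed import path within this space

audio_path = "example.wav"  # hypothetical input file
chunks = cut(audio_path, db_thresh=-30, min_len=5000)
segments, sr = chunks2audio(audio_path, chunks)

for is_silence, samples in segments:
    kind = "silence" if is_silence else "voiced"
    print(f"{kind}: {len(samples) / sr:.2f} s")
```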
diff --git a/spaces/Joeri/fabry-perot/app.py b/spaces/Joeri/fabry-perot/app.py
deleted file mode 100644
index c89478fd6f4181c09473a2da486b76bf89586d9e..0000000000000000000000000000000000000000
--- a/spaces/Joeri/fabry-perot/app.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import gradio as gr
-import numpy as np
-import matplotlib.pyplot as plt
-import tensorflow as tf
-
-# Helper functions
-def cos_mat(theta, n):
- return np.sqrt(1 - (np.sin(np.radians(theta))/n)**2)
-
-def calculate_R(theta, n):
- numerator = n * cos_mat(theta,n) - np.cos(np.radians(theta))
- denominator = n * cos_mat(theta, n) + np.cos(np.radians(theta))
- return (numerator/denominator)**2
-
-def calculate_phase(theta, n, l, lamb):
- return (2*np.pi*n/lamb) *(2*l*cos_mat(theta, n))
-
-# Calculate transmission - ground truth
-def trans(lamb, n, l, theta):
- R = calculate_R(theta, n)
- F = 4*R/((1 - R)**2)
- delta = calculate_phase(theta, n, l, lamb)
- return F, delta[0] * lamb[0], (1 + F*np.sin(delta/2)**2)**(-1)
-
-# create swish activation function
-def swish(x):
- return x*tf.keras.activations.sigmoid(x)
-tf.keras.utils.get_custom_objects()['swish'] = tf.keras.layers.Activation(swish)
-
-# Normalizations of Fd to interval [0, 1]
-def Fd_normalize(double):
- double = double - np.array([11.8966, 21697])
- double = np.divide(2*double, [23.8, 43394])
- return double
-
-def Fd_unnormalize(double):
- double = np.multiply(double/2, [23.8, 43394])
- double = double + np.array([11.8966, 21697])
- return double
-
-# Main function
-def get_transmission(n, l, theta):
-
- # Compute ground truth
- lamb = np.arange(400, 800, 2).astype('float32')
- F, delta, transmission = trans(lamb, n, l, theta)
-
- # Make predictions
- Fulcon = tf.keras.models.load_model('network_fabry_perot.hdf5',
- custom_objects={'Activation' : tf.keras.layers.Activation(swish)})
- input = Fd_normalize(np.array([[F, delta]]))
- pred = Fulcon.predict(tf.constant(input))
-
- # Clear figure
- plt.clf()
-
- # Plot ground truth
- plt.plot(lamb, transmission, label='Ground truth')
-
- # Plot network prediction
- plt.plot(lamb, pred[0], 'r', label='Prediction')
-
- # Layout
- plt.xlabel("Wavelength (nm)", fontsize=14)
- plt.ylabel("Transmission", fontsize=14)
- plt.xticks(fontsize=14)
- plt.yticks(fontsize=14)
- plt.ylim(0, 1.05)
- plt.title(r"Transmission T($\lambda$)", fontsize=16)
- plt.legend()
-
- return plt.gcf()
-
-iface = gr.Interface(fn=get_transmission,
- inputs=[gr.inputs.Slider(minimum=1.05, maximum=3.55, label='Index of refraction (n)'),
- gr.inputs.Slider(minimum=100, maximum=1000, label='Thickness (l)'),
- gr.inputs.Slider(minimum=-70,maximum=70, label=r'Incident angle (theta)')],
- outputs="plot",
- title="Fabry-Pérot resonator",
- description=r"Gradio demo for a neural network trained to predict the transmission spectrum " +
- "of a Fabry-Pérot resonator. More info can be found on https://github.com/Joeri38/inverse-design.",
- allow_flagging='never')
-iface.launch()
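For reference, the helper functions in this app encode the standard Fabry-Pérot (Airy) transmission. In the notation of the code, with $\theta$ the incident angle, $n$ the refractive index, $l$ the thickness and $\lambda$ the wavelength:

$$
R = \left(\frac{n\cos\theta_t - \cos\theta}{n\cos\theta_t + \cos\theta}\right)^{2}, \qquad
F = \frac{4R}{(1-R)^{2}}, \qquad
\delta = \frac{4\pi n l \cos\theta_t}{\lambda}, \qquad
T(\lambda) = \frac{1}{1 + F\sin^{2}(\delta/2)},
$$

where $\cos\theta_t = \sqrt{1 - (\sin\theta/n)^{2}}$ is what `cos_mat` computes. `trans` returns $F$, the wavelength-independent product $\delta(\lambda_0)\,\lambda_0 = 4\pi n l \cos\theta_t$, and $T(\lambda)$.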
diff --git a/spaces/JohnC26/ChatGPTwithAPI/app.py b/spaces/JohnC26/ChatGPTwithAPI/app.py
deleted file mode 100644
index 374bd42fdba4b848e43a0c6f2243b8e766e9a6cf..0000000000000000000000000000000000000000
--- a/spaces/JohnC26/ChatGPTwithAPI/app.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import gradio as gr
-import os
-import json
-import requests
-
-#Streaming endpoint
-API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
-
-#Testing with my Open AI Key
-#OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
-def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]): #repetition_penalty, top_k
-
- payload = {
- "model": "gpt-3.5-turbo",
- "messages": [{"role": "user", "content": f"{inputs}"}],
- "temperature" : 1.0,
- "top_p":1.0,
- "n" : 1,
- "stream": True,
- "presence_penalty":0,
- "frequency_penalty":0,
- }
-
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {openai_api_key}"
- }
-
- print(f"chat_counter - {chat_counter}")
- if chat_counter != 0 :
- messages=[]
- for data in chatbot:
- temp1 = {}
- temp1["role"] = "user"
- temp1["content"] = data[0]
- temp2 = {}
- temp2["role"] = "assistant"
- temp2["content"] = data[1]
- messages.append(temp1)
- messages.append(temp2)
- temp3 = {}
- temp3["role"] = "user"
- temp3["content"] = inputs
- messages.append(temp3)
- #messages
- payload = {
- "model": "gpt-3.5-turbo",
- "messages": messages, #[{"role": "user", "content": f"{inputs}"}],
- "temperature" : temperature, #1.0,
- "top_p": top_p, #1.0,
- "n" : 1,
- "stream": True,
- "presence_penalty":0,
- "frequency_penalty":0,
- }
-
- chat_counter+=1
-
- history.append(inputs)
- print(f"payload is - {payload}")
- # make a POST request to the API endpoint using the requests.post method, passing in stream=True
- response = requests.post(API_URL, headers=headers, json=payload, stream=True)
- #response = requests.post(API_URL, headers=headers, json=payload, stream=True)
- token_counter = 0
- partial_words = ""
-
- counter=0
- for chunk in response.iter_lines():
- #Skipping first chunk
- if counter == 0:
- counter+=1
- continue
- #counter+=1
- # check whether each line is non-empty
- if chunk.decode() :
- chunk = chunk.decode()
- # decode each line as response data is in bytes
- if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
- #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
- # break
- partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
- if token_counter == 0:
- history.append(" " + partial_words)
- else:
- history[-1] = partial_words
- chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list
- token_counter+=1
- yield chat, history, chat_counter # resembles {chatbot: chat, state: history}
-
-
-def reset_textbox():
- return gr.update(value='')
-
-title = """
🔥ChatGPT API 🚀Streaming🚀
"""
-description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
-```
-User:
-Assistant:
-User:
-Assistant:
-...
-```
-In this app, you can explore the outputs of a gpt-3.5-turbo LLM.
-"""
-
-with gr.Blocks(css = """#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
- #chatbot {height: 520px; overflow: auto;}""") as demo:
- gr.HTML(title)
-    gr.HTML('''
-Duplicate the Space and run securely with your OpenAI API Key
-''')
- with gr.Column(elem_id = "col_container"):
- openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
- chatbot = gr.Chatbot(elem_id='chatbot') #c
- inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t
- state = gr.State([]) #s
- b1 = gr.Button()
-
- #inputs, top_p, temperature, top_k, repetition_penalty
- with gr.Accordion("Parameters", open=False):
- top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
- temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
- #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
- #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
- chat_counter = gr.Number(value=0, visible=False, precision=0)
-
- inputs.submit( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
- b1.click( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
- b1.click(reset_textbox, [], [inputs])
- inputs.submit(reset_textbox, [], [inputs])
-
- #gr.Markdown(description)
- demo.queue().launch(debug=True)
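The streaming loop in `predict` assumes each server-sent line carries a `data: ` prefix followed by a JSON delta. A minimal sketch of that parsing step, using an illustrative, hypothetical payload:

```python
import json

# Hypothetical SSE line of the form the loop above expects.
chunk = 'data: {"choices": [{"delta": {"content": "Hello"}}]}'

if len(chunk) > 12 and "content" in json.loads(chunk[6:])["choices"][0]["delta"]:
    piece = json.loads(chunk[6:])["choices"][0]["delta"]["content"]
    print(piece)  # -> Hello
```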
diff --git a/spaces/Junity/TokaiTeio-SVC/inference/__init__.py b/spaces/Junity/TokaiTeio-SVC/inference/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Junity/TokaiTeio-SVC/modules/attentions.py b/spaces/Junity/TokaiTeio-SVC/modules/attentions.py
deleted file mode 100644
index f9c11ca4a3acb86bf1abc04d9dcfa82a4ed4061f..0000000000000000000000000000000000000000
--- a/spaces/Junity/TokaiTeio-SVC/modules/attentions.py
+++ /dev/null
@@ -1,349 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import modules.commons as commons
-import modules.modules as modules
-from modules.modules import LayerNorm
-
-
-class FFT(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
- proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias,
- proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
- x = x * x_mask
- return x
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along the columns
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
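- # approximate GELU, computed as x * sigmoid(1.702 * x)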
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/Kabriske/Multilingual_Video_Subtitler/translator.py b/spaces/Kabriske/Multilingual_Video_Subtitler/translator.py
deleted file mode 100644
index 6c6391611af72a00d65a12c25c6d1fb8612b3d69..0000000000000000000000000000000000000000
--- a/spaces/Kabriske/Multilingual_Video_Subtitler/translator.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-
-from googletrans import Translator
-
-from utils import log
-
-
-class MyTranslator:
- def __init__(self):
- self.translator = Translator()
-
- def translate(self, text_file_path, source_language, target_language):
- # Open the input file and read its contents
- with open(text_file_path, 'r') as f:
- input_text = f.read()
-
- filename, ext = os.path.splitext(text_file_path)
- output_file_path = f"{filename}_translated{ext}"
- log(f"Translating text to {target_language} and saving to {output_file_path}")
- # Translate the text to the desired language
- output_text = self.translator.translate(input_text, dest=target_language).text
- # Write the translated text to the output file
- with open(output_file_path, 'w') as f:
- f.write(output_text)
-
- return output_file_path
-
-
-if __name__ == '__main__':
- translator = MyTranslator()
- translation_path = translator.translate('sample/iPhone_14_Pro.vtt', 'en', 'es')
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/i18n/locale_diff.py b/spaces/Kangarroar/ApplioRVC-Inference/i18n/locale_diff.py
deleted file mode 100644
index 387ddfe1b16c2f9f32b6b9682b61353837b06bd8..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/i18n/locale_diff.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import json
-import os
-from collections import OrderedDict
-
-# Define the standard file name
-standard_file = "en_US.json"
-
-# Find all JSON files in the directory
-dir_path = "./"
-languages = [
- f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
-]
-
-# Load the standard file
-with open(standard_file, "r", encoding="utf-8") as f:
- standard_data = json.load(f, object_pairs_hook=OrderedDict)
-
-# Loop through each language file
-for lang_file in languages:
- # Load the language file
- with open(lang_file, "r", encoding="utf-8") as f:
- lang_data = json.load(f, object_pairs_hook=OrderedDict)
-
- # Find the difference between the language file and the standard file
- diff = set(standard_data.keys()) - set(lang_data.keys())
-
- miss = set(lang_data.keys()) - set(standard_data.keys())
-
- # Add any missing keys to the language file
- for key in diff:
- lang_data[key] = key
-
- # Delete any extra keys from the language file
- for key in miss:
- del lang_data[key]
-
- # Sort the keys of the language file to match the order of the standard file
- lang_data = OrderedDict(
- sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
- )
-
- # Save the updated language file
- with open(lang_file, "w", encoding="utf-8") as f:
- json.dump(lang_data, f, ensure_ascii=False, indent=4)
- f.write("\n")
diff --git a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_reds.py b/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_reds.py
deleted file mode 100644
index 0390aae6faecd912a66bc84f868800ad6e0cfbc5..0000000000000000000000000000000000000000
--- a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_reds.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# --------------------------------------------------------
-# InstructDiffusion
-# Based on instruct-pix2pix (https://github.com/timothybrooks/instruct-pix2pix)
-# Modified by Chen Li (edward82@stu.xjtu.edu.cn)
-# --------------------------------------------------------
-
-import os
-import numpy as np
-from torch.utils.data import Dataset
-import torch
-from PIL import Image
-import torchvision.transforms.functional as TF
-from pdb import set_trace as stx
-import random
-import cv2
-from PIL import Image
-import torchvision
-
-
-def is_image_file(filename):
- return any(filename.endswith(extension) for extension in ['jpeg', 'JPEG', 'jpg', 'png', 'JPG', 'PNG', 'gif'])
-
-
-class REDS(Dataset):
- def __init__(self, path, split="train", size=256, interpolation="pil_lanczos",
- flip_prob=0.5, sample_weight=1.0, instruct=False):
- super(REDS, self).__init__()
-
- inp_files = sorted(os.listdir(os.path.join(path, split, 'blur')))
- tar_files = sorted(os.listdir(os.path.join(path, split, 'sharp')))
-
- if split == "train":
- self.inp_filenames = [os.path.join(path, split, 'blur', d, x) for d in inp_files for x in sorted(os.listdir(os.path.join(path, split, 'blur', d))) if is_image_file(x)]
- self.tar_filenames = [os.path.join(path, split, 'sharp', d, x) for d in tar_files for x in sorted(os.listdir(os.path.join(path, split, 'sharp', d))) if is_image_file(x)]
- else:
- self.inp_filenames = [os.path.join(path, split, 'blur', x) for x in inp_files if is_image_file(x)]
- self.tar_filenames = [os.path.join(path, split, 'sharp', x) for x in tar_files if is_image_file(x)]
-
- self.size = size
- self.flip_prob = flip_prob
- self.sample_weight = sample_weight
- self.instruct = instruct
- assert len(self.inp_filenames) == len(self.tar_filenames)
- self.sizex = len(self.tar_filenames) # get the size of target
-
- self.interpolation = {
- "cv_nearest": cv2.INTER_NEAREST,
- "cv_bilinear": cv2.INTER_LINEAR,
- "cv_bicubic": cv2.INTER_CUBIC,
- "cv_area": cv2.INTER_AREA,
- "cv_lanczos": cv2.INTER_LANCZOS4,
- "pil_nearest": Image.NEAREST,
- "pil_bilinear": Image.BILINEAR,
- "pil_bicubic": Image.BICUBIC,
- "pil_box": Image.BOX,
- "pil_hamming": Image.HAMMING,
- "pil_lanczos": Image.LANCZOS,
- }[interpolation]
-
- prompt_path='dataset/prompt/prompt_deblur.txt'
- self.prompt_list=[]
- with open(prompt_path) as f:
- line=f.readline()
- while line:
- line=line.strip('\n')
- self.prompt_list.append(line)
- line=f.readline()
-
- print(f"REDS has {len(self)} samples!!")
-
- def __len__(self):
- return int(self.sizex * self.sample_weight)
-
- def __getitem__(self, index):
- if self.sample_weight >= 1:
- index_ = index % self.sizex
- else:
- index_ = int(index / self.sample_weight) + random.randint(0, int(1 / self.sample_weight) - 1)
-
- inp_path = self.inp_filenames[index_]
- tar_path = self.tar_filenames[index_]
-
- inp_img = Image.open(inp_path)
- tar_img = Image.open(tar_path)
-
- width, height = inp_img.size
- tar_width, tar_height = tar_img.size
- assert tar_width == width and tar_height == height, "Input and target image mismatch"
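- # resize so that the shorter side equals self.size while preserving the aspect ratio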
- aspect_ratio = float(width) / float(height)
- if width < height:
- new_width = self.size
- new_height = int(self.size / aspect_ratio)
- else:
- new_height = self.size
- new_width = int(self.size * aspect_ratio)
- inp_img = inp_img.resize((new_width, new_height), self.interpolation)
- tar_img = tar_img.resize((new_width, new_height), self.interpolation)
-
- inp_img = np.array(inp_img).astype(np.float32).transpose(2, 0, 1)
- inp_img_tensor = torch.tensor((inp_img / 127.5 - 1.0).astype(np.float32))
- tar_img = np.array(tar_img).astype(np.float32).transpose(2, 0, 1)
- tar_img_tensor = torch.tensor((tar_img / 127.5 - 1.0).astype(np.float32))
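- # apply the same random crop and horizontal flip to the input/target pair by concatenating them first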
- crop = torchvision.transforms.RandomCrop(self.size)
- flip = torchvision.transforms.RandomHorizontalFlip(float(self.flip_prob))
- image_0, image_1 = flip(crop(torch.cat((inp_img_tensor, tar_img_tensor)))).chunk(2)
-
- prompt = random.choice(self.prompt_list)
- if self.instruct:
- prompt = "Image Deblurring: " + prompt
-
- return dict(edited=image_1, edit=dict(c_concat=image_0, c_crossattn=prompt))
\ No newline at end of file
diff --git a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/prepare_car.py b/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/prepare_car.py
deleted file mode 100644
index fcf3818fedd64db65ed08f114c103824f01b6e20..0000000000000000000000000000000000000000
--- a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/prepare_car.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- coding:utf8 -*-
-
-import os
-from PIL import Image
-from shutil import copyfile, copytree, rmtree, move
-
-PATH_DATASET = './car-dataset' # folder to be processed
-PATH_NEW_DATASET = './car-reid-dataset' # output folder after processing
-PATH_ALL_IMAGES = PATH_NEW_DATASET + '/all_images'
-PATH_TRAIN = PATH_NEW_DATASET + '/train'
-PATH_TEST = PATH_NEW_DATASET + '/test'
-
-# Helper that creates a directory if it does not already exist
-def mymkdir(path):
- path = path.strip() # strip leading/trailing whitespace
- path = path.rstrip("\\") # strip a trailing backslash
- isExists = os.path.exists(path) # check whether the path already exists
- if not isExists:
- os.makedirs(path) # create the directory if it does not exist
- print(path + ' created successfully')
- return True
- else:
- # the directory already exists, so do not create it again
- print(path + ' already exists')
- return False
-
-class BatchRename():
- '''
- Batch-rename the image files in a folder
- '''
-
- def __init__(self):
- self.path = PATH_DATASET # folder whose images will be renamed
-
- # Resize images
- def resize(self):
- for aroot, dirs, files in os.walk(self.path):
- # aroot is the current directory as os.walk traverses self.path (including self.path itself); dirs lists the folders under it
- filelist = files # note: this only covers the files in the current directory
- # print('list', list)
-
- # filelist = os.listdir(self.path) # get the file list
- total_num = len(filelist) # number of files
-
- for item in filelist:
- if item.endswith('.jpg'): # source images are jpg; adjust here if the source format is png or another format
- src = os.path.join(os.path.abspath(aroot), item)
-
- # resize the image to 128 (width) x 256 (height)
- im = Image.open(src)
- out = im.resize((128, 256), Image.ANTIALIAS) # resize image with high-quality
- out.save(src) # save back to the original path
-
- def rename(self):
-
- for aroot, dirs, files in os.walk(self.path):
- # aroot is the current directory as os.walk traverses self.path (including self.path itself); dirs lists the folders under it
- filelist = files # note: this only covers the files in the current directory
- # print('list', list)
-
- # filelist = os.listdir(self.path) # get the file list
- total_num = len(filelist) # number of files
-
- i = 1 # file numbering starts at 1
- for item in filelist:
- if item.endswith('.jpg'): # source images are jpg; adjust here if the source format is png or another format
- src = os.path.join(os.path.abspath(aroot), item)
-
- # derive a per-vehicle directory name from the image name
- dirname = str(item.split('_')[0])
- # create a directory grouping images of the same vehicle
- #new_dir = os.path.join(self.path, '..', 'bbox_all', dirname)
- new_dir = os.path.join(PATH_ALL_IMAGES, dirname)
- if not os.path.isdir(new_dir):
- mymkdir(new_dir)
-
- # number of images already in new_dir
- num_pic = len(os.listdir(new_dir))
-
- dst = os.path.join(os.path.abspath(new_dir),
- dirname + 'C1T0001F' + str(num_pic + 1) + '.jpg')
- # the output is also jpg (change it here for png); the C1T0001F pattern follows the mars.py filename convention (camera ID, track index)
- # dst = os.path.join(os.path.abspath(self.path), '0000' + format(str(i), '0>3s') + '.jpg') would name files like 0000000.jpg; customize the format as needed
- try:
- copyfile(src, dst) #os.rename(src, dst)
- print ('converting %s to %s ...' % (src, dst))
- i = i + 1
- except:
- continue
- print ('total %d to rename & converted %d jpgs' % (total_num, i))
-
- def split(self):
- #---------------------------------------
- #train_test
- images_path = PATH_ALL_IMAGES
- train_save_path = PATH_TRAIN
- test_save_path = PATH_TEST
- if not os.path.isdir(train_save_path):
- os.mkdir(train_save_path)
- os.mkdir(test_save_path)
-
- for _, dirs, _ in os.walk(images_path, topdown=True):
- for i, dir in enumerate(dirs):
- for root, _, files in os.walk(images_path + '/' + dir, topdown=True):
- for j, file in enumerate(files):
- if(j==0): # test set: the first image of each vehicle
- print("index: %s folder: %s image: %s assigned to the test set" % (i + 1, root, file))
- src_path = root + '/' + file
- dst_dir = test_save_path + '/' + dir
- if not os.path.isdir(dst_dir):
- os.mkdir(dst_dir)
- dst_path = dst_dir + '/' + file
- move(src_path, dst_path)
- else:
- src_path = root + '/' + file
- dst_dir = train_save_path + '/' + dir
- if not os.path.isdir(dst_dir):
- os.mkdir(dst_dir)
- dst_path = dst_dir + '/' + file
- move(src_path, dst_path)
- rmtree(PATH_ALL_IMAGES)
-
-if __name__ == '__main__':
- demo = BatchRename()
- demo.resize()
- demo.rename()
- demo.split()
-
-
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/api/fastapi_utils.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/api/fastapi_utils.py
deleted file mode 100644
index adf582a7c33c2d68ed32fb8b3382fdeb388db0d0..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/api/fastapi_utils.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Collection of utilities for FastAPI apps."""
-
-import inspect
-from typing import Any, Type
-
-from fastapi import FastAPI, Form
-from pydantic import BaseModel
-
-
-def as_form(cls: Type[BaseModel]) -> Any:
- """Adds an as_form class method to decorated models.
-
- The as_form class method can be used with FastAPI endpoints.
- """
- new_params = [
- inspect.Parameter(
- field.alias,
- inspect.Parameter.POSITIONAL_ONLY,
- default=(Form(field.default) if not field.required else Form(...)),
- )
- for field in cls.__fields__.values()
- ]
-
- async def _as_form(**data): # type: ignore
- return cls(**data)
-
- sig = inspect.signature(_as_form)
- sig = sig.replace(parameters=new_params)
- _as_form.__signature__ = sig # type: ignore
- setattr(cls, "as_form", _as_form)
- return cls
-
-
-def patch_fastapi(app: FastAPI) -> None:
- """Patch function to allow relative url resolution.
-
- This patch is required to make fastapi fully functional with a relative url path.
- This code snippet can be copy-pasted to any Fastapi application.
- """
- from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
- from starlette.requests import Request
- from starlette.responses import HTMLResponse
-
- async def redoc_ui_html(req: Request) -> HTMLResponse:
- assert app.openapi_url is not None
- redoc_ui = get_redoc_html(
- openapi_url="./" + app.openapi_url.lstrip("/"),
- title=app.title + " - Redoc UI",
- )
-
- return HTMLResponse(redoc_ui.body.decode("utf-8"))
-
- async def swagger_ui_html(req: Request) -> HTMLResponse:
- assert app.openapi_url is not None
- swagger_ui = get_swagger_ui_html(
- openapi_url="./" + app.openapi_url.lstrip("/"),
- title=app.title + " - Swagger UI",
- oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
- )
-
- # insert a request interceptor so that all requests run on a relative path
- request_interceptor = (
- "requestInterceptor: (e) => {"
- "\n\t\t\tvar url = window.location.origin + window.location.pathname"
- '\n\t\t\turl = url.substring( 0, url.lastIndexOf( "/" ) + 1);'
- "\n\t\t\turl = e.url.replace(/http(s)?:\/\/[^/]*\//i, url);" # noqa: W605
- "\n\t\t\te.contextUrl = url"
- "\n\t\t\te.url = url"
- "\n\t\t\treturn e;}"
- )
-
- return HTMLResponse(
- swagger_ui.body.decode("utf-8").replace(
- "dom_id: '#swagger-ui',",
- "dom_id: '#swagger-ui',\n\t\t" + request_interceptor + ",",
- )
- )
-
- # remove old docs route and add our patched route
- routes_new = []
- for app_route in app.routes:
- if app_route.path == "/docs": # type: ignore
- continue
-
- if app_route.path == "/redoc": # type: ignore
- continue
-
- routes_new.append(app_route)
-
- app.router.routes = routes_new
-
- assert app.docs_url is not None
- app.add_route(app.docs_url, swagger_ui_html, include_in_schema=False)
- assert app.redoc_url is not None
- app.add_route(app.redoc_url, redoc_ui_html, include_in_schema=False)
-
- # Make graphql relative
- from starlette import graphql
-
- graphql.GRAPHIQL = graphql.GRAPHIQL.replace(
- "({{REQUEST_PATH}}", '("." + {{REQUEST_PATH}}'
- )
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/models/fatchord_version.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/models/fatchord_version.py
deleted file mode 100644
index 6413a921651971b4859ed7de8b3a676cd6595d6b..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/models/fatchord_version.py
+++ /dev/null
@@ -1,434 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from vocoder.distribution import sample_from_discretized_mix_logistic
-from vocoder.display import *
-from vocoder.wavernn.audio import *
-
-
-class ResBlock(nn.Module):
- def __init__(self, dims):
- super().__init__()
- self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
- self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
- self.batch_norm1 = nn.BatchNorm1d(dims)
- self.batch_norm2 = nn.BatchNorm1d(dims)
-
- def forward(self, x):
- residual = x
- x = self.conv1(x)
- x = self.batch_norm1(x)
- x = F.relu(x)
- x = self.conv2(x)
- x = self.batch_norm2(x)
- return x + residual
-
-
-class MelResNet(nn.Module):
- def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad):
- super().__init__()
- k_size = pad * 2 + 1
- self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False)
- self.batch_norm = nn.BatchNorm1d(compute_dims)
- self.layers = nn.ModuleList()
- for i in range(res_blocks):
- self.layers.append(ResBlock(compute_dims))
- self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1)
-
- def forward(self, x):
- x = self.conv_in(x)
- x = self.batch_norm(x)
- x = F.relu(x)
- for f in self.layers: x = f(x)
- x = self.conv_out(x)
- return x
-
-
-class Stretch2d(nn.Module):
- def __init__(self, x_scale, y_scale):
- super().__init__()
- self.x_scale = x_scale
- self.y_scale = y_scale
-
- def forward(self, x):
- b, c, h, w = x.size()
- x = x.unsqueeze(-1).unsqueeze(3)
- x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale)
- return x.view(b, c, h * self.y_scale, w * self.x_scale)
-
-
-class UpsampleNetwork(nn.Module):
- def __init__(self, feat_dims, upsample_scales, compute_dims,
- res_blocks, res_out_dims, pad):
- super().__init__()
- total_scale = np.cumproduct(upsample_scales)[-1]
- self.indent = pad * total_scale
- self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad)
- self.resnet_stretch = Stretch2d(total_scale, 1)
- self.up_layers = nn.ModuleList()
- for scale in upsample_scales:
- k_size = (1, scale * 2 + 1)
- padding = (0, scale)
- stretch = Stretch2d(scale, 1)
- conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False)
- conv.weight.data.fill_(1. / k_size[1])
- self.up_layers.append(stretch)
- self.up_layers.append(conv)
-
- def forward(self, m):
- aux = self.resnet(m).unsqueeze(1)
- aux = self.resnet_stretch(aux)
- aux = aux.squeeze(1)
- m = m.unsqueeze(1)
- for f in self.up_layers: m = f(m)
- m = m.squeeze(1)[:, :, self.indent:-self.indent]
- return m.transpose(1, 2), aux.transpose(1, 2)
-
-
-class WaveRNN(nn.Module):
- def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors,
- feat_dims, compute_dims, res_out_dims, res_blocks,
- hop_length, sample_rate, mode='RAW'):
- super().__init__()
- self.mode = mode
- self.pad = pad
- if self.mode == 'RAW' :
- self.n_classes = 2 ** bits
- elif self.mode == 'MOL' :
- self.n_classes = 30
- else :
- raise RuntimeError("Unknown model mode value - ", self.mode)
-
- self.rnn_dims = rnn_dims
- self.aux_dims = res_out_dims // 4
- self.hop_length = hop_length
- self.sample_rate = sample_rate
-
- self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad)
- self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
- self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
- self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True)
- self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
- self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
- self.fc3 = nn.Linear(fc_dims, self.n_classes)
-
- self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False)
- self.num_params()
-
- def forward(self, x, mels):
- self.step += 1
- bsize = x.size(0)
- if torch.cuda.is_available():
- h1 = torch.zeros(1, bsize, self.rnn_dims).cuda()
- h2 = torch.zeros(1, bsize, self.rnn_dims).cuda()
- else:
- h1 = torch.zeros(1, bsize, self.rnn_dims).cpu()
- h2 = torch.zeros(1, bsize, self.rnn_dims).cpu()
- mels, aux = self.upsample(mels)
-
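- # split the upsampled auxiliary features into four equal chunks, one per network stage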
- aux_idx = [self.aux_dims * i for i in range(5)]
- a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
- a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
- a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
- a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
-
- x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2)
- x = self.I(x)
- res = x
- x, _ = self.rnn1(x, h1)
-
- x = x + res
- res = x
- x = torch.cat([x, a2], dim=2)
- x, _ = self.rnn2(x, h2)
-
- x = x + res
- x = torch.cat([x, a3], dim=2)
- x = F.relu(self.fc1(x))
-
- x = torch.cat([x, a4], dim=2)
- x = F.relu(self.fc2(x))
- return self.fc3(x)
-
- def generate(self, mels, batched, target, overlap, mu_law, progress_callback=None):
- mu_law = mu_law if self.mode == 'RAW' else False
- progress_callback = progress_callback or self.gen_display
-
- self.eval()
- output = []
- start = time.time()
- rnn1 = self.get_gru_cell(self.rnn1)
- rnn2 = self.get_gru_cell(self.rnn2)
-
- with torch.no_grad():
- if torch.cuda.is_available():
- mels = mels.cuda()
- else:
- mels = mels.cpu()
- wave_len = (mels.size(-1) - 1) * self.hop_length
- mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both')
- mels, aux = self.upsample(mels.transpose(1, 2))
-
- if batched:
- mels = self.fold_with_overlap(mels, target, overlap)
- aux = self.fold_with_overlap(aux, target, overlap)
-
- b_size, seq_len, _ = mels.size()
-
- if torch.cuda.is_available():
- h1 = torch.zeros(b_size, self.rnn_dims).cuda()
- h2 = torch.zeros(b_size, self.rnn_dims).cuda()
- x = torch.zeros(b_size, 1).cuda()
- else:
- h1 = torch.zeros(b_size, self.rnn_dims).cpu()
- h2 = torch.zeros(b_size, self.rnn_dims).cpu()
- x = torch.zeros(b_size, 1).cpu()
-
- d = self.aux_dims
- aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
-
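- # autoregressive generation loop: produce one sample per upsampled time step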
- for i in range(seq_len):
-
- m_t = mels[:, i, :]
-
- a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split)
-
- x = torch.cat([x, m_t, a1_t], dim=1)
- x = self.I(x)
- h1 = rnn1(x, h1)
-
- x = x + h1
- inp = torch.cat([x, a2_t], dim=1)
- h2 = rnn2(inp, h2)
-
- x = x + h2
- x = torch.cat([x, a3_t], dim=1)
- x = F.relu(self.fc1(x))
-
- x = torch.cat([x, a4_t], dim=1)
- x = F.relu(self.fc2(x))
-
- logits = self.fc3(x)
-
- if self.mode == 'MOL':
- sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2))
- output.append(sample.view(-1))
- if torch.cuda.is_available():
- # x = torch.FloatTensor([[sample]]).cuda()
- x = sample.transpose(0, 1).cuda()
- else:
- x = sample.transpose(0, 1)
-
- elif self.mode == 'RAW' :
- posterior = F.softmax(logits, dim=1)
- distrib = torch.distributions.Categorical(posterior)
-
- sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1.
- output.append(sample)
- x = sample.unsqueeze(-1)
- else:
- raise RuntimeError("Unknown model mode value - ", self.mode)
-
- if i % 100 == 0:
- gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
- progress_callback(i, seq_len, b_size, gen_rate)
-
- output = torch.stack(output).transpose(0, 1)
- output = output.cpu().numpy()
- output = output.astype(np.float64)
-
- if batched:
- output = self.xfade_and_unfold(output, target, overlap)
- else:
- output = output[0]
-
- if mu_law:
- output = decode_mu_law(output, self.n_classes, False)
- if hp.apply_preemphasis:
- output = de_emphasis(output)
-
- # Fade-out at the end to avoid signal cutting out suddenly
- fade_out = np.linspace(1, 0, 20 * self.hop_length)
- output = output[:wave_len]
- output[-20 * self.hop_length:] *= fade_out
-
- self.train()
-
- return output
-
-
- def gen_display(self, i, seq_len, b_size, gen_rate):
- pbar = progbar(i, seq_len)
- msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | '
- stream(msg)
-
- def get_gru_cell(self, gru):
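- # copy the layer-0 weights of an nn.GRU into an nn.GRUCell for step-by-step inference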
- gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
- gru_cell.weight_hh.data = gru.weight_hh_l0.data
- gru_cell.weight_ih.data = gru.weight_ih_l0.data
- gru_cell.bias_hh.data = gru.bias_hh_l0.data
- gru_cell.bias_ih.data = gru.bias_ih_l0.data
- return gru_cell
-
- def pad_tensor(self, x, pad, side='both'):
- # NB - this is just a quick padding helper for the case needed here,
- # i.e., it won't generalise to other shapes/dims
- b, t, c = x.size()
- total = t + 2 * pad if side == 'both' else t + pad
- if torch.cuda.is_available():
- padded = torch.zeros(b, total, c).cuda()
- else:
- padded = torch.zeros(b, total, c).cpu()
- if side == 'before' or side == 'both':
- padded[:, pad:pad + t, :] = x
- elif side == 'after':
- padded[:, :t, :] = x
- return padded
-
- def fold_with_overlap(self, x, target, overlap):
-
- ''' Fold the tensor with overlap for quick batched inference.
- Overlap will be used for crossfading in xfade_and_unfold()
-
- Args:
- x (tensor) : Upsampled conditioning features.
- shape=(1, timesteps, features)
- target (int) : Target timesteps for each index of batch
- overlap (int) : Timesteps for both xfade and rnn warmup
-
- Return:
- (tensor) : shape=(num_folds, target + 2 * overlap, features)
-
- Details:
- x = [[h1, h2, ... hn]]
-
- Where each h is a vector of conditioning features
-
- Eg: target=2, overlap=1 with x.size(1)=10
-
- folded = [[h1, h2, h3, h4],
- [h4, h5, h6, h7],
- [h7, h8, h9, h10]]
- '''
-
- _, total_len, features = x.size()
-
- # Calculate variables needed
- num_folds = (total_len - overlap) // (target + overlap)
- extended_len = num_folds * (overlap + target) + overlap
- remaining = total_len - extended_len
-
- # Pad if some time steps poking out
- if remaining != 0:
- num_folds += 1
- padding = target + 2 * overlap - remaining
- x = self.pad_tensor(x, padding, side='after')
-
- if torch.cuda.is_available():
- folded = torch.zeros(num_folds, target + 2 * overlap, features).cuda()
- else:
- folded = torch.zeros(num_folds, target + 2 * overlap, features).cpu()
-
- # Get the values for the folded tensor
- for i in range(num_folds):
- start = i * (target + overlap)
- end = start + target + 2 * overlap
- folded[i] = x[:, start:end, :]
-
- return folded
-
- def xfade_and_unfold(self, y, target, overlap):
-
- ''' Applies a crossfade and unfolds into a 1d array.
-
- Args:
- y (ndarray) : Batched sequences of audio samples
- shape=(num_folds, target + 2 * overlap)
- dtype=np.float64
- overlap (int) : Timesteps for both xfade and rnn warmup
-
- Return:
- (ndarray) : audio samples in a 1d array
- shape=(total_len)
- dtype=np.float64
-
- Details:
- y = [[seq1],
- [seq2],
- [seq3]]
-
- Apply a gain envelope at both ends of the sequences
-
- y = [[seq1_in, seq1_target, seq1_out],
- [seq2_in, seq2_target, seq2_out],
- [seq3_in, seq3_target, seq3_out]]
-
- Stagger and add up the groups of samples:
-
- [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...]
-
- '''
-
- num_folds, length = y.shape
- target = length - 2 * overlap
- total_len = num_folds * (target + overlap) + overlap
-
- # Need some silence for the rnn warmup
- silence_len = overlap // 2
- fade_len = overlap - silence_len
- silence = np.zeros((silence_len), dtype=np.float64)
-
- # Equal power crossfade
- t = np.linspace(-1, 1, fade_len, dtype=np.float64)
- fade_in = np.sqrt(0.5 * (1 + t))
- fade_out = np.sqrt(0.5 * (1 - t))
-
- # Concat the silence to the fades
- fade_in = np.concatenate([silence, fade_in])
- fade_out = np.concatenate([fade_out, silence])
-
- # Apply the gain to the overlap samples
- y[:, :overlap] *= fade_in
- y[:, -overlap:] *= fade_out
-
- unfolded = np.zeros((total_len), dtype=np.float64)
-
- # Loop to add up all the samples
- for i in range(num_folds):
- start = i * (target + overlap)
- end = start + target + 2 * overlap
- unfolded[start:end] += y[i]
-
- return unfolded
-
- def get_step(self) :
- return self.step.data.item()
-
- def checkpoint(self, model_dir, optimizer) :
- k_steps = self.get_step() // 1000
- self.save(model_dir.joinpath("checkpoint_%dk_steps.pt" % k_steps), optimizer)
-
- def log(self, path, msg) :
- with open(path, 'a') as f:
- print(msg, file=f)
-
- def load(self, path, optimizer) :
- checkpoint = torch.load(path)
- if "optimizer_state" in checkpoint:
- self.load_state_dict(checkpoint["model_state"])
- optimizer.load_state_dict(checkpoint["optimizer_state"])
- else:
- # Backwards compatibility
- self.load_state_dict(checkpoint)
-
- def save(self, path, optimizer) :
- torch.save({
- "model_state": self.state_dict(),
- "optimizer_state": optimizer.state_dict(),
- }, path)
-
- def num_params(self, print_out=True):
- parameters = filter(lambda p: p.requires_grad, self.parameters())
- parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
- if print_out :
- print('Trainable Parameters: %.3fM' % parameters)
diff --git a/spaces/KevinQHLin/UniVTG/main/inference_mr.py b/spaces/KevinQHLin/UniVTG/main/inference_mr.py
deleted file mode 100644
index 4aea2de137ac46fa91f737d49998a00165423bce..0000000000000000000000000000000000000000
--- a/spaces/KevinQHLin/UniVTG/main/inference_mr.py
+++ /dev/null
@@ -1,273 +0,0 @@
-import pdb
-import pprint
-from tqdm import tqdm, trange
-import numpy as np
-import os
-from collections import OrderedDict, defaultdict
-from utils.basic_utils import AverageMeter
-
-import torch
-import torch.nn.functional as F
-import torch.backends.cudnn as cudnn
-from torch.utils.data import DataLoader
-
-from main.config import TestOptions, setup_model
-from main.dataset import DatasetMR, start_end_collate_mr, prepare_batch_inputs_mr
-from eval.eval import eval_submission
-from eval.postprocessing import PostProcessorDETR
-from utils.basic_utils import save_jsonl, save_json
-from utils.temporal_nms import temporal_nms
-from utils.span_utils import span_cxw_to_xx
-
-import logging
-import importlib
-
-logger = logging.getLogger(__name__)
-logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S",
- level=logging.INFO)
-
-
-def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms):
- mr_res_after_nms = []
- for e in mr_res:
- e["pred_relevant_windows"] = temporal_nms(
- e["pred_relevant_windows"][:max_before_nms],
- nms_thd=nms_thd,
- max_after_nms=max_after_nms
- )
- mr_res_after_nms.append(e)
- return mr_res_after_nms
-
-
-def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename):
- # IOU_THDS = (0.5, 0.7)
- logger.info("Saving/Evaluating before nms results")
- submission_path = os.path.join(opt.results_dir, save_submission_filename)
- save_jsonl(submission, submission_path)
-
- if opt.eval_split_name in ["val", "test"]: # since test_public has no GT
- metrics = eval_submission(
- submission, gt_data,
- verbose=opt.debug, match_number=not opt.debug,
- )
- save_metrics_path = submission_path.replace(".jsonl", "_metrics.json")
- save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False)
- latest_file_paths = [submission_path, save_metrics_path]
- else:
- metrics = None
- latest_file_paths = [submission_path, ]
-
- if opt.nms_thd != -1:
- logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd))
- submission_after_nms = post_processing_mr_nms(
- submission, nms_thd=opt.nms_thd,
- max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms
- )
-
- logger.info("Saving/Evaluating nms results")
- submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd))
- save_jsonl(submission_after_nms, submission_nms_path)
- if opt.eval_split_name == "val":
- metrics_nms = eval_submission(
- submission_after_nms, gt_data,
- verbose=opt.debug, match_number=not opt.debug
- )
- save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json")
- save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False)
- latest_file_paths += [submission_nms_path, save_metrics_nms_path]
- else:
- metrics_nms = None
- latest_file_paths = [submission_nms_path, ]
- else:
- metrics_nms = None
- return metrics, metrics_nms, latest_file_paths
-
-
-@torch.no_grad()
-def compute_mr_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None):
- model.eval()
- if criterion:
- assert eval_loader.dataset.load_labels
- criterion.eval()
-
- loss_meters = defaultdict(AverageMeter)
- write_tb = tb_writer is not None and epoch_i is not None
-
- mr_res = []
- for batch in tqdm(eval_loader, desc="compute st ed scores"):
- query_meta = batch[0]
- model_inputs, targets = prepare_batch_inputs_mr(batch[1], opt.device, non_blocking=opt.pin_memory)
- outputs = model(**model_inputs)
- prob = outputs["pred_logits"] # the last channel may be 1 or 2.
- # if opt.eval_mode == 'v1':
- # prob = prob * outputs["saliency_scores"].unsqueeze(-1) # v1
- # if opt.eval_mode == 'v2':
- # prob = F.softmax(prob, dim=1) * outputs["saliency_scores"].unsqueeze(-1) # v2
- # if opt.eval_mode == 'v3':
- # prob = outputs["saliency_scores"].unsqueeze(-1)
- if outputs["pred_logits"].shape[-1] > 1:
- prob = F.softmax(outputs["pred_logits"], -1) # (batch_size, #queries, #classes=2)
- if opt.span_loss_type == "l1":
- scores = prob[..., 0] # (batch_size, #queries); the foreground label is 0, so take it directly
- pred_spans = outputs["pred_spans"] # (bsz, #queries, 2)
-
- if opt.model_id not in ['moment_detr']: # dense regression.
- start_spans = targets['timestamp']
- pred_spans = start_spans + pred_spans
- mask = targets['timestamp_mask'].bool()
- scores[~mask] = 0
- # if opt.eval_mode == 'v4':
- # _mask = targets['timestamp_window'].bool()
- # scores[~_mask] = 0
-
- if opt.eval_mode == 'add':
- # pdb.set_trace()
- _saliency_scores = outputs["saliency_scores"].half() + prob.squeeze(-1)
- else:
- _saliency_scores = outputs["saliency_scores"].half() # (bsz, L)
-
- if opt.eval_mode == 'add_mr':
- prob = outputs["saliency_scores"].half().unsqueeze(-1) + prob
-
- saliency_scores = []
- valid_vid_lengths = model_inputs["src_vid_mask"].sum(1).cpu().tolist()
- for j in range(len(valid_vid_lengths)):
- saliency_scores.append(_saliency_scores[j, :int(valid_vid_lengths[j])].tolist())
- else:
- bsz, n_queries = outputs["pred_spans"].shape[:2] # (bsz, #queries, max_v_l * 2)
- pred_spans_logits = outputs["pred_spans"].view(bsz, n_queries, 2, opt.max_v_l)
- # TODO use more advanced decoding method with st_ed product
- pred_span_scores, pred_spans = F.softmax(pred_spans_logits, dim=-1).max(-1) # 2 * (bsz, #queries, 2)
- scores = torch.prod(pred_span_scores, 2) # (bsz, #queries)
- pred_spans[:, 1] += 1
- pred_spans *= opt.clip_length
-
- # compose predictions
- for idx, (meta, spans, score) in enumerate(zip(query_meta, pred_spans.cpu(), scores.cpu())):
- if opt.span_loss_type == "l1":
- if opt.model_id in ['moment_detr']:
- spans = span_cxw_to_xx(spans) * meta["duration"]
- else:
- spans = spans * meta["duration"]
- spans = torch.clamp(spans, 0, meta["duration"]) # added by Kevin, since window cannot be longer than video duration.
-
- # (#queries, 3), [st(float), ed(float), score(float)]
- cur_ranked_preds = torch.cat([spans, score[:, None]], dim=1).tolist()
- if not opt.no_sort_results:
- cur_ranked_preds = sorted(cur_ranked_preds, key=lambda x: x[2], reverse=True)
- cur_ranked_preds = [[float(f"{e:.4f}") for e in row] for row in cur_ranked_preds]
- cur_query_pred = dict(
- qid=meta["qid"],
- query=meta["query"],
- vid=meta["vid"],
- pred_relevant_windows=cur_ranked_preds,
- pred_saliency_scores=saliency_scores[idx]
- )
- mr_res.append(cur_query_pred)
-
- if criterion:
- loss_dict = criterion(outputs, targets)
- weight_dict = criterion.weight_dict
- losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
- loss_dict["loss_overall"] = float(losses) # for logging only
- for k, v in loss_dict.items():
- loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v))
-
- if opt.debug:
- break
-
- if write_tb and criterion:
- for k, v in loss_meters.items():
- tb_writer.add_scalar("Eval/{}".format(k), v.avg, epoch_i + 1)
-
- post_processor = PostProcessorDETR(
- clip_length=opt.clip_length, min_ts_val=0, max_ts_val=150,
- min_w_l=2, max_w_l=150, move_window_method="left",
- # process_func_names=("clip_ts", "round_multiple")
- process_func_names=["round_multiple"] # have added `clamp' op on line 147, thus we do not need `clip_ts' again;
- )
- # todo: do we need round_multiple?
- if opt.round_multiple > 0:
- mr_res = post_processor(mr_res)
- return mr_res, loss_meters
-
-def get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer):
- """compute and save query and video proposal embeddings"""
- eval_res, eval_loss_meters = compute_mr_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # list(dict)
- return eval_res, eval_loss_meters
-
-def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None):
- logger.info("Generate submissions")
- model.eval()
- if criterion is not None and eval_dataset.load_labels:
- criterion.eval()
- else:
- criterion = None
-
- eval_loader = DataLoader(
- eval_dataset,
- collate_fn=start_end_collate_mr,
- batch_size=opt.eval_bsz,
- num_workers=opt.num_workers,
- shuffle=False,
- pin_memory=opt.pin_memory
- )
-
- submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer)
- if opt.no_sort_results:
- save_submission_filename = save_submission_filename.replace(".jsonl", "_unsorted.jsonl")
- metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing(
- submission, opt, eval_dataset.data, save_submission_filename)
- return metrics, metrics_nms, eval_loss_meters, latest_file_paths
-
-def start_inference():
- logger.info("Setup config, data and model...")
- opt = TestOptions().parse()
- # pdb.set_trace()
- cudnn.benchmark = True
- cudnn.deterministic = False
-
- assert opt.eval_path is not None
- eval_dataset = DatasetMR(
- dset_name=opt.dset_name,
- data_path=opt.eval_path,
- v_feat_dirs=opt.v_feat_dirs,
- q_feat_dir=opt.t_feat_dir,
- v_feat_dim=opt.v_feat_dim,
- q_feat_dim=opt.t_feat_dim,
- q_feat_type="last_hidden_state",
- max_q_l=opt.max_q_l,
- max_v_l=opt.max_v_l,
- ctx_mode=opt.ctx_mode,
- data_ratio=opt.data_ratio,
- normalize_v=not opt.no_norm_vfeat,
- normalize_t=not opt.no_norm_tfeat,
- clip_len=opt.clip_length,
- max_windows=opt.max_windows,
- load_labels=True, # opt.eval_split_name == "val",
- span_loss_type=opt.span_loss_type,
- txt_drop_ratio=0,
- use_cache=opt.use_cache,
- )
-
- if opt.lr_warmup > 0:
- # total_steps = opt.n_epoch * len(train_dataset) // opt.bsz
- total_steps = opt.n_epoch
- warmup_steps = opt.lr_warmup if opt.lr_warmup > 1 else int(opt.lr_warmup * total_steps)
- opt.lr_warmup = [warmup_steps, total_steps]
-
- model, criterion, _, _ = setup_model(opt)
- save_submission_filename = "inference_{}_{}_{}_preds.jsonl".format(
- opt.dset_name, opt.eval_split_name, opt.eval_id)
- logger.info("Starting inference...")
- with torch.no_grad():
- metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \
- eval_epoch(model, eval_dataset, opt, save_submission_filename, criterion=criterion)
- logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4)))
- if metrics_nms is not None:
- logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4)))
-
-
-if __name__ == '__main__':
- start_inference()
diff --git a/spaces/Kvikontent/QrGen/README.md b/spaces/Kvikontent/QrGen/README.md
deleted file mode 100644
index 41990b64c38e505f0daa826d573e958dd34b9fe5..0000000000000000000000000000000000000000
--- a/spaces/Kvikontent/QrGen/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: QrGen
-emoji: 👀
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.48.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/utils/point_sample.py b/spaces/KyanChen/RSPrompter/mmdet/models/utils/point_sample.py
deleted file mode 100644
index 1afc957f3da7d1dc030c21d40311c768c6952ea4..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/utils/point_sample.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-from mmcv.ops import point_sample
-from torch import Tensor
-
-
-def get_uncertainty(mask_preds: Tensor, labels: Tensor) -> Tensor:
- """Estimate uncertainty based on pred logits.
-
- We estimate uncertainty as L1 distance between 0.0 and the logits
- prediction in 'mask_preds' for the foreground class given in ``labels``.
-
- Args:
- mask_preds (Tensor): mask prediction logits, shape (num_rois,
- num_classes, mask_height, mask_width).
-
- labels (Tensor): Either predicted or ground truth label for
- each predicted mask, of length num_rois.
-
- Returns:
- scores (Tensor): Uncertainty scores with the most uncertain
- locations having the highest uncertainty score,
- shape (num_rois, 1, mask_height, mask_width)
- """
- if mask_preds.shape[1] == 1:
- gt_class_logits = mask_preds.clone()
- else:
- inds = torch.arange(mask_preds.shape[0], device=mask_preds.device)
- gt_class_logits = mask_preds[inds, labels].unsqueeze(1)
- return -torch.abs(gt_class_logits)
-
-
-def get_uncertain_point_coords_with_randomness(
- mask_preds: Tensor, labels: Tensor, num_points: int,
- oversample_ratio: float, importance_sample_ratio: float) -> Tensor:
- """Get ``num_points`` most uncertain points with random points during
- train.
-
- Sample points in [0, 1] x [0, 1] coordinate space based on their
- uncertainty. The uncertainties are calculated for each point using
- 'get_uncertainty()' function that takes point's logit prediction as
- input.
-
- Args:
- mask_preds (Tensor): A tensor of shape (num_rois, num_classes,
- mask_height, mask_width) for class-specific or class-agnostic
- prediction.
- labels (Tensor): The ground truth class for each instance.
- num_points (int): The number of points to sample.
- oversample_ratio (float): Oversampling parameter.
- importance_sample_ratio (float): Ratio of points that are sampled
- via importance sampling.
-
- Returns:
- point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
- that contains the coordinates sampled points.
- """
- assert oversample_ratio >= 1
- assert 0 <= importance_sample_ratio <= 1
- batch_size = mask_preds.shape[0]
- num_sampled = int(num_points * oversample_ratio)
- point_coords = torch.rand(
- batch_size, num_sampled, 2, device=mask_preds.device)
- point_logits = point_sample(mask_preds, point_coords)
- # It is crucial to calculate uncertainty based on the sampled
- # prediction value for the points. Calculating uncertainties of the
- # coarse predictions first and sampling them for points leads to
- # incorrect results. To illustrate this: assume uncertainty func(
- # logits)=-abs(logits), a sampled point between two coarse
- # predictions with -1 and 1 logits has 0 logits, and therefore 0
- # uncertainty value. However, if we calculate uncertainties for the
- # coarse predictions first, both will have -1 uncertainty,
- # and sampled point will get -1 uncertainty.
- point_uncertainties = get_uncertainty(point_logits, labels)
- num_uncertain_points = int(importance_sample_ratio * num_points)
- num_random_points = num_points - num_uncertain_points
- idx = torch.topk(
- point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
- shift = num_sampled * torch.arange(
- batch_size, dtype=torch.long, device=mask_preds.device)
- idx += shift[:, None]
- point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
- batch_size, num_uncertain_points, 2)
- if num_random_points > 0:
- rand_roi_coords = torch.rand(
- batch_size, num_random_points, 2, device=mask_preds.device)
- point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
- return point_coords
diff --git a/spaces/LUOYE-123/QQsign/Dockerfile b/spaces/LUOYE-123/QQsign/Dockerfile
deleted file mode 100644
index 5b81d3b20c5bee450cf55a0ace7e5c95d58f72af..0000000000000000000000000000000000000000
--- a/spaces/LUOYE-123/QQsign/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM openjdk:11.0-jdk
-
-# Set the time zone
-ENV TZ Asia/Shanghai
-
-# Set the working directory
-WORKDIR /app
-
-# Copy the extracted package and txlib into the working directory
-COPY unidbg-fetch-qsign /app
-COPY txlib /app/txlib
-
-# Startup command
-CMD bash bin/unidbg-fetch-qsign --host=0.0.0.0 --port=7860 --count=$COUNT --library=txlib/$TXLIB_VERSION --android_id=$ANDROID_ID
-
-# Expose the service port
-EXPOSE 7860
diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/modules/ipex/hijacks.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/modules/ipex/hijacks.py
deleted file mode 100644
index 855e5cb9ec4791ed771808dfa52607aae047b840..0000000000000000000000000000000000000000
--- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/modules/ipex/hijacks.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import contextlib
-import importlib
-import torch
-
-# pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return
-
-class CondFunc: # pylint: disable=missing-class-docstring
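- # Wraps a target function (given directly or as a dotted import path) so that
- # sub_func(orig_func, ...) is called whenever cond_func(orig_func, ...) is truthy,
- # and the original function is called otherwise.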
- def __new__(cls, orig_func, sub_func, cond_func):
- self = super(CondFunc, cls).__new__(cls)
- if isinstance(orig_func, str):
- func_path = orig_func.split('.')
- for i in range(len(func_path)-1, -1, -1):
- try:
- resolved_obj = importlib.import_module('.'.join(func_path[:i]))
- break
- except ImportError:
- pass
- for attr_name in func_path[i:-1]:
- resolved_obj = getattr(resolved_obj, attr_name)
- orig_func = getattr(resolved_obj, func_path[-1])
- setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
- self.__init__(orig_func, sub_func, cond_func)
- return lambda *args, **kwargs: self(*args, **kwargs)
- def __init__(self, orig_func, sub_func, cond_func):
- self.__orig_func = orig_func
- self.__sub_func = sub_func
- self.__cond_func = cond_func
- def __call__(self, *args, **kwargs):
- if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
- return self.__sub_func(self.__orig_func, *args, **kwargs)
- else:
- return self.__orig_func(*args, **kwargs)
-
-_utils = torch.utils.data._utils
-def _shutdown_workers(self):
- if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or torch.utils.data._utils.python_exit_status is None:
- return
- if hasattr(self, "_shutdown") and not self._shutdown:
- self._shutdown = True
- try:
- if hasattr(self, '_pin_memory_thread'):
- self._pin_memory_thread_done_event.set()
- self._worker_result_queue.put((None, None))
- self._pin_memory_thread.join()
- self._worker_result_queue.cancel_join_thread()
- self._worker_result_queue.close()
- self._workers_done_event.set()
- for worker_id in range(len(self._workers)):
- if self._persistent_workers or self._workers_status[worker_id]:
- self._mark_worker_as_unavailable(worker_id, shutdown=True)
- for w in self._workers: # pylint: disable=invalid-name
- w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL)
- for q in self._index_queues: # pylint: disable=invalid-name
- q.cancel_join_thread()
- q.close()
- finally:
- if self._worker_pids_set:
- torch.utils.data._utils.signal_handling._remove_worker_pids(id(self))
- self._worker_pids_set = False
- for w in self._workers: # pylint: disable=invalid-name
- if w.is_alive():
- w.terminate()
-
-class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods
- def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument
- if isinstance(device_ids, list) and len(device_ids) > 1:
- print("IPEX backend doesn't support DataParallel on multiple XPU devices")
- return module.to("xpu")
-
-def return_null_context(*args, **kwargs): # pylint: disable=unused-argument
- return contextlib.nullcontext()
-
-def check_device(device):
- return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int))
-
-def return_xpu(device):
- return f"xpu:{device[-1]}" if isinstance(device, str) and ":" in device else f"xpu:{device}" if isinstance(device, int) else torch.device("xpu") if isinstance(device, torch.device) else "xpu"
-
-def ipex_no_cuda(orig_func, *args, **kwargs):
- torch.cuda.is_available = lambda: False
- orig_func(*args, **kwargs)
- torch.cuda.is_available = torch.xpu.is_available
-
-original_autocast = torch.autocast
-def ipex_autocast(*args, **kwargs):
- if len(args) > 0 and args[0] == "cuda":
- return original_autocast("xpu", *args[1:], **kwargs)
- else:
- return original_autocast(*args, **kwargs)
-
-original_torch_cat = torch.cat
-def torch_cat(tensor, *args, **kwargs):
- if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype):
- return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs)
- else:
- return original_torch_cat(tensor, *args, **kwargs)
-
-original_interpolate = torch.nn.functional.interpolate
-def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments
- if antialias or align_corners is not None:
- return_device = tensor.device
- return_dtype = tensor.dtype
- return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode,
- align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype)
- else:
- return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode,
- align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias)
-
-original_linalg_solve = torch.linalg.solve
-def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name
- if A.device != torch.device("cpu") or B.device != torch.device("cpu"):
- return_device = A.device
- return original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device)
- else:
- return original_linalg_solve(A, B, *args, **kwargs)
-
-def ipex_hijacks():
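- # Route common CUDA-targeting torch calls to their XPU equivalents via CondFunc wrappers.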
- CondFunc('torch.Tensor.to',
- lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
- lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
- CondFunc('torch.Tensor.cuda',
- lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
- lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
- CondFunc('torch.empty',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.load',
- lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs),
- lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location))
- CondFunc('torch.randn',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.ones',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.zeros',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.tensor',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.linspace',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
-
- CondFunc('torch.Generator',
- lambda orig_func, device=None: torch.xpu.Generator(device),
- lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu")
-
- CondFunc('torch.batch_norm',
- lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
- weight if weight is not None else torch.ones(input.size()[1], device=input.device),
- bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
- lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
- CondFunc('torch.instance_norm',
- lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
- weight if weight is not None else torch.ones(input.size()[1], device=input.device),
- bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
- lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
-
- #Functions with dtype errors:
- CondFunc('torch.nn.modules.GroupNorm.forward',
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
- CondFunc('torch.nn.modules.linear.Linear.forward',
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
- CondFunc('torch.nn.modules.conv.Conv2d.forward',
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
- CondFunc('torch.nn.functional.layer_norm',
- lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
- orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
- lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
- weight is not None and input.dtype != weight.data.dtype)
-
- #Diffusers Float64 (ARC GPUs don't support double or Float64):
- if not torch.xpu.has_fp64_dtype():
- CondFunc('torch.from_numpy',
- lambda orig_func, ndarray: orig_func(ndarray.astype('float32')),
- lambda orig_func, ndarray: ndarray.dtype == float)
-
- #Broken functions when torch.cuda.is_available is True:
- CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__',
- lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs),
- lambda orig_func, *args, **kwargs: True)
-
- #Functions that are patched directly because wrapping them with CondFunc breaks torch.compile:
- torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers
- torch.nn.DataParallel = DummyDataParallel
- torch.autocast = ipex_autocast
- torch.cat = torch_cat
- torch.linalg.solve = linalg_solve
- torch.nn.functional.interpolate = interpolate
- torch.backends.cuda.sdp_kernel = return_null_context
\ No newline at end of file
diff --git a/spaces/Lianjd/stock_dashboard/backtrader/indicators/mabase.py b/spaces/Lianjd/stock_dashboard/backtrader/indicators/mabase.py
deleted file mode 100644
index 3ae4df3fed92e722fe0ad4e64658bbf54d3ea349..0000000000000000000000000000000000000000
--- a/spaces/Lianjd/stock_dashboard/backtrader/indicators/mabase.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8; py-indent-offset:4 -*-
-###############################################################################
-#
-# Copyright (C) 2015-2020 Daniel Rodriguez
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-###############################################################################
-from __future__ import (absolute_import, division, print_function,
- unicode_literals)
-
-from ..utils.py3 import with_metaclass
-
-from . import Indicator
-
-
-class MovingAverage(object):
- '''MovingAverage (alias MovAv)
-
- A placeholder to gather all Moving Average Types in a single place.
-
- Instantiating a SimpleMovingAverage can be achieved as follows::
-
- sma = MovingAverage.Simple(self.data, period)
-
- Or using the shorter aliases::
-
- sma = MovAv.SMA(self.data, period)
-
- or with the full (forwards and backwards) names:
-
- sma = MovAv.SimpleMovingAverage(self.data, period)
-
- sma = MovAv.MovingAverageSimple(self.data, period)
-
- '''
- _movavs = []
-
- @classmethod
- def register(cls, regcls):
- if getattr(regcls, '_notregister', False):
- return
-
- cls._movavs.append(regcls)
-
- clsname = regcls.__name__
- setattr(cls, clsname, regcls)
-
- clsalias = ''
- if clsname.endswith('MovingAverage'):
- clsalias = clsname.split('MovingAverage')[0]
- elif clsname.startswith('MovingAverage'):
- clsalias = clsname.split('MovingAverage')[1]
-
- if clsalias:
- setattr(cls, clsalias, regcls)
-
-
-class MovAv(MovingAverage):
- pass # alias
-
-
-class MetaMovAvBase(Indicator.__class__):
- # Register any MovingAverage with the placeholder to allow the automatic
- # creation of envelopes and oscillators
-
- def __new__(meta, name, bases, dct):
- # Create the class
- cls = super(MetaMovAvBase, meta).__new__(meta, name, bases, dct)
-
- MovingAverage.register(cls)
-
- # return the class
- return cls
-
-
-class MovingAverageBase(with_metaclass(MetaMovAvBase, Indicator)):
- params = (('period', 30),)
- plotinfo = dict(subplot=False)
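
As an illustration of the registration mechanism above (hypothetical subclass, not part of backtrader): any class built on `MovingAverageBase` is picked up by `MetaMovAvBase`, so it becomes reachable through `MovingAverage`/`MovAv` under both its full name and its derived alias.

```python
import backtrader as bt


class MyMovingAverage(bt.indicators.MovingAverageBase):
    """Toy arithmetic mean over `period` bars, defined only to show registration."""
    lines = ('mymav',)

    def __init__(self):
        self.addminperiod(self.p.period)
        super().__init__()

    def next(self):
        values = self.data.get(size=self.p.period)
        self.lines.mymav[0] = sum(values) / self.p.period


# MetaMovAvBase registered the class and its alias on the placeholder:
assert bt.indicators.MovAv.MyMovingAverage is MyMovingAverage
assert bt.indicators.MovAv.My is MyMovingAverage  # 'MovingAverage' suffix stripped
```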
diff --git a/spaces/Lynx1221/rvc-test1/README.md b/spaces/Lynx1221/rvc-test1/README.md
deleted file mode 100644
index 56936f1df15477c0ae2fdcfe59a77c175e1905d8..0000000000000000000000000000000000000000
--- a/spaces/Lynx1221/rvc-test1/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Rvc Models
-emoji: 🎤
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: zomehwh/rvc-models
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MINAMONI/White-box-Cartoonization/app.py b/spaces/MINAMONI/White-box-Cartoonization/app.py
deleted file mode 100644
index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000
--- a/spaces/MINAMONI/White-box-Cartoonization/app.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-import argparse
-import functools
-import os
-import pathlib
-import sys
-from typing import Callable
-import uuid
-
-import gradio as gr
-import huggingface_hub
-import numpy as np
-import PIL.Image
-
-from io import BytesIO
-from wbc.cartoonize import Cartoonize
-
-ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization'
-TITLE = 'SystemErrorWang/White-box-Cartoonization'
-DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}.
-
-"""
-ARTICLE = """
-
-"""
-
-SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
-def compress_UUID():
- '''
-    Per http://www.ietf.org/rfc/rfc1738.txt, generate a short string from a UUID by expanding the character set.
-    Character set: [0-9a-zA-Z\-_], 64 characters in total.
-    Length: (32 - 2) / 3 * 2 = 20.
-    Note: unique enough for everyone on Earth to use without repeats for 100 years (2^120 possibilities).
- :return:String
- '''
- row = str(uuid.uuid4()).replace('-', '')
- safe_code = ''
- for i in range(10):
- enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10)
- safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)])
- safe_code = safe_code.replace('-', '')
- return safe_code
-
-
-def parse_args() -> argparse.Namespace:
- parser = argparse.ArgumentParser()
- parser.add_argument('--device', type=str, default='cpu')
- parser.add_argument('--theme', type=str)
- parser.add_argument('--live', action='store_true')
- parser.add_argument('--share', action='store_true')
- parser.add_argument('--port', type=int)
- parser.add_argument('--disable-queue',
- dest='enable_queue',
- action='store_false')
- parser.add_argument('--allow-flagging', type=str, default='never')
- parser.add_argument('--allow-screenshot', action='store_true')
- return parser.parse_args()
-
-def run(
- image,
- cartoonize : Cartoonize
-) -> tuple[PIL.Image.Image]:
-
- out_path = compress_UUID()+'.png'
- cartoonize.run_sigle(image.name, out_path)
-
- return PIL.Image.open(out_path)
-
-
-def main():
- gr.close_all()
-
- args = parse_args()
-
- cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/'))
-
- func = functools.partial(run, cartoonize=cartoonize)
- func = functools.update_wrapper(func, run)
-
- gr.Interface(
- func,
- [
- gr.inputs.Image(type='file', label='Input Image'),
- ],
- [
- gr.outputs.Image(
- type='pil',
- label='Result'),
- ],
- # examples=examples,
- theme=args.theme,
- title=TITLE,
- description=DESCRIPTION,
- article=ARTICLE,
- allow_screenshot=args.allow_screenshot,
- allow_flagging=args.allow_flagging,
- live=args.live,
- ).launch(
- enable_queue=args.enable_queue,
- server_port=args.port,
- share=args.share,
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/ML701G7/taim-gan/src/data/collate.py b/spaces/ML701G7/taim-gan/src/data/collate.py
deleted file mode 100644
index 220060f52bc6f915875a78b3c973ae288435968e..0000000000000000000000000000000000000000
--- a/spaces/ML701G7/taim-gan/src/data/collate.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""Custom collate function for the data loader."""
-
-from typing import Any, List
-
-import torch
-from torch.nn.utils.rnn import pad_sequence
-
-
-def custom_collate(batch: List[Any], device: Any) -> Any:
- """
- Custom collate function to be used in the data loader.
- :param batch: list, with length equal to number of batches.
- :return: processed batch of data [add padding to text, stack tensors in batch]
- """
- img, correct_capt, curr_class, word_labels = zip(*batch)
- batched_img = torch.stack(img, dim=0).to(
- device
- ) # shape: (batch_size, 3, height, width)
- correct_capt_len = torch.tensor(
- [len(capt) for capt in correct_capt], dtype=torch.int64
- ).unsqueeze(
- 1
- ) # shape: (batch_size, 1)
- batched_correct_capt = pad_sequence(
- correct_capt, batch_first=True, padding_value=0
- ).to(
- device
- ) # shape: (batch_size, max_seq_len)
- batched_curr_class = torch.stack(curr_class, dim=0).to(
- device
- ) # shape: (batch_size, 1)
- batched_word_labels = pad_sequence(
- word_labels, batch_first=True, padding_value=0
- ).to(
- device
- ) # shape: (batch_size, max_seq_len)
- return (
- batched_img,
- batched_correct_capt,
- correct_capt_len,
- batched_curr_class,
- batched_word_labels,
- )
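
A hedged usage sketch for `custom_collate` above, built on a toy batch (the import path follows the Space's layout; the tensor shapes are placeholders):

```python
import torch

from src.data.collate import custom_collate  # module path as laid out in the Space

# Toy batch of two samples: (image, caption token ids, class id, word labels)
batch = [
    (torch.zeros(3, 64, 64), torch.tensor([1, 2, 3]), torch.tensor([0]), torch.tensor([1, 1, 0])),
    (torch.zeros(3, 64, 64), torch.tensor([4, 5]), torch.tensor([1]), torch.tensor([1, 0])),
]

imgs, captions, capt_lens, classes, word_labels = custom_collate(batch, torch.device("cpu"))
print(imgs.shape)   # torch.Size([2, 3, 64, 64])
print(captions)     # second caption padded with 0 up to length 3
print(capt_lens)    # tensor([[3], [2]])
```

In training, the same function is typically bound to a device with `functools.partial` and handed to a `DataLoader` as `collate_fn`.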
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/drive.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/drive.py
deleted file mode 100644
index 3cbfda8ae74bdf26c5aef197ff2866a7c7ad0cfd..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/drive.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class DRIVEDataset(CustomDataset):
- """DRIVE dataset.
-
- In segmentation map annotation for DRIVE, 0 stands for background, which is
- included in 2 categories. ``reduce_zero_label`` is fixed to False. The
- ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
- '_manual1.png'.
- """
-
- CLASSES = ('background', 'vessel')
-
- PALETTE = [[120, 120, 120], [6, 230, 230]]
-
- def __init__(self, **kwargs):
- super(DRIVEDataset, self).__init__(
- img_suffix='.png',
- seg_map_suffix='_manual1.png',
- reduce_zero_label=False,
- **kwargs)
- assert osp.exists(self.img_dir)
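
For reference, a dataset registered this way is normally selected by name from an mmseg-style config; the snippet below is only an illustrative sketch (paths and the empty pipeline are placeholders, not from this repository):

```python
# Placeholder paths and an empty pipeline; a real config lists LoadImageFromFile,
# LoadAnnotations, crops, flips, normalisation, etc.
train_pipeline = []

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='DRIVEDataset',           # resolved through the DATASETS registry above
        data_root='data/DRIVE',
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline,
    ),
)
```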
diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/commands/__init__.py b/spaces/MetaWabbit/Auto-GPT/autogpt/commands/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/MetaWabbit/Auto-GPT/main.py b/spaces/MetaWabbit/Auto-GPT/main.py
deleted file mode 100644
index 160addc390b94a8b143a3a2e18991a560f9b032e..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/main.py
+++ /dev/null
@@ -1 +0,0 @@
-from autogpt import main
diff --git a/spaces/Miyuki13242/Daily/README.md b/spaces/Miyuki13242/Daily/README.md
deleted file mode 100644
index d296bae1f9e22ab9a3b653eb04e2ad1d9548c36f..0000000000000000000000000000000000000000
--- a/spaces/Miyuki13242/Daily/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Daily
-emoji: 📊
-colorFrom: yellow
-colorTo: yellow
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ModIA/FrenchDroneKeyword/app.py b/spaces/ModIA/FrenchDroneKeyword/app.py
deleted file mode 100644
index 5dcd5e4e1232be03c1ccf0d1d3f0b02d8b1d6ed6..0000000000000000000000000000000000000000
--- a/spaces/ModIA/FrenchDroneKeyword/app.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import numpy as np
-
-import skorch
-import torch
-import torch.nn as nn
-
-import gradio as gr
-
-import librosa
-
-from joblib import dump, load
-
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import LabelEncoder
-
-from resnet import ResNet
-from gradio_utils import load_as_librosa, predict_gradio
-from dataloading import uniformize, to_numpy
-from preprocessing import MfccTransformer, TorchTransform
-
-
-SEED : int = 42
-np.random.seed(SEED)
-torch.manual_seed(SEED)
-
-model = load('./model/model.joblib')
-only_mffc_transform = load('./model/only_mffc_transform.joblib')
-label_encoder = load('./model/label_encoder.joblib')
-SAMPLE_RATE = load("./model/SAMPLE_RATE.joblib")
-METHOD = load("./model/METHOD.joblib")
-MAX_TIME = load("./model/MAX_TIME.joblib")
-N_MFCC = load("./model/N_MFCC.joblib")
-HOP_LENGHT = load("./model/HOP_LENGHT.joblib")
-
-sklearn_model = Pipeline(
- steps=[
- ("mfcc", only_mffc_transform),
- ("model", model)
- ]
- )
-
-uniform_lambda = lambda y, sr: uniformize(y, sr, METHOD, MAX_TIME)
-
-title = r"ResNet 9"
-
-description = r"""
-
-The ResNet-9 model was trained to classify drone speech commands.
-
-
-"""
-article = r"""
-- [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385)
-"""
-
-demo_men = gr.Interface(
- title = title,
- description = description,
- article = article,
- fn=lambda data: predict_gradio(
- data=data,
- uniform_lambda=uniform_lambda,
- sklearn_model=sklearn_model,
- label_transform=label_encoder,
- target_sr=SAMPLE_RATE),
- inputs = gr.Audio(source="microphone", type="numpy"),
- outputs = gr.Label(),
- # allow_flagging = "manual",
- # flagging_options = ['recule', 'tournedroite', 'arretetoi', 'tournegauche', 'gauche', 'avance', 'droite'],
- # flagging_dir = "./flag/men"
-)
-
-demo_men.launch()
diff --git a/spaces/Navneet574/Kidney_Stone_Prediction/README.md b/spaces/Navneet574/Kidney_Stone_Prediction/README.md
deleted file mode 100644
index 5ea3c888eaf7a44f7ca5ce9d9795064aae9afe31..0000000000000000000000000000000000000000
--- a/spaces/Navneet574/Kidney_Stone_Prediction/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Kidney Stone Prediction
-emoji: 🐢
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: cc-by-nc-sa-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/NbAiLab/maken-clip-image/README.md b/spaces/NbAiLab/maken-clip-image/README.md
deleted file mode 100644
index 4046d14f7c444518783edfeda90d466697fa482e..0000000000000000000000000000000000000000
--- a/spaces/NbAiLab/maken-clip-image/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Maken Clip Image
-emoji: 🖼️
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.11.0
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/NiuTaipu/moe-tts-test01/text/__init__.py b/spaces/NiuTaipu/moe-tts-test01/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/NiuTaipu/moe-tts-test01/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
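
A hedged usage sketch for `text_to_sequence`; the symbol list and cleaner name below are placeholders, since the real values come from the model's configuration files:

```python
from text import text_to_sequence

# Placeholder symbol set and cleaner name; real values come from the model config.
symbols = list("_,.!? abcdefghijklmnopqrstuvwxyz")
sequence = text_to_sequence("hello world", symbols, ["basic_cleaners"])
print(sequence)  # integer IDs; characters missing from `symbols` are silently skipped
```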
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py
deleted file mode 100644
index ef618adef7c7d010f8de38fb5ebeb5a35d2d3cac..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import os, sys
-import glob, itertools
-import pandas as pd
-
-WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
-
-if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
-    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
- sys.exit(-1)
-
-
-def load_langs(path):
- with open(path) as fr:
- langs = [l.strip() for l in fr]
- return langs
-
-
-
-def load_sentences(raw_data, split, direction):
- src, tgt = direction.split('-')
- src_path = f"{raw_data}/{split}.{direction}.{src}"
- tgt_path = f"{raw_data}/{split}.{direction}.{tgt}"
- if os.path.exists(src_path) and os.path.exists(tgt_path):
- return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())]
- else:
- return []
-
-def swap_direction(d):
- src, tgt = d.split('-')
- return f'{tgt}-{src}'
-
-def get_all_test_data(raw_data, directions, split='test'):
- test_data = [
- x
- for dd in directions
- for d in [dd, swap_direction(dd)]
- for x in load_sentences(raw_data, split, d)
- ]
- # all_test_data = {s for _, d in test_data for s in d}
- all_test_data = {}
- for lang, d in test_data:
- for s in d:
- s = s.strip()
- lgs = all_test_data.get(s, set())
- lgs.add(lang)
- all_test_data[s] = lgs
- return all_test_data, test_data
-
-def check_train_sentences(raw_data, direction, all_test_data, mess_up_train={}):
- src, tgt = direction.split('-')
- tgt_path = f"{raw_data}/train.{direction}.{tgt}"
- src_path = f"{raw_data}/train.{direction}.{src}"
- print(f'check training data in {raw_data}/train.{direction}')
- size = 0
- if not os.path.exists(tgt_path) or not os.path.exists(src_path):
- return mess_up_train, size
- with open(src_path) as f, open(tgt_path) as g:
- for src_line, tgt_line in zip(f, g):
- s = src_line.strip()
- t = tgt_line.strip()
- size += 1
- if s in all_test_data:
- langs = mess_up_train.get(s, set())
- langs.add(direction)
- mess_up_train[s] = langs
- if t in all_test_data:
- langs = mess_up_train.get(t, set())
- langs.add(direction)
- mess_up_train[t] = langs
- return mess_up_train, size
-
-def check_train_all(raw_data, directions, all_test_data):
- mess_up_train = {}
- data_sizes = {}
- for direction in directions:
- _, size = check_train_sentences(raw_data, direction, all_test_data, mess_up_train)
- data_sizes[direction] = size
- return mess_up_train, data_sizes
-
-def count_train_in_other_set(mess_up_train):
- train_in_others = [(direction, s) for s, directions in mess_up_train.items() for direction in directions]
- counts = {}
- for direction, s in train_in_others:
- counts[direction] = counts.get(direction, 0) + 1
- return counts
-
-def train_size_if_remove_in_otherset(data_sizes, mess_up_train):
- counts_in_other = count_train_in_other_set(mess_up_train)
- remain_sizes = []
- for direction, count in counts_in_other.items():
- remain_sizes.append((direction, data_sizes[direction] - count, data_sizes[direction], count, 100 * count / data_sizes[direction] ))
- return remain_sizes
-
-
-def remove_messed_up_sentences(raw_data, direction, mess_up_train, mess_up_train_pairs, corrected_langs):
- split = 'train'
- src_lang, tgt_lang = direction.split('-')
-
- tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}"
- src = f"{raw_data}/{split}.{direction}.{src_lang}"
- print(f'working on {direction}: ', src, tgt)
-    if not os.path.exists(tgt) or not os.path.exists(src):
-        return 0, 0  # keep the caller's tuple unpacking valid when the files are missing
-
- corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}"
- corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}"
- line_num = 0
- keep_num = 0
- with open(src, encoding='utf8',) as fsrc, \
- open(tgt, encoding='utf8',) as ftgt, \
- open(corrected_src, 'w', encoding='utf8') as fsrc_corrected, \
- open(corrected_tgt, 'w', encoding='utf8') as ftgt_corrected:
- for s, t in zip(fsrc, ftgt):
- s = s.strip()
- t = t.strip()
- if t not in mess_up_train \
- and s not in mess_up_train \
- and (s, t) not in mess_up_train_pairs \
- and (t, s) not in mess_up_train_pairs:
- corrected_langs.add(direction)
- print(s, file=fsrc_corrected)
- print(t, file=ftgt_corrected)
- keep_num += 1
- line_num += 1
- if line_num % 1000 == 0:
- print(f'completed {line_num} lines', end='\r')
- return line_num, keep_num
-
-##########
-
-
-def merge_valid_test_messup(mess_up_train_valid, mess_up_train_test):
- merged_mess = []
- for s in set(list(mess_up_train_valid.keys()) + list(mess_up_train_test.keys())):
- if not s:
- continue
- valid = mess_up_train_valid.get(s, set())
- test = mess_up_train_test.get(s, set())
- merged_mess.append((s, valid | test))
- return dict(merged_mess)
-
-
-
-#########
-def check_train_pairs(raw_data, direction, all_test_data, mess_up_train={}):
- src, tgt = direction.split('-')
- #a hack; TODO: check the reversed directions
- path1 = f"{raw_data}/train.{src}-{tgt}.{src}"
- path2 = f"{raw_data}/train.{src}-{tgt}.{tgt}"
- if not os.path.exists(path1) or not os.path.exists(path2) :
- return
-
- with open(path1) as f1, open(path2) as f2:
- for src_line, tgt_line in zip(f1, f2):
- s = src_line.strip()
- t = tgt_line.strip()
- if (s, t) in all_test_data or (t, s) in all_test_data:
- langs = mess_up_train.get( (s, t), set())
- langs.add(src)
- langs.add(tgt)
- mess_up_train[(s, t)] = langs
-
-
-def load_pairs(raw_data, split, direction):
- src, tgt = direction.split('-')
- src_f = f"{raw_data}/{split}.{direction}.{src}"
- tgt_f = f"{raw_data}/{split}.{direction}.{tgt}"
- if tgt != 'en_XX':
- src_f, tgt_f = tgt_f, src_f
- if os.path.exists(src_f) and os.path.exists(tgt_f):
- return list(zip(open(src_f).read().splitlines(),
- open(tgt_f).read().splitlines(),
- ))
- else:
- return []
-
-# skip_langs = ['cs_CZ', 'en_XX', 'tl_XX', 'tr_TR']
-def get_messed_up_test_pairs(split, directions):
- test_pairs = [
- (d, load_pairs(raw_data, split, d))
- for d in directions
- ]
- # all_test_data = {s for _, d in test_data for s in d}
- all_test_pairs = {}
- for direction, d in test_pairs:
- src, tgt = direction.split('-')
- for s in d:
- langs = all_test_pairs.get(s, set())
- langs.add(src)
- langs.add(tgt)
- all_test_pairs[s] = langs
- mess_up_train_pairs = {}
- for direction in directions:
- check_train_pairs(raw_data, direction, all_test_pairs, mess_up_train_pairs)
- return all_test_pairs, mess_up_train_pairs
-
-
-
-if __name__ == "__main__":
- #######
- import argparse
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--from-folder',
- required=True,
- type=str)
- parser.add_argument(
- '--to-folder',
- required=True,
- type=str)
- parser.add_argument(
- '--directions',
- default=None,
- type=str)
-
-
- args = parser.parse_args()
- raw_data = args.from_folder
- to_folder = args.to_folder
- os.makedirs(to_folder, exist_ok=True)
-
- if args.directions:
- directions = args.directions.split(',')
- else:
- raw_files = itertools.chain(
- glob.glob(f'{raw_data}/train*'),
- glob.glob(f'{raw_data}/valid*'),
- glob.glob(f'{raw_data}/test*'),
- )
- directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
- print('working on directions: ', directions)
-
- ##########
-
-
-
- all_test_data, test_data = get_all_test_data(raw_data, directions, 'test')
- print('==loaded test data==')
- all_valid_data, valid_data = get_all_test_data(raw_data, directions, 'valid')
- print('==loaded valid data==')
- all_valid_test_data = merge_valid_test_messup(all_test_data, all_valid_data)
- mess_up_train, data_sizes = check_train_all(raw_data, directions, all_valid_test_data)
- print('training messing up with valid, test data:', len(mess_up_train))
- data_situation = train_size_if_remove_in_otherset(data_sizes, mess_up_train)
- df = pd.DataFrame(data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent'])
- df.sort_values('remove_percent', ascending=False)
- df.to_csv(f'{raw_data}/clean_summary.tsv', sep='\t')
- print(f'projected data clean summary in: {raw_data}/clean_summary.tsv')
-
- # correct the dataset:
- all_test_pairs, mess_up_test_train_pairs = get_messed_up_test_pairs('test', directions)
- all_valid_pairs, mess_up_valid_train_pairs = get_messed_up_test_pairs('valid', directions)
-
- all_messed_pairs = set(mess_up_test_train_pairs.keys()).union(set(mess_up_valid_train_pairs.keys()))
- corrected_directions = set()
-
- real_data_situation = []
- for direction in directions:
- org_size, new_size = remove_messed_up_sentences(raw_data, direction, mess_up_train, all_messed_pairs, corrected_directions)
- if org_size == 0:
- print(f"{direction} has size 0")
- continue
- real_data_situation.append(
- (direction, new_size, org_size, org_size - new_size, (org_size - new_size) / org_size * 100)
- )
- print('corrected directions: ', corrected_directions)
- df = pd.DataFrame(real_data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent'])
- df.sort_values('remove_percent', ascending=False)
- df.to_csv(f'{raw_data}/actual_clean_summary.tsv', sep='\t')
- print(f'actual data clean summary (which can be different from the projected one because of duplications) in: {raw_data}/actual_clean_summary.tsv')
-
- import shutil
- for direction in directions:
- src_lang, tgt_lang = direction.split('-')
- for split in ['train', 'valid', 'test']:
- # copying valid, test and uncorrected train
- if direction in corrected_directions and split == 'train':
- continue
- tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}"
- src = f"{raw_data}/{split}.{direction}.{src_lang}"
- if not (os.path.exists(src) and os.path.exists(tgt)):
- continue
- corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}"
- corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}"
- print(f'copying {src} to {corrected_src}')
- shutil.copyfile(src, corrected_src)
- print(f'copying {tgt} to {corrected_tgt}')
- shutil.copyfile(tgt, corrected_tgt)
-
- print('completed')
\ No newline at end of file
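
The heart of the script above is a membership check: collect every valid/test sentence, then drop any training pair whose source or target side appears in that set. A minimal, self-contained sketch of that idea (toy data, not the script's exact bookkeeping):

```python
# Sentences that occur in the valid/test splits.
held_out = {"bonjour le monde", "hello world"}

train_pairs = [
    ("bonjour le monde", "hello world"),        # leaks into the held-out data -> dropped
    ("merci beaucoup", "thank you very much"),  # clean -> kept
]

clean_pairs = [
    (src, tgt) for src, tgt in train_pairs
    if src.strip() not in held_out and tgt.strip() not in held_out
]
print(clean_pairs)  # [('merci beaucoup', 'thank you very much')]
```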
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/__init__.py
deleted file mode 100644
index 06cec18183ca14cd534d14558e8b44e25f3e69d5..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .wav2vec import * # noqa
-from .wav2vec2 import * # noqa
-from .wav2vec2_asr import * # noqa
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py
deleted file mode 100644
index 0d5f7fa818a45ecf132627d240afac653e148070..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py
+++ /dev/null
@@ -1,71 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-import inflect
-import re
-
-
-_inflect = inflect.engine()
-_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
-_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
-_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
-_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
-_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
-_number_re = re.compile(r'[0-9]+')
-
-
-def _remove_commas(m):
- return m.group(1).replace(',', '')
-
-
-def _expand_decimal_point(m):
- return m.group(1).replace('.', ' point ')
-
-
-def _expand_dollars(m):
- match = m.group(1)
- parts = match.split('.')
- if len(parts) > 2:
- return match + ' dollars' # Unexpected format
- dollars = int(parts[0]) if parts[0] else 0
- cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
- if dollars and cents:
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
- cent_unit = 'cent' if cents == 1 else 'cents'
- return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
- elif dollars:
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
- return '%s %s' % (dollars, dollar_unit)
- elif cents:
- cent_unit = 'cent' if cents == 1 else 'cents'
- return '%s %s' % (cents, cent_unit)
- else:
- return 'zero dollars'
-
-
-def _expand_ordinal(m):
- return _inflect.number_to_words(m.group(0))
-
-
-def _expand_number(m):
- num = int(m.group(0))
- if num > 1000 and num < 3000:
- if num == 2000:
- return 'two thousand'
- elif num > 2000 and num < 2010:
- return 'two thousand ' + _inflect.number_to_words(num % 100)
- elif num % 100 == 0:
- return _inflect.number_to_words(num // 100) + ' hundred'
- else:
- return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
- else:
- return _inflect.number_to_words(num, andword='')
-
-
-def normalize_numbers(text):
- text = re.sub(_comma_number_re, _remove_commas, text)
- text = re.sub(_pounds_re, r'\1 pounds', text)
- text = re.sub(_dollars_re, _expand_dollars, text)
- text = re.sub(_decimal_number_re, _expand_decimal_point, text)
- text = re.sub(_ordinal_re, _expand_ordinal, text)
- text = re.sub(_number_re, _expand_number, text)
- return text
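
A quick check of `normalize_numbers` (assuming the functions above are in scope): the dollar pattern fires before the plain-number pattern, so the digits it emits are spelled out on the final pass.

```python
print(normalize_numbers("It costs $12.50 for 3 tickets."))
# -> "It costs twelve dollars, fifty cents for three tickets."
# Numbers between 1000 and 3000 are read as years, e.g. 1995 -> "nineteen ninety-five".
```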
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/checkpoint_utils.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/checkpoint_utils.py
deleted file mode 100644
index ef5d4c9022c3c35722f0bc9150260c7a65d35e5f..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/checkpoint_utils.py
+++ /dev/null
@@ -1,858 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import ast
-import collections
-import contextlib
-import logging
-import numpy as np
-import os
-import re
-import time
-import traceback
-from collections import OrderedDict
-from typing import Any, Dict, Optional, Union
-
-import torch
-from fairseq.data import data_utils
-from fairseq.dataclass.configs import CheckpointConfig
-from fairseq.dataclass.utils import (
- convert_namespace_to_omegaconf,
- overwrite_args_by_name,
-)
-from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
-from fairseq.file_io import PathManager
-from fairseq.models import FairseqDecoder, FairseqEncoder
-from omegaconf import DictConfig, open_dict, OmegaConf
-
-
-logger = logging.getLogger(__name__)
-
-
-def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
- from fairseq import meters
-
- # only one worker should attempt to create the required dir
- if trainer.data_parallel_rank == 0:
- os.makedirs(cfg.save_dir, exist_ok=True)
-
- prev_best = getattr(save_checkpoint, "best", val_loss)
- if val_loss is not None:
- best_function = max if cfg.maximize_best_checkpoint_metric else min
- save_checkpoint.best = best_function(val_loss, prev_best)
-
- if cfg.no_save:
- return
-
- trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state
-
- if not trainer.should_save_checkpoint_on_current_rank:
- if trainer.always_call_state_dict_during_save_checkpoint:
- trainer.state_dict()
- return
-
- write_timer = meters.StopwatchMeter()
- write_timer.start()
-
- epoch = epoch_itr.epoch
- end_of_epoch = epoch_itr.end_of_epoch()
- updates = trainer.get_num_updates()
-
- logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")
-
- def is_better(a, b):
- return a >= b if cfg.maximize_best_checkpoint_metric else a <= b
-
- suffix = trainer.checkpoint_suffix
- checkpoint_conds = collections.OrderedDict()
- checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
- end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
- )
- checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
- not end_of_epoch
- and cfg.save_interval_updates > 0
- and updates % cfg.save_interval_updates == 0
- )
- checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
- not hasattr(save_checkpoint, "best")
- or is_better(val_loss, save_checkpoint.best)
- )
- if val_loss is not None and cfg.keep_best_checkpoints > 0:
- worst_best = getattr(save_checkpoint, "best", None)
- chkpts = checkpoint_paths(
- cfg.save_dir,
- pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
- cfg.best_checkpoint_metric, suffix
- ),
- )
- if len(chkpts) > 0:
- p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0]
- worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), ""))
- # add random digits to resolve ties
- with data_utils.numpy_seed(epoch, updates, val_loss):
- rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints)
-
- checkpoint_conds[
- "checkpoint.best_{}_{:.3f}{}{}.pt".format(
- cfg.best_checkpoint_metric,
- val_loss,
- rand_sfx,
- suffix
- )
- ] = worst_best is None or is_better(val_loss, worst_best)
- checkpoint_conds[
- "checkpoint_last{}.pt".format(suffix)
- ] = not cfg.no_last_checkpoints
-
- extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
- if hasattr(save_checkpoint, "best"):
- extra_state.update({"best": save_checkpoint.best})
-
- checkpoints = [
- os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
- ]
- if len(checkpoints) > 0:
- trainer.save_checkpoint(checkpoints[0], extra_state)
- for cp in checkpoints[1:]:
- if cfg.write_checkpoints_asynchronously:
- # TODO[ioPath]: Need to implement a delayed asynchronous
- # file copying/moving feature.
- logger.warning(
- f"ioPath is not copying {checkpoints[0]} to {cp} "
- "since async write mode is on."
- )
- else:
- assert PathManager.copy(
- checkpoints[0], cp, overwrite=True
- ), f"Failed to copy {checkpoints[0]} to {cp}"
-
- write_timer.stop()
- logger.info(
- "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
- checkpoints[0], epoch, updates, val_loss, write_timer.sum
- )
- )
-
- if not end_of_epoch and cfg.keep_interval_updates > 0:
- # remove old checkpoints; checkpoints are sorted in descending order
- if cfg.keep_interval_updates_pattern == -1:
- checkpoints = checkpoint_paths(
- cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix)
- )
- else:
- checkpoints = checkpoint_paths(
- cfg.save_dir,
- pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix),
- keep_match=True,
- )
- checkpoints = [
- x[0]
- for x in checkpoints
- if x[1] % cfg.keep_interval_updates_pattern != 0
- ]
-
- for old_chk in checkpoints[cfg.keep_interval_updates :]:
- if os.path.lexists(old_chk):
- os.remove(old_chk)
- elif PathManager.exists(old_chk):
- PathManager.rm(old_chk)
-
- if cfg.keep_last_epochs > 0:
- # remove old epoch checkpoints; checkpoints are sorted in descending order
- checkpoints = checkpoint_paths(
- cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix)
- )
- for old_chk in checkpoints[cfg.keep_last_epochs :]:
- if os.path.lexists(old_chk):
- os.remove(old_chk)
- elif PathManager.exists(old_chk):
- PathManager.rm(old_chk)
-
- if cfg.keep_best_checkpoints > 0:
- # only keep the best N checkpoints according to validation metric
- checkpoints = checkpoint_paths(
- cfg.save_dir,
- pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
- cfg.best_checkpoint_metric, suffix
- ),
- )
- if not cfg.maximize_best_checkpoint_metric:
- checkpoints = checkpoints[::-1]
- for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
- if os.path.lexists(old_chk):
- os.remove(old_chk)
- elif PathManager.exists(old_chk):
- PathManager.rm(old_chk)
-
-
-def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
- """
- Load a checkpoint and restore the training iterator.
-
- *passthrough_args* will be passed through to
- ``trainer.get_train_iterator``.
- """
-
- reset_optimizer = cfg.reset_optimizer
- reset_lr_scheduler = cfg.reset_lr_scheduler
- optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
- reset_meters = cfg.reset_meters
- reset_dataloader = cfg.reset_dataloader
-
- if cfg.finetune_from_model is not None and (
- reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
- ):
- raise ValueError(
- "--finetune-from-model can not be set together with either --reset-optimizer"
- " or reset_lr_scheduler or reset_meters or reset_dataloader"
- )
-
- suffix = trainer.checkpoint_suffix
- if (
- cfg.restore_file == "checkpoint_last.pt"
- ): # default value of restore_file is 'checkpoint_last.pt'
- checkpoint_path = os.path.join(
- cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
- )
- first_launch = not PathManager.exists(checkpoint_path)
- if cfg.finetune_from_model is not None and first_launch:
- # if there is no last checkpoint to restore, start the finetune from pretrained model
- # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
- if PathManager.exists(cfg.finetune_from_model):
- checkpoint_path = cfg.finetune_from_model
- reset_optimizer = True
- reset_lr_scheduler = True
- reset_meters = True
- reset_dataloader = True
- logger.info(
- f"loading pretrained model from {checkpoint_path}: "
- "optimizer, lr scheduler, meters, dataloader will be reset"
- )
- else:
- raise ValueError(
-                    f"--finetune-from-model {cfg.finetune_from_model} does not exist"
- )
- elif suffix is not None:
- checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
- else:
- checkpoint_path = cfg.restore_file
-
- if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
- raise ValueError(
- "--finetune-from-model and --restore-file (non-default value) "
- "can not be specified together: " + str(cfg)
- )
-
- extra_state = trainer.load_checkpoint(
- checkpoint_path,
- reset_optimizer,
- reset_lr_scheduler,
- optimizer_overrides,
- reset_meters=reset_meters,
- )
-
- if (
- extra_state is not None
- and "best" in extra_state
- and not reset_optimizer
- and not reset_meters
- ):
- save_checkpoint.best = extra_state["best"]
-
- if extra_state is not None and not reset_dataloader:
- # restore iterator from checkpoint
- itr_state = extra_state["train_iterator"]
- epoch_itr = trainer.get_train_iterator(
- epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
- )
- epoch_itr.load_state_dict(itr_state)
- else:
- epoch_itr = trainer.get_train_iterator(
- epoch=1, load_dataset=True, **passthrough_args
- )
-
- trainer.lr_step(epoch_itr.epoch)
-
- return extra_state, epoch_itr
-
-
-def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
- """Loads a checkpoint to CPU (with upgrading for backward compatibility).
-
- If doing single-GPU training or if the checkpoint is only being loaded by at
- most one process on each node (current default behavior is for only rank 0
- to read the checkpoint from disk), load_on_all_ranks should be False to
- avoid errors from torch.distributed not having been initialized or
- torch.distributed.barrier() hanging.
-
- If all processes on each node may be loading the checkpoint
- simultaneously, load_on_all_ranks should be set to True to avoid I/O
- conflicts.
-
- There's currently no support for > 1 but < all processes loading the
- checkpoint on each node.
- """
- local_path = PathManager.get_local_path(path)
- # The locally cached file returned by get_local_path() may be stale for
- # remote files that are periodically updated/overwritten (ex:
- # checkpoint_last.pt) - so we remove the local copy, sync across processes
- # (if needed), and then download a fresh copy.
- if local_path != path and PathManager.path_requires_pathmanager(path):
- try:
- os.remove(local_path)
- except FileNotFoundError:
- # With potentially multiple processes removing the same file, the
- # file being missing is benign (missing_ok isn't available until
- # Python 3.8).
- pass
- if load_on_all_ranks:
- torch.distributed.barrier()
- local_path = PathManager.get_local_path(path)
-
- with open(local_path, "rb") as f:
- state = torch.load(f, map_location=torch.device("cpu"))
-
- if "args" in state and state["args"] is not None and arg_overrides is not None:
- args = state["args"]
- for arg_name, arg_val in arg_overrides.items():
- setattr(args, arg_name, arg_val)
-
- if "cfg" in state and state["cfg"] is not None:
-
- # hack to be able to set Namespace in dict config. this should be removed when we update to newer
- # omegaconf version that supports object flags, or when we migrate all existing models
- from omegaconf import _utils
-
- old_primitive = _utils.is_primitive_type
- _utils.is_primitive_type = lambda _: True
-
- state["cfg"] = OmegaConf.create(state["cfg"])
-
- _utils.is_primitive_type = old_primitive
- OmegaConf.set_struct(state["cfg"], True)
-
- if arg_overrides is not None:
- overwrite_args_by_name(state["cfg"], arg_overrides)
-
- state = _upgrade_state_dict(state)
- return state
-
-
-def load_model_ensemble(
- filenames,
- arg_overrides: Optional[Dict[str, Any]] = None,
- task=None,
- strict=True,
- suffix="",
- num_shards=1,
- state=None,
-):
- """Loads an ensemble of models.
-
- Args:
- filenames (List[str]): checkpoint files to load
- arg_overrides (Dict[str,Any], optional): override model args that
- were used during model training
- task (fairseq.tasks.FairseqTask, optional): task to use for loading
- """
- assert not (
- strict and num_shards > 1
- ), "Cannot load state dict with strict=True and checkpoint shards > 1"
- ensemble, args, _task = load_model_ensemble_and_task(
- filenames,
- arg_overrides,
- task,
- strict,
- suffix,
- num_shards,
- state,
- )
- return ensemble, args
-
-
-def get_maybe_sharded_checkpoint_filename(
- filename: str, suffix: str, shard_idx: int, num_shards: int
-) -> str:
- orig_filename = filename
- filename = filename.replace(".pt", suffix + ".pt")
- fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt"
- model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
- if PathManager.exists(fsdp_filename):
- return fsdp_filename
- elif num_shards > 1:
- return model_parallel_filename
- else:
- return filename
-
-
-def load_model_ensemble_and_task(
- filenames,
- arg_overrides: Optional[Dict[str, Any]] = None,
- task=None,
- strict=True,
- suffix="",
- num_shards=1,
- state=None,
-):
- assert state is None or len(filenames) == 1
-
- from fairseq import tasks
-
- assert not (
- strict and num_shards > 1
- ), "Cannot load state dict with strict=True and checkpoint shards > 1"
- ensemble = []
- cfg = None
- for filename in filenames:
- orig_filename = filename
- model_shard_state = {"shard_weights": [], "shard_metadata": []}
- assert num_shards > 0
- st = time.time()
- for shard_idx in range(num_shards):
- filename = get_maybe_sharded_checkpoint_filename(
- orig_filename, suffix, shard_idx, num_shards
- )
-
- if not PathManager.exists(filename):
- raise IOError("Model file not found: {}".format(filename))
- if state is None:
- state = load_checkpoint_to_cpu(filename, arg_overrides)
- if "args" in state and state["args"] is not None:
- cfg = convert_namespace_to_omegaconf(state["args"])
- elif "cfg" in state and state["cfg"] is not None:
- cfg = state["cfg"]
- else:
- raise RuntimeError(
- f"Neither args nor cfg exist in state keys = {state.keys()}"
- )
-
- if task is None:
- task = tasks.setup_task(cfg.task)
-
- if "task_state" in state:
- task.load_state_dict(state["task_state"])
-
- if "fsdp_metadata" in state and num_shards > 1:
- model_shard_state["shard_weights"].append(state["model"])
- model_shard_state["shard_metadata"].append(state["fsdp_metadata"])
- # check FSDP import before the code goes too far
- if not has_FSDP:
- raise ImportError(
- "Cannot find FullyShardedDataParallel. "
- "Please install fairscale with: pip install fairscale"
- )
- if shard_idx == num_shards - 1:
- consolidated_model_state = FSDP.consolidate_shard_weights(
- shard_weights=model_shard_state["shard_weights"],
- shard_metadata=model_shard_state["shard_metadata"],
- )
- model = task.build_model(cfg.model)
- model.load_state_dict(
- consolidated_model_state, strict=strict, model_cfg=cfg.model
- )
- else:
- # model parallel checkpoint or unsharded checkpoint
- model = task.build_model(cfg.model)
- model.load_state_dict(
- state["model"], strict=strict, model_cfg=cfg.model
- )
-
- # reset state so it gets loaded for the next model in ensemble
- state = None
- if shard_idx % 10 == 0 and shard_idx > 0:
- elapsed = time.time() - st
- logger.info(
- f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard"
- )
-
- # build model for ensemble
- ensemble.append(model)
- return ensemble, cfg, task
-
-
-def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False):
- """Retrieves all checkpoints found in `path` directory.
-
- Checkpoints are identified by matching filename to the specified pattern. If
- the pattern contains groups, the result will be sorted by the first group in
- descending order.
- """
- pt_regexp = re.compile(pattern)
- files = PathManager.ls(path)
-
- entries = []
- for i, f in enumerate(files):
- m = pt_regexp.fullmatch(f)
- if m is not None:
- idx = float(m.group(1)) if len(m.groups()) > 0 else i
- entries.append((idx, m.group(0)))
- if keep_match:
- return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)]
- else:
- return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
-
-
-def torch_persistent_save(obj, filename, async_write: bool = False):
- if async_write:
- with PathManager.opena(filename, "wb") as f:
- _torch_persistent_save(obj, f)
- else:
- if PathManager.supports_rename(filename):
- # do atomic save
- with PathManager.open(filename + ".tmp", "wb") as f:
- _torch_persistent_save(obj, f)
- PathManager.rename(filename + ".tmp", filename)
- else:
- # fallback to non-atomic save
- with PathManager.open(filename, "wb") as f:
- _torch_persistent_save(obj, f)
-
-
-def _torch_persistent_save(obj, f):
- if isinstance(f, str):
- with PathManager.open(f, "wb") as h:
- torch_persistent_save(obj, h)
- return
- for i in range(3):
- try:
- return torch.save(obj, f)
- except Exception:
- if i == 2:
- logger.error(traceback.format_exc())
- raise
-
-
-def _upgrade_state_dict(state):
- """Helper for upgrading old model checkpoints."""
-
- # add optimizer_history
- if "optimizer_history" not in state:
- state["optimizer_history"] = [
- {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
- ]
- state["last_optimizer_state"] = state["optimizer"]
- del state["optimizer"]
- del state["best_loss"]
- # move extra_state into sub-dictionary
- if "epoch" in state and "extra_state" not in state:
- state["extra_state"] = {
- "epoch": state["epoch"],
- "batch_offset": state["batch_offset"],
- "val_loss": state["val_loss"],
- }
- del state["epoch"]
- del state["batch_offset"]
- del state["val_loss"]
- # reduce optimizer history's memory usage (only keep the last state)
- if "optimizer" in state["optimizer_history"][-1]:
- state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
- for optim_hist in state["optimizer_history"]:
- del optim_hist["optimizer"]
- # record the optimizer class name
- if "optimizer_name" not in state["optimizer_history"][-1]:
- state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
- # move best_loss into lr_scheduler_state
- if "lr_scheduler_state" not in state["optimizer_history"][-1]:
- state["optimizer_history"][-1]["lr_scheduler_state"] = {
- "best": state["optimizer_history"][-1]["best_loss"]
- }
- del state["optimizer_history"][-1]["best_loss"]
- # keep track of number of updates
- if "num_updates" not in state["optimizer_history"][-1]:
- state["optimizer_history"][-1]["num_updates"] = 0
- # old model checkpoints may not have separate source/target positions
- if (
- "args" in state
- and hasattr(state["args"], "max_positions")
- and not hasattr(state["args"], "max_source_positions")
- ):
- state["args"].max_source_positions = state["args"].max_positions
- state["args"].max_target_positions = state["args"].max_positions
- # use stateful training data iterator
- if "train_iterator" not in state["extra_state"]:
- state["extra_state"]["train_iterator"] = {
- "epoch": state["extra_state"]["epoch"],
- "iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
- }
-
- # backward compatibility, cfg updates
- if "args" in state and state["args"] is not None:
- # default to translation task
- if not hasattr(state["args"], "task"):
- state["args"].task = "translation"
- # --raw-text and --lazy-load are deprecated
- if getattr(state["args"], "raw_text", False):
- state["args"].dataset_impl = "raw"
- elif getattr(state["args"], "lazy_load", False):
- state["args"].dataset_impl = "lazy"
- # epochs start at 1
- if state["extra_state"]["train_iterator"] is not None:
- state["extra_state"]["train_iterator"]["epoch"] = max(
- state["extra_state"]["train_iterator"].get("epoch", 1), 1
- )
- # --remove-bpe ==> --postprocess
- if hasattr(state["args"], "remove_bpe"):
- state["args"].post_process = state["args"].remove_bpe
- # --min-lr ==> --stop-min-lr
- if hasattr(state["args"], "min_lr"):
- state["args"].stop_min_lr = state["args"].min_lr
- del state["args"].min_lr
- # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion
- if (
- hasattr(state["args"], "criterion")
- and state["args"].criterion in [
- "binary_cross_entropy",
- "kd_binary_cross_entropy",
- ]
- ):
- state["args"].criterion = "wav2vec"
- # remove log_keys if it's None (criteria will supply a default value of [])
- if hasattr(state["args"], "log_keys") and state["args"].log_keys is None:
- delattr(state["args"], "log_keys")
- # speech_pretraining => audio pretraining
- if (
- hasattr(state["args"], "task")
- and state["args"].task == "speech_pretraining"
- ):
- state["args"].task = "audio_pretraining"
- # audio_cpc => wav2vec
- if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc":
- state["args"].arch = "wav2vec"
- # convert legacy float learning rate to List[float]
- if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float):
- state["args"].lr = [state["args"].lr]
- # convert task data arg to a string instead of List[string]
- if (
- hasattr(state["args"], "data")
- and isinstance(state["args"].data, list)
- and len(state["args"].data) > 0
- ):
- state["args"].data = state["args"].data[0]
- # remove keys in state["args"] related to teacher-student learning
- for key in [
- "static_teachers",
- "static_teacher_weights",
- "dynamic_teachers",
- "dynamic_teacher_weights",
- ]:
- if key in state["args"]:
- delattr(state["args"], key)
-
- state["cfg"] = convert_namespace_to_omegaconf(state["args"])
-
- if "cfg" in state and state["cfg"] is not None:
- cfg = state["cfg"]
- with open_dict(cfg):
- # any upgrades for Hydra-based configs
- if (
- "task" in cfg
- and "eval_wer_config" in cfg.task
- and isinstance(cfg.task.eval_wer_config.print_alignment, bool)
- ):
- cfg.task.eval_wer_config.print_alignment = "hard"
- if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool):
- cfg.generation.print_alignment = "hard" if cfg.generation.print_alignment else None
- if (
- "model" in cfg
- and "w2v_args" in cfg.model
- and cfg.model.w2v_args is not None
- and (
- hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args
- )
- and hasattr(cfg.model.w2v_args.task, "eval_wer_config")
- and cfg.model.w2v_args.task.eval_wer_config is not None
- and isinstance(
- cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool
- )
- ):
- cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard"
-
- return state
-
-
-def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
- """Prune the given state_dict if desired for LayerDrop
- (https://arxiv.org/abs/1909.11556).
-
- Training with LayerDrop allows models to be robust to pruning at inference
- time. This function prunes state_dict to allow smaller models to be loaded
- from a larger model and re-maps the existing state_dict for this to occur.
-
- It's called by functions that load models from checkpoints and does not
- need to be called directly.
- """
- arch = None
- if model_cfg is not None:
- arch = (
- model_cfg._name
- if isinstance(model_cfg, DictConfig)
- else getattr(model_cfg, "arch", None)
- )
-
- if not model_cfg or arch is None or arch == "ptt_transformer":
- # args should not be none, but don't crash if it is.
- return state_dict
-
- encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
- decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)
-
- if not encoder_layers_to_keep and not decoder_layers_to_keep:
- return state_dict
-
- # apply pruning
- logger.info(
- "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
- )
-
- def create_pruning_pass(layers_to_keep, layer_name):
- keep_layers = sorted(
- int(layer_string) for layer_string in layers_to_keep.split(",")
- )
- mapping_dict = {}
- for i in range(len(keep_layers)):
- mapping_dict[str(keep_layers[i])] = str(i)
-
- regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
- return {"substitution_regex": regex, "mapping_dict": mapping_dict}
-
- pruning_passes = []
- if encoder_layers_to_keep:
- pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
- if decoder_layers_to_keep:
- pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
-
- new_state_dict = {}
- for layer_name in state_dict.keys():
- match = re.search(r"\.layers\.(\d+)\.", layer_name)
- # if layer has no number in it, it is a supporting layer, such as an
- # embedding
- if not match:
- new_state_dict[layer_name] = state_dict[layer_name]
- continue
-
- # otherwise, layer should be pruned.
- original_layer_number = match.group(1)
- # figure out which mapping dict to replace from
- for pruning_pass in pruning_passes:
- if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
- "substitution_regex"
- ].search(layer_name):
- new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
- substitution_match = pruning_pass["substitution_regex"].search(
- layer_name
- )
- new_state_key = (
- layer_name[: substitution_match.start(1)]
- + new_layer_number
- + layer_name[substitution_match.end(1) :]
- )
- new_state_dict[new_state_key] = state_dict[layer_name]
-
- # Since layers are now pruned, *_layers_to_keep are no longer needed.
-    # This is more of an "it would make it work" fix rather than a proper fix.
- if isinstance(model_cfg, DictConfig):
- context = open_dict(model_cfg)
- else:
- context = contextlib.ExitStack()
- with context:
- if hasattr(model_cfg, "encoder_layers_to_keep"):
- model_cfg.encoder_layers_to_keep = None
- if hasattr(model_cfg, "decoder_layers_to_keep"):
- model_cfg.decoder_layers_to_keep = None
-
- return new_state_dict
-
-
-def load_pretrained_component_from_model(
- component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
-):
- """
- Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
- provided `component` object. If state_dict fails to load, there may be a
- mismatch in the architecture of the corresponding `component` found in the
- `checkpoint` file.
- """
- if not PathManager.exists(checkpoint):
- raise IOError("Model file not found: {}".format(checkpoint))
- state = load_checkpoint_to_cpu(checkpoint)
- if isinstance(component, FairseqEncoder):
- component_type = "encoder"
- elif isinstance(component, FairseqDecoder):
- component_type = "decoder"
- else:
- raise ValueError(
- "component to load must be either a FairseqEncoder or "
-            "FairseqDecoder. Loading other component types is not supported."
- )
- component_state_dict = OrderedDict()
- for key in state["model"].keys():
- if key.startswith(component_type):
- # encoder.input_layers.0.0.weight --> input_layers.0.0.weight
- component_subkey = key[len(component_type) + 1 :]
- component_state_dict[component_subkey] = state["model"][key]
- component.load_state_dict(component_state_dict, strict=True)
- return component
-
-
-def verify_checkpoint_directory(save_dir: str) -> None:
- if not os.path.exists(save_dir):
- os.makedirs(save_dir, exist_ok=True)
- temp_file_path = os.path.join(save_dir, "dummy")
- try:
- with open(temp_file_path, "w"):
- pass
- except OSError as e:
- logger.warning(
- "Unable to access checkpoint save directory: {}".format(save_dir)
- )
- raise e
- else:
- os.remove(temp_file_path)
-
-
-def load_ema_from_checkpoint(fpath):
- """Loads exponential moving averaged (EMA) checkpoint from input and
- returns a model with ema weights.
-
- Args:
- fpath: A string path of checkpoint to load from.
-
- Returns:
- A dict of string keys mapping to various values. The 'model' key
- from the returned dict should correspond to an OrderedDict mapping
- string parameter names to torch Tensors.
- """
- params_dict = collections.OrderedDict()
- new_state = None
-
- with PathManager.open(fpath, 'rb') as f:
- new_state = torch.load(
- f,
- map_location=(
- lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
- ),
- )
-
- # EMA model is stored in a separate "extra state"
- model_params = new_state['extra_state']['ema']
-
- for key in list(model_params.keys()):
- p = model_params[key]
- if isinstance(p, torch.HalfTensor):
- p = p.float()
- if key not in params_dict:
- params_dict[key] = p.clone()
- # NOTE: clone() is needed in case p is a shared parameter
- else:
- raise ValueError("Key {} is repeated in EMA model params.".format(key))
-
- if len(params_dict) == 0:
- raise ValueError(
- f"Input checkpoint path '{fpath}' does not contain "
- "ema model weights. Was this model trained with EMA?"
- )
-
- new_state['model'] = params_dict
- return new_state
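For reference, a minimal, self-contained sketch of the key renaming that `load_pretrained_component_from_model` performs when extracting one component from a full checkpoint; the dictionary below is illustrative, not taken from any real checkpoint.

```python
from collections import OrderedDict

# Illustrative stand-in for state["model"] of a full fairseq checkpoint.
full_model_state = {
    "encoder.layers.0.self_attn.k_proj.weight": "tensor A",
    "encoder.embed_tokens.weight": "tensor B",
    "decoder.layers.0.self_attn.k_proj.weight": "tensor C",
}

component_type = "encoder"
component_state_dict = OrderedDict(
    # "encoder.layers.0..." -> "layers.0...", matching the slicing in the function above
    (key[len(component_type) + 1:], value)
    for key, value in full_model_state.items()
    if key.startswith(component_type)
)
print(list(component_state_dict))  # ['layers.0.self_attn.k_proj.weight', 'embed_tokens.weight']
```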
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/base_wrapper_dataset.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/base_wrapper_dataset.py
deleted file mode 100644
index 134d398b47dc73c8807759188504aee205b3b34d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/base_wrapper_dataset.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch.utils.data.dataloader import default_collate
-
-from . import FairseqDataset
-
-
-class BaseWrapperDataset(FairseqDataset):
- def __init__(self, dataset):
- super().__init__()
- self.dataset = dataset
-
- def __getitem__(self, index):
- return self.dataset[index]
-
- def __len__(self):
- return len(self.dataset)
-
- def collater(self, samples):
- if hasattr(self.dataset, "collater"):
- return self.dataset.collater(samples)
- else:
- return default_collate(samples)
-
- @property
- def sizes(self):
- return self.dataset.sizes
-
- def num_tokens(self, index):
- return self.dataset.num_tokens(index)
-
- def size(self, index):
- return self.dataset.size(index)
-
- def ordered_indices(self):
- return self.dataset.ordered_indices()
-
- @property
- def supports_prefetch(self):
- return getattr(self.dataset, "supports_prefetch", False)
-
- def attr(self, attr: str, index: int):
- return self.dataset.attr(attr, index)
-
- def prefetch(self, indices):
- self.dataset.prefetch(indices)
-
- def get_batch_shapes(self):
- return self.dataset.get_batch_shapes()
-
- def batch_by_size(
- self,
- indices,
- max_tokens=None,
- max_sentences=None,
- required_batch_size_multiple=1,
- ):
- return self.dataset.batch_by_size(
- indices,
- max_tokens=max_tokens,
- max_sentences=max_sentences,
- required_batch_size_multiple=required_batch_size_multiple,
- )
-
- def filter_indices_by_size(self, indices, max_sizes):
- return self.dataset.filter_indices_by_size(indices, max_sizes)
-
- @property
- def can_reuse_epoch_itr_across_epochs(self):
- return self.dataset.can_reuse_epoch_itr_across_epochs
-
- def set_epoch(self, epoch):
- super().set_epoch(epoch)
- if hasattr(self.dataset, "set_epoch"):
- self.dataset.set_epoch(epoch)
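A hedged sketch of how `BaseWrapperDataset` is typically used: subclass it, let the base class delegate everything to the wrapped dataset, and override only the behaviour that changes. `OffsetDataset` is a hypothetical example for illustration, not part of fairseq.

```python
class OffsetDataset(BaseWrapperDataset):
    """Hypothetical wrapper that adds a constant to every integer item."""

    def __init__(self, dataset, offset):
        super().__init__(dataset)
        self.offset = offset

    def __getitem__(self, index):
        # All other plumbing (sizes, collater, prefetch, set_epoch, ...) is
        # inherited from BaseWrapperDataset and forwarded to self.dataset.
        return self.dataset[index] + self.offset
```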
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py
deleted file mode 100644
index 7c7890f8bec5db44098fe1a38d26eb13231f7063..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import atexit
-import functools
-import logging
-import os
-import sys
-import time
-from collections import Counter
-import torch
-from tabulate import tabulate
-from termcolor import colored
-
-from detectron2.utils.file_io import PathManager
-
-__all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"]
-
-
-class _ColorfulFormatter(logging.Formatter):
- def __init__(self, *args, **kwargs):
- self._root_name = kwargs.pop("root_name") + "."
- self._abbrev_name = kwargs.pop("abbrev_name", "")
- if len(self._abbrev_name):
- self._abbrev_name = self._abbrev_name + "."
- super(_ColorfulFormatter, self).__init__(*args, **kwargs)
-
- def formatMessage(self, record):
- record.name = record.name.replace(self._root_name, self._abbrev_name)
- log = super(_ColorfulFormatter, self).formatMessage(record)
- if record.levelno == logging.WARNING:
- prefix = colored("WARNING", "red", attrs=["blink"])
- elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
- prefix = colored("ERROR", "red", attrs=["blink", "underline"])
- else:
- return log
- return prefix + " " + log
-
-
-@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers
-def setup_logger(
- output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None
-):
- """
- Initialize the detectron2 logger and set its verbosity level to "DEBUG".
-
- Args:
- output (str): a file name or a directory to save log. If None, will not save log file.
- If ends with ".txt" or ".log", assumed to be a file name.
- Otherwise, logs will be saved to `output/log.txt`.
- name (str): the root module name of this logger
- abbrev_name (str): an abbreviation of the module, to avoid long names in logs.
- Set to "" to not log the root module in logs.
- By default, will abbreviate "detectron2" to "d2" and leave other
- modules unchanged.
-
- Returns:
- logging.Logger: a logger
- """
- logger = logging.getLogger(name)
- logger.setLevel(logging.DEBUG)
- logger.propagate = False
-
- if abbrev_name is None:
- abbrev_name = "d2" if name == "detectron2" else name
-
- plain_formatter = logging.Formatter(
- "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
- )
- # stdout logging: master only
- if distributed_rank == 0:
- ch = logging.StreamHandler(stream=sys.stdout)
- ch.setLevel(logging.DEBUG)
- if color:
- formatter = _ColorfulFormatter(
- colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
- datefmt="%m/%d %H:%M:%S",
- root_name=name,
- abbrev_name=str(abbrev_name),
- )
- else:
- formatter = plain_formatter
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
- # file logging: all workers
- if output is not None:
- if output.endswith(".txt") or output.endswith(".log"):
- filename = output
- else:
- filename = os.path.join(output, "log.txt")
- if distributed_rank > 0:
- filename = filename + ".rank{}".format(distributed_rank)
- PathManager.mkdirs(os.path.dirname(filename))
-
- fh = logging.StreamHandler(_cached_log_stream(filename))
- fh.setLevel(logging.DEBUG)
- fh.setFormatter(plain_formatter)
- logger.addHandler(fh)
-
- return logger
-
-
-# cache the opened file object, so that different calls to `setup_logger`
-# with the same file name can safely write to the same file.
-@functools.lru_cache(maxsize=None)
-def _cached_log_stream(filename):
- # use 1K buffer if writing to cloud storage
- io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1)
- atexit.register(io.close)
- return io
-
-
-"""
-Below are some other convenient logging methods.
-They are mainly adopted from
-https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py
-"""
-
-
-def _find_caller():
- """
- Returns:
- str: module name of the caller
- tuple: a hashable key to be used to identify different callers
- """
- frame = sys._getframe(2)
- while frame:
- code = frame.f_code
- if os.path.join("utils", "logger.") not in code.co_filename:
- mod_name = frame.f_globals["__name__"]
- if mod_name == "__main__":
- mod_name = "detectron2"
- return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
- frame = frame.f_back
-
-
-_LOG_COUNTER = Counter()
-_LOG_TIMER = {}
-
-
-def log_first_n(lvl, msg, n=1, *, name=None, key="caller"):
- """
- Log only for the first n times.
-
- Args:
- lvl (int): the logging level
- msg (str):
- n (int):
- name (str): name of the logger to use. Will use the caller's module by default.
- key (str or tuple[str]): the string(s) can be one of "caller" or
- "message", which defines how to identify duplicated logs.
- For example, if called with `n=1, key="caller"`, this function
- will only log the first call from the same caller, regardless of
- the message content.
- If called with `n=1, key="message"`, this function will log the
- same content only once, even if they are called from different places.
- If called with `n=1, key=("caller", "message")`, this function
- will log unless the same caller has already logged the same message.
- """
- if isinstance(key, str):
- key = (key,)
- assert len(key) > 0
-
- caller_module, caller_key = _find_caller()
- hash_key = ()
- if "caller" in key:
- hash_key = hash_key + caller_key
- if "message" in key:
- hash_key = hash_key + (msg,)
-
- _LOG_COUNTER[hash_key] += 1
- if _LOG_COUNTER[hash_key] <= n:
- logging.getLogger(name or caller_module).log(lvl, msg)
-
-
-def log_every_n(lvl, msg, n=1, *, name=None):
- """
- Log once per n times.
-
- Args:
- lvl (int): the logging level
- msg (str):
- n (int):
- name (str): name of the logger to use. Will use the caller's module by default.
- """
- caller_module, key = _find_caller()
- _LOG_COUNTER[key] += 1
- if n == 1 or _LOG_COUNTER[key] % n == 1:
- logging.getLogger(name or caller_module).log(lvl, msg)
-
-
-def log_every_n_seconds(lvl, msg, n=1, *, name=None):
- """
- Log no more than once per n seconds.
-
- Args:
- lvl (int): the logging level
- msg (str):
- n (int):
- name (str): name of the logger to use. Will use the caller's module by default.
- """
- caller_module, key = _find_caller()
- last_logged = _LOG_TIMER.get(key, None)
- current_time = time.time()
- if last_logged is None or current_time - last_logged >= n:
- logging.getLogger(name or caller_module).log(lvl, msg)
- _LOG_TIMER[key] = current_time
-
-
-def create_small_table(small_dict):
- """
- Create a small table using the keys of small_dict as headers. This is only
- suitable for small dictionaries.
-
- Args:
- small_dict (dict): a result dictionary of only a few items.
-
- Returns:
- str: the table as a string.
- """
- keys, values = tuple(zip(*small_dict.items()))
- table = tabulate(
- [values],
- headers=keys,
- tablefmt="pipe",
- floatfmt=".3f",
- stralign="center",
- numalign="center",
- )
- return table
-
-
-def _log_api_usage(identifier: str):
- """
- Internal function used to log the usage of different detectron2 components
- inside facebook's infra.
- """
- torch._C._log_api_usage_once("detectron2." + identifier)
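A short usage sketch for the helpers above, assuming detectron2 is importable in the usual way; the output directory and the messages are placeholders.

```python
import logging

from detectron2.utils.logger import log_every_n_seconds, log_first_n, setup_logger

logger = setup_logger(output="./output", distributed_rank=0, name="detectron2")
logger.info("starting training")

for step in range(1000):
    # Emitted once per calling location, no matter how often the loop runs.
    log_first_n(logging.WARNING, "found image without annotations", n=1)
    # Emitted at most once every 10 seconds from this call site.
    log_every_n_seconds(logging.INFO, "still training, step {}".format(step), n=10)
```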
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/test_scheduler.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/test_scheduler.py
deleted file mode 100644
index 6cccb03f74b594c06add44a134b526e41c2974f0..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/test_scheduler.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import math
-import numpy as np
-from unittest import TestCase
-import torch
-from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
-from torch import nn
-
-from detectron2.solver import LRMultiplier, WarmupParamScheduler
-
-
-class TestScheduler(TestCase):
- def test_warmup_multistep(self):
- p = nn.Parameter(torch.zeros(0))
- opt = torch.optim.SGD([p], lr=5)
-
- multiplier = WarmupParamScheduler(
- MultiStepParamScheduler(
- [1, 0.1, 0.01, 0.001],
- milestones=[10, 15, 20],
- num_updates=30,
- ),
- 0.001,
- 5 / 30,
- )
- sched = LRMultiplier(opt, multiplier, 30)
- # This is an equivalent of:
- # sched = WarmupMultiStepLR(
- # opt, milestones=[10, 15, 20], gamma=0.1, warmup_factor=0.001, warmup_iters=5)
-
- p.sum().backward()
- opt.step()
-
- lrs = [0.005]
- for _ in range(30):
- sched.step()
- lrs.append(opt.param_groups[0]["lr"])
- self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
- self.assertTrue(np.allclose(lrs[5:10], 5.0))
- self.assertTrue(np.allclose(lrs[10:15], 0.5))
- self.assertTrue(np.allclose(lrs[15:20], 0.05))
- self.assertTrue(np.allclose(lrs[20:], 0.005))
-
- def test_warmup_cosine(self):
- p = nn.Parameter(torch.zeros(0))
- opt = torch.optim.SGD([p], lr=5)
- multiplier = WarmupParamScheduler(
- CosineParamScheduler(1, 0),
- 0.001,
- 5 / 30,
- )
- sched = LRMultiplier(opt, multiplier, 30)
-
- p.sum().backward()
- opt.step()
- self.assertEqual(opt.param_groups[0]["lr"], 0.005)
- lrs = [0.005]
-
- for _ in range(30):
- sched.step()
- lrs.append(opt.param_groups[0]["lr"])
- for idx, lr in enumerate(lrs):
- expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30))
- if idx >= 5:
- self.assertAlmostEqual(lr, expected_cosine)
- else:
- self.assertNotAlmostEqual(lr, expected_cosine)
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tools/__init__.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/OptorAI/gen/style.css b/spaces/OptorAI/gen/style.css
deleted file mode 100644
index 57ac874613ad432d3129fa1757249a319a601f3e..0000000000000000000000000000000000000000
--- a/spaces/OptorAI/gen/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
\ No newline at end of file
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/pipelines/formating.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/pipelines/formating.py
deleted file mode 100644
index 97db85f4f9db39fb86ba77ead7d1a8407d810adb..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/datasets/pipelines/formating.py
+++ /dev/null
@@ -1,288 +0,0 @@
-from collections.abc import Sequence
-
-import annotator.uniformer.mmcv as mmcv
-import numpy as np
-import torch
-from annotator.uniformer.mmcv.parallel import DataContainer as DC
-
-from ..builder import PIPELINES
-
-
-def to_tensor(data):
- """Convert objects of various python types to :obj:`torch.Tensor`.
-
- Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
- :class:`Sequence`, :class:`int` and :class:`float`.
-
- Args:
- data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
- be converted.
- """
-
- if isinstance(data, torch.Tensor):
- return data
- elif isinstance(data, np.ndarray):
- return torch.from_numpy(data)
- elif isinstance(data, Sequence) and not mmcv.is_str(data):
- return torch.tensor(data)
- elif isinstance(data, int):
- return torch.LongTensor([data])
- elif isinstance(data, float):
- return torch.FloatTensor([data])
- else:
- raise TypeError(f'type {type(data)} cannot be converted to tensor.')
-
-
-@PIPELINES.register_module()
-class ToTensor(object):
- """Convert some results to :obj:`torch.Tensor` by given keys.
-
- Args:
- keys (Sequence[str]): Keys that need to be converted to Tensor.
- """
-
- def __init__(self, keys):
- self.keys = keys
-
- def __call__(self, results):
- """Call function to convert data in results to :obj:`torch.Tensor`.
-
- Args:
- results (dict): Result dict contains the data to convert.
-
- Returns:
- dict: The result dict contains the data converted
- to :obj:`torch.Tensor`.
- """
-
- for key in self.keys:
- results[key] = to_tensor(results[key])
- return results
-
- def __repr__(self):
- return self.__class__.__name__ + f'(keys={self.keys})'
-
-
-@PIPELINES.register_module()
-class ImageToTensor(object):
- """Convert image to :obj:`torch.Tensor` by given keys.
-
- The dimension order of input image is (H, W, C). The pipeline will convert
- it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
- (1, H, W).
-
- Args:
- keys (Sequence[str]): Key of images to be converted to Tensor.
- """
-
- def __init__(self, keys):
- self.keys = keys
-
- def __call__(self, results):
- """Call function to convert image in results to :obj:`torch.Tensor` and
- transpose the channel order.
-
- Args:
- results (dict): Result dict contains the image data to convert.
-
- Returns:
- dict: The result dict contains the image converted
- to :obj:`torch.Tensor` and transposed to (C, H, W) order.
- """
-
- for key in self.keys:
- img = results[key]
- if len(img.shape) < 3:
- img = np.expand_dims(img, -1)
- results[key] = to_tensor(img.transpose(2, 0, 1))
- return results
-
- def __repr__(self):
- return self.__class__.__name__ + f'(keys={self.keys})'
-
-
-@PIPELINES.register_module()
-class Transpose(object):
- """Transpose some results by given keys.
-
- Args:
- keys (Sequence[str]): Keys of results to be transposed.
- order (Sequence[int]): Order of transpose.
- """
-
- def __init__(self, keys, order):
- self.keys = keys
- self.order = order
-
- def __call__(self, results):
- """Call function to convert image in results to :obj:`torch.Tensor` and
- transpose the channel order.
-
- Args:
- results (dict): Result dict contains the image data to convert.
-
- Returns:
- dict: The result dict contains the image converted
- to :obj:`torch.Tensor` and transposed to (C, H, W) order.
- """
-
- for key in self.keys:
- results[key] = results[key].transpose(self.order)
- return results
-
- def __repr__(self):
- return self.__class__.__name__ + \
- f'(keys={self.keys}, order={self.order})'
-
-
-@PIPELINES.register_module()
-class ToDataContainer(object):
- """Convert results to :obj:`mmcv.DataContainer` by given fields.
-
- Args:
- fields (Sequence[dict]): Each field is a dict like
- ``dict(key='xxx', **kwargs)``. The ``key`` in result will
- be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
- Default: ``(dict(key='img', stack=True),
- dict(key='gt_semantic_seg'))``.
- """
-
- def __init__(self,
- fields=(dict(key='img',
- stack=True), dict(key='gt_semantic_seg'))):
- self.fields = fields
-
- def __call__(self, results):
- """Call function to convert data in results to
- :obj:`mmcv.DataContainer`.
-
- Args:
- results (dict): Result dict contains the data to convert.
-
- Returns:
- dict: The result dict contains the data converted to
- :obj:`mmcv.DataContainer`.
- """
-
- for field in self.fields:
- field = field.copy()
- key = field.pop('key')
- results[key] = DC(results[key], **field)
- return results
-
- def __repr__(self):
- return self.__class__.__name__ + f'(fields={self.fields})'
-
-
-@PIPELINES.register_module()
-class DefaultFormatBundle(object):
- """Default formatting bundle.
-
- It simplifies the pipeline of formatting common fields, including "img"
- and "gt_semantic_seg". These fields are formatted as follows.
-
- - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
- (3)to DataContainer (stack=True)
- """
-
- def __call__(self, results):
- """Call function to transform and format common fields in results.
-
- Args:
- results (dict): Result dict contains the data to convert.
-
- Returns:
- dict: The result dict contains the data that is formatted with
- default bundle.
- """
-
- if 'img' in results:
- img = results['img']
- if len(img.shape) < 3:
- img = np.expand_dims(img, -1)
- img = np.ascontiguousarray(img.transpose(2, 0, 1))
- results['img'] = DC(to_tensor(img), stack=True)
- if 'gt_semantic_seg' in results:
- # convert to long
- results['gt_semantic_seg'] = DC(
- to_tensor(results['gt_semantic_seg'][None,
- ...].astype(np.int64)),
- stack=True)
- return results
-
- def __repr__(self):
- return self.__class__.__name__
-
-
-@PIPELINES.register_module()
-class Collect(object):
- """Collect data from the loader relevant to the specific task.
-
- This is usually the last stage of the data loader pipeline. Typically keys
- is set to some subset of "img", "gt_semantic_seg".
-
- The "img_meta" item is always populated. The contents of the "img_meta"
- dictionary depend on "meta_keys". By default this includes:
-
- - "img_shape": shape of the image input to the network as a tuple
- (h, w, c). Note that images may be zero padded on the bottom/right
- if the batch tensor is larger than this shape.
-
- - "scale_factor": a float indicating the preprocessing scale
-
- - "flip": a boolean indicating if image flip transform was used
-
- - "filename": path to the image file
-
- - "ori_shape": original shape of the image as a tuple (h, w, c)
-
- - "pad_shape": image shape after padding
-
- - "img_norm_cfg": a dict of normalization information:
- - mean - per channel mean subtraction
- - std - per channel std divisor
- - to_rgb - bool indicating if bgr was converted to rgb
-
- Args:
- keys (Sequence[str]): Keys of results to be collected in ``data``.
- meta_keys (Sequence[str], optional): Meta keys to be converted to
- ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
- Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
- 'pad_shape', 'scale_factor', 'flip', 'flip_direction',
- 'img_norm_cfg')``
- """
-
- def __init__(self,
- keys,
- meta_keys=('filename', 'ori_filename', 'ori_shape',
- 'img_shape', 'pad_shape', 'scale_factor', 'flip',
- 'flip_direction', 'img_norm_cfg')):
- self.keys = keys
- self.meta_keys = meta_keys
-
- def __call__(self, results):
- """Call function to collect keys in results. The keys in ``meta_keys``
- will be converted to :obj:mmcv.DataContainer.
-
- Args:
- results (dict): Result dict contains the data to collect.
-
- Returns:
- dict: The result dict contains the following keys
- - keys in``self.keys``
- - ``img_metas``
- """
-
- data = {}
- img_meta = {}
- for key in self.meta_keys:
- img_meta[key] = results[key]
- data['img_metas'] = DC(img_meta, cpu_only=True)
- for key in self.keys:
- data[key] = results[key]
- return data
-
- def __repr__(self):
- return self.__class__.__name__ + \
- f'(keys={self.keys}, meta_keys={self.meta_keys})'
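In mmseg-style configs these transforms usually sit at the end of a pipeline and are instantiated through the `PIPELINES` registry; the following is a hedged configuration sketch in which the key names follow the classes above and the earlier loading/augmentation steps are omitted.

```python
# Tail of a typical training pipeline: bundle tensors, then collect the keys
# the segmentor consumes.
train_pipeline_tail = [
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
```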
diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/train_vtoonify_d.py b/spaces/PKUWilliamYang/VToonify/vtoonify/train_vtoonify_d.py
deleted file mode 100644
index 0c83e02d46097dad72b5e9f8ed239299d9da320a..0000000000000000000000000000000000000000
--- a/spaces/PKUWilliamYang/VToonify/vtoonify/train_vtoonify_d.py
+++ /dev/null
@@ -1,515 +0,0 @@
-import os
-#os.environ['CUDA_VISIBLE_DEVICES'] = "0"
-import argparse
-import math
-import random
-
-import numpy as np
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils import data
-import torch.distributed as dist
-from torchvision import transforms, utils
-from tqdm import tqdm
-from PIL import Image
-from util import *
-
-from model.stylegan import lpips
-from model.stylegan.model import Generator, Downsample
-from model.vtoonify import VToonify, ConditionalDiscriminator
-from model.bisenet.model import BiSeNet
-from model.simple_augment import random_apply_affine
-from model.stylegan.distributed import (
- get_rank,
- synchronize,
- reduce_loss_dict,
- reduce_sum,
- get_world_size,
-)
-
-class TrainOptions():
- def __init__(self):
-
- self.parser = argparse.ArgumentParser(description="Train VToonify-D")
- self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations")
- self.parser.add_argument("--batch", type=int, default=8, help="batch size per gpu")
- self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate")
- self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training")
- self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration")
- self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint")
- self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint")
- self.parser.add_argument("--log_every", type=int, default=200, help="interval of logging intermediate results")
-
- self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss")
- self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse reconstruction loss")
- self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss")
- self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss")
- self.parser.add_argument("--msk_loss", type=float, default=0.0005, help="the weight of attention mask loss")
-
- self.parser.add_argument("--fix_degree", action="store_true", help="use a fixed style degree")
- self.parser.add_argument("--fix_style", action="store_true", help="use a fixed style image")
- self.parser.add_argument("--fix_color", action="store_true", help="use the original color (no color transfer)")
- self.parser.add_argument("--exstyle_path", type=str, default='./checkpoint/cartoon/refined_exstyle_code.npy', help="path of the extrinsic style code")
- self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image")
- self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D")
-
- self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model")
- self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents")
- self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/cartoon/generator.pt', help="path to the stylegan model")
- self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model")
- self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder")
-
- self.parser.add_argument("--name", type=str, default='vtoonify_d_cartoon', help="saved model name")
- self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder")
-
- def parse(self):
- self.opt = self.parser.parse_args()
- if self.opt.encoder_path is None:
- self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt')
- args = vars(self.opt)
- if self.opt.local_rank == 0:
- print('Load options')
- for name, value in sorted(args.items()):
- print('%s: %s' % (str(name), str(value)))
- return self.opt
-
-
-# pretrain E of vtoonify.
-# We train E so that its last-layer feature matches the original 8th-layer input feature of G1.
-# See Model initialization in Sec. 4.2.2 for details.
-def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device):
- pbar = range(args.iter)
-
- if get_rank() == 0:
- pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
-
- recon_loss = torch.tensor(0.0, device=device)
- loss_dict = {}
-
- if args.distributed:
- g_module = generator.module
- else:
- g_module = generator
-
- accum = 0.5 ** (32 / (10 * 1000))
-
- requires_grad(g_module.encoder, True)
-
- for idx in pbar:
- i = idx + args.start_iter
-
- if i > args.iter:
- print("Done!")
- break
-
- # during pretraining, the last 11 layers of DualStyleGAN (for color transfer) are not used,
- # so args.fix_color has no effect and the last 11 elements of weight are unused.
- if args.fix_degree:
- d_s = args.style_degree
- else:
- d_s = 0 if i <= args.iter / 4.0 else np.random.rand(1)[0]
- weight = [d_s] * 18
-
- # sample pre-saved w''=E_s(s)
- if args.fix_style:
- style = styles[args.style_id:args.style_id+1].repeat(args.batch,1,1)
- else:
- style = styles[torch.randint(0, styles.size(0), (args.batch,))]
-
- with torch.no_grad():
- # during pretraining, no geometric transformations are applied.
- noise_sample = torch.randn(args.batch, 512).cuda()
- ws_ = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w
- ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n
- img_gen, _ = g_ema.stylegan()([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0)
- img_gen = torch.clamp(img_gen, -1, 1).detach() # x''
- img_gen512 = down(img_gen.detach())
- img_gen256 = down(img_gen512.detach()) # image part of x''_down
- mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0]
- real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1) # x''_down
- # f_G1^(8)(w', w'', d_s)
- real_feat, real_skip = g_ema.generator([ws_], style, input_is_latent=True, return_feat=True,
- truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight)
-
- real_input = real_input.detach()
- real_feat = real_feat.detach()
- real_skip = real_skip.detach()
-
- # f_E^(last)(x''_down, w'', d_s)
- fake_feat, fake_skip = generator(real_input, style, d_s, return_feat=True)
-
- # L_E in Eq.(8)
- recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip)
-
- loss_dict["emse"] = recon_loss
-
- generator.zero_grad()
- recon_loss.backward()
- g_optim.step()
-
- accumulate(g_ema.encoder, g_module.encoder, accum)
-
- loss_reduced = reduce_loss_dict(loss_dict)
-
- emse_loss_val = loss_reduced["emse"].mean().item()
-
- if get_rank() == 0:
- pbar.set_description(
- (
- f"iter: {i:d}; emse: {emse_loss_val:.3f}"
- )
- )
-
- if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter:
- if (i+1) == args.iter:
- savename = f"checkpoint/%s/pretrain.pt"%(args.name)
- else:
- savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1)
- torch.save(
- {
- #"g": g_module.encoder.state_dict(),
- "g_ema": g_ema.encoder.state_dict(),
- },
- savename,
- )
-
-
-# generate paired data and train VToonify; see Sec. 4.2.2 for details
-def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device):
- pbar = range(args.iter)
-
- if get_rank() == 0:
- pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=130, dynamic_ncols=False)
-
- d_loss = torch.tensor(0.0, device=device)
- g_loss = torch.tensor(0.0, device=device)
- grec_loss = torch.tensor(0.0, device=device)
- gfeat_loss = torch.tensor(0.0, device=device)
- temporal_loss = torch.tensor(0.0, device=device)
- gmask_loss = torch.tensor(0.0, device=device)
- loss_dict = {}
-
- surffix = '_s'
- if args.fix_style:
- surffix += '%03d'%(args.style_id)
- surffix += '_d'
- if args.fix_degree:
- surffix += '%1.1f'%(args.style_degree)
- if not args.fix_color:
- surffix += '_c'
-
- if args.distributed:
- g_module = generator.module
- d_module = discriminator.module
-
- else:
- g_module = generator
- d_module = discriminator
-
- accum = 0.5 ** (32 / (10 * 1000))
-
- for idx in pbar:
- i = idx + args.start_iter
-
- if i > args.iter:
- print("Done!")
- break
-
- # sample style degree
- if args.fix_degree or idx == 0 or i == 0:
- d_s = args.style_degree
- else:
- d_s = np.random.randint(0,6) / 5.0
- if args.fix_color:
- weight = [d_s] * 7 + [0] * 11
- else:
- weight = [d_s] * 7 + [1] * 11
- # style degree condition for discriminator
- degree_label = torch.zeros(args.batch, 1).to(device) + d_s
-
- # style index condition for discriminator
- style_ind = torch.randint(0, styles.size(0), (args.batch,))
- if args.fix_style or idx == 0 or i == 0:
- style_ind = style_ind * 0 + args.style_id
- # sample pre-saved E_s(s)
- style = styles[style_ind]
-
- with torch.no_grad():
- noise_sample = torch.randn(args.batch, 512).cuda()
- wc = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w
- wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n
- wc = wc.detach()
- xc, _ = g_ema.stylegan()([wc], input_is_latent=True, truncation=0.5, truncation_latent=0)
- xc = torch.clamp(xc, -1, 1).detach() # x''
- if not args.fix_color and args.fix_style: # only transfer this fixed style's color
- xl = style.clone()
- else:
- xl = pspencoder(F.adaptive_avg_pool2d(xc, 256))
- xl = g_ema.zplus2wplus(xl) # E_s(x''_down)
- xl = torch.cat((style[:,0:7], xl[:,7:18]), dim=1).detach() # w'' = concatenate E_s(s) and E_s(x''_down)
- xs, _ = g_ema.generator([wc], xl, input_is_latent=True,
- truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight)
- xs = torch.clamp(xs, -1, 1).detach() # y'=G1(w', w'', d_s, d_c)
- # apply color jitter to w'. we fuse w' of the current iteration with w' of the last iteration
- if idx > 0 and i >= (args.iter/2.0) and (not args.fix_color and not args.fix_style):
- wcfuse = wc.clone()
- wcfuse[:,7:] = wc_[:,7:] * (i/(args.iter/2.0)-1) + wcfuse[:,7:] * (2-i/(args.iter/2.0))
- xc, _ = g_ema.stylegan()([wcfuse], input_is_latent=True, truncation=0.5, truncation_latent=0)
- xc = torch.clamp(xc, -1, 1).detach() # x'
- wc_ = wc.clone() # wc_ is the w' in the last iteration
- # during training, random geometric transformations are applied.
- imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None)
- real_input1024 = imgs[:,0:3].detach() # image part of x
- real_input512 = down(real_input1024).detach()
- real_input256 = down(real_input512).detach()
- mask512 = parsingpredictor(2*real_input512)[0]
- mask256 = down(mask512).detach()
- mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x
- real_output = imgs[:,3:].detach() # y
- real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down
- # for log, sample a fixed input-output pair (x_down, y, w'', d_s)
- if idx == 0 or i == 0:
- samplein = real_input.clone().detach()
- sampleout = real_output.clone().detach()
- samplexl = xl.clone().detach()
- sampleds = d_s
-
- ###### This part is for training discriminator
-
- requires_grad(g_module.encoder, False)
- requires_grad(g_module.fusion_out, False)
- requires_grad(g_module.fusion_skip, False)
- requires_grad(discriminator, True)
-
- fake_output = generator(real_input, xl, d_s)
- fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind)
- real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256), degree_label, style_ind)
-
- # L_adv in Eq.(3)
- d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss
- loss_dict["d"] = d_loss
-
- discriminator.zero_grad()
- d_loss.backward()
- d_optim.step()
-
- ###### This part is for training generator (encoder and fusion modules)
-
- requires_grad(g_module.encoder, True)
- requires_grad(g_module.fusion_out, True)
- requires_grad(g_module.fusion_skip, True)
- requires_grad(discriminator, False)
-
- fake_output, m_Es = generator(real_input, xl, d_s, return_mask=True)
- fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind)
-
- # L_adv in Eq.(3)
- g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss
- # L_rec in Eq.(2)
- grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss
- gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 would run out of memory
- F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 would give blurry output
-
- # L_msk in Eq.(9)
- gmask_loss = torch.tensor(0.0, device=device)
- if not args.fix_degree or args.msk_loss > 0:
- for jj, m_E in enumerate(m_Es):
- gd_s = (1 - d_s) ** 2 * 0.9 + 0.1
- gmask_loss += F.relu(torch.mean(m_E)-gd_s) * args.msk_loss
-
- loss_dict["g"] = g_loss
- loss_dict["gr"] = grec_loss
- loss_dict["gf"] = gfeat_loss
- loss_dict["msk"] = gmask_loss
-
- w = random.randint(0,1024-896)
- h = random.randint(0,1024-896)
- crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach()
- crop_input = down(down(crop_input))
- crop_fake_output = fake_output[:,:,w:w+896,h:h+896]
- fake_crop_output = generator(crop_input, xl, d_s)
- # L_tmp in Eq.(4), gradually increase the weight of L_tmp
- temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss
- loss_dict["tp"] = temporal_loss
-
- generator.zero_grad()
- (g_loss + grec_loss + gfeat_loss + temporal_loss + gmask_loss).backward()
- g_optim.step()
-
- accumulate(g_ema.encoder, g_module.encoder, accum)
- accumulate(g_ema.fusion_out, g_module.fusion_out, accum)
- accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum)
-
- loss_reduced = reduce_loss_dict(loss_dict)
-
- d_loss_val = loss_reduced["d"].mean().item()
- g_loss_val = loss_reduced["g"].mean().item()
- gr_loss_val = loss_reduced["gr"].mean().item()
- gf_loss_val = loss_reduced["gf"].mean().item()
- tmp_loss_val = loss_reduced["tp"].mean().item()
- msk_loss_val = loss_reduced["msk"].mean().item()
-
- if get_rank() == 0:
- pbar.set_description(
- (
- f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; "
- f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}; msk: {msk_loss_val:.3f}"
- )
- )
-
- if i == 0 or (i+1) % args.log_every == 0 or (i+1) == args.iter:
- with torch.no_grad():
- g_ema.eval()
- sample1 = g_ema(samplein, samplexl, sampleds)
- if args.fix_degree:
- sample = F.interpolate(torch.cat((sampleout, sample1), dim=0), 256)
- else:
- sample2 = g_ema(samplein, samplexl, d_s)
- sample = F.interpolate(torch.cat((sampleout, sample1, sample2), dim=0), 256)
- utils.save_image(
- sample,
- f"log/%s/%05d.jpg"%(args.name, (i+1)),
- nrow=int(args.batch),
- normalize=True,
- range=(-1, 1),
- )
-
- if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter:
- if (i+1) == args.iter:
- savename = f"checkpoint/%s/vtoonify%s.pt"%(args.name, surffix)
- else:
- savename = f"checkpoint/%s/vtoonify%s_%05d.pt"%(args.name, surffix, i+1)
- torch.save(
- {
- #"g": g_module.state_dict(),
- #"d": d_module.state_dict(),
- "g_ema": g_ema.state_dict(),
- },
- savename,
- )
-
-
-
-if __name__ == "__main__":
-
- device = "cuda"
- parser = TrainOptions()
- args = parser.parse()
- if args.local_rank == 0:
- print('*'*98)
- if not os.path.exists("log/%s/"%(args.name)):
- os.makedirs("log/%s/"%(args.name))
- if not os.path.exists("checkpoint/%s/"%(args.name)):
- os.makedirs("checkpoint/%s/"%(args.name))
-
- n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
- args.distributed = n_gpu > 1
-
- if args.distributed:
- torch.cuda.set_device(args.local_rank)
- torch.distributed.init_process_group(backend="nccl", init_method="env://")
- synchronize()
-
- generator = VToonify(backbone = 'dualstylegan').to(device)
- generator.apply(weights_init)
- g_ema = VToonify(backbone = 'dualstylegan').to(device)
- g_ema.eval()
-
- ckpt = torch.load(args.stylegan_path, map_location=lambda storage, loc: storage)
- generator.generator.load_state_dict(ckpt["g_ema"], strict=False)
- # load ModRes blocks of DualStyleGAN into the modified ModRes blocks (with dilation)
- generator.res.load_state_dict(generator.generator.res.state_dict(), strict=False)
- g_ema.generator.load_state_dict(ckpt["g_ema"], strict=False)
- g_ema.res.load_state_dict(g_ema.generator.res.state_dict(), strict=False)
- requires_grad(generator.generator, False)
- requires_grad(generator.res, False)
- requires_grad(g_ema.generator, False)
- requires_grad(g_ema.res, False)
-
- if not args.pretrain:
- generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"])
- # we initialize the fusion modules to map f_G \otimes f_E to f_G.
- for k in generator.fusion_out:
- k.conv.weight.data *= 0.01
- k.conv.weight[:,0:k.conv.weight.shape[0],1,1].data += torch.eye(k.conv.weight.shape[0]).cuda()
- for k in generator.fusion_skip:
- k.weight.data *= 0.01
- k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda()
-
- accumulate(g_ema.encoder, generator.encoder, 0)
- accumulate(g_ema.fusion_out, generator.fusion_out, 0)
- accumulate(g_ema.fusion_skip, generator.fusion_skip, 0)
-
- g_parameters = list(generator.encoder.parameters())
- if not args.pretrain:
- g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters())
-
- g_optim = optim.Adam(
- g_parameters,
- lr=args.lr,
- betas=(0.9, 0.99),
- )
-
- if args.distributed:
- generator = nn.parallel.DistributedDataParallel(
- generator,
- device_ids=[args.local_rank],
- output_device=args.local_rank,
- broadcast_buffers=False,
- find_unused_parameters=True,
- )
-
- parsingpredictor = BiSeNet(n_classes=19)
- parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage))
- parsingpredictor.to(device).eval()
- requires_grad(parsingpredictor, False)
-
- # we apply a gaussian blur to the images to avoid flickering caused by downsampling
- down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device)
- requires_grad(down, False)
-
- directions = torch.tensor(np.load(args.direction_path)).to(device)
-
- # load style codes of DualStyleGAN
- exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item()
- if args.local_rank == 0 and not os.path.exists('checkpoint/%s/exstyle_code.npy'%(args.name)):
- np.save('checkpoint/%s/exstyle_code.npy'%(args.name), exstyles, allow_pickle=True)
- styles = []
- with torch.no_grad():
- for stylename in exstyles.keys():
- exstyle = torch.tensor(exstyles[stylename]).to(device)
- exstyle = g_ema.zplus2wplus(exstyle)
- styles += [exstyle]
- styles = torch.cat(styles, dim=0)
-
- if not args.pretrain:
- discriminator = ConditionalDiscriminator(256, use_condition=True, style_num = styles.size(0)).to(device)
-
- d_optim = optim.Adam(
- discriminator.parameters(),
- lr=args.lr,
- betas=(0.9, 0.99),
- )
-
- if args.distributed:
- discriminator = nn.parallel.DistributedDataParallel(
- discriminator,
- device_ids=[args.local_rank],
- output_device=args.local_rank,
- broadcast_buffers=False,
- find_unused_parameters=True,
- )
-
- percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank])
- requires_grad(percept.model.net, False)
-
- pspencoder = load_psp_standalone(args.style_encoder_path, device)
-
- if args.local_rank == 0:
- print('Models and data loaded successfully!')
-
- if args.pretrain:
- pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device)
- else:
- train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device)
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-41.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-41.go
deleted file mode 100644
index 569a69df3b61de094570a40904dd1de8aedf160e..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-41.go and /dev/null differ
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/optimizer/builder.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/optimizer/builder.py
deleted file mode 100644
index f9234eed8f1f186d9d8dfda34562157ee39bdb3a..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/optimizer/builder.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import inspect
-
-import torch
-
-from ...utils import Registry, build_from_cfg
-
-OPTIMIZERS = Registry('optimizer')
-OPTIMIZER_BUILDERS = Registry('optimizer builder')
-
-
-def register_torch_optimizers():
- torch_optimizers = []
- for module_name in dir(torch.optim):
- if module_name.startswith('__'):
- continue
- _optim = getattr(torch.optim, module_name)
- if inspect.isclass(_optim) and issubclass(_optim,
- torch.optim.Optimizer):
- OPTIMIZERS.register_module()(_optim)
- torch_optimizers.append(module_name)
- return torch_optimizers
-
-
-TORCH_OPTIMIZERS = register_torch_optimizers()
-
-
-def build_optimizer_constructor(cfg):
- return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
-
-
-def build_optimizer(model, cfg):
- optimizer_cfg = copy.deepcopy(cfg)
- constructor_type = optimizer_cfg.pop('constructor',
- 'DefaultOptimizerConstructor')
- paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
- optim_constructor = build_optimizer_constructor(
- dict(
- type=constructor_type,
- optimizer_cfg=optimizer_cfg,
- paramwise_cfg=paramwise_cfg))
- optimizer = optim_constructor(model)
- return optimizer
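A minimal usage sketch for `build_optimizer` as defined above, assuming the module is importable in the surrounding codebase; any optimizer name exposed by `torch.optim` (registered via `register_torch_optimizers`) can be named in the config dict.

```python
import torch.nn as nn

model = nn.Linear(4, 2)  # toy model just for illustration
cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4)
optimizer = build_optimizer(model, cfg)  # DefaultOptimizerConstructor is used by default
```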
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/__init__.py
deleted file mode 100644
index ac66d3cfe0ea04af45c0f3594bf135841c3812e3..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from .ann_head import ANNHead
-from .apc_head import APCHead
-from .aspp_head import ASPPHead
-from .cc_head import CCHead
-from .da_head import DAHead
-from .dm_head import DMHead
-from .dnl_head import DNLHead
-from .ema_head import EMAHead
-from .enc_head import EncHead
-from .fcn_head import FCNHead
-from .fpn_head import FPNHead
-from .gc_head import GCHead
-from .lraspp_head import LRASPPHead
-from .nl_head import NLHead
-from .ocr_head import OCRHead
-# from .point_head import PointHead
-from .psa_head import PSAHead
-from .psp_head import PSPHead
-from .sep_aspp_head import DepthwiseSeparableASPPHead
-from .sep_fcn_head import DepthwiseSeparableFCNHead
-from .uper_head import UPerHead
-
-__all__ = [
- 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead',
- 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead',
- 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead',
- 'APCHead', 'DMHead', 'LRASPPHead'
-]
diff --git a/spaces/Pravincoder/Loan_Approval_Predictor/README.md b/spaces/Pravincoder/Loan_Approval_Predictor/README.md
deleted file mode 100644
index da4387628c76126f5bf0dee362ccbf8c79ae09ce..0000000000000000000000000000000000000000
--- a/spaces/Pravincoder/Loan_Approval_Predictor/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Loan Approval Predictor
-emoji: 🐨
-colorFrom: pink
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/RMXK/RVC_HFF/infer_uvr5.py b/spaces/RMXK/RVC_HFF/infer_uvr5.py
deleted file mode 100644
index 8c8c05429a1d65dd8b198f16a8ea8c6e68991c07..0000000000000000000000000000000000000000
--- a/spaces/RMXK/RVC_HFF/infer_uvr5.py
+++ /dev/null
@@ -1,363 +0,0 @@
-import os, sys, torch, warnings, pdb
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from json import load as ll
-
-warnings.filterwarnings("ignore")
-import librosa
-import importlib
-import numpy as np
-import hashlib, math
-from tqdm import tqdm
-from lib.uvr5_pack.lib_v5 import spec_utils
-from lib.uvr5_pack.utils import _get_name_params, inference
-from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters
-import soundfile as sf
-from lib.uvr5_pack.lib_v5.nets_new import CascadedNet
-from lib.uvr5_pack.lib_v5 import nets_61968KB as nets
-
-
-class _audio_pre_:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
- model = nets.CascadedASPPNet(mp.param["bins"] * 2)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
- ) = librosa.core.load(
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggresive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggresive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- print("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- print("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-class _audio_pre_new:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v3.json")
- nout = 64 if "DeReverb" in model_path else 48
- model = CascadedNet(mp.param["bins"] * 2, nout)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(
- self, music_file, vocal_root=None, ins_root=None, format="flac"
- ): # for these 3 VR models, the vocal and instrumental outputs are swapped
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
- ) = librosa.core.load(
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggresive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggresive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- print("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- print("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-if __name__ == "__main__":
- device = "cuda"
- is_half = True
- # model_path = "uvr5_weights/2_HP-UVR.pth"
- # model_path = "uvr5_weights/VR-DeEchoDeReverb.pth"
- # model_path = "uvr5_weights/VR-DeEchoNormal.pth"
- model_path = "uvr5_weights/DeEchoNormal.pth"
- # pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10)
- pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=True, agg=10)
- audio_path = "雪雪伴奏对消HP5.wav"
- save_path = "opt"
- pre_fun._path_audio_(audio_path, save_path, save_path)
diff --git a/spaces/RVVY/test01/Dockerfile b/spaces/RVVY/test01/Dockerfile
deleted file mode 100644
index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000
--- a/spaces/RVVY/test01/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM node:18-bullseye-slim
-RUN apt-get update && \
- apt-get install -y git
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-WORKDIR /app
-RUN npm install
-COPY Dockerfile greeting.md* .env* ./
-RUN npm run build
-EXPOSE 7860
-ENV NODE_ENV=production
-CMD [ "npm", "start" ]
diff --git a/spaces/RamAnanth1/T2I-Adapter/gradio_sketch.py b/spaces/RamAnanth1/T2I-Adapter/gradio_sketch.py
deleted file mode 100644
index 64cf265c680259c4cf16496e84bdd3c20f085f2a..0000000000000000000000000000000000000000
--- a/spaces/RamAnanth1/T2I-Adapter/gradio_sketch.py
+++ /dev/null
@@ -1,28 +0,0 @@
-
-import gradio as gr
-
-def create_demo(process):
- block = gr.Blocks().queue()
- with block:
- with gr.Row():
- with gr.Column():
- input_img = gr.Image(source='upload', type="numpy")
- prompt = gr.Textbox(label="Prompt")
- neg_prompt = gr.Textbox(label="Negative Prompt",
- value='ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face')
- with gr.Row():
- type_in = gr.inputs.Radio(['Sketch', 'Image'], type="value", default='Image', label='Input Types\n (You can input an image or a sketch)')
-                    color_back = gr.inputs.Radio(['White', 'Black'], type="value", default='Black', label='Color of the sketch background\n (Only works for sketch input)')
- run_button = gr.Button(label="Run")
- with gr.Accordion("Advanced options", open=False):
-                    con_strength = gr.Slider(label="Controlling Strength (how strongly the sketch guides the result)", minimum=0, maximum=1, value=0.4, step=0.1)
- scale = gr.Slider(label="Guidance Scale (Classifier free guidance)", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
- fix_sample = gr.inputs.Radio(['True', 'False'], type="value", default='False', label='Fix Sampling\n (Fix the random seed)')
- base_model = gr.inputs.Radio(['sd-v1-4.ckpt', 'anything-v4.0-pruned.ckpt'], type="value", default='sd-v1-4.ckpt', label='The base model you want to use')
- with gr.Column():
- result = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
- ips = [input_img, type_in, color_back, prompt, neg_prompt, fix_sample, scale, con_strength, base_model]
- run_button.click(fn=process, inputs=ips, outputs=[result])
-
- return block
-
\ No newline at end of file
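
create_demo above wires its nine inputs (the ips list) into whatever process callable it is given and shows the returned images in a gallery. A minimal sketch of how it might be driven, assuming the module is importable and a Gradio version where the gr.inputs API still exists; the stub process simply echoes the input image instead of running T2I-Adapter:

```python
import numpy as np
from gradio_sketch import create_demo  # hypothetical import path for the module above

def process(input_img, type_in, color_back, prompt, neg_prompt,
            fix_sample, scale, con_strength, base_model):
    # placeholder: return the input image (or a blank one) for the gallery
    img = input_img if input_img is not None else np.zeros((64, 64, 3), dtype=np.uint8)
    return [img]

demo = create_demo(process)
demo.launch()  # create_demo returns the queued gr.Blocks instance
```
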
diff --git a/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/matchers/__init__.py b/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/matchers/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Ricecake123/RVC-demo/lib/uvr5_pack/lib_v5/nets_123821KB.py b/spaces/Ricecake123/RVC-demo/lib/uvr5_pack/lib_v5/nets_123821KB.py
deleted file mode 100644
index becbfae85683a13bbb19d3ea6c840da24e61e01e..0000000000000000000000000000000000000000
--- a/spaces/Ricecake123/RVC-demo/lib/uvr5_pack/lib_v5/nets_123821KB.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from . import layers_123821KB as layers
-
-
-class BaseASPPNet(nn.Module):
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
- super(BaseASPPNet, self).__init__()
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
- def __call__(self, x):
- h, e1 = self.enc1(x)
- h, e2 = self.enc2(h)
- h, e3 = self.enc3(h)
- h, e4 = self.enc4(h)
-
- h = self.aspp(h)
-
- h = self.dec4(h, e4)
- h = self.dec3(h, e3)
- h = self.dec2(h, e2)
- h = self.dec1(h, e1)
-
- return h
-
-
-class CascadedASPPNet(nn.Module):
- def __init__(self, n_fft):
- super(CascadedASPPNet, self).__init__()
- self.stg1_low_band_net = BaseASPPNet(2, 32)
- self.stg1_high_band_net = BaseASPPNet(2, 32)
-
- self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
- self.stg2_full_band_net = BaseASPPNet(16, 32)
-
- self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
- self.stg3_full_band_net = BaseASPPNet(32, 64)
-
- self.out = nn.Conv2d(64, 2, 1, bias=False)
- self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
- self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
-
- self.max_bin = n_fft // 2
- self.output_bin = n_fft // 2 + 1
-
- self.offset = 128
-
- def forward(self, x, aggressiveness=None):
- mix = x.detach()
- x = x.clone()
-
- x = x[:, :, : self.max_bin]
-
- bandw = x.size()[2] // 2
- aux1 = torch.cat(
- [
- self.stg1_low_band_net(x[:, :, :bandw]),
- self.stg1_high_band_net(x[:, :, bandw:]),
- ],
- dim=2,
- )
-
- h = torch.cat([x, aux1], dim=1)
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
- h = torch.cat([x, aux1, aux2], dim=1)
- h = self.stg3_full_band_net(self.stg3_bridge(h))
-
- mask = torch.sigmoid(self.out(h))
- mask = F.pad(
- input=mask,
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
- mode="replicate",
- )
-
- if self.training:
- aux1 = torch.sigmoid(self.aux1_out(aux1))
- aux1 = F.pad(
- input=aux1,
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
- mode="replicate",
- )
- aux2 = torch.sigmoid(self.aux2_out(aux2))
- aux2 = F.pad(
- input=aux2,
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
- mode="replicate",
- )
- return mask * mix, aux1 * mix, aux2 * mix
- else:
- if aggressiveness:
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
- mask[:, :, : aggressiveness["split_bin"]],
- 1 + aggressiveness["value"] / 3,
- )
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
- mask[:, :, aggressiveness["split_bin"] :],
- 1 + aggressiveness["value"],
- )
-
- return mask * mix
-
- def predict(self, x_mag, aggressiveness=None):
- h = self.forward(x_mag, aggressiveness)
-
- if self.offset > 0:
- h = h[:, :, :, self.offset : -self.offset]
- assert h.size()[3] > 0
-
- return h
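
A quick shape check of CascadedASPPNet with an untrained model and a dummy magnitude spectrogram, assuming the lib_v5 package above is importable so the relative import of layers_123821KB resolves; the tensor sizes and aggressiveness values are illustrative, matching the dict the calling code builds:

```python
import torch
from lib.uvr5_pack.lib_v5.nets_123821KB import CascadedASPPNet  # assumed package path

n_fft = 2048
model = CascadedASPPNet(n_fft).eval()

# dummy magnitude spectrogram: (batch, 2 channels, n_fft // 2 + 1 bins, time frames)
x_mag = torch.rand(1, 2, n_fft // 2 + 1, 512)
aggressiveness = {"value": 0.1, "split_bin": 672}

with torch.no_grad():
    masked = model.predict(x_mag, aggressiveness)
print(masked.shape)  # time axis trimmed by self.offset (128) on each side
```
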
diff --git a/spaces/Rishabh055/Movie_recommendation_System/app.py b/spaces/Rishabh055/Movie_recommendation_System/app.py
deleted file mode 100644
index 81d41c8f9e3200e34e7d179da1a185517488371e..0000000000000000000000000000000000000000
--- a/spaces/Rishabh055/Movie_recommendation_System/app.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import streamlit as st
-import pandas as pd
-import pickle
-import requests
-
-def fetch_poster(movie_id):
- url = "https://api.themoviedb.org/3/movie/{}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US".format(movie_id)
- data = requests.get(url)
- data = data.json()
- poster_path = data['poster_path']
- full_path = "https://image.tmdb.org/t/p/w500/" + poster_path
- return full_path
-
-def recommend(movie):
- movie_index = movies[movies['title'] == movie].index[0]
- distances = similarity[movie_index]
- movies_list = sorted(list(enumerate(distances)), reverse =True,key=lambda x: x[1])[1:6]
-
- recommended_movies = []
- recommended_movies_posters = []
- for i in movies_list:
- movie_id = movies.iloc[i[0]].movie_id
-        # movie_id is used to fetch the poster from the TMDB API
- recommended_movies.append(movies.iloc[i[0]]['title'])
- recommended_movies_posters.append(fetch_poster(movie_id))
- return recommended_movies, recommended_movies_posters
-
-
-movies = pickle.load(open('movies.pkl','rb'))
-similarity = pickle.load(open('similarity.pkl', 'rb'))
-st.title('Movie Recommendation App')
-
-selected_movie = st.selectbox(
-'Select a movie to get recommendations',
-movies['title'].values)
-
-if st.button('Get Recommendations'):
- names,posters = recommend(selected_movie)
-
- col1, col2, col3,col4,col5 = st.columns(5)
- with col1:
- st.text(names[0])
- st.image(posters[0])
-
- with col2:
- st.text(names[1])
- st.image(posters[1])
-
- with col3:
- st.text(names[2])
- st.image(posters[2])
-
- with col4:
- st.text(names[3])
- st.image(posters[3])
-
- with col5:
- st.text(names[4])
- st.image(posters[4])
-
-
-hide_streamlit_style = """
-
- """
-st.markdown(hide_streamlit_style, unsafe_allow_html=True)
-footer="""
-
-"""
-st.markdown(footer,unsafe_allow_html=True)
\ No newline at end of file
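
recommend() above sorts every movie by its precomputed similarity to the selected one and keeps the top five, skipping the first entry because the most similar item is the movie itself. A self-contained toy version of that ranking; the random matrix stands in for similarity.pkl:

```python
import numpy as np

similarity = np.random.rand(6, 6)    # toy stand-in for the pickled similarity matrix
np.fill_diagonal(similarity, 1.0)    # each movie is maximally similar to itself
movie_index = 2

distances = similarity[movie_index]
top5 = sorted(enumerate(distances), key=lambda x: x[1], reverse=True)[1:6]
print([idx for idx, _ in top5])      # indices of the five recommended movies
```
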
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py
deleted file mode 100644
index 85aaa2f0600afbdfc8b0917cb5f341740776a603..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py
+++ /dev/null
@@ -1,582 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
- merge_aug_masks, multiclass_nms)
-from ..builder import HEADS, build_head, build_roi_extractor
-from .cascade_roi_head import CascadeRoIHead
-
-
-@HEADS.register_module()
-class SCNetRoIHead(CascadeRoIHead):
- """RoIHead for `SCNet `_.
-
- Args:
- num_stages (int): number of cascade stages.
- stage_loss_weights (list): loss weight of cascade stages.
- semantic_roi_extractor (dict): config to init semantic roi extractor.
- semantic_head (dict): config to init semantic head.
- feat_relay_head (dict): config to init feature_relay_head.
- glbctx_head (dict): config to init global context head.
- """
-
- def __init__(self,
- num_stages,
- stage_loss_weights,
- semantic_roi_extractor=None,
- semantic_head=None,
- feat_relay_head=None,
- glbctx_head=None,
- **kwargs):
- super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights,
- **kwargs)
- assert self.with_bbox and self.with_mask
- assert not self.with_shared_head # shared head is not supported
-
- if semantic_head is not None:
- self.semantic_roi_extractor = build_roi_extractor(
- semantic_roi_extractor)
- self.semantic_head = build_head(semantic_head)
-
- if feat_relay_head is not None:
- self.feat_relay_head = build_head(feat_relay_head)
-
- if glbctx_head is not None:
- self.glbctx_head = build_head(glbctx_head)
-
- def init_mask_head(self, mask_roi_extractor, mask_head):
- """Initialize ``mask_head``"""
- if mask_roi_extractor is not None:
- self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
- self.mask_head = build_head(mask_head)
-
- def init_weights(self, pretrained):
- """Initialize the weights in head.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- for i in range(self.num_stages):
- if self.with_bbox:
- self.bbox_roi_extractor[i].init_weights()
- self.bbox_head[i].init_weights()
- if self.with_mask:
- self.mask_roi_extractor.init_weights()
- self.mask_head.init_weights()
- if self.with_semantic:
- self.semantic_head.init_weights()
- if self.with_glbctx:
- self.glbctx_head.init_weights()
- if self.with_feat_relay:
- self.feat_relay_head.init_weights()
-
- @property
- def with_semantic(self):
- """bool: whether the head has semantic head"""
- return hasattr(self,
- 'semantic_head') and self.semantic_head is not None
-
- @property
- def with_feat_relay(self):
- """bool: whether the head has feature relay head"""
- return (hasattr(self, 'feat_relay_head')
- and self.feat_relay_head is not None)
-
- @property
- def with_glbctx(self):
- """bool: whether the head has global context head"""
- return hasattr(self, 'glbctx_head') and self.glbctx_head is not None
-
- def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
- """Fuse global context feats with roi feats."""
- assert roi_feats.size(0) == rois.size(0)
- img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()
- fused_feats = torch.zeros_like(roi_feats)
- for img_id in img_inds:
- inds = (rois[:, 0] == img_id.item())
- fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
- return fused_feats
-
- def _slice_pos_feats(self, feats, sampling_results):
- """Get features from pos rois."""
- num_rois = [res.bboxes.size(0) for res in sampling_results]
- num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results]
- inds = torch.zeros(sum(num_rois), dtype=torch.bool)
- start = 0
- for i in range(len(num_rois)):
- start = 0 if i == 0 else start + num_rois[i - 1]
- stop = start + num_pos_rois[i]
- inds[start:stop] = 1
- sliced_feats = feats[inds]
- return sliced_feats
-
- def _bbox_forward(self,
- stage,
- x,
- rois,
- semantic_feat=None,
- glbctx_feat=None):
- """Box head forward function used in both training and testing."""
- bbox_roi_extractor = self.bbox_roi_extractor[stage]
- bbox_head = self.bbox_head[stage]
- bbox_feats = bbox_roi_extractor(
- x[:len(bbox_roi_extractor.featmap_strides)], rois)
- if self.with_semantic and semantic_feat is not None:
- bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
- rois)
- if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
- bbox_semantic_feat = F.adaptive_avg_pool2d(
- bbox_semantic_feat, bbox_feats.shape[-2:])
- bbox_feats += bbox_semantic_feat
- if self.with_glbctx and glbctx_feat is not None:
- bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
- cls_score, bbox_pred, relayed_feat = bbox_head(
- bbox_feats, return_shared_feat=True)
-
- bbox_results = dict(
- cls_score=cls_score,
- bbox_pred=bbox_pred,
- relayed_feat=relayed_feat)
- return bbox_results
-
- def _mask_forward(self,
- x,
- rois,
- semantic_feat=None,
- glbctx_feat=None,
- relayed_feat=None):
- """Mask head forward function used in both training and testing."""
- mask_feats = self.mask_roi_extractor(
- x[:self.mask_roi_extractor.num_inputs], rois)
- if self.with_semantic and semantic_feat is not None:
- mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
- rois)
- if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
- mask_semantic_feat = F.adaptive_avg_pool2d(
- mask_semantic_feat, mask_feats.shape[-2:])
- mask_feats += mask_semantic_feat
- if self.with_glbctx and glbctx_feat is not None:
- mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
- if self.with_feat_relay and relayed_feat is not None:
- mask_feats = mask_feats + relayed_feat
- mask_pred = self.mask_head(mask_feats)
- mask_results = dict(mask_pred=mask_pred)
-
- return mask_results
-
- def _bbox_forward_train(self,
- stage,
- x,
- sampling_results,
- gt_bboxes,
- gt_labels,
- rcnn_train_cfg,
- semantic_feat=None,
- glbctx_feat=None):
- """Run forward function and calculate loss for box head in training."""
- bbox_head = self.bbox_head[stage]
- rois = bbox2roi([res.bboxes for res in sampling_results])
- bbox_results = self._bbox_forward(
- stage,
- x,
- rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat)
-
- bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
- gt_labels, rcnn_train_cfg)
- loss_bbox = bbox_head.loss(bbox_results['cls_score'],
- bbox_results['bbox_pred'], rois,
- *bbox_targets)
-
- bbox_results.update(
- loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
- return bbox_results
-
- def _mask_forward_train(self,
- x,
- sampling_results,
- gt_masks,
- rcnn_train_cfg,
- semantic_feat=None,
- glbctx_feat=None,
- relayed_feat=None):
- """Run forward function and calculate loss for mask head in
- training."""
- pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
- mask_results = self._mask_forward(
- x,
- pos_rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat,
- relayed_feat=relayed_feat)
-
- mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
- rcnn_train_cfg)
- pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
- loss_mask = self.mask_head.loss(mask_results['mask_pred'],
- mask_targets, pos_labels)
-
- mask_results = loss_mask
- return mask_results
-
- def forward_train(self,
- x,
- img_metas,
- proposal_list,
- gt_bboxes,
- gt_labels,
- gt_bboxes_ignore=None,
- gt_masks=None,
- gt_semantic_seg=None):
- """
- Args:
- x (list[Tensor]): list of multi-level img features.
-
- img_metas (list[dict]): list of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- `mmdet/datasets/pipelines/formatting.py:Collect`.
-
- proposal_list (list[Tensors]): list of region proposals.
-
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
-
- gt_labels (list[Tensor]): class indices corresponding to each box
-
- gt_bboxes_ignore (None, list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- gt_masks (None, Tensor) : true segmentation masks for each box
- used if the architecture supports a segmentation task.
-
- gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
- used if the architecture supports semantic segmentation task.
-
- Returns:
- dict[str, Tensor]: a dictionary of loss components
- """
- losses = dict()
-
- # semantic segmentation branch
- if self.with_semantic:
- semantic_pred, semantic_feat = self.semantic_head(x)
- loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
- losses['loss_semantic_seg'] = loss_seg
- else:
- semantic_feat = None
-
- # global context branch
- if self.with_glbctx:
- mc_pred, glbctx_feat = self.glbctx_head(x)
- loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
- losses['loss_glbctx'] = loss_glbctx
- else:
- glbctx_feat = None
-
- for i in range(self.num_stages):
- self.current_stage = i
- rcnn_train_cfg = self.train_cfg[i]
- lw = self.stage_loss_weights[i]
-
- # assign gts and sample proposals
- sampling_results = []
- bbox_assigner = self.bbox_assigner[i]
- bbox_sampler = self.bbox_sampler[i]
- num_imgs = len(img_metas)
- if gt_bboxes_ignore is None:
- gt_bboxes_ignore = [None for _ in range(num_imgs)]
-
- for j in range(num_imgs):
- assign_result = bbox_assigner.assign(proposal_list[j],
- gt_bboxes[j],
- gt_bboxes_ignore[j],
- gt_labels[j])
- sampling_result = bbox_sampler.sample(
- assign_result,
- proposal_list[j],
- gt_bboxes[j],
- gt_labels[j],
- feats=[lvl_feat[j][None] for lvl_feat in x])
- sampling_results.append(sampling_result)
-
- bbox_results = \
- self._bbox_forward_train(
- i, x, sampling_results, gt_bboxes, gt_labels,
- rcnn_train_cfg, semantic_feat, glbctx_feat)
- roi_labels = bbox_results['bbox_targets'][0]
-
- for name, value in bbox_results['loss_bbox'].items():
- losses[f's{i}.{name}'] = (
- value * lw if 'loss' in name else value)
-
- # refine boxes
- if i < self.num_stages - 1:
- pos_is_gts = [res.pos_is_gt for res in sampling_results]
- with torch.no_grad():
- proposal_list = self.bbox_head[i].refine_bboxes(
- bbox_results['rois'], roi_labels,
- bbox_results['bbox_pred'], pos_is_gts, img_metas)
-
- if self.with_feat_relay:
- relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],
- sampling_results)
- relayed_feat = self.feat_relay_head(relayed_feat)
- else:
- relayed_feat = None
-
- mask_results = self._mask_forward_train(x, sampling_results, gt_masks,
- rcnn_train_cfg, semantic_feat,
- glbctx_feat, relayed_feat)
- mask_lw = sum(self.stage_loss_weights)
- losses['loss_mask'] = mask_lw * mask_results['loss_mask']
-
- return losses
-
- def simple_test(self, x, proposal_list, img_metas, rescale=False):
- """Test without augmentation."""
- if self.with_semantic:
- _, semantic_feat = self.semantic_head(x)
- else:
- semantic_feat = None
-
- if self.with_glbctx:
- mc_pred, glbctx_feat = self.glbctx_head(x)
- else:
- glbctx_feat = None
-
- num_imgs = len(proposal_list)
- img_shapes = tuple(meta['img_shape'] for meta in img_metas)
- ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
-
- # "ms" in variable names means multi-stage
- ms_scores = []
- rcnn_test_cfg = self.test_cfg
-
- rois = bbox2roi(proposal_list)
- for i in range(self.num_stages):
- bbox_head = self.bbox_head[i]
- bbox_results = self._bbox_forward(
- i,
- x,
- rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat)
- # split batch bbox prediction back to each image
- cls_score = bbox_results['cls_score']
- bbox_pred = bbox_results['bbox_pred']
- num_proposals_per_img = tuple(len(p) for p in proposal_list)
- rois = rois.split(num_proposals_per_img, 0)
- cls_score = cls_score.split(num_proposals_per_img, 0)
- bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
- ms_scores.append(cls_score)
-
- if i < self.num_stages - 1:
- bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
- rois = torch.cat([
- bbox_head.regress_by_class(rois[i], bbox_label[i],
- bbox_pred[i], img_metas[i])
- for i in range(num_imgs)
- ])
-
- # average scores of each image by stages
- cls_score = [
- sum([score[i] for score in ms_scores]) / float(len(ms_scores))
- for i in range(num_imgs)
- ]
-
- # apply bbox post-processing to each image individually
- det_bboxes = []
- det_labels = []
- for i in range(num_imgs):
- det_bbox, det_label = self.bbox_head[-1].get_bboxes(
- rois[i],
- cls_score[i],
- bbox_pred[i],
- img_shapes[i],
- scale_factors[i],
- rescale=rescale,
- cfg=rcnn_test_cfg)
- det_bboxes.append(det_bbox)
- det_labels.append(det_label)
- det_bbox_results = [
- bbox2result(det_bboxes[i], det_labels[i],
- self.bbox_head[-1].num_classes)
- for i in range(num_imgs)
- ]
-
- if self.with_mask:
- if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
- mask_classes = self.mask_head.num_classes
- det_segm_results = [[[] for _ in range(mask_classes)]
- for _ in range(num_imgs)]
- else:
- if rescale and not isinstance(scale_factors[0], float):
- scale_factors = [
- torch.from_numpy(scale_factor).to(det_bboxes[0].device)
- for scale_factor in scale_factors
- ]
- _bboxes = [
- det_bboxes[i][:, :4] *
- scale_factors[i] if rescale else det_bboxes[i]
- for i in range(num_imgs)
- ]
- mask_rois = bbox2roi(_bboxes)
-
- # get relay feature on mask_rois
- bbox_results = self._bbox_forward(
- -1,
- x,
- mask_rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat)
- relayed_feat = bbox_results['relayed_feat']
- relayed_feat = self.feat_relay_head(relayed_feat)
-
- mask_results = self._mask_forward(
- x,
- mask_rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat,
- relayed_feat=relayed_feat)
- mask_pred = mask_results['mask_pred']
-
- # split batch mask prediction back to each image
- num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
- mask_preds = mask_pred.split(num_bbox_per_img, 0)
-
- # apply mask post-processing to each image individually
- det_segm_results = []
- for i in range(num_imgs):
- if det_bboxes[i].shape[0] == 0:
- det_segm_results.append(
- [[] for _ in range(self.mask_head.num_classes)])
- else:
- segm_result = self.mask_head.get_seg_masks(
- mask_preds[i], _bboxes[i], det_labels[i],
- self.test_cfg, ori_shapes[i], scale_factors[i],
- rescale)
- det_segm_results.append(segm_result)
-
- # return results
- if self.with_mask:
- return list(zip(det_bbox_results, det_segm_results))
- else:
- return det_bbox_results
-
- def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
- if self.with_semantic:
- semantic_feats = [
- self.semantic_head(feat)[1] for feat in img_feats
- ]
- else:
- semantic_feats = [None] * len(img_metas)
-
- if self.with_glbctx:
- glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats]
- else:
- glbctx_feats = [None] * len(img_metas)
-
- rcnn_test_cfg = self.test_cfg
- aug_bboxes = []
- aug_scores = []
- for x, img_meta, semantic_feat, glbctx_feat in zip(
- img_feats, img_metas, semantic_feats, glbctx_feats):
- # only one image in the batch
- img_shape = img_meta[0]['img_shape']
- scale_factor = img_meta[0]['scale_factor']
- flip = img_meta[0]['flip']
-
- proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
- scale_factor, flip)
- # "ms" in variable names means multi-stage
- ms_scores = []
-
- rois = bbox2roi([proposals])
- for i in range(self.num_stages):
- bbox_head = self.bbox_head[i]
- bbox_results = self._bbox_forward(
- i,
- x,
- rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat)
- ms_scores.append(bbox_results['cls_score'])
- if i < self.num_stages - 1:
- bbox_label = bbox_results['cls_score'].argmax(dim=1)
- rois = bbox_head.regress_by_class(
- rois, bbox_label, bbox_results['bbox_pred'],
- img_meta[0])
-
- cls_score = sum(ms_scores) / float(len(ms_scores))
- bboxes, scores = self.bbox_head[-1].get_bboxes(
- rois,
- cls_score,
- bbox_results['bbox_pred'],
- img_shape,
- scale_factor,
- rescale=False,
- cfg=None)
- aug_bboxes.append(bboxes)
- aug_scores.append(scores)
-
- # after merging, bboxes will be rescaled to the original image size
- merged_bboxes, merged_scores = merge_aug_bboxes(
- aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
- det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
- rcnn_test_cfg.score_thr,
- rcnn_test_cfg.nms,
- rcnn_test_cfg.max_per_img)
-
- det_bbox_results = bbox2result(det_bboxes, det_labels,
- self.bbox_head[-1].num_classes)
-
- if self.with_mask:
- if det_bboxes.shape[0] == 0:
- det_segm_results = [[]
- for _ in range(self.mask_head.num_classes)]
- else:
- aug_masks = []
- for x, img_meta, semantic_feat, glbctx_feat in zip(
- img_feats, img_metas, semantic_feats, glbctx_feats):
- img_shape = img_meta[0]['img_shape']
- scale_factor = img_meta[0]['scale_factor']
- flip = img_meta[0]['flip']
- _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
- scale_factor, flip)
- mask_rois = bbox2roi([_bboxes])
- # get relay feature on mask_rois
- bbox_results = self._bbox_forward(
- -1,
- x,
- mask_rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat)
- relayed_feat = bbox_results['relayed_feat']
- relayed_feat = self.feat_relay_head(relayed_feat)
- mask_results = self._mask_forward(
- x,
- mask_rois,
- semantic_feat=semantic_feat,
- glbctx_feat=glbctx_feat,
- relayed_feat=relayed_feat)
- mask_pred = mask_results['mask_pred']
- aug_masks.append(mask_pred.sigmoid().cpu().numpy())
- merged_masks = merge_aug_masks(aug_masks, img_metas,
- self.test_cfg)
- ori_shape = img_metas[0][0]['ori_shape']
- det_segm_results = self.mask_head.get_seg_masks(
- merged_masks,
- det_bboxes,
- det_labels,
- rcnn_test_cfg,
- ori_shape,
- scale_factor=1.0,
- rescale=False)
- return [(det_bbox_results, det_segm_results)]
- else:
- return [det_bbox_results]
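
_fuse_glbctx above adds one global-context vector per image to every RoI feature that belongs to that image, using the batch index stored in the first column of rois. A toy illustration of that indexing pattern; real RoI features are 4-D (N, C, H, W), so the 2-D shapes here are purely for readability:

```python
import torch

roi_feats = torch.randn(5, 8)                        # 5 RoIs, 8-dim features (toy)
glbctx_feat = torch.randn(2, 8)                      # one context vector per image
rois = torch.tensor([[0.], [0.], [1.], [1.], [1.]])  # first column = image index

fused = torch.zeros_like(roi_feats)
for img_id in torch.unique(rois[:, 0]).long():
    inds = rois[:, 0] == img_id.item()
    fused[inds] = roi_feats[inds] + glbctx_feat[img_id]
```
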
diff --git a/spaces/Rongjiehuang/ProDiff/data_gen/tts/bin/binarize.py b/spaces/Rongjiehuang/ProDiff/data_gen/tts/bin/binarize.py
deleted file mode 100644
index 4bd3c1f69fa59ed52fdd32eb80e746dedbae7535..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/ProDiff/data_gen/tts/bin/binarize.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-
-os.environ["OMP_NUM_THREADS"] = "1"
-
-import importlib
-from utils.hparams import set_hparams, hparams
-
-
-def binarize():
- binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
- pkg = ".".join(binarizer_cls.split(".")[:-1])
- cls_name = binarizer_cls.split(".")[-1]
- binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
- print("| Binarizer: ", binarizer_cls)
- binarizer_cls().process()
-
-
-if __name__ == '__main__':
- set_hparams()
- binarize()
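
binarize() above resolves the binarizer class from a dotted-path string stored in hparams via importlib. The same pattern in isolation, using a standard-library class so the sketch runs anywhere; the helper name is mine:

```python
import importlib

def load_class(dotted_path: str):
    # "pkg.module.ClassName" -> import pkg.module, then fetch ClassName from it,
    # mirroring the string handling in binarize() above
    module_path, cls_name = dotted_path.rsplit(".", 1)
    return getattr(importlib.import_module(module_path), cls_name)

print(load_class("collections.OrderedDict")())  # -> OrderedDict()
```
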
diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/networks/mat.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/networks/mat.py
deleted file mode 100644
index c640dc45ed5df64ae0eaa5d1f277618ff3791d6b..0000000000000000000000000000000000000000
--- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/networks/mat.py
+++ /dev/null
@@ -1,996 +0,0 @@
-import numpy as np
-import math
-import sys
-sys.path.insert(0, '../')
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-from torch_utils import misc
-from torch_utils import persistence
-from networks.basic_module import FullyConnectedLayer, Conv2dLayer, MappingNet, MinibatchStdLayer, DisFromRGB, DisBlock, StyleConv, ToRGB, get_style_code
-
-
-@misc.profiled_function
-def nf(stage, channel_base=32768, channel_decay=1.0, channel_max=512):
- NF = {512: 64, 256: 128, 128: 256, 64: 512, 32: 512, 16: 512, 8: 512, 4: 512}
- return NF[2 ** stage]
-
-
-@persistence.persistent_class
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = FullyConnectedLayer(in_features=in_features, out_features=hidden_features, activation='lrelu')
- self.fc2 = FullyConnectedLayer(in_features=hidden_features, out_features=out_features)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.fc2(x)
- return x
-
-
-@misc.profiled_function
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-@misc.profiled_function
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
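
window_partition and window_reverse above are exact inverses whenever H and W are multiples of the window size, which is what lets the blocks further down flatten windows for attention and then stitch them back together. A quick round-trip check, assuming networks.mat is importable from this repository; the tensor sizes are arbitrary:

```python
import torch
from networks.mat import window_partition, window_reverse  # assumed repo layout

x = torch.randn(2, 8, 8, 4)           # (B, H, W, C) with H, W divisible by 4
windows = window_partition(x, 4)      # -> (B * num_windows, 4, 4, C)
assert torch.equal(window_reverse(windows, 4, 8, 8), x)
```
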
-
-
-@persistence.persistent_class
-class Conv2dLayerPartial(nn.Module):
- def __init__(self,
- in_channels, # Number of input channels.
- out_channels, # Number of output channels.
- kernel_size, # Width and height of the convolution kernel.
- bias = True, # Apply additive bias before the activation function?
- activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
- up = 1, # Integer upsampling factor.
- down = 1, # Integer downsampling factor.
- resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
- conv_clamp = None, # Clamp the output to +-X, None = disable clamping.
- trainable = True, # Update the weights of this layer during training?
- ):
- super().__init__()
- self.conv = Conv2dLayer(in_channels, out_channels, kernel_size, bias, activation, up, down, resample_filter,
- conv_clamp, trainable)
-
- self.weight_maskUpdater = torch.ones(1, 1, kernel_size, kernel_size)
- self.slide_winsize = kernel_size ** 2
- self.stride = down
- self.padding = kernel_size // 2 if kernel_size % 2 == 1 else 0
-
- def forward(self, x, mask=None):
- if mask is not None:
- with torch.no_grad():
- if self.weight_maskUpdater.type() != x.type():
- self.weight_maskUpdater = self.weight_maskUpdater.to(x)
- update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding)
- mask_ratio = self.slide_winsize / (update_mask + 1e-8)
- update_mask = torch.clamp(update_mask, 0, 1) # 0 or 1
- mask_ratio = torch.mul(mask_ratio, update_mask)
- x = self.conv(x)
- x = torch.mul(x, mask_ratio)
- return x, update_mask
- else:
- x = self.conv(x)
- return x, None
-
-
-@persistence.persistent_class
-class WindowAttention(nn.Module):
- r""" Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- """
-
- def __init__(self, dim, window_size, num_heads, down_ratio=1, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- self.q = FullyConnectedLayer(in_features=dim, out_features=dim)
- self.k = FullyConnectedLayer(in_features=dim, out_features=dim)
- self.v = FullyConnectedLayer(in_features=dim, out_features=dim)
- self.proj = FullyConnectedLayer(in_features=dim, out_features=dim)
-
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask_windows=None, mask=None):
- """
- Args:
- x: input features with shape of (num_windows*B, N, C)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- B_, N, C = x.shape
- norm_x = F.normalize(x, p=2.0, dim=-1)
- q = self.q(norm_x).reshape(B_, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
- k = self.k(norm_x).view(B_, -1, self.num_heads, C // self.num_heads).permute(0, 2, 3, 1)
- v = self.v(x).view(B_, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
-
- attn = (q @ k) * self.scale
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
-
- if mask_windows is not None:
- attn_mask_windows = mask_windows.squeeze(-1).unsqueeze(1).unsqueeze(1)
- attn = attn + attn_mask_windows.masked_fill(attn_mask_windows == 0, float(-100.0)).masked_fill(
- attn_mask_windows == 1, float(0.0))
- with torch.no_grad():
- mask_windows = torch.clamp(torch.sum(mask_windows, dim=1, keepdim=True), 0, 1).repeat(1, N, 1)
-
- attn = self.softmax(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- return x, mask_windows
-
-
-@persistence.persistent_class
-class SwinTransformerBlock(nn.Module):
- r""" Swin Transformer Block.
- Args:
- dim (int): Number of input channels.
-        input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, dim, input_resolution, num_heads, down_ratio=1, window_size=7, shift_size=0,
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
- act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- if min(self.input_resolution) <= self.window_size:
- # if window size is larger than input resolution, we don't partition windows
- self.shift_size = 0
- self.window_size = min(self.input_resolution)
-        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
-
- if self.shift_size > 0:
- down_ratio = 1
- self.attn = WindowAttention(dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
- down_ratio=down_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
- proj_drop=drop)
-
- self.fuse = FullyConnectedLayer(in_features=dim * 2, out_features=dim, activation='lrelu')
-
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- if self.shift_size > 0:
- attn_mask = self.calculate_mask(self.input_resolution)
- else:
- attn_mask = None
-
- self.register_buffer("attn_mask", attn_mask)
-
- def calculate_mask(self, x_size):
- # calculate attention mask for SW-MSA
- H, W = x_size
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
- h_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- w_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x, x_size, mask=None):
- # H, W = self.input_resolution
- H, W = x_size
- B, L, C = x.shape
- assert L == H * W, "input feature has wrong size"
-
- shortcut = x
- x = x.view(B, H, W, C)
- if mask is not None:
- mask = mask.view(B, H, W, 1)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- if mask is not None:
- shifted_mask = torch.roll(mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
- if mask is not None:
- shifted_mask = mask
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
- if mask is not None:
- mask_windows = window_partition(shifted_mask, self.window_size)
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size, 1)
- else:
- mask_windows = None
-
-        # W-MSA/SW-MSA (recompute the attention mask when the runtime resolution differs from input_resolution)
- if self.input_resolution == x_size:
- attn_windows, mask_windows = self.attn(x_windows, mask_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
- else:
- attn_windows, mask_windows = self.attn(x_windows, mask_windows, mask=self.calculate_mask(x_size).to(x.device)) # nW*B, window_size*window_size, C
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
- if mask is not None:
- mask_windows = mask_windows.view(-1, self.window_size, self.window_size, 1)
- shifted_mask = window_reverse(mask_windows, self.window_size, H, W)
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- if mask is not None:
- mask = torch.roll(shifted_mask, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- if mask is not None:
- mask = shifted_mask
- x = x.view(B, H * W, C)
- if mask is not None:
- mask = mask.view(B, H * W, 1)
-
- # FFN
- x = self.fuse(torch.cat([shortcut, x], dim=-1))
- x = self.mlp(x)
-
- return x, mask
-
-
-@persistence.persistent_class
-class PatchMerging(nn.Module):
- def __init__(self, in_channels, out_channels, down=2):
- super().__init__()
- self.conv = Conv2dLayerPartial(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=3,
- activation='lrelu',
- down=down,
- )
- self.down = down
-
- def forward(self, x, x_size, mask=None):
- x = token2feature(x, x_size)
- if mask is not None:
- mask = token2feature(mask, x_size)
- x, mask = self.conv(x, mask)
- if self.down != 1:
- ratio = 1 / self.down
- x_size = (int(x_size[0] * ratio), int(x_size[1] * ratio))
- x = feature2token(x)
- if mask is not None:
- mask = feature2token(mask)
- return x, x_size, mask
-
-
-@persistence.persistent_class
-class PatchUpsampling(nn.Module):
- def __init__(self, in_channels, out_channels, up=2):
- super().__init__()
- self.conv = Conv2dLayerPartial(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=3,
- activation='lrelu',
- up=up,
- )
- self.up = up
-
- def forward(self, x, x_size, mask=None):
- x = token2feature(x, x_size)
- if mask is not None:
- mask = token2feature(mask, x_size)
- x, mask = self.conv(x, mask)
- if self.up != 1:
- x_size = (int(x_size[0] * self.up), int(x_size[1] * self.up))
- x = feature2token(x)
- if mask is not None:
- mask = feature2token(mask)
- return x, x_size, mask
-
-
-
-@persistence.persistent_class
-class BasicLayer(nn.Module):
- """ A basic Swin Transformer layer for one stage.
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- """
-
- def __init__(self, dim, input_resolution, depth, num_heads, window_size, down_ratio=1,
- mlp_ratio=2., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
-
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # patch merging layer
- if downsample is not None:
- # self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
- self.downsample = downsample
- else:
- self.downsample = None
-
- # build blocks
- self.blocks = nn.ModuleList([
- SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
- num_heads=num_heads, down_ratio=down_ratio, window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop, attn_drop=attn_drop,
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
- norm_layer=norm_layer)
- for i in range(depth)])
-
- self.conv = Conv2dLayerPartial(in_channels=dim, out_channels=dim, kernel_size=3, activation='lrelu')
-
- def forward(self, x, x_size, mask=None):
- if self.downsample is not None:
- x, x_size, mask = self.downsample(x, x_size, mask)
- identity = x
- for blk in self.blocks:
- if self.use_checkpoint:
- x, mask = checkpoint.checkpoint(blk, x, x_size, mask)
- else:
- x, mask = blk(x, x_size, mask)
- if mask is not None:
- mask = token2feature(mask, x_size)
- x, mask = self.conv(token2feature(x, x_size), mask)
- x = feature2token(x) + identity
- if mask is not None:
- mask = feature2token(mask)
- return x, x_size, mask
-
-
-@persistence.persistent_class
-class ToToken(nn.Module):
- def __init__(self, in_channels=3, dim=128, kernel_size=5, stride=1):
- super().__init__()
-
- self.proj = Conv2dLayerPartial(in_channels=in_channels, out_channels=dim, kernel_size=kernel_size, activation='lrelu')
-
- def forward(self, x, mask):
- x, mask = self.proj(x, mask)
-
- return x, mask
-
-#----------------------------------------------------------------------------
-
-@persistence.persistent_class
-class EncFromRGB(nn.Module):
- def __init__(self, in_channels, out_channels, activation): # res = 2, ..., resolution_log2
- super().__init__()
- self.conv0 = Conv2dLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=1,
- activation=activation,
- )
- self.conv1 = Conv2dLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- activation=activation,
- )
-
- def forward(self, x):
- x = self.conv0(x)
- x = self.conv1(x)
-
- return x
-
-@persistence.persistent_class
-class ConvBlockDown(nn.Module):
- def __init__(self, in_channels, out_channels, activation): # res = 2, ..., resolution_log
- super().__init__()
-
- self.conv0 = Conv2dLayer(in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=3,
- activation=activation,
- down=2,
- )
- self.conv1 = Conv2dLayer(in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=3,
- activation=activation,
- )
-
- def forward(self, x):
- x = self.conv0(x)
- x = self.conv1(x)
-
- return x
-
-
-def token2feature(x, x_size):
- B, N, C = x.shape
- h, w = x_size
- x = x.permute(0, 2, 1).reshape(B, C, h, w)
- return x
-
-
-def feature2token(x):
- B, C, H, W = x.shape
- x = x.view(B, C, -1).transpose(1, 2)
- return x
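
token2feature and feature2token just convert between the transformer's (B, N, C) token layout and the convolutional (B, C, H, W) layout, and they round-trip exactly when the sizes match. A quick check under the same import assumption as above:

```python
import torch
from networks.mat import token2feature, feature2token  # assumed repo layout

feat = torch.randn(2, 16, 8, 8)        # (B, C, H, W)
tokens = feature2token(feat)           # -> (B, H * W, C) = (2, 64, 16)
assert torch.equal(token2feature(tokens, (8, 8)), feat)
```
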
-
-
-@persistence.persistent_class
-class Encoder(nn.Module):
- def __init__(self, res_log2, img_channels, activation, patch_size=5, channels=16, drop_path_rate=0.1):
- super().__init__()
-
- self.resolution = []
-
- for idx, i in enumerate(range(res_log2, 3, -1)): # from input size to 16x16
- res = 2 ** i
- self.resolution.append(res)
- if i == res_log2:
- block = EncFromRGB(img_channels * 2 + 1, nf(i), activation)
- else:
- block = ConvBlockDown(nf(i+1), nf(i), activation)
- setattr(self, 'EncConv_Block_%dx%d' % (res, res), block)
-
- def forward(self, x):
- out = {}
- for res in self.resolution:
- res_log2 = int(np.log2(res))
- x = getattr(self, 'EncConv_Block_%dx%d' % (res, res))(x)
- out[res_log2] = x
-
- return out
-
-
-@persistence.persistent_class
-class ToStyle(nn.Module):
- def __init__(self, in_channels, out_channels, activation, drop_rate):
- super().__init__()
- self.conv = nn.Sequential(
- Conv2dLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, activation=activation, down=2),
- Conv2dLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, activation=activation, down=2),
- Conv2dLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, activation=activation, down=2),
- )
-
- self.pool = nn.AdaptiveAvgPool2d(1)
- self.fc = FullyConnectedLayer(in_features=in_channels,
- out_features=out_channels,
- activation=activation)
- # self.dropout = nn.Dropout(drop_rate)
-
- def forward(self, x):
- x = self.conv(x)
- x = self.pool(x)
- x = self.fc(x.flatten(start_dim=1))
- # x = self.dropout(x)
-
- return x
-
-
-@persistence.persistent_class
-class DecBlockFirstV2(nn.Module):
- def __init__(self, res, in_channels, out_channels, activation, style_dim, use_noise, demodulate, img_channels):
- super().__init__()
- self.res = res
-
- self.conv0 = Conv2dLayer(in_channels=in_channels,
- out_channels=in_channels,
- kernel_size=3,
- activation=activation,
- )
- self.conv1 = StyleConv(in_channels=in_channels,
- out_channels=out_channels,
- style_dim=style_dim,
- resolution=2**res,
- kernel_size=3,
- use_noise=use_noise,
- activation=activation,
- demodulate=demodulate,
- )
- self.toRGB = ToRGB(in_channels=out_channels,
- out_channels=img_channels,
- style_dim=style_dim,
- kernel_size=1,
- demodulate=False,
- )
-
- def forward(self, x, ws, gs, E_features, noise_mode='random'):
- # x = self.fc(x).view(x.shape[0], -1, 4, 4)
- x = self.conv0(x)
- x = x + E_features[self.res]
- style = get_style_code(ws[:, 0], gs)
- x = self.conv1(x, style, noise_mode=noise_mode)
- style = get_style_code(ws[:, 1], gs)
- img = self.toRGB(x, style, skip=None)
-
- return x, img
-
-#----------------------------------------------------------------------------
-
-@persistence.persistent_class
-class DecBlock(nn.Module):
- def __init__(self, res, in_channels, out_channels, activation, style_dim, use_noise, demodulate, img_channels): # res = 4, ..., resolution_log2
- super().__init__()
- self.res = res
-
- self.conv0 = StyleConv(in_channels=in_channels,
- out_channels=out_channels,
- style_dim=style_dim,
- resolution=2**res,
- kernel_size=3,
- up=2,
- use_noise=use_noise,
- activation=activation,
- demodulate=demodulate,
- )
- self.conv1 = StyleConv(in_channels=out_channels,
- out_channels=out_channels,
- style_dim=style_dim,
- resolution=2**res,
- kernel_size=3,
- use_noise=use_noise,
- activation=activation,
- demodulate=demodulate,
- )
- self.toRGB = ToRGB(in_channels=out_channels,
- out_channels=img_channels,
- style_dim=style_dim,
- kernel_size=1,
- demodulate=False,
- )
-
- def forward(self, x, img, ws, gs, E_features, noise_mode='random'):
- style = get_style_code(ws[:, self.res * 2 - 9], gs)
- x = self.conv0(x, style, noise_mode=noise_mode)
- x = x + E_features[self.res]
- style = get_style_code(ws[:, self.res * 2 - 8], gs)
- x = self.conv1(x, style, noise_mode=noise_mode)
- style = get_style_code(ws[:, self.res * 2 - 7], gs)
- img = self.toRGB(x, style, skip=img)
-
- return x, img
-
-
-@persistence.persistent_class
-class Decoder(nn.Module):
- def __init__(self, res_log2, activation, style_dim, use_noise, demodulate, img_channels):
- super().__init__()
- self.Dec_16x16 = DecBlockFirstV2(4, nf(4), nf(4), activation, style_dim, use_noise, demodulate, img_channels)
- for res in range(5, res_log2 + 1):
- setattr(self, 'Dec_%dx%d' % (2 ** res, 2 ** res),
- DecBlock(res, nf(res - 1), nf(res), activation, style_dim, use_noise, demodulate, img_channels))
- self.res_log2 = res_log2
-
- def forward(self, x, ws, gs, E_features, noise_mode='random'):
- x, img = self.Dec_16x16(x, ws, gs, E_features, noise_mode=noise_mode)
- for res in range(5, self.res_log2 + 1):
- block = getattr(self, 'Dec_%dx%d' % (2 ** res, 2 ** res))
- x, img = block(x, img, ws, gs, E_features, noise_mode=noise_mode)
-
- return img
-
-
-@persistence.persistent_class
-class DecStyleBlock(nn.Module):
- def __init__(self, res, in_channels, out_channels, activation, style_dim, use_noise, demodulate, img_channels):
- super().__init__()
- self.res = res
-
- self.conv0 = StyleConv(in_channels=in_channels,
- out_channels=out_channels,
- style_dim=style_dim,
- resolution=2**res,
- kernel_size=3,
- up=2,
- use_noise=use_noise,
- activation=activation,
- demodulate=demodulate,
- )
- self.conv1 = StyleConv(in_channels=out_channels,
- out_channels=out_channels,
- style_dim=style_dim,
- resolution=2**res,
- kernel_size=3,
- use_noise=use_noise,
- activation=activation,
- demodulate=demodulate,
- )
- self.toRGB = ToRGB(in_channels=out_channels,
- out_channels=img_channels,
- style_dim=style_dim,
- kernel_size=1,
- demodulate=False,
- )
-
- def forward(self, x, img, style, skip, noise_mode='random'):
- x = self.conv0(x, style, noise_mode=noise_mode)
- x = x + skip
- x = self.conv1(x, style, noise_mode=noise_mode)
- img = self.toRGB(x, style, skip=img)
-
- return x, img
-
-
-@persistence.persistent_class
-class FirstStage(nn.Module):
- def __init__(self, img_channels, img_resolution=256, dim=180, w_dim=512, use_noise=False, demodulate=True, activation='lrelu'):
- super().__init__()
- res = 64
-
- self.conv_first = Conv2dLayerPartial(in_channels=img_channels+1, out_channels=dim, kernel_size=3, activation=activation)
- self.enc_conv = nn.ModuleList()
- down_time = int(np.log2(img_resolution // res))
- for i in range(down_time): # from input size to 64
- self.enc_conv.append(
- Conv2dLayerPartial(in_channels=dim, out_channels=dim, kernel_size=3, down=2, activation=activation)
- )
-
- # from 64 -> 16 -> 64
- depths = [2, 3, 4, 3, 2]
- ratios = [1, 1/2, 1/2, 2, 2]
- num_heads = 6
- window_sizes = [8, 16, 16, 16, 8]
- drop_path_rate = 0.1
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
-
- self.tran = nn.ModuleList()
- for i, depth in enumerate(depths):
- res = int(res * ratios[i])
- if ratios[i] < 1:
- merge = PatchMerging(dim, dim, down=int(1/ratios[i]))
- elif ratios[i] > 1:
- merge = PatchUpsampling(dim, dim, up=ratios[i])
- else:
- merge = None
- self.tran.append(
- BasicLayer(dim=dim, input_resolution=[res, res], depth=depth, num_heads=num_heads,
- window_size=window_sizes[i], drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
- downsample=merge)
- )
-
- # global style
- down_conv = []
- for i in range(int(np.log2(16))):
- down_conv.append(Conv2dLayer(in_channels=dim, out_channels=dim, kernel_size=3, down=2, activation=activation))
- down_conv.append(nn.AdaptiveAvgPool2d((1, 1)))
- self.down_conv = nn.Sequential(*down_conv)
- self.to_style = FullyConnectedLayer(in_features=dim, out_features=dim*2, activation=activation)
- self.ws_style = FullyConnectedLayer(in_features=w_dim, out_features=dim, activation=activation)
- self.to_square = FullyConnectedLayer(in_features=dim, out_features=16*16, activation=activation)
-
- style_dim = dim * 3
- self.dec_conv = nn.ModuleList()
- for i in range(down_time): # from 64 to input size
- res = res * 2
- self.dec_conv.append(DecStyleBlock(res, dim, dim, activation, style_dim, use_noise, demodulate, img_channels))
-
- def forward(self, images_in, masks_in, ws, noise_mode='random'):
- x = torch.cat([masks_in - 0.5, images_in * masks_in], dim=1)
-
- skips = []
- x, mask = self.conv_first(x, masks_in) # input size
- skips.append(x)
- for i, block in enumerate(self.enc_conv): # input size to 64
- x, mask = block(x, mask)
- if i != len(self.enc_conv) - 1:
- skips.append(x)
-
- x_size = x.size()[-2:]
- x = feature2token(x)
- mask = feature2token(mask)
- mid = len(self.tran) // 2
- for i, block in enumerate(self.tran): # 64 to 16
- if i < mid:
- x, x_size, mask = block(x, x_size, mask)
- skips.append(x)
- elif i > mid:
- x, x_size, mask = block(x, x_size, None)
- x = x + skips[mid - i]
- else:
- x, x_size, mask = block(x, x_size, None)
-
- mul_map = torch.ones_like(x) * 0.5
- mul_map = F.dropout(mul_map, training=True)
- ws = self.ws_style(ws[:, -1])
- add_n = self.to_square(ws).unsqueeze(1)
- add_n = F.interpolate(add_n, size=x.size(1), mode='linear', align_corners=False).squeeze(1).unsqueeze(-1)
- x = x * mul_map + add_n * (1 - mul_map)
- gs = self.to_style(self.down_conv(token2feature(x, x_size)).flatten(start_dim=1))
- style = torch.cat([gs, ws], dim=1)
-
- x = token2feature(x, x_size).contiguous()
- img = None
- for i, block in enumerate(self.dec_conv):
- x, img = block(x, img, style, skips[len(self.dec_conv)-i-1], noise_mode=noise_mode)
-
- # ensemble
- img = img * (1 - masks_in) + images_in * masks_in
-
- return img
-
-
-@persistence.persistent_class
-class SynthesisNet(nn.Module):
- def __init__(self,
- w_dim, # Intermediate latent (W) dimensionality.
- img_resolution, # Output image resolution.
- img_channels = 3, # Number of color channels.
- channel_base = 32768, # Overall multiplier for the number of channels.
- channel_decay = 1.0,
- channel_max = 512, # Maximum number of channels in any layer.
- activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
- drop_rate = 0.5,
- use_noise = True,
- demodulate = True,
- ):
- super().__init__()
- resolution_log2 = int(np.log2(img_resolution))
- assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
-
- self.num_layers = resolution_log2 * 2 - 3 * 2
- self.img_resolution = img_resolution
- self.resolution_log2 = resolution_log2
-
- # first stage
- self.first_stage = FirstStage(img_channels, img_resolution=img_resolution, w_dim=w_dim, use_noise=False, demodulate=demodulate)
-
- # second stage
- self.enc = Encoder(resolution_log2, img_channels, activation, patch_size=5, channels=16)
- self.to_square = FullyConnectedLayer(in_features=w_dim, out_features=16*16, activation=activation)
- self.to_style = ToStyle(in_channels=nf(4), out_channels=nf(2) * 2, activation=activation, drop_rate=drop_rate)
- style_dim = w_dim + nf(2) * 2
- self.dec = Decoder(resolution_log2, activation, style_dim, use_noise, demodulate, img_channels)
-
- def forward(self, images_in, masks_in, ws, noise_mode='random', return_stg1=False):
- out_stg1 = self.first_stage(images_in, masks_in, ws, noise_mode=noise_mode)
-
- # encoder
- x = images_in * masks_in + out_stg1 * (1 - masks_in)
- x = torch.cat([masks_in - 0.5, x, images_in * masks_in], dim=1)
- E_features = self.enc(x)
-
- fea_16 = E_features[4]
- mul_map = torch.ones_like(fea_16) * 0.5
- mul_map = F.dropout(mul_map, training=True)
- add_n = self.to_square(ws[:, 0]).view(-1, 16, 16).unsqueeze(1)
- add_n = F.interpolate(add_n, size=fea_16.size()[-2:], mode='bilinear', align_corners=False)
- fea_16 = fea_16 * mul_map + add_n * (1 - mul_map)
- E_features[4] = fea_16
-
- # style
- gs = self.to_style(fea_16)
-
- # decoder
- img = self.dec(fea_16, ws, gs, E_features, noise_mode=noise_mode)
-
- # ensemble
- img = img * (1 - masks_in) + images_in * masks_in
-
- if not return_stg1:
- return img
- else:
- return img, out_stg1
-
-
-@persistence.persistent_class
-class Generator(nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality, 0 = no latent.
- c_dim, # Conditioning label (C) dimensionality, 0 = no label.
- w_dim, # Intermediate latent (W) dimensionality.
- img_resolution, # resolution of generated image
- img_channels, # Number of input color channels.
- synthesis_kwargs = {}, # Arguments for SynthesisNetwork.
- mapping_kwargs = {}, # Arguments for MappingNetwork.
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.img_channels = img_channels
-
- self.synthesis = SynthesisNet(w_dim=w_dim,
- img_resolution=img_resolution,
- img_channels=img_channels,
- **synthesis_kwargs)
- self.mapping = MappingNet(z_dim=z_dim,
- c_dim=c_dim,
- w_dim=w_dim,
- num_ws=self.synthesis.num_layers,
- **mapping_kwargs)
-
- def forward(self, images_in, masks_in, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False,
- noise_mode='random', return_stg1=False):
- ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff,
- skip_w_avg_update=skip_w_avg_update)
-
- if not return_stg1:
- img = self.synthesis(images_in, masks_in, ws, noise_mode=noise_mode)
- return img
- else:
- img, out_stg1 = self.synthesis(images_in, masks_in, ws, noise_mode=noise_mode, return_stg1=True)
- return img, out_stg1
-
-
-@persistence.persistent_class
-class Discriminator(torch.nn.Module):
- def __init__(self,
- c_dim, # Conditioning label (C) dimensionality.
- img_resolution, # Input resolution.
- img_channels, # Number of input color channels.
- channel_base = 32768, # Overall multiplier for the number of channels.
- channel_max = 512, # Maximum number of channels in any layer.
- channel_decay = 1,
- cmap_dim = None, # Dimensionality of mapped conditioning label, None = default.
- activation = 'lrelu',
- mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch.
- mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable.
- ):
- super().__init__()
- self.c_dim = c_dim
- self.img_resolution = img_resolution
- self.img_channels = img_channels
-
- resolution_log2 = int(np.log2(img_resolution))
- assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
- self.resolution_log2 = resolution_log2
-
- if cmap_dim == None:
- cmap_dim = nf(2)
- if c_dim == 0:
- cmap_dim = 0
- self.cmap_dim = cmap_dim
-
- if c_dim > 0:
- self.mapping = MappingNet(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None)
-
- Dis = [DisFromRGB(img_channels+1, nf(resolution_log2), activation)]
- for res in range(resolution_log2, 2, -1):
- Dis.append(DisBlock(nf(res), nf(res-1), activation))
-
- if mbstd_num_channels > 0:
- Dis.append(MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels))
- Dis.append(Conv2dLayer(nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation))
- self.Dis = nn.Sequential(*Dis)
-
- self.fc0 = FullyConnectedLayer(nf(2)*4**2, nf(2), activation=activation)
- self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim)
-
- # for 64x64
- Dis_stg1 = [DisFromRGB(img_channels+1, nf(resolution_log2) // 2, activation)]
- for res in range(resolution_log2, 2, -1):
- Dis_stg1.append(DisBlock(nf(res) // 2, nf(res - 1) // 2, activation))
-
- if mbstd_num_channels > 0:
- Dis_stg1.append(MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels))
- Dis_stg1.append(Conv2dLayer(nf(2) // 2 + mbstd_num_channels, nf(2) // 2, kernel_size=3, activation=activation))
- self.Dis_stg1 = nn.Sequential(*Dis_stg1)
-
- self.fc0_stg1 = FullyConnectedLayer(nf(2) // 2 * 4 ** 2, nf(2) // 2, activation=activation)
- self.fc1_stg1 = FullyConnectedLayer(nf(2) // 2, 1 if cmap_dim == 0 else cmap_dim)
-
- def forward(self, images_in, masks_in, images_stg1, c):
- x = self.Dis(torch.cat([masks_in - 0.5, images_in], dim=1))
- x = self.fc1(self.fc0(x.flatten(start_dim=1)))
-
- x_stg1 = self.Dis_stg1(torch.cat([masks_in - 0.5, images_stg1], dim=1))
- x_stg1 = self.fc1_stg1(self.fc0_stg1(x_stg1.flatten(start_dim=1)))
-
- if self.c_dim > 0:
- cmap = self.mapping(None, c)
-
- if self.cmap_dim > 0:
- x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
- x_stg1 = (x_stg1 * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
-
- return x, x_stg1
-
-
-if __name__ == '__main__':
- device = torch.device('cuda:0')
- batch = 1
- res = 512
- G = Generator(z_dim=512, c_dim=0, w_dim=512, img_resolution=512, img_channels=3).to(device)
- D = Discriminator(c_dim=0, img_resolution=res, img_channels=3).to(device)
- img = torch.randn(batch, 3, res, res).to(device)
- mask = torch.randn(batch, 1, res, res).to(device)
- z = torch.randn(batch, 512).to(device)
- G.eval()
-
- # def count(block):
- # return sum(p.numel() for p in block.parameters()) / 10 ** 6
- # print('Generator', count(G))
- # print('discriminator', count(D))
-
- with torch.no_grad():
- img, img_stg1 = G(img, mask, z, None, return_stg1=True)
- print('output of G:', img.shape, img_stg1.shape)
- score, score_stg1 = D(img, mask, img_stg1, None)
- print('output of D:', score.shape, score_stg1.shape)
diff --git a/spaces/S0h9l/Coherent_Speech/app.py b/spaces/S0h9l/Coherent_Speech/app.py
deleted file mode 100644
index d9acec4a125589fc8319e19c1b9f05d3e408cb15..0000000000000000000000000000000000000000
--- a/spaces/S0h9l/Coherent_Speech/app.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import gradio as gr
-import whisper
-import cohere
-from deep_translator import GoogleTranslator
-from gtts import gTTS
-import gtts.lang
-#from dotenv import load_dotenv
-
-#load_dotenv()
-
-model = whisper.load_model("base")
-
-LANGUAGES = list(gtts.lang.tts_langs())
-
-def transcribe(api,audio,language):
- co = cohere.Client(api)
-
- #time.sleep(3)
- # load audio and pad/trim it to fit 30 seconds
- audio = whisper.load_audio(audio)
- audio = whisper.pad_or_trim(audio)
-
- # make log-Mel spectrogram and move to the same device as the model
- mel = whisper.log_mel_spectrogram(audio).to(model.device)
-
- # detect the spoken language
- _, probs = model.detect_language(mel)
- print(f"Detected language: {max(probs, key=probs.get)}")
-
- # decode the audio
- options = whisper.DecodingOptions(fp16 = False)
- result = whisper.decode(model, mel, options)
-
- #cohere
- response = co.generate(
- model='xlarge',
- prompt=f'This program will generate an introductory paragraph to a blog post given a blog title, audience, and tone of voice.\n--\nBlog Title: Best Activities in Toronto\nAudience: Millennials\nTone of Voice: Lighthearted\nFirst Paragraph: Looking for fun things to do in Toronto? When it comes to exploring Canada\'s largest city, there\'s an ever-evolving set of activities to choose from. Whether you\'re looking to visit a local museum or sample the city\'s varied cuisine, there is plenty to fill any itinerary. In this blog post, I\'ll share some of my favorite recommendations\n--\nBlog Title: Mastering Dynamic Programming\nAudience: Developers\nTone: Informative\nFirst Paragraph: In this piece, we\'ll help you understand the fundamentals of dynamic programming, and when to apply this optimization technique. We\'ll break down bottom-up and top-down approaches to solve dynamic programming problems.\n--\nBlog Title: How to Get Started with Rock Climbing\nAudience: Athletes\nTone: Enthusiastic\nFirst Paragraph:If you\'re an athlete who\'s looking to learn how to rock climb, then you\'ve come to the right place! This blog post will give you all the information you need to know about how to get started in the sport. Rock climbing is a great way to stay active and challenge yourself in a new way. It\'s also a great way to make new friends and explore new places. So, what are you waiting for? Get out there and start climbing!\n--\nBlog Title: {result.text}\nAudience: Engineers\nTone: Enthusiastic\nFirst Paragraph:',
- max_tokens=200,
- temperature=0.8,
- k=0,
- p=1,
- frequency_penalty=0,
- presence_penalty=0,
- stop_sequences=["--"],
- return_likelihoods='NONE')
- #result.text
- reptxt = response.generations[0].text.strip("--")
-
- #Google models
- translated = GoogleTranslator(source='auto', target=language).translate(reptxt)
- filename = 'result.mp3'
- tts = gTTS(text=translated, lang=language)
- tts.save(filename)
- return filename, translated
-
-
-
-gr.Interface(
- title = 'Coherent Speech',
-    description = 'Enter the API key, start recording to give your input, stop recording, then select a language; the language can also be changed after the output. Ignore any error message in the output section.',
- fn=transcribe,
- inputs=[
- gr.inputs.Textbox(lines=1, label="Enter your Cohere API Key"),
- gr.inputs.Audio(source="microphone", type="filepath"),
- gr.Radio(label="Language", choices=LANGUAGES, value="en")
- ],
- outputs=[gr.Audio(label="Output",type="filepath"),gr.outputs.Textbox(label="Generated Text")],
- live=True).launch()
\ No newline at end of file
diff --git a/spaces/SAAZIZI/SummarizeAV/summarization_service/__init__.py b/spaces/SAAZIZI/SummarizeAV/summarization_service/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Sapphire-356/Video2MC/model/block/refine.py b/spaces/Sapphire-356/Video2MC/model/block/refine.py
deleted file mode 100644
index 407ba5d63290f3bf2143ba7dc85e020267dd71b9..0000000000000000000000000000000000000000
--- a/spaces/Sapphire-356/Video2MC/model/block/refine.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.autograd import Variable
-
-fc_out = 256
-fc_unit = 1024
-
-class refine(nn.Module):
- def __init__(self, opt):
- super().__init__()
-
- out_seqlen = 1
- fc_in = opt.out_channels*2*out_seqlen*opt.n_joints
- fc_out = opt.in_channels * opt.n_joints
-
- self.post_refine = nn.Sequential(
- nn.Linear(fc_in, fc_unit),
- nn.ReLU(),
- nn.Dropout(0.5,inplace=True),
- nn.Linear(fc_unit, fc_out),
- nn.Sigmoid()
- )
-
- def forward(self, x, x_1):
- N, T, V,_ = x.size()
- x_in = torch.cat((x, x_1), -1)
- x_in = x_in.view(N, -1)
-
- score = self.post_refine(x_in).view(N,T,V,2)
- score_cm = Variable(torch.ones(score.size()), requires_grad=False) - score
- x_out = x.clone()
- x_out[:, :, :, :2] = score * x[:, :, :, :2] + score_cm * x_1[:, :, :, :2]
-
- return x_out
-
-
diff --git a/spaces/Shredder/CONBERT/fin_readability_sustainability.py b/spaces/Shredder/CONBERT/fin_readability_sustainability.py
deleted file mode 100644
index 53ea0c60eab0dd27868f9bdc6d4652ea0ddc71b9..0000000000000000000000000000000000000000
--- a/spaces/Shredder/CONBERT/fin_readability_sustainability.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import torch
-import transformers
-from torch.utils.data import Dataset, DataLoader
-from transformers import RobertaModel, RobertaTokenizer, BertModel, BertTokenizer
-import pandas as pd
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-MAX_LEN = 128
-BATCH_SIZE = 20
-text_col_name = 'sentence'
-
-def scoring_data_prep(dataset):
- out = []
- target = []
- mask = []
-
- for i in range(len(dataset)):
- rec = dataset[i]
- out.append(rec['ids'].reshape(-1,MAX_LEN))
- mask.append(rec['mask'].reshape(-1,MAX_LEN))
-
- out_stack = torch.cat(out, dim = 0)
- mask_stack = torch.cat(mask, dim =0 )
- out_stack = out_stack.to(device, dtype = torch.long)
- mask_stack = mask_stack.to(device, dtype = torch.long)
-
- return out_stack, mask_stack
-
-class Triage(Dataset):
- """
-    This is a subclass of torch's Dataset class. It processes input to create the ids, masks and targets required for model training.
- """
-
- def __init__(self, dataframe, tokenizer, max_len, text_col_name):
- self.len = len(dataframe)
- self.data = dataframe
- self.tokenizer = tokenizer
- self.max_len = max_len
- self.text_col_name = text_col_name
-
-
- def __getitem__(self, index):
- title = str(self.data[self.text_col_name][index])
- title = " ".join(title.split())
- inputs = self.tokenizer.encode_plus(
- title,
- None,
- add_special_tokens=True,
- max_length=self.max_len,
- pad_to_max_length=True, #padding='max_length' #For future version use `padding='max_length'`
- return_token_type_ids=True,
- truncation=True,
- )
- ids = inputs["input_ids"]
- mask = inputs["attention_mask"]
-
- return {
- "ids": torch.tensor(ids, dtype=torch.long),
- "mask": torch.tensor(mask, dtype=torch.long),
-
- }
-
- def __len__(self):
- return self.len
-
-class BERTClass(torch.nn.Module):
- def __init__(self, num_class, task):
- super(BERTClass, self).__init__()
- self.num_class = num_class
- if task =="sustanability":
- self.l1 = RobertaModel.from_pretrained("roberta-base")
- else:
- self.l1 = BertModel.from_pretrained("ProsusAI/finbert")
- self.pre_classifier = torch.nn.Linear(768, 768)
- self.dropout = torch.nn.Dropout(0.3)
- self.classifier = torch.nn.Linear(768, self.num_class)
- self.history = dict()
-
- def forward(self, input_ids, attention_mask):
- output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
- hidden_state = output_1[0]
- pooler = hidden_state[:, 0]
- pooler = self.pre_classifier(pooler)
- pooler = torch.nn.ReLU()(pooler)
- pooler = self.dropout(pooler)
- output = self.classifier(pooler)
- return output
-
-def do_predict(model, tokenizer, test_df):
- test_set = Triage(test_df, tokenizer, MAX_LEN, text_col_name)
- test_params = {'batch_size' : BATCH_SIZE, 'shuffle': False, 'num_workers':0}
- test_loader = DataLoader(test_set, **test_params)
- out_stack, mask_stack = scoring_data_prep(dataset = test_set)
- n = 0
- combined_output = []
- model.eval()
- with torch.no_grad():
- while n < test_df.shape[0]:
- output = model(out_stack[n:n+BATCH_SIZE,:],mask_stack[n:n+BATCH_SIZE,:])
- n = n + BATCH_SIZE
- combined_output.append(output)
- combined_output = torch.cat(combined_output, dim = 0)
- preds = torch.argsort(combined_output, axis = 1, descending = True)
- preds = preds.to('cpu')
- actual_predictions = [i[0] for i in preds.tolist()]
- combined_output = combined_output.to('cpu')
- prob_predictions= [i[1] for i in combined_output.tolist()]
- return (actual_predictions, prob_predictions)
-
\ No newline at end of file
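
The module above only defines the dataset, model and scoring helpers; the deleted file does not show how they are driven. Below is a minimal, hypothetical usage sketch: `BERTClass`, `do_predict`, `device` and the `sentence` column name come from the module, while the two-class assumption and the checkpoint path are illustrative guesses.

```python
import pandas as pd
import torch
from transformers import BertTokenizer

from fin_readability_sustainability import BERTClass, do_predict, device

# Non-"sustanability" tasks use the FinBERT backbone, so pair it with the FinBERT tokenizer.
tokenizer = BertTokenizer.from_pretrained("ProsusAI/finbert")
model = BERTClass(num_class=2, task="readability").to(device)
# model.load_state_dict(torch.load("readability.bin", map_location=device))  # hypothetical fine-tuned weights

test_df = pd.DataFrame({"sentence": ["Revenue increased 12% year over year."]})
labels, probs = do_predict(model, tokenizer, test_df)
print(labels, probs)  # predicted class per sentence and the raw class-1 output
```
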
diff --git a/spaces/StatsByZach/app/games.py b/spaces/StatsByZach/app/games.py
deleted file mode 100644
index cd04533a8f926b11b72b27d1a2698e0cd71b0c7c..0000000000000000000000000000000000000000
--- a/spaces/StatsByZach/app/games.py
+++ /dev/null
@@ -1,174 +0,0 @@
-##### games.py #####
-
-# Import modules
-from shiny import *
-import shinyswatch
-import plotly.express as px
-from shinywidgets import output_widget, render_widget
-import pandas as pd
-from configure import base_url
-import math
-import datetime
-
-
-# Paths to data
-gsaxt = "data/game_list.csv"
-data = pd.read_csv(gsaxt)
-data = data[['Home','Away','Game_Id','Date','Link']]
-game_dates = ['All']
-game_dates_temp = data['Date'].value_counts().keys().tolist()
-game_dates_temp=game_dates_temp[::-1]
-dates = [datetime.datetime.strptime(ts, "%Y-%m-%d") for ts in game_dates_temp]
-dates.sort()
-sorteddates = [datetime.datetime.strftime(ts, "%Y-%m-%d") for ts in dates]
-sorteddates = sorteddates[::-1]
-game_dates.extend(sorteddates)
-print(game_dates)
-default=game_dates[1]
-def server(input,output,session):
- @output
- @render.text
- def text():
- t= 'Vi'
- return t
-
- @output
- @render.table
- def table():
- df = pd.read_csv(gsaxt)
- df = df[['Home','Away','Date','Link']]
- if input.team() =="All":
- df = df
- else:
- df = df[(df['Home']==input.team())|(df['Away']==input.team())]
- if input.date() == "All":
- df = df
- else:
- df = df[df['Date']==input.date()]
- #return df.style.set_table_attributes('escape=False class="dataframe shiny-table table w-auto"').hide_index()
- return df.style.set_table_attributes(
- 'class="dataframe shiny-table table w-auto"'
- ).set_properties(**{'border': '1.3px #222222'},).hide().set_table_styles(
- [dict(selector="th", props=[("text-align", "right"),('font-size','25px')]),
- dict(selector="tr", props=[('font-size','21px')]),]
- )
-
-games = App(ui.page_fluid(
- ui.tags.base(href=base_url),
- ui.tags.div(
- {"style": "width:75%;margin: 0 auto"},
- ui.tags.style(
- """
- h4 {
- margin-top: 1em;font-size:35px;
- }
- h2{
- font-size:25px;
- }
- """
- ),
- shinyswatch.theme.darkly(),
- ui.tags.h4("Stats By Zach"),
- ui.tags.i("A website for hockey analytics"),
- ui.navset_tab(
- ui.nav_control(
- ui.a(
- "Home",
- href="home/"
- ),
- ),
- ui.nav_menu(
- "Skater Charts",
- ui.nav_control(
- ui.a(
- "On-Ice xG Rates",
- href="skater-xg-rates/"
- ),
- ui.a(
- "On-Ice xGF%",
- href="skater-xg-percentages/"
- ),
- ),
- ),
- ui.nav_menu(
- "Goalie Charts",
- ui.nav_control(
- ui.a(
- "GSAx Timeline",
- href="gsax-timeline/"
- ),
- ui.a(
- "GSAx Leaderboard",
- href="gsax-leaderboard/"
- ),
- ui.a(
- "GSAx Comparison",
- href="gsax-comparison/"
- )
- ),
- ),ui.nav_menu(
- "Team Charts",
- ui.nav_control(
- ui.a(
- "Team xG Rates",
- href="team-xg-rates/"
- ),
- ),
- ),ui.nav_control(
- ui.a(
- "Games",
- href="games/"
- ),
- ),ui.nav_control(
- ui.a(
- "About",
- href="about/"
- ),
- )),ui.row(
- ui.column(5,ui.tags.br(),ui.tags.h2("Games"),ui.input_select(
- "team",
- "Filter by Team:",
- {
- "All":"All",
- "ANA": "Anaheim Ducks",
- "ARI": "Arizona Coyotes",
- "BOS": "Boston Bruins",
- "BUF": "Buffalo Sabres",
- "CGY": "Calgary Flames",
- "CAR": "Carolina Hurricanes",
- "CHI": "Chicago Blackhawks",
- "COL": "Colorado Avalanche",
- "CBJ": "Columbus Blue Jackets",
- "DAL": "Dallas Stars",
- "DET": "Detroit Red Wings",
- "EDM": "Edmonton Oilers",
- "FLA": "Florida Panthers",
- "L.A": "Los Angeles Kings",
- "MIN": "Minnesota Wild",
- "MTL": "Montreal Canadiens",
- "NSH": "Nashville Predators",
- "N.J": "New Jersey Devils",
- "NYI": "New York Islanders",
- "NYR": "New York Rangers",
- "OTT": "Ottawa Senators",
- "PHI": "Philadelphia Flyers",
- "PIT": "Pittsburgh Penguins",
- "S.J": "San Jose Sharks",
- "SEA":"Seattle Kraken",
- "STL": "St. Louis Blues",
- "T.B": "Tampa Bay Lightning",
- "TOR": "Toronto Maple Leafs",
- "VAN": "Vancouver Canucks",
- "VGK": "Vegas Golden Knights",
- "WSH": "Washington Capitals",
- "WPG": "Winnipeg Jets"
- },
- ),
- ui.input_select(
- "date",
- "Filter by Date:",
- game_dates,
- selected=default
- ),),ui.column(7,ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),ui.tags.br(),
- ui.output_table("table"),
- )),)),server)
\ No newline at end of file
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/shimmodule.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/shimmodule.py
deleted file mode 100644
index 8af44caa98b2ef51a7e557f8a8930e37a27857de..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/shimmodule.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""A shim module for deprecated imports
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import importlib.abc
-import importlib.util
-import sys
-import types
-from importlib import import_module
-
-from .importstring import import_item
-
-
-class ShimWarning(Warning):
- """A warning to show when a module has moved, and a shim is in its place."""
-
-
-class ShimImporter(importlib.abc.MetaPathFinder):
- """Import hook for a shim.
-
- This ensures that submodule imports return the real target module,
- not a clone that will confuse `is` and `isinstance` checks.
- """
- def __init__(self, src, mirror):
- self.src = src
- self.mirror = mirror
-
- def _mirror_name(self, fullname):
- """get the name of the mirrored module"""
-
- return self.mirror + fullname[len(self.src) :]
-
- def find_spec(self, fullname, path, target=None):
- if fullname.startswith(self.src + "."):
- mirror_name = self._mirror_name(fullname)
- return importlib.util.find_spec(mirror_name)
-
-
-class ShimModule(types.ModuleType):
-
- def __init__(self, *args, **kwargs):
- self._mirror = kwargs.pop("mirror")
- src = kwargs.pop("src", None)
- if src:
- kwargs['name'] = src.rsplit('.', 1)[-1]
- super(ShimModule, self).__init__(*args, **kwargs)
- # add import hook for descendent modules
- if src:
- sys.meta_path.append(
- ShimImporter(src=src, mirror=self._mirror)
- )
-
- @property
- def __path__(self):
- return []
-
- @property
- def __spec__(self):
- """Don't produce __spec__ until requested"""
- return import_module(self._mirror).__spec__
-
- def __dir__(self):
- return dir(import_module(self._mirror))
-
- @property
- def __all__(self):
- """Ensure __all__ is always defined"""
- mod = import_module(self._mirror)
- try:
- return mod.__all__
- except AttributeError:
- return [name for name in dir(mod) if not name.startswith('_')]
-
- def __getattr__(self, key):
- # Use the equivalent of import_item(name), see below
- name = "%s.%s" % (self._mirror, key)
- try:
- return import_item(name)
- except ImportError as e:
- raise AttributeError(key) from e
-
- def __repr__(self):
- # repr on a module can be called during error handling; make sure
- # it does not fail, even if the import fails
- try:
- return self.__getattr__("__repr__")()
- except AttributeError:
-            return f"<ShimModule for {self._mirror!r}>"
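
For context on how a shim like this is meant to be wired up (the deleted file only defines the machinery), here is a hypothetical sketch; the `mypkg.old` / `mypkg.new` names are placeholders and not anything from IPython itself.

```python
import sys
import warnings

from IPython.utils.shimmodule import ShimModule, ShimWarning

# Replace the legacy package entry so old imports keep working but forward to the new location.
warnings.warn("mypkg.old has moved to mypkg.new", ShimWarning)
sys.modules["mypkg.old"] = ShimModule(src="mypkg.old", mirror="mypkg.new")

# From here on:
#   * `import mypkg.old.sub` is resolved by ShimImporter against `mypkg.new.sub`
#   * attribute access such as `mypkg.old.something` is looked up lazily on the mirror
```
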
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py
deleted file mode 100644
index b7eee4ad7db99c447732e3f3ebf2e8c108fe93a8..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/custom.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from sqlalchemy.sql.ddl import DDL
-from sqlalchemy.exc import ArgumentError
-
-from clickhouse_connect.driver.query import quote_identifier
-
-
-# pylint: disable=too-many-ancestors,abstract-method
-class CreateDatabase(DDL):
- """
- SqlAlchemy DDL statement that is essentially an alternative to the built in CreateSchema DDL class
- """
- # pylint: disable-msg=too-many-arguments
- def __init__(self, name: str, engine: str = None, zoo_path: str = None, shard_name: str = '{shard}',
- replica_name: str = '{replica}'):
- """
- :param name: Database name
- :param engine: Database ClickHouse engine type
- :param zoo_path: ClickHouse zookeeper path for Replicated database engine
- :param shard_name: Clickhouse shard name for Replicated database engine
- :param replica_name: Replica name for Replicated database engine
- """
- if engine and engine not in ('Ordinary', 'Atomic', 'Lazy', 'Replicated'):
- raise ArgumentError(f'Unrecognized engine type {engine}')
- stmt = f'CREATE DATABASE {quote_identifier(name)}'
- if engine:
- stmt += f' Engine {engine}'
- if engine == 'Replicated':
- if not zoo_path:
- raise ArgumentError('zoo_path is required for Replicated Database Engine')
-            stmt += f" ('{zoo_path}', '{shard_name}', '{replica_name}')"
- super().__init__(stmt)
-
-
-# pylint: disable=too-many-ancestors,abstract-method
-class DropDatabase(DDL):
- """
- Alternative DDL statement for built in SqlAlchemy DropSchema DDL class
- """
- def __init__(self, name: str):
- super().__init__(f'DROP DATABASE {quote_identifier(name)}')
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fpn_uniformer.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fpn_uniformer.py
deleted file mode 100644
index 8aae98c5991055bfcc08e82ccdc09f8b1d9f8a8d..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fpn_uniformer.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- backbone=dict(
- type='UniFormer',
- embed_dim=[64, 128, 320, 512],
- layers=[3, 4, 8, 3],
- head_dim=64,
- mlp_ratio=4.,
- qkv_bias=True,
- drop_rate=0.,
- attn_drop_rate=0.,
- drop_path_rate=0.1),
- neck=dict(
- type='FPN',
- in_channels=[64, 128, 320, 512],
- out_channels=256,
- num_outs=4),
- decode_head=dict(
- type='FPNHead',
- in_channels=[256, 256, 256, 256],
- in_index=[0, 1, 2, 3],
- feature_strides=[4, 8, 16, 32],
- channels=128,
- dropout_ratio=0.1,
- num_classes=150,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole')
-)
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/io.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/io.py
deleted file mode 100644
index aaefde58aa3ea5b58f86249ce7e1c40c186eb8dd..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/io.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from io import BytesIO, StringIO
-from pathlib import Path
-
-from ..utils import is_list_of, is_str
-from .file_client import FileClient
-from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
-
-file_handlers = {
- 'json': JsonHandler(),
- 'yaml': YamlHandler(),
- 'yml': YamlHandler(),
- 'pickle': PickleHandler(),
- 'pkl': PickleHandler()
-}
-
-
-def load(file, file_format=None, file_client_args=None, **kwargs):
- """Load data from json/yaml/pickle files.
-
- This method provides a unified api for loading data from serialized files.
-
- Note:
- In v1.3.16 and later, ``load`` supports loading data from serialized
-        files that can be stored in different backends.
-
- Args:
- file (str or :obj:`Path` or file-like object): Filename or a file-like
- object.
- file_format (str, optional): If not specified, the file format will be
- inferred from the file extension, otherwise use the specified one.
- Currently supported formats include "json", "yaml/yml" and
- "pickle/pkl".
- file_client_args (dict, optional): Arguments to instantiate a
- FileClient. See :class:`mmcv.fileio.FileClient` for details.
- Default: None.
-
- Examples:
-        >>> load('/path/of/your/file') # file is stored on disk
-        >>> load('https://path/of/your/file') # file is stored on the Internet
-        >>> load('s3://path/of/your/file') # file is stored in petrel
-
- Returns:
- The content from the file.
- """
- if isinstance(file, Path):
- file = str(file)
- if file_format is None and is_str(file):
- file_format = file.split('.')[-1]
- if file_format not in file_handlers:
- raise TypeError(f'Unsupported format: {file_format}')
-
- handler = file_handlers[file_format]
- if is_str(file):
- file_client = FileClient.infer_client(file_client_args, file)
- if handler.str_like:
- with StringIO(file_client.get_text(file)) as f:
- obj = handler.load_from_fileobj(f, **kwargs)
- else:
- with BytesIO(file_client.get(file)) as f:
- obj = handler.load_from_fileobj(f, **kwargs)
- elif hasattr(file, 'read'):
- obj = handler.load_from_fileobj(file, **kwargs)
- else:
- raise TypeError('"file" must be a filepath str or a file-object')
- return obj
-
-
-def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
- """Dump data to json/yaml/pickle strings or files.
-
- This method provides a unified api for dumping data as strings or to files,
- and also supports custom arguments for each file format.
-
- Note:
- In v1.3.16 and later, ``dump`` supports dumping data as strings or to
-        files which are saved to different backends.
-
- Args:
- obj (any): The python object to be dumped.
- file (str or :obj:`Path` or file-like object, optional): If not
- specified, then the object is dumped to a str, otherwise to a file
- specified by the filename or file-like object.
- file_format (str, optional): Same as :func:`load`.
- file_client_args (dict, optional): Arguments to instantiate a
- FileClient. See :class:`mmcv.fileio.FileClient` for details.
- Default: None.
-
- Examples:
- >>> dump('hello world', '/path/of/your/file') # disk
- >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel
-
- Returns:
- bool: True for success, False otherwise.
- """
- if isinstance(file, Path):
- file = str(file)
- if file_format is None:
- if is_str(file):
- file_format = file.split('.')[-1]
- elif file is None:
- raise ValueError(
- 'file_format must be specified since file is None')
- if file_format not in file_handlers:
- raise TypeError(f'Unsupported format: {file_format}')
-
- handler = file_handlers[file_format]
- if file is None:
- return handler.dump_to_str(obj, **kwargs)
- elif is_str(file):
- file_client = FileClient.infer_client(file_client_args, file)
- if handler.str_like:
- with StringIO() as f:
- handler.dump_to_fileobj(obj, f, **kwargs)
- file_client.put_text(f.getvalue(), file)
- else:
- with BytesIO() as f:
- handler.dump_to_fileobj(obj, f, **kwargs)
- file_client.put(f.getvalue(), file)
- elif hasattr(file, 'write'):
- handler.dump_to_fileobj(obj, file, **kwargs)
- else:
- raise TypeError('"file" must be a filename str or a file-object')
-
-
-def _register_handler(handler, file_formats):
- """Register a handler for some file extensions.
-
- Args:
- handler (:obj:`BaseFileHandler`): Handler to be registered.
- file_formats (str or list[str]): File formats to be handled by this
- handler.
- """
- if not isinstance(handler, BaseFileHandler):
- raise TypeError(
- f'handler must be a child of BaseFileHandler, not {type(handler)}')
- if isinstance(file_formats, str):
- file_formats = [file_formats]
- if not is_list_of(file_formats, str):
- raise TypeError('file_formats must be a str or a list of str')
- for ext in file_formats:
- file_handlers[ext] = handler
-
-
-def register_handler(file_formats, **kwargs):
-
- def wrap(cls):
- _register_handler(cls(**kwargs), file_formats)
- return cls
-
- return wrap
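
To illustrate how the handler registry above is meant to be extended, here is a hedged sketch that registers a plain-text handler and round-trips a string through `dump`/`load`. `TxtHandler` and the `.txt` mapping are made up for the example, and the top-level `mmcv` import assumes this vendored copy keeps mmcv's usual re-exports (`BaseFileHandler`, `register_handler`, `load`, `dump`).

```python
from annotator.uniformer import mmcv  # assumed import path for this vendored copy


@mmcv.register_handler('txt')
class TxtHandler(mmcv.BaseFileHandler):
    """Hypothetical handler that treats .txt files as raw strings."""

    def load_from_fileobj(self, file, **kwargs):
        return file.read()

    def dump_to_fileobj(self, obj, file, **kwargs):
        file.write(str(obj))

    def dump_to_str(self, obj, **kwargs):
        return str(obj)


mmcv.dump('hello world', 'notes.txt')  # routed through TxtHandler.dump_to_fileobj
print(mmcv.load('notes.txt'))          # -> 'hello world'
```
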
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/extend.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/extend.md
deleted file mode 100644
index a6af550fdb2aa79c818cef54b009f2fe816d46a9..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/extend.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# Extend Detectron2's Defaults
-
-__Research is about doing things in new ways__.
-This brings a tension in how to create abstractions in code,
-which is a challenge for any research engineering project of a significant size:
-
-1. On one hand, it needs to have very thin abstractions to allow for the possibility of doing
- everything in new ways. It should be reasonably easy to break existing
- abstractions and replace them with new ones.
-
-2. On the other hand, such a project also needs reasonably high-level
- abstractions, so that users can easily do things in standard ways,
- without worrying too much about the details that only certain researchers care about.
-
-In detectron2, there are two types of interfaces that address this tension together:
-
-1. Functions and classes that take a config (`cfg`) argument
- created from a yaml file
- (sometimes with few extra arguments).
-
- Such functions and classes implement
- the "standard default" behavior: it will read what it needs from a given
- config and do the "standard" thing.
- Users only need to load an expert-made config and pass it around, without having to worry about
- which arguments are used and what they all mean.
-
- See [Yacs Configs](configs.md) for a detailed tutorial.
-
-2. Functions and classes that have well-defined explicit arguments.
-
- Each of these is a small building block of the entire system.
- They require users' expertise to understand what each argument should be,
- and require more effort to stitch together to a larger system.
- But they can be stitched together in more flexible ways.
-
- When you need to implement something not supported by the "standard defaults"
- included in detectron2, these well-defined components can be reused.
-
- The [LazyConfig system](lazyconfigs.md) relies on such functions and classes.
-
-3. A few functions and classes are implemented with the
- [@configurable](../modules/config.html#detectron2.config.configurable)
- decorator - they can be called with either a config, or with explicit arguments, or a mixture of both.
- Their explicit argument interfaces are currently experimental.
-
- As an example, a Mask R-CNN model can be built in the following ways:
-
- 1. Config-only:
- ```python
- # load proper yaml config file, then
- model = build_model(cfg)
- ```
-
- 2. Mixture of config and additional argument overrides:
- ```python
- model = GeneralizedRCNN(
- cfg,
- roi_heads=StandardROIHeads(cfg, batch_size_per_image=666),
- pixel_std=[57.0, 57.0, 57.0])
- ```
-
- 3. Full explicit arguments:
-
-
-
-
- ```python
- model = GeneralizedRCNN(
- backbone=FPN(
- ResNet(
- BasicStem(3, 64, norm="FrozenBN"),
- ResNet.make_default_stages(50, stride_in_1x1=True, norm="FrozenBN"),
- out_features=["res2", "res3", "res4", "res5"],
- ).freeze(2),
- ["res2", "res3", "res4", "res5"],
- 256,
- top_block=LastLevelMaxPool(),
- ),
- proposal_generator=RPN(
- in_features=["p2", "p3", "p4", "p5", "p6"],
- head=StandardRPNHead(in_channels=256, num_anchors=3),
- anchor_generator=DefaultAnchorGenerator(
- sizes=[[32], [64], [128], [256], [512]],
- aspect_ratios=[0.5, 1.0, 2.0],
- strides=[4, 8, 16, 32, 64],
- offset=0.0,
- ),
- anchor_matcher=Matcher([0.3, 0.7], [0, -1, 1], allow_low_quality_matches=True),
- box2box_transform=Box2BoxTransform([1.0, 1.0, 1.0, 1.0]),
- batch_size_per_image=256,
- positive_fraction=0.5,
- pre_nms_topk=(2000, 1000),
- post_nms_topk=(1000, 1000),
- nms_thresh=0.7,
- ),
- roi_heads=StandardROIHeads(
- num_classes=80,
- batch_size_per_image=512,
- positive_fraction=0.25,
- proposal_matcher=Matcher([0.5], [0, 1], allow_low_quality_matches=False),
- box_in_features=["p2", "p3", "p4", "p5"],
- box_pooler=ROIPooler(7, (1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 0, "ROIAlignV2"),
- box_head=FastRCNNConvFCHead(
- ShapeSpec(channels=256, height=7, width=7), conv_dims=[], fc_dims=[1024, 1024]
- ),
- box_predictor=FastRCNNOutputLayers(
- ShapeSpec(channels=1024),
- test_score_thresh=0.05,
- box2box_transform=Box2BoxTransform((10, 10, 5, 5)),
- num_classes=80,
- ),
- mask_in_features=["p2", "p3", "p4", "p5"],
- mask_pooler=ROIPooler(14, (1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 0, "ROIAlignV2"),
- mask_head=MaskRCNNConvUpsampleHead(
- ShapeSpec(channels=256, width=14, height=14),
- num_classes=80,
- conv_dims=[256, 256, 256, 256, 256],
- ),
- ),
- pixel_mean=[103.530, 116.280, 123.675],
- pixel_std=[1.0, 1.0, 1.0],
- input_format="BGR",
- )
- ```
-
-
-
-
-If you only need the standard behavior, the [Beginner's Tutorial](./getting_started.md)
-should suffice. If you need to extend detectron2 to your own needs,
-see the following tutorials for more details:
-
-* Detectron2 includes a few standard datasets. To use custom ones, see
- [Use Custom Datasets](./datasets.md).
-* Detectron2 contains the standard logic that creates a data loader for training/testing from a
- dataset, but you can write your own as well. See [Use Custom Data Loaders](./data_loading.md).
-* Detectron2 implements many standard detection models, and provides ways for you
- to overwrite their behaviors. See [Use Models](./models.md) and [Write Models](./write-models.md).
-* Detectron2 provides a default training loop that is good for common training tasks.
- You can customize it with hooks, or write your own loop instead. See [training](./training.md).
diff --git a/spaces/Testys/diabetes-app/model.py b/spaces/Testys/diabetes-app/model.py
deleted file mode 100644
index d41dc36072b60c1852bab49e2e592e523ac0e730..0000000000000000000000000000000000000000
--- a/spaces/Testys/diabetes-app/model.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# importing python libraries
-import pandas as pd
-import pickle as pkl
-from lightgbm.sklearn import LGBMClassifier
-from sklearn.model_selection import StratifiedShuffleSplit
-from sklearn.preprocessing import RobustScaler, OrdinalEncoder
-from sklearn.metrics import f1_score
-
-import warnings
-warnings.filterwarnings("ignore")
-
-# loading diabetes data into variable data
-data = pd.read_csv("./dataset/diabetes.csv")
-
-# wrangling dataset.
-data.chol_hdl_ratio = round(data.cholesterol / data.hdl_chol, 2)
-data.waist_hip_ratio = round(data.waist / data.hip, 2)
-
-# correcting comma separated number to decimal separated number.
-data.bmi = pd.to_numeric(data.bmi.str.replace(",", "."))
-
-print(data.head())
-# encoding columns with object values using Ordinal Encoding
-s = (data.dtypes == "object")
-obj_col = s[s].index
-
-print("Ordinal Encoding")
-orde = OrdinalEncoder()
-data[obj_col] = orde.fit_transform(data[obj_col])
-
-print("Splitting features and target.")
-# dropping off target and unnecessary columns (diabetes and patient number columns)
-X = data.drop(["patient_number", "diabetes"], axis=1)
-y = data.diabetes
-
-print("Robust Scaling on X, y.")
-# scaling data using RobustScaler
-scale = RobustScaler()
-scaled_X = scale.fit_transform(X, y)
-
-print("Stratified Split.")
-# StratifiedShuffleSplit on Data
-split = StratifiedShuffleSplit(n_splits=4, random_state=42)
-
-for train_index, test_index in split.split(scaled_X, y):
- X_train, X_test = scaled_X[train_index], scaled_X[test_index]
- y_train, y_test = y[train_index], y[test_index]
-
-# Loading LightGBM classifier to be used for training model
-lgbm = LGBMClassifier(n_estimators=200, max_depth=-2, random_state=42)
-lgbm.fit(X_train, y_train)
-pred = lgbm.predict(X_test)
-
-f1 = f1_score(pred, y_test)
-print(f"F1 Score for LightGBM: {f1}.")
-
-# Using pickle to save model
-lightgbm = open("./lightgbm.pickle", "wb")
-pkl.dump(lgbm, lightgbm)
-lightgbm.close()
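
A short inference sketch follows, assuming it runs in the same session as the script above (so the fitted `scale` and the feature frame `X` are still in scope); the script itself does not persist the scaler or encoder, so a standalone service would need to save those too.

```python
import pickle as pkl

# Reload the classifier that was just pickled above.
with open("./lightgbm.pickle", "rb") as f:
    loaded_lgbm = pkl.load(f)

# New samples must pass through the same preprocessing as training:
# ordinal-encode object columns, drop patient_number/diabetes, then RobustScaler.
sample = scale.transform(X.iloc[:5])
print(loaded_lgbm.predict(sample))  # labels follow the OrdinalEncoder mapping of the 'diabetes' column
```
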
diff --git a/spaces/Vision-CAIR/minigpt4/minigpt4/__init__.py b/spaces/Vision-CAIR/minigpt4/minigpt4/__init__.py
deleted file mode 100644
index ec06cef0e2e4e39e450746b0f3136776f6bcf143..0000000000000000000000000000000000000000
--- a/spaces/Vision-CAIR/minigpt4/minigpt4/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import os
-import sys
-
-from omegaconf import OmegaConf
-
-from minigpt4.common.registry import registry
-
-from minigpt4.datasets.builders import *
-from minigpt4.models import *
-from minigpt4.processors import *
-from minigpt4.tasks import *
-
-
-root_dir = os.path.dirname(os.path.abspath(__file__))
-default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml"))
-
-registry.register_path("library_root", root_dir)
-repo_root = os.path.join(root_dir, "..")
-registry.register_path("repo_root", repo_root)
-cache_root = os.path.join(repo_root, default_cfg.env.cache_root)
-registry.register_path("cache_root", cache_root)
-
-registry.register("MAX_INT", sys.maxsize)
-registry.register("SPLIT_NAMES", ["train", "val", "test"])
diff --git a/spaces/Wootang01/chatbot_three/app.py b/spaces/Wootang01/chatbot_three/app.py
deleted file mode 100644
index 3a728850f4d53121b27c94716f14bcf472672285..0000000000000000000000000000000000000000
--- a/spaces/Wootang01/chatbot_three/app.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import gradio as gr
-
-from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
-
-tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-1B-distill')
-model = BlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-1B-distill')
-
-def func (message):
- inputs = tokenizer(message, return_tensors='pt')
- result = model.generate(**inputs)
- return tokenizer.decode(result[0])
-
-app = gr.Interface(fn=func, inputs = 'textbox', outputs = 'textbox', title='Chatbot Three')
-app.launch()
\ No newline at end of file
diff --git a/spaces/Xhaheen/stable-diffusion-21/README.md b/spaces/Xhaheen/stable-diffusion-21/README.md
deleted file mode 100644
index eb51073361e38a23d6f9e75c6b9acca08b45fb92..0000000000000000000000000000000000000000
--- a/spaces/Xhaheen/stable-diffusion-21/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Stable Diffusion 2
-emoji: 💩
-colorFrom: yellow
-colorTo: pink
-sdk: gradio
-sdk_version: 3.11.0
-app_file: app.py
-pinned: false
-duplicated_from: anzorq/stable-diffusion-2
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/XzJosh/Lumi-Bert-VITS2/text/cleaner.py b/spaces/XzJosh/Lumi-Bert-VITS2/text/cleaner.py
deleted file mode 100644
index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Lumi-Bert-VITS2/text/cleaner.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from text import chinese, cleaned_text_to_sequence
-
-
-language_module_map = {
- 'ZH': chinese
-}
-
-
-def clean_text(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- return norm_text, phones, tones, word2ph
-
-def clean_text_bert(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- bert = language_module.get_bert_feature(norm_text, word2ph)
- return phones, tones, bert
-
-def text_to_sequence(text, language):
- norm_text, phones, tones, word2ph = clean_text(text, language)
- return cleaned_text_to_sequence(phones, tones, language)
-
-if __name__ == '__main__':
- pass
diff --git a/spaces/XzJosh/nine2-Bert-VITS2/text/cleaner.py b/spaces/XzJosh/nine2-Bert-VITS2/text/cleaner.py
deleted file mode 100644
index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/nine2-Bert-VITS2/text/cleaner.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from text import chinese, cleaned_text_to_sequence
-
-
-language_module_map = {
- 'ZH': chinese
-}
-
-
-def clean_text(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- return norm_text, phones, tones, word2ph
-
-def clean_text_bert(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- bert = language_module.get_bert_feature(norm_text, word2ph)
- return phones, tones, bert
-
-def text_to_sequence(text, language):
- norm_text, phones, tones, word2ph = clean_text(text, language)
- return cleaned_text_to_sequence(phones, tones, language)
-
-if __name__ == '__main__':
- pass
diff --git a/spaces/Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS/app.py b/spaces/Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS/app.py
deleted file mode 100644
index ba16188d14a7ce8728e1d2ce4322a2cfa2d3afcd..0000000000000000000000000000000000000000
--- a/spaces/Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS/app.py
+++ /dev/null
@@ -1,212 +0,0 @@
-import streamlit as st
-import requests
-import cloudinary
-import cloudinary.uploader
-from PIL import Image
-import io
-from google_auth_oauthlib.flow import InstalledAppFlow
-from googleapiclient.discovery import build
-import os
-
-# Configure Cloudinary with your credentials
-cloudinary.config(
- cloud_name="dvuowbmrz",
- api_key="177664162661619",
- api_secret="qVMYel17N_C5QUUUuBIuatB5tq0"
-)
-#
-# # Set up OAuth2 client details
-# CLIENT_SECRET_FILE = 'client_secret.json'
-# SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly'] # Adjust scopes as needed
-#
-# # Set up Streamlit app
-# #st.title("Google Authentication Demo")
-#
-# # Check if the user is authenticated
-# if 'credentials' not in st.session_state:
-# #st.write("WELCOME")
-# flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPES)
-# credentials = flow.run_local_server(port=8501, authorization_prompt_message='')
-#
-# # Save credentials to a file for future use (optional)
-# with open('token.json', 'w') as token_file:
-# token_file.write(credentials.to_json())
-#
-# st.session_state.credentials = credentials
-# st.success("Authentication successful. You can now use the app.")
-#
-# # Use authenticated credentials to interact with Google API
-# credentials = st.session_state.credentials
-# service = build('drive', 'v3', credentials=credentials)
-#
-# # Fetch user's name from Google API
-# try:
-# user_info = service.about().get(fields="user").execute()
-# user_name = user_info["user"]["displayName"]
-# #st.header("Google Profile Information")
-#         st.markdown(f"Username: {user_name.upper()}", unsafe_allow_html=True)
-# except Exception as e:
-# st.error(f"Error fetching user profile: {str(e)}")
-#
-# # Your app's functionality goes here
-# # # Display Google Drive contents
-# # st.header("Google Drive Contents")
-# # results = service.files().list(pageSize=10).execute()
-# # files = results.get('files', [])
-# # if not files:
-# # st.write('No files found in Google Drive.')
-# # else:
-# # st.write('Files in Google Drive:')
-# # for file in files:
-# # st.write(f"- {file['name']} ({file['mimeType']})")
-#
-# # Logout button
-# if st.button("Logout"):
-# del st.session_state.credentials
-# os.remove("token_dir/token.json") # Remove the token file
-#
-
-
-# Set up Hugging Face API endpoint
-API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
-headers = {"Authorization": "Bearer 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"}
-
-
-def query_model_with_image(image_description):
- payload = {
- "inputs": image_description
- }
- response = requests.post(API_URL, headers=headers, json=payload)
- image_bytes = response.content
-
- image = Image.open(io.BytesIO(image_bytes))
- return image
-
-def upload_to_cloudinary(image, prompt_text):
- image_data = io.BytesIO()
- image.save(image_data, format="JPEG")
- image_data.seek(0)
-
- upload_result = cloudinary.uploader.upload(
- image_data,
- folder="compvis_app",
- public_id=prompt_text
- )
- return upload_result["secure_url"]
-
-
-def fetch_latest_images_from_cloudinary(num_images=9):
- # Use the Cloudinary Admin API to list resources
- url = f"https://api.cloudinary.com/v1_1/{cloudinary.config().cloud_name}/resources/image"
- params = {
- "max_results": num_images,
- "type": "upload"
- }
- response = requests.get(url, params=params, auth=(cloudinary.config().api_key, cloudinary.config().api_secret))
-
- if response.status_code == 200:
- images = response.json()["resources"]
- else:
- images = []
-
- return images
-
-# Streamlit app
-st.markdown("""""", unsafe_allow_html=True)
-
-st.title("Text to Image Generator")
-
-image_description = st.text_input("Enter the image description")
-
-if st.button("Generate Image"):
- processed_image = query_model_with_image(image_description)
- st.image(processed_image, use_column_width=True, output_format="JPEG") # Use use_column_width=True
- st.session_state.processed_image = processed_image
- st.session_state.image_description = image_description
- st.write("Image generated.")
-
-if st.button("Upload"):
- if 'processed_image' in st.session_state:
- uploaded_url = upload_to_cloudinary(st.session_state.processed_image, st.session_state.image_description)
- st.write("Image uploaded to Cloudinary. Prompt Text:", st.session_state.image_description)
- st.write("Image URL on Cloudinary:", uploaded_url)
- else:
- st.write("Generate an image first before uploading.")
-
-# Fetch and display the latest images from Cloudinary
-st.header("Latest Images created")
-
-# Use the 'fetch_latest_images_from_cloudinary' function to get the latest images
-latest_images = fetch_latest_images_from_cloudinary()
-
-# Define the number of columns in the grid
-num_columns = 3 # You can adjust this number as needed
-
-# Calculate the width for each column
-column_width = f"calc(33.33% - {10}px)" # Adjust the width and margin as needed
-
-# Add CSS styling for the grid and rounded images
-st.markdown(
- f"""
-
- """,
- unsafe_allow_html=True,
-)
-
-# Create the responsive grid layout
-st.markdown('', unsafe_allow_html=True)
-
-for i, image in enumerate(latest_images):
- image_url = image.get('secure_url', '') # Get the image URL
- public_id = image.get('public_id', '') # Get the full public_id
-
- # Extract just the filename (without the folder)
- filename = public_id.split('/')[-1]
-
- # Add some spacing around the image and its name
-    st.markdown(f'', unsafe_allow_html=True)
-    st.markdown(f'{filename}', unsafe_allow_html=True)
-
- # Add rounded corners to the image using HTML
- st.markdown(f'', unsafe_allow_html=True)
-
- # Add an arrow icon instead of "Download" button with black color
- download_link = f'↓'
- st.markdown(download_link, unsafe_allow_html=True)
-
- st.write("") # Add empty spaces for separation
-    st.markdown('', unsafe_allow_html=True)
-
-# Close the responsive grid layout
-st.markdown('', unsafe_allow_html=True)
\ No newline at end of file
diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/score_sde_ve/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/score_sde_ve/__init__.py
deleted file mode 100644
index 000d61f6e9b183728cb6fc137e7180cac3a616df..0000000000000000000000000000000000000000
--- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/score_sde_ve/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# flake8: noqa
-from .pipeline_score_sde_ve import ScoreSdeVePipeline
diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
deleted file mode 100644
index 40cf18131810307157a9a7d1f6d5922b00fd73d5..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from ..common.optim import SGD as optimizer
-from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
-from ..common.data.coco_panoptic_separated import dataloader
-from ..common.models.panoptic_fpn import model
-from ..common.train import train
-
-model.backbone.bottom_up.freeze_at = 2
-train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py
deleted file mode 100644
index a8714f7990f11e146a01e03d108518e0356b50c4..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import logging
-import numpy as np
-from typing import List, Optional, Union
-import torch
-
-from detectron2.config import configurable
-
-from . import detection_utils as utils
-from . import transforms as T
-
-"""
-This file contains the default mapping that's applied to "dataset dicts".
-"""
-
-__all__ = ["DatasetMapper"]
-
-
-class DatasetMapper:
- """
- A callable which takes a dataset dict in Detectron2 Dataset format,
- and map it into a format used by the model.
-
- This is the default callable to be used to map your dataset dict into training data.
- You may need to follow it to implement your own one for customized logic,
- such as a different way to read or transform images.
- See :doc:`/tutorials/data_loading` for details.
-
- The callable currently does the following:
-
- 1. Read the image from "file_name"
- 2. Applies cropping/geometric transforms to the image and annotations
- 3. Prepare data and annotations to Tensor and :class:`Instances`
- """
-
- @configurable
- def __init__(
- self,
- is_train: bool,
- *,
- augmentations: List[Union[T.Augmentation, T.Transform]],
- image_format: str,
- use_instance_mask: bool = False,
- use_keypoint: bool = False,
- instance_mask_format: str = "polygon",
- keypoint_hflip_indices: Optional[np.ndarray] = None,
- precomputed_proposal_topk: Optional[int] = None,
- recompute_boxes: bool = False,
- ):
- """
- NOTE: this interface is experimental.
-
- Args:
- is_train: whether it's used in training or inference
- augmentations: a list of augmentations or deterministic transforms to apply
- image_format: an image format supported by :func:`detection_utils.read_image`.
- use_instance_mask: whether to process instance segmentation annotations, if available
- use_keypoint: whether to process keypoint annotations if available
- instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
- masks into this format.
- keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
- precomputed_proposal_topk: if given, will load pre-computed
- proposals from dataset_dict and keep the top k proposals for each image.
- recompute_boxes: whether to overwrite bounding box annotations
- by computing tight bounding boxes from instance mask annotations.
- """
- if recompute_boxes:
- assert use_instance_mask, "recompute_boxes requires instance masks"
- # fmt: off
- self.is_train = is_train
- self.augmentations = T.AugmentationList(augmentations)
- self.image_format = image_format
- self.use_instance_mask = use_instance_mask
- self.instance_mask_format = instance_mask_format
- self.use_keypoint = use_keypoint
- self.keypoint_hflip_indices = keypoint_hflip_indices
- self.proposal_topk = precomputed_proposal_topk
- self.recompute_boxes = recompute_boxes
- # fmt: on
- logger = logging.getLogger(__name__)
- mode = "training" if is_train else "inference"
- logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
-
- @classmethod
- def from_config(cls, cfg, is_train: bool = True):
- augs = utils.build_augmentation(cfg, is_train)
- if cfg.INPUT.CROP.ENABLED and is_train:
- augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
- recompute_boxes = cfg.MODEL.MASK_ON
- else:
- recompute_boxes = False
-
- ret = {
- "is_train": is_train,
- "augmentations": augs,
- "image_format": cfg.INPUT.FORMAT,
- "use_instance_mask": cfg.MODEL.MASK_ON,
- "instance_mask_format": cfg.INPUT.MASK_FORMAT,
- "use_keypoint": cfg.MODEL.KEYPOINT_ON,
- "recompute_boxes": recompute_boxes,
- }
-
- if cfg.MODEL.KEYPOINT_ON:
- ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
-
- if cfg.MODEL.LOAD_PROPOSALS:
- ret["precomputed_proposal_topk"] = (
- cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
- if is_train
- else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
- )
- return ret
-
- def _transform_annotations(self, dataset_dict, transforms, image_shape):
- # USER: Modify this if you want to keep them for some reason.
- for anno in dataset_dict["annotations"]:
- if not self.use_instance_mask:
- anno.pop("segmentation", None)
- if not self.use_keypoint:
- anno.pop("keypoints", None)
-
- # USER: Implement additional transformations if you have other types of data
- annos = [
- utils.transform_instance_annotations(
- obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
- )
- for obj in dataset_dict.pop("annotations")
- if obj.get("iscrowd", 0) == 0
- ]
- instances = utils.annotations_to_instances(
- annos, image_shape, mask_format=self.instance_mask_format
- )
-
- # After transforms such as cropping are applied, the bounding box may no longer
- # tightly bound the object. As an example, imagine a triangle object
- # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
- # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
- # the intersection of original bounding box and the cropping box.
- if self.recompute_boxes:
- instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
- dataset_dict["instances"] = utils.filter_empty_instances(instances)
-
- def __call__(self, dataset_dict):
- """
- Args:
- dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
-
- Returns:
- dict: a format that builtin models in detectron2 accept
- """
- dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
- # USER: Write your own image loading if it's not from a file
- image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
- utils.check_image_size(dataset_dict, image)
-
- # USER: Remove if you don't do semantic/panoptic segmentation.
- if "sem_seg_file_name" in dataset_dict:
- sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
- else:
- sem_seg_gt = None
-
- aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
- transforms = self.augmentations(aug_input)
- image, sem_seg_gt = aug_input.image, aug_input.sem_seg
-
- image_shape = image.shape[:2] # h, w
- # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
- # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
- # Therefore it's important to use torch.Tensor.
- dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
- if sem_seg_gt is not None:
- dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
-
- # USER: Remove if you don't use pre-computed proposals.
- # Most users would not need this feature.
- if self.proposal_topk is not None:
- utils.transform_proposals(
- dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
- )
-
- if not self.is_train:
- # USER: Modify this if you want to keep them for some reason.
- dataset_dict.pop("annotations", None)
- dataset_dict.pop("sem_seg_file_name", None)
- return dataset_dict
-
- if "annotations" in dataset_dict:
- self._transform_annotations(dataset_dict, transforms, image_shape)
-
- return dataset_dict
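
A minimal sketch of how a mapper like this is typically plugged into a detectron2 training loader; the dataset name and config values are placeholders, assuming a COCO-style dataset already registered in `DatasetCatalog`:

```python
# Illustrative usage sketch; "coco_2017_train" is a placeholder for any
# dataset registered with detectron2.
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, build_detection_train_loader

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("coco_2017_train",)

mapper = DatasetMapper(cfg, is_train=True)          # built via from_config()
loader = build_detection_train_loader(cfg, mapper=mapper)

batch = next(iter(loader))
# each item is the dict produced by __call__ above: an "image" tensor plus,
# in training, an "instances" field with (optionally recomputed) gt_boxes
print(batch[0]["image"].shape)
```
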
diff --git a/spaces/Yuankai/ChatReviewer/get_paper_from_pdf.py b/spaces/Yuankai/ChatReviewer/get_paper_from_pdf.py
deleted file mode 100644
index 7bae3b4b7c64e691208c221c869d6a06c3023652..0000000000000000000000000000000000000000
--- a/spaces/Yuankai/ChatReviewer/get_paper_from_pdf.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import fitz, io, os
-from PIL import Image
-from collections import Counter
-import json
-import re
-
-class Paper:
- def __init__(self, path, title='', url='', abs='', authors=[]):
- # Initialize a Paper object from a PDF path
- self.url = url # paper URL
- self.path = path # PDF path
- self.section_names = [] # section titles
- self.section_texts = {} # section contents
- self.abs = abs
- self.title_page = 0
- if title == '':
- self.pdf = fitz.open(self.path) # the PDF document
- self.title = self.get_title()
- self.parse_pdf()
- else:
- self.title = title
- self.authors = authors
- self.roman_num = ["I", "II", 'III', "IV", "V", "VI", "VII", "VIII", "IIX", "IX", "X"]
- self.digit_num = [str(d + 1) for d in range(10)]
- self.first_image = ''
-
- def parse_pdf(self):
- self.pdf = fitz.open(self.path) # the PDF document
- self.text_list = [page.get_text() for page in self.pdf]
- self.all_text = ' '.join(self.text_list)
- self.extract_section_information()
- self.section_texts.update({"title": self.title})
- self.pdf.close()
-
- # Identify each chapter name based on font size and return them as a list
- def get_chapter_names(self):
- # open the PDF file
- doc = fitz.open(self.path) # the PDF document
- text_list = [page.get_text() for page in doc]
- all_text = ''
- for text in text_list:
- all_text += text
- # create an empty list to store chapter names
- chapter_names = []
- for line in all_text.split('\n'):
- line_list = line.split(' ')
- if '.' in line:
- point_split_list = line.split('.')
- space_split_list = line.split(' ')
- if 1 < len(space_split_list) < 5:
- if 1 < len(point_split_list) < 5 and (
- point_split_list[0] in self.roman_num or point_split_list[0] in self.digit_num):
- # print("line:", line)
- chapter_names.append(line)
-
- return chapter_names
-
- def get_title(self):
- doc = self.pdf # the open PDF document
- max_font_size = 0 # largest font size seen so far
- max_string = "" # text of the span with the largest font size
- max_font_sizes = [0]
- for page_index, page in enumerate(doc): # iterate over pages
- text = page.get_text("dict") # text info of the page
- blocks = text["blocks"] # list of text blocks
- for block in blocks: # iterate over text blocks
- if block["type"] == 0 and len(block['lines']): # text block
- if len(block["lines"][0]["spans"]):
- font_size = block["lines"][0]["spans"][0]["size"] # font size of the first span of the first line
- max_font_sizes.append(font_size)
- if font_size > max_font_size: # larger than the current maximum
- max_font_size = font_size # update the maximum
- max_string = block["lines"][0]["spans"][0]["text"] # and the corresponding text
- max_font_sizes.sort()
- # print("max_font_sizes", max_font_sizes[-10:])
- cur_title = ''
- for page_index, page in enumerate(doc): # iterate over pages
- text = page.get_text("dict") # text info of the page
- blocks = text["blocks"] # list of text blocks
- for block in blocks: # iterate over text blocks
- if block["type"] == 0 and len(block['lines']): # text block
- if len(block["lines"][0]["spans"]):
- cur_string = block["lines"][0]["spans"][0]["text"] # text of the first span of the first line
- font_flags = block["lines"][0]["spans"][0]["flags"] # font flags of the first span of the first line
- font_size = block["lines"][0]["spans"][0]["size"] # font size of the first span of the first line
- # print(font_size)
- if abs(font_size - max_font_sizes[-1]) < 0.3 or abs(font_size - max_font_sizes[-2]) < 0.3:
- # print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags)
- if len(cur_string) > 4 and "arXiv" not in cur_string:
- # print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags)
- if cur_title == '':
- cur_title += cur_string
- else:
- cur_title += ' ' + cur_string
- self.title_page = page_index
- # break
- title = cur_title.replace('\n', ' ')
- return title
-
- def extract_section_information(self):
- doc = fitz.open(self.path)
-
- # collect all font sizes in the document
- font_sizes = []
- for page in doc:
- blocks = page.get_text("dict")["blocks"]
- for block in blocks:
- if 'lines' not in block:
- continue
- lines = block["lines"]
- for line in lines:
- for span in line["spans"]:
- font_sizes.append(span["size"])
- most_common_size, _ = Counter(font_sizes).most_common(1)[0]
-
- # use the most common font size as the threshold for heading detection
- threshold = most_common_size * 1
-
- section_dict = {}
- last_heading = None
- subheadings = []
- heading_font = -1
- # walk every page looking for subheadings
- found_abstract = False
- upper_heading = False
- font_heading = False
- for page in doc:
- blocks = page.get_text("dict")["blocks"]
- for block in blocks:
- if not found_abstract:
- try:
- text = json.dumps(block)
- except:
- continue
- if re.search(r"\bAbstract\b", text, re.IGNORECASE):
- found_abstract = True
- last_heading = "Abstract"
- section_dict["Abstract"] = ""
- if found_abstract:
- if 'lines' not in block:
- continue
- lines = block["lines"]
- for line in lines:
- for span in line["spans"]:
- # if the current span is a subheading
- if not font_heading and span["text"].isupper() and sum(1 for c in span["text"] if c.isupper() and ('A' <= c <='Z')) > 4: # for papers whose headings use the body font size but are all uppercase
- upper_heading = True
- heading = span["text"].strip()
- if "References" in heading: # reference 以后的内容不考虑
- self.section_names = subheadings
- self.section_texts = section_dict
- return
- subheadings.append(heading)
- if last_heading is not None:
- section_dict[last_heading] = section_dict[last_heading].strip()
- section_dict[heading] = ""
- last_heading = heading
- if not upper_heading and span["size"] > threshold and re.match( # 正常情况下,通过字体大小判断
- r"[A-Z][a-z]+(?:\s[A-Z][a-z]+)*",
- span["text"].strip()):
- font_heading = True
- if heading_font == -1:
- heading_font = span["size"]
- elif heading_font != span["size"]:
- continue
- heading = span["text"].strip()
- if "References" in heading: # reference 以后的内容不考虑
- self.section_names = subheadings
- self.section_texts = section_dict
- return
- subheadings.append(heading)
- if last_heading is not None:
- section_dict[last_heading] = section_dict[last_heading].strip()
- section_dict[heading] = ""
- last_heading = heading
- # otherwise append the text to the previous subheading's section
- elif last_heading is not None:
- section_dict[last_heading] += " " + span["text"].strip()
- self.section_names = subheadings
- self.section_texts = section_dict
-
-
-def main():
- path = r'demo.pdf'
- paper = Paper(path=path)
- paper.parse_pdf()
- # for key, value in paper.section_text_dict.items():
- # print(key, value)
- # print("*"*40)
-
-
-if __name__ == '__main__':
- main()
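
The title heuristic above keys off the span font sizes reported by PyMuPDF. A standalone sketch of the same idea (the path is a placeholder, as in `main()`):

```python
# Sketch of the font-size heuristic used by get_title(); requires PyMuPDF.
import fitz

def largest_font_spans(path, top_k=3):
    doc = fitz.open(path)
    spans = []
    for page in doc:
        for block in page.get_text("dict")["blocks"]:
            if block["type"] != 0:          # skip non-text (image) blocks
                continue
            for line in block["lines"]:
                for span in line["spans"]:
                    spans.append((span["size"], span["text"].strip()))
    doc.close()
    # the largest spans are usually the title and top-level headings
    return sorted(spans, key=lambda s: s[0], reverse=True)[:top_k]

print(largest_font_spans("demo.pdf"))       # placeholder path
```
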
diff --git a/spaces/Yuelili/RealNagrse/realesrgan/train.py b/spaces/Yuelili/RealNagrse/realesrgan/train.py
deleted file mode 100644
index 8a9cec9ed80d9f362984779548dcec921a636a04..0000000000000000000000000000000000000000
--- a/spaces/Yuelili/RealNagrse/realesrgan/train.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-import os.path as osp
-from basicsr.train import train_pipeline
-
-import realesrgan.archs
-import realesrgan.data
-import realesrgan.models
-
-if __name__ == '__main__':
- root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
- train_pipeline(root_path)
diff --git a/spaces/Yukki-Yui/moe-tts/export_model.py b/spaces/Yukki-Yui/moe-tts/export_model.py
deleted file mode 100644
index 98a49835df5a7a2486e76ddf94fbbb4444b52203..0000000000000000000000000000000000000000
--- a/spaces/Yukki-Yui/moe-tts/export_model.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import torch
-
-if __name__ == '__main__':
- model_path = "saved_model/11/model.pth"
- output_path = "saved_model/11/model1.pth"
- checkpoint_dict = torch.load(model_path, map_location='cpu')
- checkpoint_dict_new = {}
- for k, v in checkpoint_dict.items():
- if k == "optimizer":
- print("remove optimizer")
- continue
- checkpoint_dict_new[k] = v
- torch.save(checkpoint_dict_new, output_path)
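
To confirm the export behaves as intended, a small check (paths reuse the placeholders from the script above):

```python
# Verify the stripped checkpoint written by the script above.
import torch

ckpt = torch.load("saved_model/11/model1.pth", map_location="cpu")
assert "optimizer" not in ckpt      # optimizer state was dropped
print(sorted(ckpt.keys()))          # remaining entries (model weights, etc.)
```
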
diff --git a/spaces/Zaid/whisper-large-v2-ar/README.md b/spaces/Zaid/whisper-large-v2-ar/README.md
deleted file mode 100644
index 69c9369e306b8bba2cf6e99fdba85897526cf4bb..0000000000000000000000000000000000000000
--- a/spaces/Zaid/whisper-large-v2-ar/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Whisper large Arabic
-emoji: 🐠
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: arbml/whisper-small-ar-1000
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_72.md b/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_72.md
deleted file mode 100644
index 1881308b69f355cd645e594b8db7ab1d20367324..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/managed-datahub/release-notes/v_0_1_72.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# v0.1.72
----
-
-Release Availability Date
----
-18 Jan 2023
-
-
-## Release Changelog
----
-- Since `v0.1.70` these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/43c566ee4ff2ee950a4f845c2fd8a1c690c1d607...afaee58ded40dc4cf39f94f1b4331ceb0a4d93eb have been pulled in
-- add GZip compression to lineage cache
-- Make browse paths upgrade non-blocking
-
-## Special Notes
----
-- If you face login issues, please clear your cookies: this release includes security updates that may cause login problems until cookies are cleared.
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/fp16_utils.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/fp16_utils.py
deleted file mode 100644
index 1981011d6859192e3e663e29d13500d56ba47f6c..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/fp16_utils.py
+++ /dev/null
@@ -1,410 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import functools
-import warnings
-from collections import abc
-from inspect import getfullargspec
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
-from .dist_utils import allreduce_grads as _allreduce_grads
-
-try:
- # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported
- # and used; otherwise, auto fp16 will adopt mmcv's implementation.
- # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16
- # manually, so the behavior may not be consistent with real amp.
- from torch.cuda.amp import autocast
-except ImportError:
- pass
-
-
-def cast_tensor_type(inputs, src_type, dst_type):
- """Recursively convert Tensor in inputs from src_type to dst_type.
-
- Args:
- inputs: Inputs to be cast.
- src_type (torch.dtype): Source type.
- dst_type (torch.dtype): Destination type.
-
- Returns:
- The same type as inputs, with all contained Tensors cast to dst_type.
- """
- if isinstance(inputs, nn.Module):
- return inputs
- elif isinstance(inputs, torch.Tensor):
- return inputs.to(dst_type)
- elif isinstance(inputs, str):
- return inputs
- elif isinstance(inputs, np.ndarray):
- return inputs
- elif isinstance(inputs, abc.Mapping):
- return type(inputs)({
- k: cast_tensor_type(v, src_type, dst_type)
- for k, v in inputs.items()
- })
- elif isinstance(inputs, abc.Iterable):
- return type(inputs)(
- cast_tensor_type(item, src_type, dst_type) for item in inputs)
- else:
- return inputs
-
-
-def auto_fp16(apply_to=None, out_fp32=False):
- """Decorator to enable fp16 training automatically.
-
- This decorator is useful when you write custom modules and want to support
- mixed precision training. If input arguments are fp32 tensors, they will
- be converted to fp16 automatically. Arguments other than fp32 tensors are
- ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
- backend, otherwise, original mmcv implementation will be adopted.
-
- Args:
- apply_to (Iterable, optional): The argument names to be converted.
- `None` indicates all arguments.
- out_fp32 (bool): Whether to convert the output back to fp32.
-
- Example:
-
- >>> import torch.nn as nn
- >>> class MyModule1(nn.Module):
- >>>
- >>> # Convert x and y to fp16
- >>> @auto_fp16()
- >>> def forward(self, x, y):
- >>> pass
-
- >>> import torch.nn as nn
- >>> class MyModule2(nn.Module):
- >>>
- >>> # convert pred to fp16
- >>> @auto_fp16(apply_to=('pred', ))
- >>> def do_something(self, pred, others):
- >>> pass
- """
-
- def auto_fp16_wrapper(old_func):
-
- @functools.wraps(old_func)
- def new_func(*args, **kwargs):
- # check if the module has set the attribute `fp16_enabled`, if not,
- # just fallback to the original method.
- if not isinstance(args[0], torch.nn.Module):
- raise TypeError('@auto_fp16 can only be used to decorate the '
- 'method of nn.Module')
- if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
- return old_func(*args, **kwargs)
-
- # get the arg spec of the decorated method
- args_info = getfullargspec(old_func)
- # get the argument names to be cast
- args_to_cast = args_info.args if apply_to is None else apply_to
- # convert the args that need to be processed
- new_args = []
- # NOTE: default args are not taken into consideration
- if args:
- arg_names = args_info.args[:len(args)]
- for i, arg_name in enumerate(arg_names):
- if arg_name in args_to_cast:
- new_args.append(
- cast_tensor_type(args[i], torch.float, torch.half))
- else:
- new_args.append(args[i])
- # convert the kwargs that need to be processed
- new_kwargs = {}
- if kwargs:
- for arg_name, arg_value in kwargs.items():
- if arg_name in args_to_cast:
- new_kwargs[arg_name] = cast_tensor_type(
- arg_value, torch.float, torch.half)
- else:
- new_kwargs[arg_name] = arg_value
- # apply converted arguments to the decorated method
- if (TORCH_VERSION != 'parrots' and
- digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
- with autocast(enabled=True):
- output = old_func(*new_args, **new_kwargs)
- else:
- output = old_func(*new_args, **new_kwargs)
- # cast the results back to fp32 if necessary
- if out_fp32:
- output = cast_tensor_type(output, torch.half, torch.float)
- return output
-
- return new_func
-
- return auto_fp16_wrapper
-
-
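
Both decorators above and below are no-ops unless the module instance sets `fp16_enabled` (see the `hasattr` check in the wrapper). A usage sketch for `@auto_fp16`; the module and shapes are made up, and the decorator is assumed importable as `mmcv.runner.auto_fp16` (mmcv < 2.0):

```python
# Sketch only: @auto_fp16 falls back to the plain method unless
# fp16_enabled is True on the module instance (normally set by wrap_fp16_model).
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16   # same decorator as defined above (mmcv < 2.0)

class TinyHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.fp16_enabled = False    # wrap_fp16_model() flips this to True
        self.fc = nn.Linear(8, 2)

    @auto_fp16(apply_to=('x',))
    def forward(self, x):
        # with fp16_enabled=True, x arrives here as a half-precision tensor
        return self.fc(x)

head = TinyHead()
head.fp16_enabled = True             # emulate wrap_fp16_model for this sketch
if torch.cuda.is_available():        # half precision generally needs a GPU
    head = head.cuda().half()
    out = head(torch.randn(4, 8, device="cuda"))
```
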
-def force_fp32(apply_to=None, out_fp16=False):
- """Decorator to convert input arguments to fp32 in force.
-
- This decorator is useful when you write custom modules and want to support
- mixed precision training. If there are some inputs that must be processed
- in fp32 mode, then this decorator can handle it. If input arguments are
- fp16 tensors, they will be converted to fp32 automatically. Arguments other
- than fp16 tensors are ignored. If you are using PyTorch >= 1.6,
- torch.cuda.amp is used as the backend, otherwise, original mmcv
- implementation will be adopted.
-
- Args:
- apply_to (Iterable, optional): The argument names to be converted.
- `None` indicates all arguments.
- out_fp16 (bool): Whether to convert the output back to fp16.
-
- Example:
-
- >>> import torch.nn as nn
- >>> class MyModule1(nn.Module):
- >>>
- >>> # Convert x and y to fp32
- >>> @force_fp32()
- >>> def loss(self, x, y):
- >>> pass
-
- >>> import torch.nn as nn
- >>> class MyModule2(nn.Module):
- >>>
- >>> # convert pred to fp32
- >>> @force_fp32(apply_to=('pred', ))
- >>> def post_process(self, pred, others):
- >>> pass
- """
-
- def force_fp32_wrapper(old_func):
-
- @functools.wraps(old_func)
- def new_func(*args, **kwargs):
- # check if the module has set the attribute `fp16_enabled`, if not,
- # just fallback to the original method.
- if not isinstance(args[0], torch.nn.Module):
- raise TypeError('@force_fp32 can only be used to decorate the '
- 'method of nn.Module')
- if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
- return old_func(*args, **kwargs)
- # get the arg spec of the decorated method
- args_info = getfullargspec(old_func)
- # get the argument names to be cast
- args_to_cast = args_info.args if apply_to is None else apply_to
- # convert the args that need to be processed
- new_args = []
- if args:
- arg_names = args_info.args[:len(args)]
- for i, arg_name in enumerate(arg_names):
- if arg_name in args_to_cast:
- new_args.append(
- cast_tensor_type(args[i], torch.half, torch.float))
- else:
- new_args.append(args[i])
- # convert the kwargs that need to be processed
- new_kwargs = dict()
- if kwargs:
- for arg_name, arg_value in kwargs.items():
- if arg_name in args_to_cast:
- new_kwargs[arg_name] = cast_tensor_type(
- arg_value, torch.half, torch.float)
- else:
- new_kwargs[arg_name] = arg_value
- # apply converted arguments to the decorated method
- if (TORCH_VERSION != 'parrots' and
- digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
- with autocast(enabled=False):
- output = old_func(*new_args, **new_kwargs)
- else:
- output = old_func(*new_args, **new_kwargs)
- # cast the results back to fp32 if necessary
- if out_fp16:
- output = cast_tensor_type(output, torch.float, torch.half)
- return output
-
- return new_func
-
- return force_fp32_wrapper
-
-
-def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
- warnings.warn(
- '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be '
- 'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads"')
- _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)
-
-
-def wrap_fp16_model(model):
- """Wrap the FP32 model to FP16.
-
- If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
- backend, otherwise, original mmcv implementation will be adopted.
-
- For PyTorch >= 1.6, this function will
- 1. Set fp16 flag inside the model to True.
-
- Otherwise:
- 1. Convert FP32 model to FP16.
- 2. Keep some necessary layers (e.g., normalization layers) in FP32.
- 3. Set `fp16_enabled` flag inside the model to True.
-
- Args:
- model (nn.Module): Model in FP32.
- """
- if (TORCH_VERSION == 'parrots'
- or digit_version(TORCH_VERSION) < digit_version('1.6.0')):
- # convert model to fp16
- model.half()
- # patch the normalization layers to make it work in fp32 mode
- patch_norm_fp32(model)
- # set `fp16_enabled` flag
- for m in model.modules():
- if hasattr(m, 'fp16_enabled'):
- m.fp16_enabled = True
-
-
-def patch_norm_fp32(module):
- """Recursively convert normalization layers from FP16 to FP32.
-
- Args:
- module (nn.Module): The modules to be converted in FP16.
-
- Returns:
- nn.Module: The converted module, the normalization layers have been
- converted to FP32.
- """
- if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
- module.float()
- if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
- module.forward = patch_forward_method(module.forward, torch.half,
- torch.float)
- for child in module.children():
- patch_norm_fp32(child)
- return module
-
-
-def patch_forward_method(func, src_type, dst_type, convert_output=True):
- """Patch the forward method of a module.
-
- Args:
- func (callable): The original forward method.
- src_type (torch.dtype): Type of input arguments to be converted from.
- dst_type (torch.dtype): Type of input arguments to be converted to.
- convert_output (bool): Whether to convert the output back to src_type.
-
- Returns:
- callable: The patched forward method.
- """
-
- def new_forward(*args, **kwargs):
- output = func(*cast_tensor_type(args, src_type, dst_type),
- **cast_tensor_type(kwargs, src_type, dst_type))
- if convert_output:
- output = cast_tensor_type(output, dst_type, src_type)
- return output
-
- return new_forward
-
-
-class LossScaler:
- """Class that manages loss scaling in mixed precision training which
- supports both dynamic or static mode.
-
- The implementation refers to
- https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py.
- Dynamic loss scaling is selected by supplying ``mode='dynamic'``.
- It's important to understand how :class:`LossScaler` operates.
- Loss scaling is designed to combat the problem of underflowing
- gradients encountered at long times when training fp16 networks.
- Dynamic loss scaling begins by attempting a very high loss
- scale. Ironically, this may result in OVERflowing gradients.
- If overflowing gradients are encountered, :class:`FP16_Optimizer` then
- skips the update step for this particular iteration/minibatch,
- and :class:`LossScaler` adjusts the loss scale to a lower value.
- If a certain number of iterations occur without overflowing gradients
- detected, :class:`LossScaler` increases the loss scale once more.
- In this way :class:`LossScaler` attempts to "ride the edge" of always
- using the highest loss scale possible without incurring overflow.
-
- Args:
- init_scale (float): Initial loss scale value, default: 2**32.
- scale_factor (float): Factor used when adjusting the loss scale.
- Default: 2.
- mode (str): Loss scaling mode. 'dynamic' or 'static'
- scale_window (int): Number of consecutive iterations without an
- overflow to wait before increasing the loss scale. Default: 1000.
- """
-
- def __init__(self,
- init_scale=2**32,
- mode='dynamic',
- scale_factor=2.,
- scale_window=1000):
- self.cur_scale = init_scale
- self.cur_iter = 0
- assert mode in ('dynamic',
- 'static'), 'mode can only be dynamic or static'
- self.mode = mode
- self.last_overflow_iter = -1
- self.scale_factor = scale_factor
- self.scale_window = scale_window
-
- def has_overflow(self, params):
- """Check if params contain overflow."""
- if self.mode != 'dynamic':
- return False
- for p in params:
- if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data):
- return True
- return False
-
- @staticmethod
- def _has_inf_or_nan(x):
- """Check if params contain NaN."""
- try:
- cpu_sum = float(x.float().sum())
- except RuntimeError as instance:
- if 'value cannot be converted' not in instance.args[0]:
- raise
- return True
- else:
- if cpu_sum == float('inf') or cpu_sum == -float('inf') \
- or cpu_sum != cpu_sum:
- return True
- return False
-
- def update_scale(self, overflow):
- """update the current loss scale value when overflow happens."""
- if self.mode != 'dynamic':
- return
- if overflow:
- self.cur_scale = max(self.cur_scale / self.scale_factor, 1)
- self.last_overflow_iter = self.cur_iter
- else:
- if (self.cur_iter - self.last_overflow_iter) % \
- self.scale_window == 0:
- self.cur_scale *= self.scale_factor
- self.cur_iter += 1
-
- def state_dict(self):
- """Returns the state of the scaler as a :class:`dict`."""
- return dict(
- cur_scale=self.cur_scale,
- cur_iter=self.cur_iter,
- mode=self.mode,
- last_overflow_iter=self.last_overflow_iter,
- scale_factor=self.scale_factor,
- scale_window=self.scale_window)
-
- def load_state_dict(self, state_dict):
- """Loads the loss_scaler state dict.
-
- Args:
- state_dict (dict): scaler state.
- """
- self.cur_scale = state_dict['cur_scale']
- self.cur_iter = state_dict['cur_iter']
- self.mode = state_dict['mode']
- self.last_overflow_iter = state_dict['last_overflow_iter']
- self.scale_factor = state_dict['scale_factor']
- self.scale_window = state_dict['scale_window']
-
- @property
- def loss_scale(self):
- return self.cur_scale
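
A sketch of one training step with dynamic loss scaling driven by the `LossScaler` API above; `model`, `optimizer`, and `loss` are placeholders, and in practice mmcv's fp16 hooks handle this for you:

```python
# Dynamic loss scaling, sketched directly against the LossScaler class above.
scaler = LossScaler(init_scale=2**16, mode='dynamic', scale_window=500)

def fp16_step(model, optimizer, loss):
    optimizer.zero_grad()
    (loss * scaler.loss_scale).backward()     # scale up to avoid fp16 underflow
    params = [p for p in model.parameters() if p.grad is not None]
    overflow = scaler.has_overflow(params)
    if not overflow:
        for p in params:                      # unscale before the update
            p.grad.data.div_(scaler.loss_scale)
        optimizer.step()
    scaler.update_scale(overflow)             # shrink on overflow, grow after
    return overflow                           # scale_window clean iterations
```
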
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/cornernet.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/cornernet.py
deleted file mode 100644
index bb8ccc1465ab66d1615ca16701a533a22b156295..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/cornernet.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import torch
-
-from mmdet.core import bbox2result, bbox_mapping_back
-from ..builder import DETECTORS
-from .single_stage import SingleStageDetector
-
-
-@DETECTORS.register_module()
-class CornerNet(SingleStageDetector):
- """CornerNet.
-
- This detector is the implementation of the paper `CornerNet: Detecting
- Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_.
- """
-
- def __init__(self,
- backbone,
- neck,
- bbox_head,
- train_cfg=None,
- test_cfg=None,
- pretrained=None):
- super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
- test_cfg, pretrained)
-
- def merge_aug_results(self, aug_results, img_metas):
- """Merge augmented detection bboxes and score.
-
- Args:
- aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
- image.
- img_metas (list[list[dict]]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
-
- Returns:
- tuple: (bboxes, labels)
- """
- recovered_bboxes, aug_labels = [], []
- for bboxes_labels, img_info in zip(aug_results, img_metas):
- img_shape = img_info[0]['img_shape'] # using shape before padding
- scale_factor = img_info[0]['scale_factor']
- flip = img_info[0]['flip']
- bboxes, labels = bboxes_labels
- bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
- bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
- recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
- aug_labels.append(labels)
-
- bboxes = torch.cat(recovered_bboxes, dim=0)
- labels = torch.cat(aug_labels)
-
- if bboxes.shape[0] > 0:
- out_bboxes, out_labels = self.bbox_head._bboxes_nms(
- bboxes, labels, self.bbox_head.test_cfg)
- else:
- out_bboxes, out_labels = bboxes, labels
-
- return out_bboxes, out_labels
-
- def aug_test(self, imgs, img_metas, rescale=False):
- """Augment testing of CornerNet.
-
- Args:
- imgs (list[Tensor]): Augmented images.
- img_metas (list[list[dict]]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
-
- Note:
- ``imgs`` must include flipped image pairs.
-
- Returns:
- list[list[np.ndarray]]: BBox results of each image and classes.
- The outer list corresponds to each image. The inner list
- corresponds to each class.
- """
- img_inds = list(range(len(imgs)))
-
- assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
- 'aug test must have flipped image pair')
- aug_results = []
- for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
- img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
- x = self.extract_feat(img_pair)
- outs = self.bbox_head(x)
- bbox_list = self.bbox_head.get_bboxes(
- *outs, [img_metas[ind], img_metas[flip_ind]], False, False)
- aug_results.append(bbox_list[0])
- aug_results.append(bbox_list[1])
-
- bboxes, labels = self.merge_aug_results(aug_results, img_metas)
- bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
-
- return [bbox_results]
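
`merge_aug_results` relies on mapping detections from the horizontally flipped image back into the original frame (via `bbox_mapping_back`). The core coordinate flip, sketched in plain PyTorch rather than the mmdet helper:

```python
# Minimal sketch of the horizontal-flip mapping applied to the flipped half
# of each image pair (plain PyTorch, no mmdet required).
import torch

def flip_boxes_back(bboxes, img_width):
    """bboxes: (N, 4) tensor in (x1, y1, x2, y2) of the flipped image."""
    flipped = bboxes.clone()
    flipped[:, 0] = img_width - bboxes[:, 2]   # new x1 = W - old x2
    flipped[:, 2] = img_width - bboxes[:, 0]   # new x2 = W - old x1
    return flipped

boxes = torch.tensor([[10., 20., 50., 80.]])
print(flip_boxes_back(boxes, img_width=640))   # tensor([[590., 20., 630., 80.]])
```
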
diff --git a/spaces/abidlabs/keras-image-classifier/app.py b/spaces/abidlabs/keras-image-classifier/app.py
deleted file mode 100644
index 704062af215e339b654b0eca4b874155768f863e..0000000000000000000000000000000000000000
--- a/spaces/abidlabs/keras-image-classifier/app.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import requests
-import tensorflow as tf
-
-inception_net = tf.keras.applications.MobileNetV2()
-
-# Download human-readable labels for ImageNet.
-response = requests.get("https://git.io/JJkYN")
-labels = response.text.split("\n")
-
-
-def classify_image(inp):
- inp = inp.reshape((-1, 224, 224, 3))
- inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
- prediction = inception_net.predict(inp).flatten()
- confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
- return confidences
-
-
-import gradio as gr
-
-gr.Interface(fn=classify_image,
- inputs=gr.inputs.Image(shape=(224, 224)),
- outputs=gr.outputs.Label(num_top_classes=3),
- examples=["banana.jpg", "car.jpg"],
- theme="default",
- css=".footer{display:none !important}").launch()
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/app/base.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/app/base.py
deleted file mode 100644
index be2c641580d80f0868452ad4a127456b4deccf30..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/app/base.py
+++ /dev/null
@@ -1,314 +0,0 @@
-import sys
-import queue
-import threading
-
-from pyglet import app
-from pyglet import clock
-from pyglet import event
-
-_is_pyglet_doc_run = hasattr(sys, "is_pyglet_doc_run") and sys.is_pyglet_doc_run
-
-
-class PlatformEventLoop:
- """ Abstract class, implementation depends on platform.
-
- .. versionadded:: 1.2
- """
- def __init__(self):
- self._event_queue = queue.Queue()
- self._is_running = threading.Event()
-
- def is_running(self):
- """Return True if the event loop is currently processing, or False
- if it is blocked or not activated.
-
- :rtype: bool
- """
- return self._is_running.is_set()
-
- def post_event(self, dispatcher, event, *args):
- """Post an event into the main application thread.
-
- The event is queued internally until the :py:meth:`run` method's thread
- is able to dispatch the event. This method can be safely called
- from any thread.
-
- If the method is called from the :py:meth:`run` method's thread (for
- example, from within an event handler), the event may be dispatched
- within the same runloop iteration or the next one; the choice is
- nondeterministic.
-
- :Parameters:
- `dispatcher` : EventDispatcher
- Dispatcher to process the event.
- `event` : str
- Event name.
- `args` : sequence
- Arguments to pass to the event handlers.
-
- """
- self._event_queue.put((dispatcher, event, args))
- self.notify()
-
- def dispatch_posted_events(self):
- """Immediately dispatch all pending events.
-
- Normally this is called automatically by the runloop iteration.
- """
- while True:
- try:
- dispatcher, evnt, args = self._event_queue.get(False)
- dispatcher.dispatch_event(evnt, *args)
- except queue.Empty:
- break
- except ReferenceError:
- # weakly-referenced object no longer exists
- pass
-
- def notify(self):
- """Notify the event loop that something needs processing.
-
- If the event loop is blocked, it will unblock and perform an iteration
- immediately. If the event loop is running, another iteration is
- scheduled for immediate execution afterwards.
- """
- raise NotImplementedError('abstract')
-
- def start(self):
- pass
-
- def step(self, timeout=None):
- raise NotImplementedError('abstract')
-
- def set_timer(self, func, interval):
- pass
-
- def stop(self):
- pass
-
-
-class EventLoop(event.EventDispatcher):
- """The main run loop of the application.
-
- Calling `run` begins the application event loop, which processes
- operating system events, calls :py:func:`pyglet.clock.tick` to call
- scheduled functions and calls :py:meth:`pyglet.window.Window.on_draw` and
- :py:meth:`pyglet.window.Window.flip` to update window contents.
-
- Applications can subclass :py:class:`EventLoop` and override certain methods
- to integrate another framework's run loop, or to customise processing
- in some other way. You should not in general override :py:meth:`run`, as
- this method contains platform-specific code that ensures the application
- remains responsive to the user while keeping CPU usage to a minimum.
- """
-
- _has_exit_condition = None
- _has_exit = False
-
- def __init__(self):
- self._has_exit_condition = threading.Condition()
- self.clock = clock.get_default()
- self.is_running = False
-
- @staticmethod
- def _redraw_windows(dt):
- # Redraw all windows
- for window in app.windows:
- window.switch_to()
- window.dispatch_event('on_draw')
- window.dispatch_event('on_refresh', dt)
- window.flip()
-
- def run(self, interval=1/60):
- """Begin processing events, scheduled functions and window updates.
-
- This method returns when :py:attr:`has_exit` is set to True.
-
- Developers are discouraged from overriding this method, as the
- implementation is platform-specific.
- """
- if not interval:
- self.clock.schedule(self._redraw_windows)
- else:
- self.clock.schedule_interval(self._redraw_windows, interval)
-
- self.has_exit = False
-
- from pyglet.window import Window
- Window._enable_event_queue = False
-
- # Dispatch pending events
- for window in app.windows:
- window.switch_to()
- window.dispatch_pending_events()
-
- platform_event_loop = app.platform_event_loop
- platform_event_loop.start()
- self.dispatch_event('on_enter')
- self.is_running = True
-
- while not self.has_exit:
- timeout = self.idle()
- platform_event_loop.step(timeout)
-
- self.is_running = False
- self.dispatch_event('on_exit')
- platform_event_loop.stop()
-
- def enter_blocking(self):
- """Called by pyglet internal processes when the operating system
- is about to block due to a user interaction. For example, this
- is common when the user begins resizing or moving a window.
-
- This method provides the event loop with an opportunity to set up
- an OS timer on the platform event loop, which will continue to
- be invoked during the blocking operation.
-
- The default implementation ensures that :py:meth:`idle` continues to be
- called as documented.
-
- .. versionadded:: 1.2
- """
- timeout = self.idle()
- app.platform_event_loop.set_timer(self._blocking_timer, timeout)
-
- @staticmethod
- def exit_blocking():
- """Called by pyglet internal processes when the blocking operation
- completes. See :py:meth:`enter_blocking`.
- """
- app.platform_event_loop.set_timer(None, None)
-
- def _blocking_timer(self):
- timeout = self.idle()
- app.platform_event_loop.set_timer(self._blocking_timer, timeout)
-
- def idle(self):
- """Called during each iteration of the event loop.
-
- The method is called immediately after any window events (i.e., after
- any user input). The method can return a duration after which
- the idle method will be called again. The method may be called
- earlier if the user creates more input events. The method
- can return `None` to only wait for user events.
-
- For example, return ``1.0`` to have the idle method called every
- second, or immediately after any user events.
-
- The default implementation dispatches the
- :py:meth:`pyglet.window.Window.on_draw` event for all windows and uses
- :py:func:`pyglet.clock.tick` and :py:func:`pyglet.clock.get_sleep_time`
- on the default clock to determine the return value.
-
- This method should be overridden by advanced users only. To have
- code execute at regular intervals, use the
- :py:func:`pyglet.clock.schedule` methods.
-
- :rtype: float
- :return: The number of seconds before the idle method should
- be called again, or `None` to block for user input.
- """
- dt = self.clock.update_time()
- self.clock.call_scheduled_functions(dt)
-
- # Update timeout
- return self.clock.get_sleep_time(True)
-
- @property
- def has_exit(self):
- """Flag indicating if the event loop will exit in
- the next iteration. When set, all waiting threads are interrupted (see
- :py:meth:`sleep`).
-
- Thread-safe since pyglet 1.2.
-
- :see: `exit`
- :type: bool
- """
- self._has_exit_condition.acquire()
- result = self._has_exit
- self._has_exit_condition.release()
- return result
-
- @has_exit.setter
- def has_exit(self, value):
- self._has_exit_condition.acquire()
- self._has_exit = value
- self._has_exit_condition.notify()
- self._has_exit_condition.release()
-
- def exit(self):
- """Safely exit the event loop at the end of the current iteration.
-
- This method is a thread-safe equivalent for setting
- :py:attr:`has_exit` to ``True``. All waiting threads will be
- interrupted (see :py:meth:`sleep`).
- """
- self.has_exit = True
- app.platform_event_loop.notify()
-
- def sleep(self, timeout):
- """Wait for some amount of time, or until the :py:attr:`has_exit` flag
- is set or :py:meth:`exit` is called.
-
- This method is thread-safe.
-
- :Parameters:
- `timeout` : float
- Time to wait, in seconds.
-
- .. versionadded:: 1.2
-
- :rtype: bool
- :return: ``True`` if the `has_exit` flag is set, otherwise ``False``.
- """
- self._has_exit_condition.acquire()
- self._has_exit_condition.wait(timeout)
- result = self._has_exit
- self._has_exit_condition.release()
- return result
-
- def on_window_close(self, window):
- """Default window close handler."""
- if len(app.windows) == 0:
- self.exit()
-
- if _is_pyglet_doc_run:
- def on_window_close(self, window):
- """A window was closed.
-
- This event is dispatched when a window is closed. It is not
- dispatched if the window's close button was pressed but the
- window did not close.
-
- The default handler calls :py:meth:`exit` if no more windows are
- open. You can override this handler to base your application exit
- on some other policy.
-
- :event:
- """
-
- def on_enter(self):
- """The event loop is about to begin.
-
- This is dispatched when the event loop is prepared to enter
- the main run loop, and represents the last chance for an
- application to initialise itself.
-
- :event:
- """
-
- def on_exit(self):
- """The event loop is about to exit.
-
- After dispatching this event, the :py:meth:`run` method returns (the
- application may not actually exit if you have more code
- following the :py:meth:`run` invocation).
-
- :event:
- """
-
-
-EventLoop.register_event_type('on_window_close')
-EventLoop.register_event_type('on_enter')
-EventLoop.register_event_type('on_exit')
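
From application code the loop above is normally driven through `pyglet.app.run()`; a minimal sketch (window size, caption, and tick rate are arbitrary):

```python
# Minimal sketch of driving pyglet's event loop from application code.
import pyglet

window = pyglet.window.Window(width=320, height=240, caption="demo")
label = pyglet.text.Label("hello", x=160, y=120,
                          anchor_x="center", anchor_y="center")

@window.event
def on_draw():
    window.clear()
    label.draw()

def tick(dt):
    # scheduled callbacks are serviced via the clock inside EventLoop.idle()
    pass

pyglet.clock.schedule_interval(tick, 1 / 30)
pyglet.app.run()   # creates and runs the EventLoop defined above
```
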
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/graphics/allocation.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/graphics/allocation.py
deleted file mode 100644
index 39b8965ae33abb99bbdb100409da041d72608556..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/graphics/allocation.py
+++ /dev/null
@@ -1,365 +0,0 @@
-"""Memory allocation algorithm for vertex arrays and buffers.
-
-The region allocator is used to allocate vertex indices within a vertex
-domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
-by :py:mod:`pyglet.graphics.vertexbuffer`.)
-
-The allocator will at times request more space from the buffers. The current
-policy is to double the buffer size when there is not enough room to fulfil an
-allocation. The buffer is never resized smaller.
-
-The allocator maintains references to free space only; it is the caller's
-responsibility to maintain the allocated regions.
-"""
-
-# Common cases:
-# -regions will be the same size (instances of same object, e.g. sprites)
-# -regions will not usually be resized (only exception is text)
-# -alignment of 4 vertices (glyphs, sprites, images, ...)
-#
-# Optimise for:
-# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
-# -finding large blocks of allocated regions quickly (for drawing)
-# -finding block of unallocated space is the _uncommon_ case!
-#
-# Decisions:
-# -don't over-allocate regions to any alignment -- this would require more
-# work in finding the allocated spaces (for drawing) and would result in
-# more entries in glMultiDrawArrays
-# -don't move blocks when they truncate themselves. try not to allocate the
-# space they freed too soon (they will likely need grow back into it later,
-# and growing will usually require a reallocation).
-# -allocator does not track individual allocated regions. Trusts caller
-# to provide accurate (start, size) tuple, which completely describes
-# a region from the allocator's point of view.
-# -this means that compacting is probably not feasible, or would be hideously
-# expensive
-
-
-class AllocatorMemoryException(Exception):
- """The buffer is not large enough to fulfil an allocation.
-
- Raised by `Allocator` methods when the operation failed due to
- lack of buffer space. The buffer should be increased to at least
- requested_capacity and then the operation retried (guaranteed to
- pass second time).
- """
-
- def __init__(self, requested_capacity):
- self.requested_capacity = requested_capacity
-
-
-class Allocator:
- """Buffer space allocation implementation."""
-
- __slots__ = 'capacity', 'starts', 'sizes'
-
- def __init__(self, capacity):
- """Create an allocator for a buffer of the specified capacity.
-
- :Parameters:
- `capacity` : int
- Maximum size of the buffer.
-
- """
- self.capacity = capacity
-
- # Allocated blocks. Start index and size in parallel lists.
- #
- # # = allocated, - = free
- #
- # 0 3 5 15 20 24 40
- # |###--##########-----####----------------------|
- #
- # starts = [0, 5, 20]
- # sizes = [3, 10, 4]
- #
- # To calculate free blocks:
- # for i in range(0, len(starts)):
- # free_start[i] = starts[i] + sizes[i]
- # free_size[i] = starts[i+1] - free_start[i]
- # free_size[i+1] = self.capacity - free_start[-1]
-
- self.starts = []
- self.sizes = []
-
- def set_capacity(self, size):
- """Resize the maximum buffer size.
-
- The capacity cannot be reduced.
-
- :Parameters:
- `size` : int
- New maximum size of the buffer.
-
- """
- assert size > self.capacity
- self.capacity = size
-
- def alloc(self, size):
- """Allocate memory in the buffer.
-
- Raises `AllocatorMemoryException` if the allocation cannot be
- fulfilled.
-
- :Parameters:
- `size` : int
- Size of region to allocate.
-
- :rtype: int
- :return: Starting index of the allocated region.
- """
- assert size >= 0
-
- if size == 0:
- return 0
-
- # Return start, or raise AllocatorMemoryException
- if not self.starts:
- if size <= self.capacity:
- self.starts.append(0)
- self.sizes.append(size)
- return 0
- else:
- raise AllocatorMemoryException(size)
-
- # Restart from zero if space exists
- if self.starts[0] > size:
- self.starts.insert(0, 0)
- self.sizes.insert(0, size)
- return 0
-
- # Allocate in a free space
- free_start = self.starts[0] + self.sizes[0]
- for i, (alloc_start, alloc_size) in enumerate(zip(self.starts[1:], self.sizes[1:])):
- # Danger!
- # i is actually index - 1 because of slicing above...
- # starts[i] points to the block before this free space
- # starts[i+1] points to the block after this free space, and is always valid.
- free_size = alloc_start - free_start
- if free_size == size:
- # Merge previous block with this one (removing this free space)
- self.sizes[i] += free_size + alloc_size
- del self.starts[i+1]
- del self.sizes[i+1]
- return free_start
- elif free_size > size:
- # Increase size of previous block to intrude into this free
- # space.
- self.sizes[i] += size
- return free_start
- free_start = alloc_start + alloc_size
-
- # Allocate at end of capacity
- free_size = self.capacity - free_start
- if free_size >= size:
- self.sizes[-1] += size
- return free_start
-
- raise AllocatorMemoryException(self.capacity + size - free_size)
-
- def realloc(self, start, size, new_size):
- """Reallocate a region of the buffer.
-
- This is more efficient than separate `dealloc` and `alloc` calls, as
- the region can often be resized in-place.
-
- Raises `AllocatorMemoryException` if the allocation cannot be
- fulfilled.
-
- :Parameters:
- `start` : int
- Current starting index of the region.
- `size` : int
- Current size of the region.
- `new_size` : int
- New size of the region.
-
- """
- assert size >= 0 and new_size >= 0
-
- if new_size == 0:
- if size != 0:
- self.dealloc(start, size)
- return 0
- elif size == 0:
- return self.alloc(new_size)
-
- # return start, or raise AllocatorMemoryException
-
- # Truncation is the same as deallocating the tail cruft
- if new_size < size:
- self.dealloc(start + new_size, size - new_size)
- return start
-
- # Find which block it lives in
- for i, (alloc_start, alloc_size) in enumerate(zip(*(self.starts, self.sizes))):
- p = start - alloc_start
- if p >= 0 and size <= alloc_size - p:
- break
- if not (p >= 0 and size <= alloc_size - p):
- print(list(zip(self.starts, self.sizes)))
- print(start, size, new_size)
- print(p, alloc_start, alloc_size)
- assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
-
- if size == alloc_size - p:
- # Region is at end of block. Find how much free space is after it.
- is_final_block = i == len(self.starts) - 1
- if not is_final_block:
- free_size = self.starts[i + 1] - (start + size)
- else:
- free_size = self.capacity - (start + size)
-
- # TODO If region is an entire block being an island in free space,
- # can possibly extend in both directions.
-
- if free_size == new_size - size and not is_final_block:
- # Merge block with next (region is expanded in place to
- # exactly fill the free space)
- self.sizes[i] += free_size + self.sizes[i + 1]
- del self.starts[i + 1]
- del self.sizes[i + 1]
- return start
- elif free_size > new_size - size:
- # Expand region in place
- self.sizes[i] += new_size - size
- return start
-
- # The block must be repositioned. Dealloc then alloc.
-
- # But don't do this! If alloc fails, we've already silently dealloc'd
- # the original block.
- # self.dealloc(start, size)
- # return self.alloc(new_size)
-
- # It must be alloc'd first. We're not missing an optimisation
- # here, because if freeing the block would've allowed for the block to
- # be placed in the resulting free space, one of the above in-place
- # checks would've found it.
- result = self.alloc(new_size)
- self.dealloc(start, size)
- return result
-
- def dealloc(self, start, size):
- """Free a region of the buffer.
-
- :Parameters:
- `start` : int
- Starting index of the region.
- `size` : int
- Size of the region.
-
- """
- assert size >= 0
-
- if size == 0:
- return
-
- assert self.starts
-
- # Find which block needs to be split
- for i, (alloc_start, alloc_size) in enumerate(zip(*(self.starts, self.sizes))):
- p = start - alloc_start
- if p >= 0 and size <= alloc_size - p:
- break
-
- # Assert we left via the break
- assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
-
- if p == 0 and size == alloc_size:
- # Remove entire block
- del self.starts[i]
- del self.sizes[i]
- elif p == 0:
- # Truncate beginning of block
- self.starts[i] += size
- self.sizes[i] -= size
- elif size == alloc_size - p:
- # Truncate end of block
- self.sizes[i] -= size
- else:
- # Reduce size of left side, insert block at right side
- # $ = dealloc'd block, # = alloc'd region from same block
- #
- # <------8------>
- # <-5-><-6-><-7->
- # 1 2 3 4
- # #####$$$$$#####
- #
- # 1 = alloc_start
- # 2 = start
- # 3 = start + size
- # 4 = alloc_start + alloc_size
- # 5 = start - alloc_start = p
- # 6 = size
- # 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
- # 8 = alloc_size
- #
- self.sizes[i] = p
- self.starts.insert(i + 1, start + size)
- self.sizes.insert(i + 1, alloc_size - (p + size))
-
- def get_allocated_regions(self):
- """Get a list of (aggregate) allocated regions.
-
- The result of this method is ``(starts, sizes)``, where ``starts`` is
- a list of starting indices of the regions and ``sizes`` their
- corresponding lengths.
-
- :rtype: (list, list)
- """
- # return (starts, sizes); len(starts) == len(sizes)
- return self.starts, self.sizes
-
- def get_fragmented_free_size(self):
- """Returns the amount of space unused, not including the final
- free block.
-
- :rtype: int
- """
- if not self.starts:
- return 0
-
- # Variation of search for free block.
- total_free = 0
- free_start = self.starts[0] + self.sizes[0]
- for i, (alloc_start, alloc_size) in enumerate(zip(self.starts[1:], self.sizes[1:])):
- total_free += alloc_start - free_start
- free_start = alloc_start + alloc_size
-
- return total_free
-
- def get_free_size(self):
- """Return the amount of space unused.
-
- :rtype: int
- """
- if not self.starts:
- return self.capacity
-
- free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
- return self.get_fragmented_free_size() + free_end
-
- def get_usage(self):
- """Return fraction of capacity currently allocated.
-
- :rtype: float
- """
- return 1. - self.get_free_size() / float(self.capacity)
-
- def get_fragmentation(self):
- """Return fraction of free space that is not expandable.
-
- :rtype: float
- """
- free_size = self.get_free_size()
- if free_size == 0:
- return 0.
- return self.get_fragmented_free_size() / float(self.get_free_size())
-
- def __str__(self):
- return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
-
- def __repr__(self):
- return '<%s %s>' % (self.__class__.__name__, str(self))
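
A small sketch exercising the `Allocator` API above; the offsets follow from the extend-in-place policy described in the comments:

```python
# Exercise the Allocator defined above.
alloc = Allocator(capacity=64)

a = alloc.alloc(16)          # -> 0
b = alloc.alloc(8)           # -> 16 (packed onto the end of the first region)
alloc.dealloc(a, 16)         # free the first region
c = alloc.realloc(b, 8, 12)  # grows in place when the following space allows

print(alloc.get_allocated_regions())
print(alloc.get_free_size(), alloc.get_fragmented_free_size())
print("usage=%.2f fragmentation=%.2f" % (alloc.get_usage(), alloc.get_fragmentation()))
```
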
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/silent/adaptation.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/silent/adaptation.py
deleted file mode 100644
index 538f1081ae1a75142872495f508b00969c1a3c6d..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/silent/adaptation.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from pyglet.media.drivers.base import AbstractAudioDriver, AbstractAudioPlayer
-from pyglet.media.drivers.listener import AbstractListener
-
-
-class SilentAudioPlayer(AbstractAudioPlayer):
-
- def delete(self):
- pass
-
- def play(self):
- pass
-
- def stop(self):
- pass
-
- def clear(self):
- pass
-
- def write(self, audio_data, length):
- pass
-
- def get_time(self):
- return 0
-
- def set_volume(self, volume):
- pass
-
- def set_position(self, position):
- pass
-
- def set_min_distance(self, min_distance):
- pass
-
- def set_max_distance(self, max_distance):
- pass
-
- def set_pitch(self, pitch):
- pass
-
- def set_cone_orientation(self, cone_orientation):
- pass
-
- def set_cone_inner_angle(self, cone_inner_angle):
- pass
-
- def set_cone_outer_angle(self, cone_outer_angle):
- pass
-
- def set_cone_outer_gain(self, cone_outer_gain):
- pass
-
- def prefill_audio(self):
- pass
-
-
-class SilentDriver(AbstractAudioDriver):
-
- def create_audio_player(self, source, player):
- return SilentAudioPlayer(source, player)
-
- def get_listener(self):
- return SilentListener()
-
- def delete(self):
- pass
-
-
-class SilentListener(AbstractListener):
-
- def _set_volume(self, volume):
- pass
-
- def _set_position(self, position):
- pass
-
- def _set_forward_orientation(self, orientation):
- pass
-
- def _set_up_orientation(self, orientation):
- pass
-
- def _set_orientation(self):
- pass
diff --git a/spaces/ai-danger/hot-or-not/app.py b/spaces/ai-danger/hot-or-not/app.py
deleted file mode 100644
index 582dc64d7723b96221afcb9ecf0f457b12e400b6..0000000000000000000000000000000000000000
--- a/spaces/ai-danger/hot-or-not/app.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch
-import clip
-from PIL import Image
-import gradio as gr
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load("ViT-B/32", device=device)
-
-def hotornot(image, gender):
- image = Image.fromarray(image.astype("uint8"), "RGB")
-
- image = preprocess(image).unsqueeze(0).to(device)
- positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an attractive {gender}']
- negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
-
- pairs = list(zip(positive_terms, negative_terms))
-
- def evaluate(terms):
- text = clip.tokenize(terms).to(device)
-
- with torch.no_grad():
- logits_per_image, logits_per_text = model(image, text)
- probs = logits_per_image.softmax(dim=-1).cpu().numpy()
- return probs[0]
-
- probs = [evaluate(pair) for pair in pairs]
-
- positive_probs = [prob[0] for prob in probs]
- negative_probs = [prob[1] for prob in probs]
-
- hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
- beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
- attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)
-
- hot_score = sum(positive_probs)/len(positive_probs)
- ugly_score = sum(negative_probs)/len(negative_probs)
- composite = ((hot_score - ugly_score)+1) * 50
- composite = round(composite, 2)
- return composite, hotness_score, beauty_score, attractiveness_score
-
-iface = gr.Interface(
- fn=hotornot,
- inputs=[
- gr.inputs.Image(label="Image"),
- gr.inputs.Dropdown(
- [
- 'person', 'man', 'woman'
- ],
- default='person',
- )
- ],
- outputs=[
- gr.Textbox(label="Total Hot or Not™ Score"),
- gr.Textbox(label="Hotness Score"),
- gr.Textbox(label="Beauty Score"),
- gr.Textbox(label="Attractiveness Score"),
- ],
- title="Hot or Not",
- description="A simple hot or not app using OpenAI's CLIP model. How it works: the input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.",
-)
-iface.launch()
diff --git a/spaces/akhaliq/Detic/detic/data/custom_dataset_dataloader.py b/spaces/akhaliq/Detic/detic/data/custom_dataset_dataloader.py
deleted file mode 100644
index 8f8d6817704026796d2c2f457fe2624800693267..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Detic/detic/data/custom_dataset_dataloader.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/multi_dataset_dataloader.py (Apache-2.0 License)
-import copy
-import logging
-import numpy as np
-import operator
-import torch
-import torch.utils.data
-import json
-from detectron2.utils.comm import get_world_size
-from detectron2.utils.logger import _log_api_usage, log_first_n
-
-from detectron2.config import configurable
-from detectron2.data import samplers
-from torch.utils.data.sampler import BatchSampler, Sampler
-from detectron2.data.common import DatasetFromList, MapDataset
-from detectron2.data.dataset_mapper import DatasetMapper
-from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
-from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler
-from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
-from detectron2.data.build import filter_images_with_only_crowd_annotations
-from detectron2.data.build import filter_images_with_few_keypoints
-from detectron2.data.build import check_metadata_consistency
-from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
-from detectron2.utils import comm
-import itertools
-import math
-from collections import defaultdict
-from typing import Optional
-
-
-def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
- sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
- if 'MultiDataset' in sampler_name:
- dataset_dicts = get_detection_dataset_dicts_with_source(
- cfg.DATASETS.TRAIN,
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
- min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
- if cfg.MODEL.KEYPOINT_ON else 0,
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
- )
- else:
- dataset_dicts = get_detection_dataset_dicts(
- cfg.DATASETS.TRAIN,
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
- min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
- if cfg.MODEL.KEYPOINT_ON else 0,
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
- )
-
- if mapper is None:
- mapper = DatasetMapper(cfg, True)
-
- if sampler is not None:
- pass
- elif sampler_name == "TrainingSampler":
- sampler = TrainingSampler(len(dataset))
- elif sampler_name == "MultiDatasetSampler":
- sampler = MultiDatasetSampler(
- dataset_dicts,
- dataset_ratio = cfg.DATALOADER.DATASET_RATIO,
- use_rfs = cfg.DATALOADER.USE_RFS,
- dataset_ann = cfg.DATALOADER.DATASET_ANN,
- repeat_threshold = cfg.DATALOADER.REPEAT_THRESHOLD,
- )
- elif sampler_name == "RepeatFactorTrainingSampler":
- repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
- dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
- )
- sampler = RepeatFactorTrainingSampler(repeat_factors)
- else:
- raise ValueError("Unknown training sampler: {}".format(sampler_name))
-
- return {
- "dataset": dataset_dicts,
- "sampler": sampler,
- "mapper": mapper,
- "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
- "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
- "num_workers": cfg.DATALOADER.NUM_WORKERS,
- 'multi_dataset_grouping': cfg.DATALOADER.MULTI_DATASET_GROUPING,
- 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE,
- 'dataset_bs': cfg.DATALOADER.DATASET_BS,
- 'num_datasets': len(cfg.DATASETS.TRAIN)
- }
-
-
-@configurable(from_config=_custom_train_loader_from_config)
-def build_custom_train_loader(
- dataset, *, mapper, sampler,
- total_batch_size=16,
- aspect_ratio_grouping=True,
- num_workers=0,
- num_datasets=1,
- multi_dataset_grouping=False,
- use_diff_bs_size=False,
- dataset_bs=[]
- ):
- """
-    Modified from detectron2.data.build.build_detection_train_loader, but supports
- different samplers
- """
- if isinstance(dataset, list):
- dataset = DatasetFromList(dataset, copy=False)
- if mapper is not None:
- dataset = MapDataset(dataset, mapper)
- if sampler is None:
- sampler = TrainingSampler(len(dataset))
- assert isinstance(sampler, torch.utils.data.sampler.Sampler)
- if multi_dataset_grouping:
- return build_multi_dataset_batch_data_loader(
- use_diff_bs_size,
- dataset_bs,
- dataset,
- sampler,
- total_batch_size,
- num_datasets=num_datasets,
- num_workers=num_workers,
- )
- else:
- return build_batch_data_loader(
- dataset,
- sampler,
- total_batch_size,
- aspect_ratio_grouping=aspect_ratio_grouping,
- num_workers=num_workers,
- )
-
-
-def build_multi_dataset_batch_data_loader(
- use_diff_bs_size, dataset_bs,
- dataset, sampler, total_batch_size, num_datasets, num_workers=0
-):
- """
- """
- world_size = get_world_size()
- assert (
- total_batch_size > 0 and total_batch_size % world_size == 0
- ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
- total_batch_size, world_size
- )
-
- batch_size = total_batch_size // world_size
- data_loader = torch.utils.data.DataLoader(
- dataset,
- sampler=sampler,
- num_workers=num_workers,
- batch_sampler=None,
- collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
- worker_init_fn=worker_init_reset_seed,
- ) # yield individual mapped dict
- if use_diff_bs_size:
- return DIFFMDAspectRatioGroupedDataset(
- data_loader, dataset_bs, num_datasets)
- else:
- return MDAspectRatioGroupedDataset(
- data_loader, batch_size, num_datasets)
-
-
-def get_detection_dataset_dicts_with_source(
- dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
-):
- assert len(dataset_names)
- dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
- for dataset_name, dicts in zip(dataset_names, dataset_dicts):
- assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
-
- for source_id, (dataset_name, dicts) in \
- enumerate(zip(dataset_names, dataset_dicts)):
- assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
- for d in dicts:
- d['dataset_source'] = source_id
-
- if "annotations" in dicts[0]:
- try:
- class_names = MetadataCatalog.get(dataset_name).thing_classes
- check_metadata_consistency("thing_classes", dataset_name)
- print_instances_class_histogram(dicts, class_names)
- except AttributeError: # class names are not available for this dataset
- pass
-
- assert proposal_files is None
-
- dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
-
- has_instances = "annotations" in dataset_dicts[0]
- if filter_empty and has_instances:
- dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
- if min_keypoints > 0 and has_instances:
- dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
-
- return dataset_dicts
-
-
-class MultiDatasetSampler(Sampler):
- def __init__(
- self,
- dataset_dicts,
- dataset_ratio,
- use_rfs,
- dataset_ann,
- repeat_threshold=0.001,
- seed: Optional[int] = None,
- ):
- """
- """
- sizes = [0 for _ in range(len(dataset_ratio))]
- for d in dataset_dicts:
- sizes[d['dataset_source']] += 1
- print('dataset sizes', sizes)
- self.sizes = sizes
- assert len(dataset_ratio) == len(sizes), \
-            'length of dataset ratio {} should be equal to number of datasets {}'.format(
- len(dataset_ratio), len(sizes)
- )
- if seed is None:
- seed = comm.shared_random_seed()
- self._seed = int(seed)
- self._rank = comm.get_rank()
- self._world_size = comm.get_world_size()
-
- self.dataset_ids = torch.tensor(
- [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
-
- dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
- for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
- dataset_weight = torch.cat(dataset_weight)
-
- rfs_factors = []
- st = 0
- for i, s in enumerate(sizes):
- if use_rfs[i]:
- if dataset_ann[i] == 'box':
- rfs_func = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency
- else:
- rfs_func = repeat_factors_from_tag_frequency
- rfs_factor = rfs_func(
- dataset_dicts[st: st + s],
- repeat_thresh=repeat_threshold)
- rfs_factor = rfs_factor * (s / rfs_factor.sum())
- else:
- rfs_factor = torch.ones(s)
- rfs_factors.append(rfs_factor)
- st = st + s
- rfs_factors = torch.cat(rfs_factors)
-
- self.weights = dataset_weight * rfs_factors
- self.sample_epoch_size = len(self.weights)
-
- def __iter__(self):
- start = self._rank
- yield from itertools.islice(
- self._infinite_indices(), start, None, self._world_size)
-
-
- def _infinite_indices(self):
- g = torch.Generator()
- g.manual_seed(self._seed)
- while True:
- ids = torch.multinomial(
- self.weights, self.sample_epoch_size, generator=g,
- replacement=True)
- nums = [(self.dataset_ids[ids] == i).sum().int().item() \
- for i in range(len(self.sizes))]
- yield from ids
-
-
-class MDAspectRatioGroupedDataset(torch.utils.data.IterableDataset):
- def __init__(self, dataset, batch_size, num_datasets):
- """
- """
- self.dataset = dataset
- self.batch_size = batch_size
- self._buckets = [[] for _ in range(2 * num_datasets)]
-
- def __iter__(self):
- for d in self.dataset:
- w, h = d["width"], d["height"]
- aspect_ratio_bucket_id = 0 if w > h else 1
- bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
- bucket = self._buckets[bucket_id]
- bucket.append(d)
- if len(bucket) == self.batch_size:
- yield bucket[:]
- del bucket[:]
-
-
-class DIFFMDAspectRatioGroupedDataset(torch.utils.data.IterableDataset):
- def __init__(self, dataset, batch_sizes, num_datasets):
- """
- """
- self.dataset = dataset
- self.batch_sizes = batch_sizes
- self._buckets = [[] for _ in range(2 * num_datasets)]
-
- def __iter__(self):
- for d in self.dataset:
- w, h = d["width"], d["height"]
- aspect_ratio_bucket_id = 0 if w > h else 1
- bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
- bucket = self._buckets[bucket_id]
- bucket.append(d)
- if len(bucket) == self.batch_sizes[d['dataset_source']]:
- yield bucket[:]
- del bucket[:]
-
-
-def repeat_factors_from_tag_frequency(dataset_dicts, repeat_thresh):
- """
- """
- category_freq = defaultdict(int)
- for dataset_dict in dataset_dicts:
- cat_ids = dataset_dict['pos_category_ids']
- for cat_id in cat_ids:
- category_freq[cat_id] += 1
- num_images = len(dataset_dicts)
- for k, v in category_freq.items():
- category_freq[k] = v / num_images
-
- category_rep = {
- cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
- for cat_id, cat_freq in category_freq.items()
- }
-
- rep_factors = []
- for dataset_dict in dataset_dicts:
- cat_ids = dataset_dict['pos_category_ids']
- rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
- rep_factors.append(rep_factor)
-
- return torch.tensor(rep_factors, dtype=torch.float32)
\ No newline at end of file
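
The weighting scheme in MultiDatasetSampler above is easier to see in isolation. The sketch below reproduces only the dataset-ratio part of the weight computation (no repeat-factor sampling, no distributed sharding) and shows that two datasets of very different sizes end up contributing roughly equally to the sampled indices.

```python
# Simplified, single-process sketch of the dataset-ratio weighting used above.
import torch

def make_weights(dataset_ids, dataset_ratio):
    sizes = torch.bincount(dataset_ids, minlength=len(dataset_ratio)).float()
    # Each dataset gets total probability proportional to its ratio, spread evenly
    # over its images (mirrors: ones(s) * max(sizes) / s * r / sum(dataset_ratio)).
    per_image = torch.tensor(dataset_ratio) / (sizes * sum(dataset_ratio))
    return per_image[dataset_ids] * sizes.max()

dataset_ids = torch.tensor([0] * 1000 + [1] * 100)   # two datasets of very different size
weights = make_weights(dataset_ids, dataset_ratio=[1.0, 1.0])
g = torch.Generator().manual_seed(0)
idx = torch.multinomial(weights, num_samples=len(weights), replacement=True, generator=g)
print(torch.bincount(dataset_ids[idx]))  # roughly equal draws from both datasets
```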
diff --git a/spaces/akhaliq/Pop_Music_Transformer/app.py b/spaces/akhaliq/Pop_Music_Transformer/app.py
deleted file mode 100644
index ba07eb97452da655c78459d36741ea0d1b34b503..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Pop_Music_Transformer/app.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from model import PopMusicTransformer
-import os
-os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-import tensorflow as tf
-tf.compat.v1.disable_eager_execution()
-import gradio as gr
-import requests
-import torchtext
-import zipfile
-
-torchtext.utils.download_from_url("https://drive.google.com/uc?id=1gxuTSkF51NP04JZgTE46Pg4KQsbHQKGo", root=".")
-torchtext.utils.download_from_url("https://drive.google.com/uc?id=1nAKjaeahlzpVAX0F9wjQEG_hL4UosSbo", root=".")
-
-with zipfile.ZipFile("REMI-tempo-checkpoint.zip","r") as zip_ref:
- zip_ref.extractall(".")
-with zipfile.ZipFile("REMI-tempo-chord-checkpoint.zip","r") as zip_ref:
- zip_ref.extractall(".")
-
-url = 'https://github.com/AK391/remi/blob/master/input.midi?raw=true'
-r = requests.get(url, allow_redirects=True)
-open("input.midi", 'wb').write(r.content)
-
-
-# declare model
-model = PopMusicTransformer(
- checkpoint='REMI-tempo-checkpoint',
- is_training=False)
-
-def inference(midi):
- # generate continuation
- model.generate(
- n_target_bar=4,
- temperature=1.2,
- topk=5,
- output_path='./result/continuation.midi',
- prompt=midi.name)
- return './result/continuation.midi'
-
-
-title = "Pop Music Transformer"
-description = "Demo for Pop Music Transformer. To use it, simply upload your MIDI file, or click one of the examples to load it. Read more at the links below."
-article = ""
-
-examples = [
- ['input.midi']
-]
-gr.Interface(
- inference,
- gr.inputs.File(label="Input Midi"),
- gr.outputs.File(label="Output Midi"),
- title=title,
- description=description,
- article=article,
- examples=examples
- ).launch()
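
The `gr.inputs` / `gr.outputs` namespaces used in this app were deprecated and later removed in newer Gradio releases. A sketch of the same interface with the current component API is shown below; it assumes Gradio 4.x, and the inference body is elided.

```python
# Sketch assuming Gradio 4.x, where gr.inputs / gr.outputs no longer exist.
import gradio as gr

def inference(midi_path):
    # ... run model.generate(...) on midi_path exactly as in the original app ...
    return "./result/continuation.midi"

demo = gr.Interface(
    fn=inference,
    inputs=gr.File(label="Input Midi", type="filepath"),
    outputs=gr.File(label="Output Midi"),
    title="Pop Music Transformer",
)
demo.launch()
```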
diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/arctic/voc1/local/data_download.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/arctic/voc1/local/data_download.sh
deleted file mode 100644
index d9bff0f9606dcb8a210ee610509bd86a4e352716..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/arctic/voc1/local/data_download.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 Tomoki Hayashi
-# MIT License (https://opensource.org/licenses/MIT)
-
-download_dir=$1
-spk=$2
-
-available_spks=(
- "slt" "clb" "bdl" "rms" "jmk" "awb" "ksp"
-)
-
-# check arguments
-if [ $# != 2 ]; then
-    echo "Usage: $0 <download_dir> <spk>"
- echo "Available speakers: ${available_spks[*]}"
- exit 1
-fi
-
-set -euo pipefail
-
-# check speakers
-if ! echo "${available_spks[*]}" | grep -q "${spk}"; then
- echo "Specified spk (${spk}) is not available or not supported." >&2
- exit 1
-fi
-
-# download dataset
-cwd=$(pwd)
-if [ ! -e "${download_dir}/cmu_us_${spk}_arctic" ]; then
- mkdir -p "${download_dir}"
- cd "${download_dir}"
- wget "http://festvox.org/cmu_arctic/cmu_arctic/packed/cmu_us_${spk}_arctic-0.95-release.tar.bz2"
- tar xf "cmu_us_${spk}_arctic-0.95-release.tar.bz2"
- rm "cmu_us_${spk}_arctic-0.95-release.tar.bz2"
- cd "${cwd}"
- echo "Successfully finished download."
-else
- echo "Already exists. Skip download."
-fi
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/check.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/check.py
deleted file mode 100644
index fb3ac8b9c9ea57ec1bb667cb8e904a8b5b2f9df2..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/check.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""Validation of dependencies of packages
-"""
-
-import logging
-from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple
-
-from pip._vendor.packaging.requirements import Requirement
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-
-from pip._internal.distributions import make_distribution_for_install_requirement
-from pip._internal.metadata import get_default_environment
-from pip._internal.metadata.base import DistributionVersion
-from pip._internal.req.req_install import InstallRequirement
-
-logger = logging.getLogger(__name__)
-
-
-class PackageDetails(NamedTuple):
- version: DistributionVersion
- dependencies: List[Requirement]
-
-
-# Shorthands
-PackageSet = Dict[NormalizedName, PackageDetails]
-Missing = Tuple[NormalizedName, Requirement]
-Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement]
-
-MissingDict = Dict[NormalizedName, List[Missing]]
-ConflictingDict = Dict[NormalizedName, List[Conflicting]]
-CheckResult = Tuple[MissingDict, ConflictingDict]
-ConflictDetails = Tuple[PackageSet, CheckResult]
-
-
-def create_package_set_from_installed() -> Tuple[PackageSet, bool]:
- """Converts a list of distributions into a PackageSet."""
- package_set = {}
- problems = False
- env = get_default_environment()
- for dist in env.iter_installed_distributions(local_only=False, skip=()):
- name = dist.canonical_name
- try:
- dependencies = list(dist.iter_dependencies())
- package_set[name] = PackageDetails(dist.version, dependencies)
- except (OSError, ValueError) as e:
- # Don't crash on unreadable or broken metadata.
- logger.warning("Error parsing requirements for %s: %s", name, e)
- problems = True
- return package_set, problems
-
-
-def check_package_set(
- package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None
-) -> CheckResult:
- """Check if a package set is consistent
-
- If should_ignore is passed, it should be a callable that takes a
- package name and returns a boolean.
- """
-
- missing = {}
- conflicting = {}
-
- for package_name, package_detail in package_set.items():
- # Info about dependencies of package_name
- missing_deps: Set[Missing] = set()
- conflicting_deps: Set[Conflicting] = set()
-
- if should_ignore and should_ignore(package_name):
- continue
-
- for req in package_detail.dependencies:
- name = canonicalize_name(req.name)
-
- # Check if it's missing
- if name not in package_set:
- missed = True
- if req.marker is not None:
- missed = req.marker.evaluate()
- if missed:
- missing_deps.add((name, req))
- continue
-
- # Check if there's a conflict
- version = package_set[name].version
- if not req.specifier.contains(version, prereleases=True):
- conflicting_deps.add((name, version, req))
-
- if missing_deps:
- missing[package_name] = sorted(missing_deps, key=str)
- if conflicting_deps:
- conflicting[package_name] = sorted(conflicting_deps, key=str)
-
- return missing, conflicting
-
-
-def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails:
- """For checking if the dependency graph would be consistent after \
- installing given requirements
- """
- # Start from the current state
- package_set, _ = create_package_set_from_installed()
- # Install packages
- would_be_installed = _simulate_installation_of(to_install, package_set)
-
- # Only warn about directly-dependent packages; create a whitelist of them
- whitelist = _create_whitelist(would_be_installed, package_set)
-
- return (
- package_set,
- check_package_set(
- package_set, should_ignore=lambda name: name not in whitelist
- ),
- )
-
-
-def _simulate_installation_of(
- to_install: List[InstallRequirement], package_set: PackageSet
-) -> Set[NormalizedName]:
- """Computes the version of packages after installing to_install."""
- # Keep track of packages that were installed
- installed = set()
-
- # Modify it as installing requirement_set would (assuming no errors)
- for inst_req in to_install:
- abstract_dist = make_distribution_for_install_requirement(inst_req)
- dist = abstract_dist.get_metadata_distribution()
- name = dist.canonical_name
- package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))
-
- installed.add(name)
-
- return installed
-
-
-def _create_whitelist(
- would_be_installed: Set[NormalizedName], package_set: PackageSet
-) -> Set[NormalizedName]:
- packages_affected = set(would_be_installed)
-
- for package_name in package_set:
- if package_name in packages_affected:
- continue
-
- for req in package_set[package_name].dependencies:
- if canonicalize_name(req.name) in packages_affected:
- packages_affected.add(package_name)
- break
-
- return packages_affected
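
The core test in check_package_set above, whether a dependency is missing or its installed version falls outside the requirement's specifier, can be exercised directly with the `packaging` library. The installed versions in the sketch below are hypothetical.

```python
# Standalone sketch of the per-requirement check performed in check_package_set.
from packaging.requirements import Requirement

installed = {"urllib3": "2.2.1", "idna": "3.6"}  # hypothetical installed versions

req = Requirement('urllib3<1.27,>=1.21.1; python_version >= "3.6"')
name = req.name.lower()  # pip uses canonicalize_name(); lower() is a simplification

if name not in installed:
    # A missing dependency only counts if its environment marker applies.
    missed = req.marker is None or req.marker.evaluate()
    print("missing" if missed else "not required in this environment")
elif not req.specifier.contains(installed[name], prereleases=True):
    print(f"conflict: {name} {installed[name]} does not satisfy {req.specifier}")
```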
diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/PerlSAX.pm b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/PerlSAX.pm
deleted file mode 100644
index f025cce0afdeb00a79a7c1d72cb522e1131062c0..0000000000000000000000000000000000000000
--- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/PerlSAX.pm
+++ /dev/null
@@ -1,47 +0,0 @@
-package XML::DOM::PerlSAX;
-use strict;
-
-BEGIN
-{
- if ($^W)
- {
- warn "XML::DOM::PerlSAX has been renamed to XML::Handler::BuildDOM, please modify your code accordingly.";
- }
-}
-
-use XML::Handler::BuildDOM;
-use vars qw{ @ISA };
-@ISA = qw{ XML::Handler::BuildDOM };
-
-1; # package return code
-
-__END__
-
-=head1 NAME
-
-XML::DOM::PerlSAX - Old name of L<XML::Handler::BuildDOM>
-
-=head1 SYNOPSIS
-
- See L<XML::Handler::BuildDOM>
-
-=head1 DESCRIPTION
-
-XML::DOM::PerlSAX was renamed to L<XML::Handler::BuildDOM> to comply
-with naming conventions for PerlSAX filters/handlers.
-
-For backward compatibility, this package will remain in existence
-(it simply includes XML::Handler::BuildDOM), but it will print a warning when
-running with I<'perl -w'>.
-
-=head1 AUTHOR
-
-Enno Derksen is the original author.
-
-Send bug reports, hints, tips, suggestions to T.J Mather at
->.
-
-=head1 SEE ALSO
-
-L, L
-
diff --git a/spaces/aliabid94/AutoGPT/run.sh b/spaces/aliabid94/AutoGPT/run.sh
deleted file mode 100644
index edcbc44155b9ca9df83e283fdf976472c13e6492..0000000000000000000000000000000000000000
--- a/spaces/aliabid94/AutoGPT/run.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-python scripts/check_requirements.py requirements.txt
-if [ $? -eq 1 ]
-then
- echo Installing missing packages...
- pip install -r requirements.txt
-fi
-python -m autogpt "$@"
-read -p "Press any key to continue..."
diff --git a/spaces/allandclive/Uganda_MMS/vits/monotonic_align/setup.py b/spaces/allandclive/Uganda_MMS/vits/monotonic_align/setup.py
deleted file mode 100644
index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000
--- a/spaces/allandclive/Uganda_MMS/vits/monotonic_align/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from distutils.core import setup
-from Cython.Build import cythonize
-import numpy
-
-setup(
- name = 'monotonic_align',
- ext_modules = cythonize("core.pyx"),
- include_dirs=[numpy.get_include()]
-)
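
`distutils`, which this setup.py imports, was deprecated in Python 3.10 and removed in 3.12. A setuptools-based sketch with the same effect is shown below; as with the original, the extension is commonly built in place with `python setup.py build_ext --inplace`.

```python
# Sketch of an equivalent setup.py using setuptools instead of the removed distutils.
from setuptools import Extension, setup
from Cython.Build import cythonize
import numpy

extensions = [
    # Same module name ("core") that cythonize("core.pyx") produces in the original.
    Extension("core", ["core.pyx"], include_dirs=[numpy.get_include()]),
]

setup(
    name="monotonic_align",
    ext_modules=cythonize(extensions),
)
```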
diff --git a/spaces/allknowingroger/Image-Models-Test167/README.md b/spaces/allknowingroger/Image-Models-Test167/README.md
deleted file mode 100644
index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test167/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: true
-duplicated_from: allknowingroger/Image-Models-Test
----
-
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test177/README.md b/spaces/allknowingroger/Image-Models-Test177/README.md
deleted file mode 100644
index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test177/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: true
-duplicated_from: allknowingroger/Image-Models-Test
----
-
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/huggingface/assets/index-28811a6d.js b/spaces/allknowingroger/huggingface/assets/index-28811a6d.js
deleted file mode 100644
index c6b9922854c2de7dc26081fc7f474d5e3cd7e70f..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/huggingface/assets/index-28811a6d.js
+++ /dev/null
@@ -1,41 +0,0 @@
-var hc=Object.defineProperty;var yc=(e,t,n)=>t in e?hc(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var Et=(e,t,n)=>(yc(e,typeof t!="symbol"?t+"":t,n),n);(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const i of l)if(i.type==="childList")for(const o of i.addedNodes)o.tagName==="LINK"&&o.rel==="modulepreload"&&r(o)}).observe(document,{childList:!0,subtree:!0});function n(l){const i={};return l.integrity&&(i.integrity=l.integrity),l.referrerPolicy&&(i.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?i.credentials="include":l.crossOrigin==="anonymous"?i.credentials="omit":i.credentials="same-origin",i}function r(l){if(l.ep)return;l.ep=!0;const i=n(l);fetch(l.href,i)}})();var Mr={},vc={get exports(){return Mr},set exports(e){Mr=e}},ul={},ne={},gc={get exports(){return ne},set exports(e){ne=e}},T={};/**
- * @license React
- * react.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */var bn=Symbol.for("react.element"),wc=Symbol.for("react.portal"),kc=Symbol.for("react.fragment"),Sc=Symbol.for("react.strict_mode"),Ec=Symbol.for("react.profiler"),xc=Symbol.for("react.provider"),_c=Symbol.for("react.context"),Cc=Symbol.for("react.forward_ref"),Nc=Symbol.for("react.suspense"),Pc=Symbol.for("react.memo"),zc=Symbol.for("react.lazy"),Qo=Symbol.iterator;function Oc(e){return e===null||typeof e!="object"?null:(e=Qo&&e[Qo]||e["@@iterator"],typeof e=="function"?e:null)}var ns={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},rs=Object.assign,ls={};function cn(e,t,n){this.props=e,this.context=t,this.refs=ls,this.updater=n||ns}cn.prototype.isReactComponent={};cn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};cn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function is(){}is.prototype=cn.prototype;function Xi(e,t,n){this.props=e,this.context=t,this.refs=ls,this.updater=n||ns}var Yi=Xi.prototype=new is;Yi.constructor=Xi;rs(Yi,cn.prototype);Yi.isPureReactComponent=!0;var Ko=Array.isArray,os=Object.prototype.hasOwnProperty,Gi={current:null},us={key:!0,ref:!0,__self:!0,__source:!0};function ss(e,t,n){var r,l={},i=null,o=null;if(t!=null)for(r in t.ref!==void 0&&(o=t.ref),t.key!==void 0&&(i=""+t.key),t)os.call(t,r)&&!us.hasOwnProperty(r)&&(l[r]=t[r]);var u=arguments.length-2;if(u===1)l.children=n;else if(1]+)>;\s+rel="([^"]+)"/g;return Object.fromEntries([...e.matchAll(t)].map(([,n,r])=>[r,n]))}var Qc=["pipeline_tag","private","gated","downloads","likes"];async function*Kc(e){var r,l;Hc(e==null?void 0:e.credentials);const t=new URLSearchParams([...Object.entries({limit:"500",...(r=e==null?void 0:e.search)!=null&&r.owner?{author:e.search.owner}:void 0,...(l=e==null?void 0:e.search)!=null&&l.task?{pipeline_tag:e.search.task}:void 0}),...Qc.map(i=>["expand",i])]).toString();let n=`${(e==null?void 0:e.hubUrl)||$c}/api/models?${t}`;for(;n;){const i=await fetch(n,{headers:{accept:"application/json",...e!=null&&e.credentials?{Authorization:`Bearer ${e.credentials.accessToken}`}:void 0}});if(!i.ok)throw Vc(i);const o=await i.json();for(const s of o)yield{id:s._id,name:s.id,private:s.private,task:s.pipeline_tag,downloads:s.downloads,gated:s.gated,likes:s.likes,updatedAt:new Date(s.lastModified)};const u=i.headers.get("Link");n=u?Wc(u).next:void 0}}var Xc=Object.defineProperty,Yc=(e,t)=>{for(var n in t)Xc(e,n,{get:t[n],enumerable:!0})},Ji={};Yc(Ji,{audioClassification:()=>bc,automaticSpeechRecognition:()=>ef,conversational:()=>uf,featureExtraction:()=>sf,fillMask:()=>af,imageClassification:()=>tf,imageSegmentation:()=>nf,imageToText:()=>rf,objectDetection:()=>lf,questionAnswering:()=>cf,request:()=>K,sentenceSimilarity:()=>ff,streamingRequest:()=>qi,summarization:()=>df,tableQuestionAnswering:()=>pf,textClassification:()=>mf,textGeneration:()=>hf,textGenerationStream:()=>yf,textToImage:()=>of,tokenClassification:()=>vf,translation:()=>gf,zeroShotClassification:()=>wf});var Gc="https://api-inference.huggingface.co/models/";function cs(e,t){const{model:n,accessToken:r,...l}=e,i={};r&&(i.Authorization=`Bearer ${r}`);const o="data"in e&&!!e.data;o?(t!=null&&t.wait_for_model&&(i["X-Wait-For-Model"]="true"),(t==null?void 
0:t.use_cache)===!1&&(i["X-Use-Cache"]="false"),t!=null&&t.dont_load_model&&(i["X-Load-Model"]="0")):i["Content-Type"]="application/json";const u=/^http(s?):/.test(n)||n.startsWith("/")?n:`${Gc}${n}`,s={headers:i,method:"POST",body:o?e.data:JSON.stringify({...l,options:t}),credentials:t!=null&&t.includeCredentials?"include":"same-origin"};return{url:u,info:s}}async function K(e,t){var i,o;const{url:n,info:r}=cs(e,t),l=await fetch(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return K(e,{...t,wait_for_model:!0});if(!l.ok){if((i=l.headers.get("Content-Type"))!=null&&i.startsWith("application/json")){const u=await l.json();if(u.error)throw new Error(u.error)}throw new Error("An error occurred while fetching the blob")}return(o=l.headers.get("Content-Type"))!=null&&o.startsWith("application/json")?await l.json():await l.blob()}function Zc(e){let t,n,r,l=!1;return function(o){t===void 0?(t=o,n=0,r=-1):t=qc(t,o);const u=t.length;let s=0;for(;n0){const s=l.decode(o.subarray(0,u)),c=u+(o[u+1]===32?2:1),m=l.decode(o.subarray(c));switch(s){case"data":r.data=r.data?r.data+`
-`+m:m;break;case"event":r.event=m;break;case"id":e(r.id=m);break;case"retry":const h=parseInt(m,10);isNaN(h)||t(r.retry=h);break}}}}function qc(e,t){const n=new Uint8Array(e.length+t.length);return n.set(e),n.set(t,e.length),n}function Yo(){return{data:"",event:"",id:"",retry:void 0}}async function*qi(e,t){var c;const{url:n,info:r}=cs({...e,stream:!0},t),l=await fetch(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return qi(e,{...t,wait_for_model:!0});if(!l.ok){if((c=l.headers.get("Content-Type"))!=null&&c.startsWith("application/json")){const m=await l.json();if(m.error)throw new Error(m.error)}throw new Error(`Server response contains error: ${l.status}`)}if(l.headers.get("content-type")!=="text/event-stream")throw new Error("Server does not support event stream content type, it returned "+l.headers.get("content-type"));if(!l.body)return;const i=l.body.getReader();let o=[];const s=Zc(Jc(()=>{},()=>{},m=>{o.push(m)}));try{for(;;){const{done:m,value:h}=await i.read();if(m)return;s(h);for(const p of o)p.data.length>0&&(yield JSON.parse(p.data));o=[]}}finally{i.releaseLock()}}var Z=class extends TypeError{constructor(e){super(`Invalid inference output: ${e}. Use the 'request' method with the same parameters to do a custom call with no type checking.`),this.name="InferenceOutputError"}};async function bc(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Z("Expected Array<{label: string, score: number}>");return n}async function ef(e,t){const n=await K(e,t);if(!(typeof(n==null?void 0:n.text)=="string"))throw new Z("Expected {text: string}");return n}async function tf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Z("Expected Array<{label: string, score: number}>");return n}async function nf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.mask=="string"&&typeof l.score=="number")))throw new Z("Expected Array<{label: string, mask: string, score: number}>");return n}async function rf(e,t){var r;const n=(r=await K(e,t))==null?void 0:r[0];if(typeof(n==null?void 0:n.generated_text)!="string")throw new Z("Expected {generated_text: string}");return n}async function lf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number"&&typeof l.box.xmin=="number"&&typeof l.box.ymin=="number"&&typeof l.box.xmax=="number"&&typeof l.box.ymax=="number")))throw new Z("Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>");return n}async function of(e,t){const n=await K(e,t);if(!(n&&n instanceof Blob))throw new Z("Expected Blob");return n}async function uf(e,t){const n=await K(e,t);if(!(Array.isArray(n.conversation.generated_responses)&&n.conversation.generated_responses.every(l=>typeof l=="string")&&Array.isArray(n.conversation.past_user_inputs)&&n.conversation.past_user_inputs.every(l=>typeof l=="string")&&typeof n.generated_text=="string"&&Array.isArray(n.warnings)&&n.warnings.every(l=>typeof l=="string")))throw new Z("Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}");return n}async function sf(e,t){const n=await K(e,t);let r=!0;if(Array.isArray(n)){for(const l of n)if(Array.isArray(l)){if(r=l.every(i=>typeof i=="number"),!r)break}else if(typeof l!="number"){r=!1;break}}else r=!1;if(!r)throw new 
Z("Expected Array");return n}async function af(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.score=="number"&&typeof l.sequence=="string"&&typeof l.token=="number"&&typeof l.token_str=="string")))throw new Z("Expected Array<{score: number, sequence: string, token: number, token_str: string}>");return n}async function cf(e,t){const n=await K(e,t);if(!(typeof(n==null?void 0:n.answer)=="string"&&typeof n.end=="number"&&typeof n.score=="number"&&typeof n.start=="number"))throw new Z("Expected {answer: string, end: number, score: number, start: number}");return n}async function ff(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l=="number")))throw new Z("Expected number[]");return n}async function df(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.summary_text)=="string")))throw new Z("Expected Array<{summary_text: string}>");return n==null?void 0:n[0]}async function pf(e,t){const n=await K(e,t);if(!(typeof(n==null?void 0:n.aggregator)=="string"&&typeof n.answer=="string"&&Array.isArray(n.cells)&&n.cells.every(l=>typeof l=="string")&&Array.isArray(n.coordinates)&&n.coordinates.every(l=>Array.isArray(l)&&l.every(i=>typeof i=="number"))))throw new Z("Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}");return n}async function mf(e,t){var l;const n=(l=await K(e,t))==null?void 0:l[0];if(!(Array.isArray(n)&&n.every(i=>typeof(i==null?void 0:i.label)=="string"&&typeof i.score=="number")))throw new Z("Expected Array<{label: string, score: number}>");return n}async function hf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.generated_text)=="string")))throw new Z("Expected Array<{generated_text: string}>");return n==null?void 0:n[0]}async function*yf(e,t){yield*qi(e,t)}function fs(e){return Array.isArray(e)?e:[e]}async function vf(e,t){const n=fs(await K(e,t));if(!(Array.isArray(n)&&n.every(l=>typeof l.end=="number"&&typeof l.entity_group=="string"&&typeof l.score=="number"&&typeof l.start=="number"&&typeof l.word=="string")))throw new Z("Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>");return n}async function gf(e,t){const n=await K(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.translation_text)=="string")))throw new Z("Expected type Array<{translation_text: string}>");return n==null?void 0:n[0]}async function wf(e,t){const n=fs(await K(e,t));if(!(Array.isArray(n)&&n.every(l=>Array.isArray(l.labels)&&l.labels.every(i=>typeof i=="string")&&Array.isArray(l.scores)&&l.scores.every(i=>typeof i=="number")&&typeof l.sequence=="string")))throw new Z("Expected Array<{labels: string[], scores: number[], sequence: string}>");return n}var kf=class{constructor(e="",t={}){Et(this,"accessToken");Et(this,"defaultOptions");this.accessToken=e,this.defaultOptions=t;for(const[n,r]of Object.entries(Ji))Object.defineProperty(this,n,{enumerable:!1,value:(l,i)=>r({...l,accessToken:e},{...t,...i})})}endpoint(e){return new Sf(e,this.accessToken,this.defaultOptions)}},Sf=class{constructor(e,t="",n={}){for(const[r,l]of Object.entries(Ji))Object.defineProperty(this,r,{enumerable:!1,value:(i,o)=>l({...i,accessToken:t,model:e},{...n,...o})})}},jr=function(){return jr=Object.assign||function(t){for(var n,r=1,l=arguments.length;r0&&n>="0"&&n<="9"?"_"+n+r:""+n.toUpperCase()+r}function Nf(e,t){return t===void 0&&(t={}),Cf(e,jr({delimiter:"",transform:ds},t))}function Pf(e,t){return t===0?e.toLowerCase():ds(e,t)}function 
zf(e,t){return t===void 0&&(t={}),Nf(e,jr({transform:Pf},t))}var bl={},Of={get exports(){return bl},set exports(e){bl=e}},Ee={},ei={},Tf={get exports(){return ei},set exports(e){ei=e}},ps={};/**
- * @license React
- * scheduler.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */(function(e){function t(x,z){var O=x.length;x.push(z);e:for(;0>>1,J=x[W];if(0>>1;Wl(Cl,O))Stl(ir,Cl)?(x[W]=ir,x[St]=O,W=St):(x[W]=Cl,x[kt]=O,W=kt);else if(Stl(ir,O))x[W]=ir,x[St]=O,W=St;else break e}}return z}function l(x,z){var O=x.sortIndex-z.sortIndex;return O!==0?O:x.id-z.id}if(typeof performance=="object"&&typeof performance.now=="function"){var i=performance;e.unstable_now=function(){return i.now()}}else{var o=Date,u=o.now();e.unstable_now=function(){return o.now()-u}}var s=[],c=[],m=1,h=null,p=3,g=!1,w=!1,k=!1,D=typeof setTimeout=="function"?setTimeout:null,f=typeof clearTimeout=="function"?clearTimeout:null,a=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function d(x){for(var z=n(c);z!==null;){if(z.callback===null)r(c);else if(z.startTime<=x)r(c),z.sortIndex=z.expirationTime,t(s,z);else break;z=n(c)}}function y(x){if(k=!1,d(x),!w)if(n(s)!==null)w=!0,xl(E);else{var z=n(c);z!==null&&_l(y,z.startTime-x)}}function E(x,z){w=!1,k&&(k=!1,f(N),N=-1),g=!0;var O=p;try{for(d(z),h=n(s);h!==null&&(!(h.expirationTime>z)||x&&!Le());){var W=h.callback;if(typeof W=="function"){h.callback=null,p=h.priorityLevel;var J=W(h.expirationTime<=z);z=e.unstable_now(),typeof J=="function"?h.callback=J:h===n(s)&&r(s),d(z)}else r(s);h=n(s)}if(h!==null)var lr=!0;else{var kt=n(c);kt!==null&&_l(y,kt.startTime-z),lr=!1}return lr}finally{h=null,p=O,g=!1}}var _=!1,C=null,N=-1,H=5,L=-1;function Le(){return!(e.unstable_now()-Lx||125W?(x.sortIndex=O,t(c,x),n(s)===null&&x===n(c)&&(k?(f(N),N=-1):k=!0,_l(y,O-W))):(x.sortIndex=J,t(s,x),w||g||(w=!0,xl(E))),x},e.unstable_shouldYield=Le,e.unstable_wrapCallback=function(x){var z=p;return function(){var O=p;p=z;try{return x.apply(this,arguments)}finally{p=O}}}})(ps);(function(e){e.exports=ps})(Tf);/**
- * @license React
- * react-dom.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */var ms=ne,Se=ei;function v(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),ti=Object.prototype.hasOwnProperty,Lf=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Zo={},Jo={};function Rf(e){return ti.call(Jo,e)?!0:ti.call(Zo,e)?!1:Lf.test(e)?Jo[e]=!0:(Zo[e]=!0,!1)}function If(e,t,n,r){if(n!==null&&n.type===0)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return r?!1:n!==null?!n.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function Af(e,t,n,r){if(t===null||typeof t>"u"||If(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case 3:return!t;case 4:return t===!1;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}function de(e,t,n,r,l,i,o){this.acceptsBooleans=t===2||t===3||t===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=i,this.removeEmptyString=o}var le={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){le[e]=new de(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];le[t]=new de(t,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){le[e]=new de(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){le[e]=new de(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){le[e]=new de(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){le[e]=new de(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){le[e]=new de(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){le[e]=new de(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){le[e]=new de(e,5,!1,e.toLowerCase(),null,!1,!1)});var bi=/[\-:]([a-z])/g;function eo(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering 
underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(bi,eo);le[t]=new de(t,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(bi,eo);le[t]=new de(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(bi,eo);le[t]=new de(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){le[e]=new de(e,1,!1,e.toLowerCase(),null,!1,!1)});le.xlinkHref=new de("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){le[e]=new de(e,1,!1,e.toLowerCase(),null,!0,!0)});function to(e,t,n,r){var l=le.hasOwnProperty(t)?le[t]:null;(l!==null?l.type!==0:r||!(2u||l[o]!==i[u]){var s=`
-`+l[o].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=o&&0<=u);break}}}finally{zl=!1,Error.prepareStackTrace=n}return(e=e?e.displayName||e.name:"")?xn(e):""}function Mf(e){switch(e.tag){case 5:return xn(e.type);case 16:return xn("Lazy");case 13:return xn("Suspense");case 19:return xn("SuspenseList");case 0:case 2:case 15:return e=Ol(e.type,!1),e;case 11:return e=Ol(e.type.render,!1),e;case 1:return e=Ol(e.type,!0),e;default:return""}}function ii(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case Ut:return"Fragment";case Ft:return"Portal";case ni:return"Profiler";case no:return"StrictMode";case ri:return"Suspense";case li:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case vs:return(e.displayName||"Context")+".Consumer";case ys:return(e._context.displayName||"Context")+".Provider";case ro:var t=e.render;return e=e.displayName,e||(e=t.displayName||t.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case lo:return t=e.displayName||null,t!==null?t:ii(e.type)||"Memo";case tt:t=e._payload,e=e._init;try{return ii(e(t))}catch{}}return null}function jf(e){var t=e.type;switch(e.tag){case 24:return"Cache";case 9:return(t.displayName||"Context")+".Consumer";case 10:return(t._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=t.render,e=e.displayName||e.name||"",t.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return t;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return ii(t);case 8:return t===no?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof t=="function")return t.displayName||t.name||null;if(typeof t=="string")return t}return null}function ht(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function ws(e){var t=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function Df(e){var t=ws(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&typeof n<"u"&&typeof n.get=="function"&&typeof n.set=="function"){var l=n.get,i=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return l.call(this)},set:function(o){r=""+o,i.call(this,o)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(o){r=""+o},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}function sr(e){e._valueTracker||(e._valueTracker=Df(e))}function ks(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=ws(e)?e.checked?"true":"false":e.value),e=r,e!==n?(t.setValue(e),!0):!1}function Dr(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function oi(e,t){var n=t.checked;return V({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:n??e._wrapperState.initialChecked})}function bo(e,t){var 
n=t.defaultValue==null?"":t.defaultValue,r=t.checked!=null?t.checked:t.defaultChecked;n=ht(t.value!=null?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:t.type==="checkbox"||t.type==="radio"?t.checked!=null:t.value!=null}}function Ss(e,t){t=t.checked,t!=null&&to(e,"checked",t,!1)}function ui(e,t){Ss(e,t);var n=ht(t.value),r=t.type;if(n!=null)r==="number"?(n===0&&e.value===""||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}t.hasOwnProperty("value")?si(e,t.type,n):t.hasOwnProperty("defaultValue")&&si(e,t.type,ht(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(e.defaultChecked=!!t.defaultChecked)}function eu(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!(r!=="submit"&&r!=="reset"||t.value!==void 0&&t.value!==null))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}n=e.name,n!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,n!==""&&(e.name=n)}function si(e,t,n){(t!=="number"||Dr(e.ownerDocument)!==e)&&(n==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}var _n=Array.isArray;function Zt(e,t,n,r){if(e=e.options,t){t={};for(var l=0;l"+t.valueOf().toString()+"",t=ar.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function Dn(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&n.nodeType===3){n.nodeValue=t;return}}e.textContent=t}var Pn={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},Ff=["Webkit","ms","Moz","O"];Object.keys(Pn).forEach(function(e){Ff.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),Pn[t]=Pn[e]})});function Cs(e,t,n){return t==null||typeof t=="boolean"||t===""?"":n||typeof t!="number"||t===0||Pn.hasOwnProperty(e)&&Pn[e]?(""+t).trim():t+"px"}function Ns(e,t){e=e.style;for(var n in t)if(t.hasOwnProperty(n)){var r=n.indexOf("--")===0,l=Cs(n,t[n],r);n==="float"&&(n="cssFloat"),r?e.setProperty(n,l):e[n]=l}}var Uf=V({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function fi(e,t){if(t){if(Uf[e]&&(t.children!=null||t.dangerouslySetInnerHTML!=null))throw Error(v(137,e));if(t.dangerouslySetInnerHTML!=null){if(t.children!=null)throw Error(v(60));if(typeof t.dangerouslySetInnerHTML!="object"||!("__html"in t.dangerouslySetInnerHTML))throw Error(v(61))}if(t.style!=null&&typeof t.style!="object")throw Error(v(62))}}function di(e,t){if(e.indexOf("-")===-1)return typeof t.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var pi=null;function io(e){return 
e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var mi=null,Jt=null,qt=null;function ru(e){if(e=nr(e)){if(typeof mi!="function")throw Error(v(280));var t=e.stateNode;t&&(t=dl(t),mi(e.stateNode,e.type,t))}}function Ps(e){Jt?qt?qt.push(e):qt=[e]:Jt=e}function zs(){if(Jt){var e=Jt,t=qt;if(qt=Jt=null,ru(e),t)for(e=0;e>>=0,e===0?32:31-(Zf(e)/Jf|0)|0}var cr=64,fr=4194304;function Cn(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function Vr(e,t){var n=e.pendingLanes;if(n===0)return 0;var r=0,l=e.suspendedLanes,i=e.pingedLanes,o=n&268435455;if(o!==0){var u=o&~l;u!==0?r=Cn(u):(i&=o,i!==0&&(r=Cn(i)))}else o=n&~l,o!==0?r=Cn(o):i!==0&&(r=Cn(i));if(r===0)return 0;if(t!==0&&t!==r&&!(t&l)&&(l=r&-r,i=t&-t,l>=i||l===16&&(i&4194240)!==0))return t;if(r&4&&(r|=n&16),t=e.entangledLanes,t!==0)for(e=e.entanglements,t&=r;0n;n++)t.push(e);return t}function er(e,t,n){e.pendingLanes|=t,t!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,t=31-je(t),e[t]=n}function td(e,t){var n=e.pendingLanes&~t;e.pendingLanes=t,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=t,e.mutableReadLanes&=t,e.entangledLanes&=t,t=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=On),du=String.fromCharCode(32),pu=!1;function Ys(e,t){switch(e){case"keyup":return Od.indexOf(t.keyCode)!==-1;case"keydown":return t.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Gs(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var $t=!1;function Ld(e,t){switch(e){case"compositionend":return Gs(t);case"keypress":return t.which!==32?null:(pu=!0,du);case"textInput":return e=t.data,e===du&&pu?null:e;default:return null}}function Rd(e,t){if($t)return e==="compositionend"||!mo&&Ys(e,t)?(e=Ks(),Nr=co=it=null,$t=!1,e):null;switch(e){case"paste":return null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=t)return{node:n,offset:t-e};e=r}e:{for(;n;){if(n.nextSibling){n=n.nextSibling;break e}n=n.parentNode}n=void 0}n=vu(n)}}function bs(e,t){return e&&t?e===t?!0:e&&e.nodeType===3?!1:t&&t.nodeType===3?bs(e,t.parentNode):"contains"in e?e.contains(t):e.compareDocumentPosition?!!(e.compareDocumentPosition(t)&16):!1:!1}function ea(){for(var e=window,t=Dr();t instanceof e.HTMLIFrameElement;){try{var n=typeof t.contentWindow.location.href=="string"}catch{n=!1}if(n)e=t.contentWindow;else break;t=Dr(e.document)}return t}function ho(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&(t==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||t==="textarea"||e.contentEditable==="true")}function Vd(e){var t=ea(),n=e.focusedElem,r=e.selectionRange;if(t!==n&&n&&n.ownerDocument&&bs(n.ownerDocument.documentElement,n)){if(r!==null&&ho(n)){if(t=r.start,e=r.end,e===void 0&&(e=t),"selectionStart"in n)n.selectionStart=t,n.selectionEnd=Math.min(e,n.value.length);else if(e=(t=n.ownerDocument||document)&&t.defaultView||window,e.getSelection){e=e.getSelection();var 
l=n.textContent.length,i=Math.min(r.start,l);r=r.end===void 0?i:Math.min(r.end,l),!e.extend&&i>r&&(l=r,r=i,i=l),l=gu(n,i);var o=gu(n,r);l&&o&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==o.node||e.focusOffset!==o.offset)&&(t=t.createRange(),t.setStart(l.node,l.offset),e.removeAllRanges(),i>r?(e.addRange(t),e.extend(o.node,o.offset)):(t.setEnd(o.node,o.offset),e.addRange(t)))}}for(t=[],e=n;e=e.parentNode;)e.nodeType===1&&t.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof n.focus=="function"&&n.focus(),n=0;n=document.documentMode,Vt=null,ki=null,Ln=null,Si=!1;function wu(e,t,n){var r=n.window===n?n.document:n.nodeType===9?n:n.ownerDocument;Si||Vt==null||Vt!==Dr(r)||(r=Vt,"selectionStart"in r&&ho(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),Ln&&Hn(Ln,r)||(Ln=r,r=Wr(ki,"onSelect"),0Wt||(e.current=Pi[Wt],Pi[Wt]=null,Wt--)}function A(e,t){Wt++,Pi[Wt]=e.current,e.current=t}var yt={},se=gt(yt),he=gt(!1),Tt=yt;function rn(e,t){var n=e.type.contextTypes;if(!n)return yt;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var l={},i;for(i in n)l[i]=t[i];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=l),l}function ye(e){return e=e.childContextTypes,e!=null}function Kr(){j(he),j(se)}function Nu(e,t,n){if(se.current!==yt)throw Error(v(168));A(se,t),A(he,n)}function aa(e,t,n){var r=e.stateNode;if(t=t.childContextTypes,typeof r.getChildContext!="function")return n;r=r.getChildContext();for(var l in r)if(!(l in t))throw Error(v(108,jf(e)||"Unknown",l));return V({},n,r)}function Xr(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||yt,Tt=se.current,A(se,e),A(he,he.current),!0}function Pu(e,t,n){var r=e.stateNode;if(!r)throw Error(v(169));n?(e=aa(e,t,Tt),r.__reactInternalMemoizedMergedChildContext=e,j(he),j(se),A(se,e)):j(he),A(he,n)}var Qe=null,pl=!1,Hl=!1;function ca(e){Qe===null?Qe=[e]:Qe.push(e)}function bd(e){pl=!0,ca(e)}function wt(){if(!Hl&&Qe!==null){Hl=!0;var e=0,t=I;try{var n=Qe;for(I=1;e>=o,l-=o,Ke=1<<32-je(t)+l|n<N?(H=C,C=null):H=C.sibling;var L=p(f,C,d[N],y);if(L===null){C===null&&(C=H);break}e&&C&&L.alternate===null&&t(f,C),a=i(L,a,N),_===null?E=L:_.sibling=L,_=L,C=H}if(N===d.length)return n(f,C),F&&xt(f,N),E;if(C===null){for(;NN?(H=C,C=null):H=C.sibling;var Le=p(f,C,L.value,y);if(Le===null){C===null&&(C=H);break}e&&C&&Le.alternate===null&&t(f,C),a=i(Le,a,N),_===null?E=Le:_.sibling=Le,_=Le,C=H}if(L.done)return n(f,C),F&&xt(f,N),E;if(C===null){for(;!L.done;N++,L=d.next())L=h(f,L.value,y),L!==null&&(a=i(L,a,N),_===null?E=L:_.sibling=L,_=L);return F&&xt(f,N),E}for(C=r(f,C);!L.done;N++,L=d.next())L=g(C,f,N,L.value,y),L!==null&&(e&&L.alternate!==null&&C.delete(L.key===null?N:L.key),a=i(L,a,N),_===null?E=L:_.sibling=L,_=L);return e&&C.forEach(function(pn){return t(f,pn)}),F&&xt(f,N),E}function D(f,a,d,y){if(typeof d=="object"&&d!==null&&d.type===Ut&&d.key===null&&(d=d.props.children),typeof d=="object"&&d!==null){switch(d.$$typeof){case ur:e:{for(var E=d.key,_=a;_!==null;){if(_.key===E){if(E=d.type,E===Ut){if(_.tag===7){n(f,_.sibling),a=l(_,d.props.children),a.return=f,f=a;break e}}else if(_.elementType===E||typeof 
E=="object"&&E!==null&&E.$$typeof===tt&&Au(E)===_.type){n(f,_.sibling),a=l(_,d.props),a.ref=kn(f,_,d),a.return=f,f=a;break e}n(f,_);break}else t(f,_);_=_.sibling}d.type===Ut?(a=Ot(d.props.children,f.mode,y,d.key),a.return=f,f=a):(y=Ar(d.type,d.key,d.props,null,f.mode,y),y.ref=kn(f,a,d),y.return=f,f=y)}return o(f);case Ft:e:{for(_=d.key;a!==null;){if(a.key===_)if(a.tag===4&&a.stateNode.containerInfo===d.containerInfo&&a.stateNode.implementation===d.implementation){n(f,a.sibling),a=l(a,d.children||[]),a.return=f,f=a;break e}else{n(f,a);break}else t(f,a);a=a.sibling}a=Jl(d,f.mode,y),a.return=f,f=a}return o(f);case tt:return _=d._init,D(f,a,_(d._payload),y)}if(_n(d))return w(f,a,d,y);if(hn(d))return k(f,a,d,y);gr(f,d)}return typeof d=="string"&&d!==""||typeof d=="number"?(d=""+d,a!==null&&a.tag===6?(n(f,a.sibling),a=l(a,d),a.return=f,f=a):(n(f,a),a=Zl(d,f.mode,y),a.return=f,f=a),o(f)):n(f,a)}return D}var on=ga(!0),wa=ga(!1),rr={},He=gt(rr),Xn=gt(rr),Yn=gt(rr);function Pt(e){if(e===rr)throw Error(v(174));return e}function _o(e,t){switch(A(Yn,t),A(Xn,e),A(He,rr),e=t.nodeType,e){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:ci(null,"");break;default:e=e===8?t.parentNode:t,t=e.namespaceURI||null,e=e.tagName,t=ci(t,e)}j(He),A(He,t)}function un(){j(He),j(Xn),j(Yn)}function ka(e){Pt(Yn.current);var t=Pt(He.current),n=ci(t,e.type);t!==n&&(A(Xn,e),A(He,n))}function Co(e){Xn.current===e&&(j(He),j(Xn))}var U=gt(0);function br(e){for(var t=e;t!==null;){if(t.tag===13){var n=t.memoizedState;if(n!==null&&(n=n.dehydrated,n===null||n.data==="$?"||n.data==="$!"))return t}else if(t.tag===19&&t.memoizedProps.revealOrder!==void 0){if(t.flags&128)return t}else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===e)break;for(;t.sibling===null;){if(t.return===null||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}var Wl=[];function No(){for(var e=0;en?n:4,e(!0);var r=Ql.transition;Ql.transition={};try{e(!1),t()}finally{I=n,Ql.transition=r}}function ja(){return Te().memoizedState}function rp(e,t,n){var r=pt(e);if(n={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null},Da(e))Fa(t,n);else if(n=ma(e,t,n,r),n!==null){var l=ce();De(n,e,r,l),Ua(n,t,r)}}function lp(e,t,n){var r=pt(e),l={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null};if(Da(e))Fa(t,l);else{var i=e.alternate;if(e.lanes===0&&(i===null||i.lanes===0)&&(i=t.lastRenderedReducer,i!==null))try{var o=t.lastRenderedState,u=i(o,n);if(l.hasEagerState=!0,l.eagerState=u,Fe(u,o)){var s=t.interleaved;s===null?(l.next=l,Eo(t)):(l.next=s.next,s.next=l),t.interleaved=l;return}}catch{}finally{}n=ma(e,t,l,r),n!==null&&(l=ce(),De(n,e,r,l),Ua(n,t,r))}}function Da(e){var t=e.alternate;return e===$||t!==null&&t===$}function Fa(e,t){Rn=el=!0;var n=e.pending;n===null?t.next=t:(t.next=n.next,n.next=t),e.pending=t}function Ua(e,t,n){if(n&4194240){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,uo(e,n)}}var tl={readContext:Oe,useCallback:ie,useContext:ie,useEffect:ie,useImperativeHandle:ie,useInsertionEffect:ie,useLayoutEffect:ie,useMemo:ie,useReducer:ie,useRef:ie,useState:ie,useDebugValue:ie,useDeferredValue:ie,useTransition:ie,useMutableSource:ie,useSyncExternalStore:ie,useId:ie,unstable_isNewReconciler:!1},ip={readContext:Oe,useCallback:function(e,t){return $e().memoizedState=[e,t===void 0?null:t],e},useContext:Oe,useEffect:ju,useImperativeHandle:function(e,t,n){return n=n!=null?n.concat([e]):null,Tr(4194308,4,La.bind(null,t,e),n)},useLayoutEffect:function(e,t){return 
Tr(4194308,4,e,t)},useInsertionEffect:function(e,t){return Tr(4,2,e,t)},useMemo:function(e,t){var n=$e();return t=t===void 0?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=$e();return t=n!==void 0?n(t):t,r.memoizedState=r.baseState=t,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:t},r.queue=e,e=e.dispatch=rp.bind(null,$,e),[r.memoizedState,e]},useRef:function(e){var t=$e();return e={current:e},t.memoizedState=e},useState:Mu,useDebugValue:Lo,useDeferredValue:function(e){return $e().memoizedState=e},useTransition:function(){var e=Mu(!1),t=e[0];return e=np.bind(null,e[1]),$e().memoizedState=e,[t,e]},useMutableSource:function(){},useSyncExternalStore:function(e,t,n){var r=$,l=$e();if(F){if(n===void 0)throw Error(v(407));n=n()}else{if(n=t(),ee===null)throw Error(v(349));Rt&30||xa(r,t,n)}l.memoizedState=n;var i={value:n,getSnapshot:t};return l.queue=i,ju(Ca.bind(null,r,i,e),[e]),r.flags|=2048,Jn(9,_a.bind(null,r,i,n,t),void 0,null),n},useId:function(){var e=$e(),t=ee.identifierPrefix;if(F){var n=Xe,r=Ke;n=(r&~(1<<32-je(r)-1)).toString(32)+n,t=":"+t+"R"+n,n=Gn++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=o.createElement(n,{is:r.is}):(e=o.createElement(n),n==="select"&&(o=e,r.multiple?o.multiple=!0:r.size&&(o.size=r.size))):e=o.createElementNS(e,n),e[Ve]=t,e[Kn]=r,Ya(e,t,!1,!1),t.stateNode=e;e:{switch(o=di(n,r),n){case"dialog":M("cancel",e),M("close",e),l=r;break;case"iframe":case"object":case"embed":M("load",e),l=r;break;case"video":case"audio":for(l=0;lan&&(t.flags|=128,r=!0,Sn(i,!1),t.lanes=4194304)}else{if(!r)if(e=br(o),e!==null){if(t.flags|=128,r=!0,n=e.updateQueue,n!==null&&(t.updateQueue=n,t.flags|=4),Sn(i,!0),i.tail===null&&i.tailMode==="hidden"&&!o.alternate&&!F)return oe(t),null}else 2*Q()-i.renderingStartTime>an&&n!==1073741824&&(t.flags|=128,r=!0,Sn(i,!1),t.lanes=4194304);i.isBackwards?(o.sibling=t.child,t.child=o):(n=i.last,n!==null?n.sibling=o:t.child=o,i.last=o)}return i.tail!==null?(t=i.tail,i.rendering=t,i.tail=t.sibling,i.renderingStartTime=Q(),t.sibling=null,n=U.current,A(U,r?n&1|2:n&1),t):(oe(t),null);case 22:case 23:return Do(),r=t.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(t.flags|=8192),r&&t.mode&1?ge&1073741824&&(oe(t),t.subtreeFlags&6&&(t.flags|=8192)):oe(t),null;case 24:return null;case 25:return null}throw Error(v(156,t.tag))}function pp(e,t){switch(vo(t),t.tag){case 1:return ye(t.type)&&Kr(),e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 3:return un(),j(he),j(se),No(),e=t.flags,e&65536&&!(e&128)?(t.flags=e&-65537|128,t):null;case 5:return Co(t),null;case 13:if(j(U),e=t.memoizedState,e!==null&&e.dehydrated!==null){if(t.alternate===null)throw Error(v(340));ln()}return e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 19:return j(U),null;case 4:return un(),null;case 10:return So(t.type._context),null;case 22:case 23:return Do(),null;case 24:return null;default:return null}}var kr=!1,ue=!1,mp=typeof WeakSet=="function"?WeakSet:Set,S=null;function Yt(e,t){var n=e.ref;if(n!==null)if(typeof n=="function")try{n(null)}catch(r){B(e,t,r)}else n.current=null}function Ui(e,t,n){try{n()}catch(r){B(e,t,r)}}var Qu=!1;function hp(e,t){if(Ei=Br,e=ea(),ho(e)){if("selectionStart"in e)var n={start:e.selectionStart,end:e.selectionEnd};else e:{n=(n=e.ownerDocument)&&n.defaultView||window;var r=n.getSelection&&n.getSelection();if(r&&r.rangeCount!==0){n=r.anchorNode;var l=r.anchorOffset,i=r.focusNode;r=r.focusOffset;try{n.nodeType,i.nodeType}catch{n=null;break e}var 
o=0,u=-1,s=-1,c=0,m=0,h=e,p=null;t:for(;;){for(var g;h!==n||l!==0&&h.nodeType!==3||(u=o+l),h!==i||r!==0&&h.nodeType!==3||(s=o+r),h.nodeType===3&&(o+=h.nodeValue.length),(g=h.firstChild)!==null;)p=h,h=g;for(;;){if(h===e)break t;if(p===n&&++c===l&&(u=o),p===i&&++m===r&&(s=o),(g=h.nextSibling)!==null)break;h=p,p=h.parentNode}h=g}n=u===-1||s===-1?null:{start:u,end:s}}else n=null}n=n||{start:0,end:0}}else n=null;for(xi={focusedElem:e,selectionRange:n},Br=!1,S=t;S!==null;)if(t=S,e=t.child,(t.subtreeFlags&1028)!==0&&e!==null)e.return=t,S=e;else for(;S!==null;){t=S;try{var w=t.alternate;if(t.flags&1024)switch(t.tag){case 0:case 11:case 15:break;case 1:if(w!==null){var k=w.memoizedProps,D=w.memoizedState,f=t.stateNode,a=f.getSnapshotBeforeUpdate(t.elementType===t.type?k:Ie(t.type,k),D);f.__reactInternalSnapshotBeforeUpdate=a}break;case 3:var d=t.stateNode.containerInfo;d.nodeType===1?d.textContent="":d.nodeType===9&&d.documentElement&&d.removeChild(d.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(v(163))}}catch(y){B(t,t.return,y)}if(e=t.sibling,e!==null){e.return=t.return,S=e;break}S=t.return}return w=Qu,Qu=!1,w}function In(e,t,n){var r=t.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var i=l.destroy;l.destroy=void 0,i!==void 0&&Ui(t,n,i)}l=l.next}while(l!==r)}}function yl(e,t){if(t=t.updateQueue,t=t!==null?t.lastEffect:null,t!==null){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function $i(e){var t=e.ref;if(t!==null){var n=e.stateNode;switch(e.tag){case 5:e=n;break;default:e=n}typeof t=="function"?t(e):t.current=e}}function Ja(e){var t=e.alternate;t!==null&&(e.alternate=null,Ja(t)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(t=e.stateNode,t!==null&&(delete t[Ve],delete t[Kn],delete t[Ni],delete t[Jd],delete t[qd])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function qa(e){return e.tag===5||e.tag===3||e.tag===4}function Ku(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||qa(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function Vi(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.nodeType===8?n.parentNode.insertBefore(e,t):n.insertBefore(e,t):(n.nodeType===8?(t=n.parentNode,t.insertBefore(e,n)):(t=n,t.appendChild(e)),n=n._reactRootContainer,n!=null||t.onclick!==null||(t.onclick=Qr));else if(r!==4&&(e=e.child,e!==null))for(Vi(e,t,n),e=e.sibling;e!==null;)Vi(e,t,n),e=e.sibling}function Bi(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.insertBefore(e,t):n.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(Bi(e,t,n),e=e.sibling;e!==null;)Bi(e,t,n),e=e.sibling}var te=null,Ae=!1;function et(e,t,n){for(n=n.child;n!==null;)ba(e,t,n),n=n.sibling}function ba(e,t,n){if(Be&&typeof Be.onCommitFiberUnmount=="function")try{Be.onCommitFiberUnmount(sl,n)}catch{}switch(n.tag){case 5:ue||Yt(n,t);case 6:var r=te,l=Ae;te=null,et(e,t,n),te=r,Ae=l,te!==null&&(Ae?(e=te,n=n.stateNode,e.nodeType===8?e.parentNode.removeChild(n):e.removeChild(n)):te.removeChild(n.stateNode));break;case 18:te!==null&&(Ae?(e=te,n=n.stateNode,e.nodeType===8?Bl(e.parentNode,n):e.nodeType===1&&Bl(e,n),Vn(e)):Bl(te,n.stateNode));break;case 4:r=te,l=Ae,te=n.stateNode.containerInfo,Ae=!0,et(e,t,n),te=r,Ae=l;break;case 
0:case 11:case 14:case 15:if(!ue&&(r=n.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var i=l,o=i.destroy;i=i.tag,o!==void 0&&(i&2||i&4)&&Ui(n,t,o),l=l.next}while(l!==r)}et(e,t,n);break;case 1:if(!ue&&(Yt(n,t),r=n.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=n.memoizedProps,r.state=n.memoizedState,r.componentWillUnmount()}catch(u){B(n,t,u)}et(e,t,n);break;case 21:et(e,t,n);break;case 22:n.mode&1?(ue=(r=ue)||n.memoizedState!==null,et(e,t,n),ue=r):et(e,t,n);break;default:et(e,t,n)}}function Xu(e){var t=e.updateQueue;if(t!==null){e.updateQueue=null;var n=e.stateNode;n===null&&(n=e.stateNode=new mp),t.forEach(function(r){var l=_p.bind(null,e,r);n.has(r)||(n.add(r),r.then(l,l))})}}function Re(e,t){var n=t.deletions;if(n!==null)for(var r=0;rl&&(l=o),r&=~i}if(r=l,r=Q()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*vp(r/1960))-r,10e?16:e,ot===null)var r=!1;else{if(e=ot,ot=null,ll=0,R&6)throw Error(v(331));var l=R;for(R|=4,S=e.current;S!==null;){var i=S,o=i.child;if(S.flags&16){var u=i.deletions;if(u!==null){for(var s=0;sQ()-Mo?zt(e,0):Ao|=n),ve(e,t)}function uc(e,t){t===0&&(e.mode&1?(t=fr,fr<<=1,!(fr&130023424)&&(fr=4194304)):t=1);var n=ce();e=Je(e,t),e!==null&&(er(e,t,n),ve(e,n))}function xp(e){var t=e.memoizedState,n=0;t!==null&&(n=t.retryLane),uc(e,n)}function _p(e,t){var n=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(n=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(v(314))}r!==null&&r.delete(t),uc(e,n)}var sc;sc=function(e,t,n){if(e!==null)if(e.memoizedProps!==t.pendingProps||he.current)me=!0;else{if(!(e.lanes&n)&&!(t.flags&128))return me=!1,fp(e,t,n);me=!!(e.flags&131072)}else me=!1,F&&t.flags&1048576&&fa(t,Gr,t.index);switch(t.lanes=0,t.tag){case 2:var r=t.type;Lr(e,t),e=t.pendingProps;var l=rn(t,se.current);en(t,n),l=zo(null,t,r,e,l,n);var i=Oo();return t.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(t.tag=1,t.memoizedState=null,t.updateQueue=null,ye(r)?(i=!0,Xr(t)):i=!1,t.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,xo(t),l.updater=ml,t.stateNode=l,l._reactInternals=t,Ri(t,r,e,n),t=Mi(null,t,r,!0,i,n)):(t.tag=0,F&&i&&yo(t),ae(null,t,l,n),t=t.child),t;case 16:r=t.elementType;e:{switch(Lr(e,t),e=t.pendingProps,l=r._init,r=l(r._payload),t.type=r,l=t.tag=Np(r),e=Ie(r,e),l){case 0:t=Ai(null,t,r,e,n);break e;case 1:t=Bu(null,t,r,e,n);break e;case 11:t=$u(null,t,r,e,n);break e;case 14:t=Vu(null,t,r,Ie(r.type,e),n);break e}throw Error(v(306,r,""))}return t;case 0:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),Ai(e,t,r,l,n);case 1:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),Bu(e,t,r,l,n);case 3:e:{if(Qa(t),e===null)throw Error(v(387));r=t.pendingProps,i=t.memoizedState,l=i.element,ha(e,t),qr(t,r,null,n);var o=t.memoizedState;if(r=o.element,i.isDehydrated)if(i={element:r,isDehydrated:!1,cache:o.cache,pendingSuspenseBoundaries:o.pendingSuspenseBoundaries,transitions:o.transitions},t.updateQueue.baseState=i,t.memoizedState=i,t.flags&256){l=sn(Error(v(423)),t),t=Hu(e,t,r,n,l);break e}else if(r!==l){l=sn(Error(v(424)),t),t=Hu(e,t,r,n,l);break e}else for(we=ct(t.stateNode.containerInfo.firstChild),ke=t,F=!0,Me=null,n=wa(t,null,r,n),t.child=n;n;)n.flags=n.flags&-3|4096,n=n.sibling;else{if(ln(),r===l){t=qe(e,t,n);break e}ae(e,t,r,n)}t=t.child}return t;case 5:return 
ka(t),e===null&&Oi(t),r=t.type,l=t.pendingProps,i=e!==null?e.memoizedProps:null,o=l.children,_i(r,l)?o=null:i!==null&&_i(r,i)&&(t.flags|=32),Wa(e,t),ae(e,t,o,n),t.child;case 6:return e===null&&Oi(t),null;case 13:return Ka(e,t,n);case 4:return _o(t,t.stateNode.containerInfo),r=t.pendingProps,e===null?t.child=on(t,null,r,n):ae(e,t,r,n),t.child;case 11:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),$u(e,t,r,l,n);case 7:return ae(e,t,t.pendingProps,n),t.child;case 8:return ae(e,t,t.pendingProps.children,n),t.child;case 12:return ae(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,l=t.pendingProps,i=t.memoizedProps,o=l.value,A(Zr,r._currentValue),r._currentValue=o,i!==null)if(Fe(i.value,o)){if(i.children===l.children&&!he.current){t=qe(e,t,n);break e}}else for(i=t.child,i!==null&&(i.return=t);i!==null;){var u=i.dependencies;if(u!==null){o=i.child;for(var s=u.firstContext;s!==null;){if(s.context===r){if(i.tag===1){s=Ye(-1,n&-n),s.tag=2;var c=i.updateQueue;if(c!==null){c=c.shared;var m=c.pending;m===null?s.next=s:(s.next=m.next,m.next=s),c.pending=s}}i.lanes|=n,s=i.alternate,s!==null&&(s.lanes|=n),Ti(i.return,n,t),u.lanes|=n;break}s=s.next}}else if(i.tag===10)o=i.type===t.type?null:i.child;else if(i.tag===18){if(o=i.return,o===null)throw Error(v(341));o.lanes|=n,u=o.alternate,u!==null&&(u.lanes|=n),Ti(o,n,t),o=i.sibling}else o=i.child;if(o!==null)o.return=i;else for(o=i;o!==null;){if(o===t){o=null;break}if(i=o.sibling,i!==null){i.return=o.return,o=i;break}o=o.return}i=o}ae(e,t,l.children,n),t=t.child}return t;case 9:return l=t.type,r=t.pendingProps.children,en(t,n),l=Oe(l),r=r(l),t.flags|=1,ae(e,t,r,n),t.child;case 14:return r=t.type,l=Ie(r,t.pendingProps),l=Ie(r.type,l),Vu(e,t,r,l,n);case 15:return Ba(e,t,t.type,t.pendingProps,n);case 17:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Ie(r,l),Lr(e,t),t.tag=1,ye(r)?(e=!0,Xr(t)):e=!1,en(t,n),va(t,r,l),Ri(t,r,l,n),Mi(null,t,r,!0,e,n);case 19:return Xa(e,t,n);case 22:return Ha(e,t,n)}throw Error(v(156,t.tag))};function ac(e,t){return Ms(e,t)}function Cp(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Ne(e,t,n,r){return new Cp(e,t,n,r)}function Uo(e){return e=e.prototype,!(!e||!e.isReactComponent)}function Np(e){if(typeof e=="function")return Uo(e)?1:0;if(e!=null){if(e=e.$$typeof,e===ro)return 11;if(e===lo)return 14}return 2}function mt(e,t){var n=e.alternate;return n===null?(n=Ne(e.tag,t,e.key,e.mode),n.elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.type=e.type,n.flags=0,n.subtreeFlags=0,n.deletions=null),n.flags=e.flags&14680064,n.childLanes=e.childLanes,n.lanes=e.lanes,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=t===null?null:{lanes:t.lanes,firstContext:t.firstContext},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function Ar(e,t,n,r,l,i){var o=2;if(r=e,typeof e=="function")Uo(e)&&(o=1);else if(typeof e=="string")o=5;else e:switch(e){case Ut:return Ot(n.children,l,i,t);case no:o=8,l|=8;break;case ni:return e=Ne(12,n,t,l|2),e.elementType=ni,e.lanes=i,e;case ri:return e=Ne(13,n,t,l),e.elementType=ri,e.lanes=i,e;case li:return 
e=Ne(19,n,t,l),e.elementType=li,e.lanes=i,e;case gs:return gl(n,l,i,t);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case ys:o=10;break e;case vs:o=9;break e;case ro:o=11;break e;case lo:o=14;break e;case tt:o=16,r=null;break e}throw Error(v(130,e==null?e:typeof e,""))}return t=Ne(o,n,t,l),t.elementType=e,t.type=r,t.lanes=i,t}function Ot(e,t,n,r){return e=Ne(7,e,r,t),e.lanes=n,e}function gl(e,t,n,r){return e=Ne(22,e,r,t),e.elementType=gs,e.lanes=n,e.stateNode={isHidden:!1},e}function Zl(e,t,n){return e=Ne(6,e,null,t),e.lanes=n,e}function Jl(e,t,n){return t=Ne(4,e.children!==null?e.children:[],e.key,t),t.lanes=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function Pp(e,t,n,r,l){this.tag=t,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=Ll(0),this.expirationTimes=Ll(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=Ll(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function $o(e,t,n,r,l,i,o,u,s){return e=new Pp(e,t,n,u,s),t===1?(t=1,i===!0&&(t|=8)):t=0,i=Ne(3,null,null,t),e.current=i,i.stateNode=e,i.memoizedState={element:r,isDehydrated:n,cache:null,transitions:null,pendingSuspenseBoundaries:null},xo(i),e}function zp(e,t,n){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(t)}catch(n){console.error(n)}}t(),e.exports=Ee})(Of);var pc,ts=bl;pc=ts.createRoot,ts.hydrateRoot;const q=new kf,Ip=["audio-classification","audio-to-audio","automatic-speech-recognition","conversational","depth-estimation","document-question-answering","feature-extraction","fill-mask","graph-ml","image-classification","image-segmentation","image-to-image","image-to-text","multiple-choice","object-detection","other","question-answering","reinforcement-learning","robotics","sentence-similarity","summarization","table-question-answering","table-to-text","tabular-classification","tabular-regression","tabular-to-text","text-classification","text-generation","text-retrieval","text-to-image","text-to-speech","text2text-generation","time-series-forecasting","token-classification","translation","unconditional-image-generation","video-classification","visual-question-answering","voice-activity-detection","zero-shot-classification","zero-shot-image-classification"].filter(e=>Object.getOwnPropertyNames(q).includes(zf(e))),ql={},Ap=async e=>{if(ql[e])return ql[e];const t=[];for await(const n of Kc({search:{task:e}}))t.push(n);return t.sort((n,r)=>n.downloads>r.downloads?-1:n.downloadsr.likes?-1:n.likesr.name?-1:n.nameze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Task"}),ze("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:t=>e.setTask(t.target.value),placeholder:"Select a task",value:e.task,children:[P("option",{children:"Select a task"}),Ip.map(t=>P("option",{value:t,children:t},t))]})]}),jp=e=>{const[t,n]=ne.useState(!1),[r,l]=ne.useState([]);return ne.useEffect(()=>{e.task&&(n(!0),Ap(e.task).then(i=>l(i)).finally(()=>n(!1)))},[e.task]),r.length>0?ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Model"}),ze("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center 
w-full",onChange:i=>e.setModel(i.target.value),placeholder:"Select a model",value:e.model,children:[P("option",{children:"Select a model"}),r.map(i=>P("option",{value:i.name,children:i.name},i.name))]})]}):P("p",{className:"text-center w-full",children:e.task?t?"Loading models for this task":"No models available for this task":"Select a task to view available models"})},Dp=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Inputs"}),e.inputs?P("audio",{className:"w-full",controls:!0,src:URL.createObjectURL(e.inputs)}):ze("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",P("input",{accept:"audio/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInputs(t.target.files[0])},type:"file"})]})]}),Fp=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Inputs"}),e.inputs?P("img",{className:"w-full",src:URL.createObjectURL(e.inputs)}):ze("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",P("input",{accept:"image/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInputs(t.target.files[0])},type:"file"})]})]}),Up=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Inputs"}),P("input",{className:"bg-yellow-200 py-6 text-center w-full",onChange:t=>{t.target.value?e.setInputs(t.target.value):e.setInputs("")},type:"text",value:e.inputs??""})]}),$p=e=>e.model&&e.task?["audio-classification","automatic-speech-recognition"].includes(e.task)?P(Dp,{inputs:e.inputs,model:e.model,setInputs:e.setInputs,task:e.task}):["image-classification","image-segmentation","object-detection"].includes(e.task)?P(Fp,{inputs:e.inputs,model:e.model,setInputs:e.setInputs,task:e.task}):["conversational","feature-extraction","fill-mask","question-answering","summarization","table-question-answering","text-classification","text-generation","text-to-image","token-classification","translation","zero-shot-classification"].includes(e.task)?P(Up,{inputs:e.inputs,model:e.model,setInputs:e.setInputs,task:e.task}):P("div",{className:"w-full",children:P("p",{className:"text-center",children:"Inference for this task is not yet supported."})}):P(ne.Fragment,{}),Vp=e=>{if(e.inputs&&e.model&&e.task){const t=()=>{e.setInputs(void 0),e.setOutput(void 0)};return P("button",{className:`border-4 border-yellow-200 py-6 text-center w-full ${e.loading?"cursor-not-allowed opacity-50":""}`,disabled:e.loading,onClick:t,children:"Clear"})}return P(ne.Fragment,{})},Bp=e=>{if(e.inputs&&e.model&&e.task){const t=async()=>{if(e.inputs&&e.model&&e.task){e.setLoading(!0);try{switch(e.task){case"audio-classification":{const n=await q.audioClassification({data:e.inputs,model:e.model});e.setOutput(n);break}case"automatic-speech-recognition":{const n=await q.automaticSpeechRecognition({data:e.inputs,model:e.model});e.setOutput(n);break}case"conversational":{const n=await q.conversational({inputs:{text:e.inputs},model:e.model});e.setOutput(n);break}case"feature-extraction":{const n=await q.featureExtraction({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"fill-mask":{const n=await q.fillMask({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"image-classification":{const n=await q.imageClassification({data:e.inputs,model:e.model});e.setOutput(n);break}case"image-segmentation":{const n=await q.imageSegmentation({data:e.inputs,model:e.model});e.setOutput(n);break}case"object-detection":{const n=await 
q.objectDetection({data:e.inputs,model:e.model});e.setOutput(n);break}case"question-answering":{const n=await q.questionAnswering({inputs:{context:e.inputs,question:e.inputs},model:e.model});e.setOutput(n);break}case"summarization":{const n=await q.summarization({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"table-question-answering":{const n=await q.tableQuestionAnswering({inputs:{query:e.inputs,table:{[e.inputs]:[e.inputs]}},model:e.model});e.setOutput(n);break}case"text-classification":{const n=await q.textClassification({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"text-generation":{const n=await q.textGeneration({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"text-to-image":{const n=await q.textToImage({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"token-classification":{const n=await q.tokenClassification({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"translation":{const n=await q.translation({inputs:e.inputs,model:e.model});e.setOutput(n);break}case"zero-shot-classification":{const n=await q.zeroShotClassification({inputs:e.inputs,model:e.model,parameters:{candidate_labels:[e.inputs]}});e.setOutput(n);break}}}catch(n){n instanceof Error&&e.setOutput(n.message)}e.setLoading(!1)}};return P("button",{className:`bg-yellow-200 py-6 text-center w-full ${e.loading?"cursor-not-allowed opacity-50":""}`,disabled:e.loading,onClick:t,children:e.loading?"Submitting":"Submit"})}return P(ne.Fragment,{})},Hp=e=>ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Output"}),P("img",{className:`w-full ${e.loading?"cursor-wait opacity-50":""}`,src:URL.createObjectURL(e.output)})]}),Wp=e=>{const t=(()=>{try{return JSON.stringify(e.output,void 0,2)}catch(n){if(n instanceof Error)return`Error during JSON.stringify: ${n.message}`}})();return ze("div",{className:"w-full",children:[P("p",{className:"text-xl",children:"Output"}),P("pre",{className:`bg-yellow-200 p-6 select-text w-full whitespace-pre-wrap ${e.loading?"cursor-wait opacity-50":""}`,children:t})]})},Qp=e=>e.output&&e.task?["text-to-image"].includes(e.task)?P(Hp,{loading:e.loading,output:e.output}):P(Wp,{loading:e.loading,output:e.output}):P(ne.Fragment,{}),Kp=()=>{const[e,t]=ne.useState(),[n,r]=ne.useState(),[l,i]=ne.useState(),[o,u]=ne.useState(!1),[s,c]=ne.useState();return console.log("App",{task:e,model:n,inputs:l,loading:o,output:s}),P("div",{className:"bg-yellow-500 flex flex-col h-full items-center min-h-screen min-w-screen overflow-auto w-full",children:ze("div",{className:"flex flex-col items-center justify-center py-24 space-y-12 w-2/3 lg:w-1/3",children:[P("header",{className:"text-center text-6xl",children:"🤗"}),P(Mp,{setTask:t,task:e}),P(jp,{model:n,setModel:r,task:e}),P($p,{inputs:l,model:n,setInputs:i,task:e}),P(Vp,{inputs:l,loading:o,model:n,setInputs:i,setOutput:c,task:e}),P(Bp,{inputs:l,loading:o,model:n,setLoading:u,setOutput:c,task:e}),P(Qp,{loading:o,output:s,task:e})]})})},Xp=()=>{const e="root",t=document.getElementById(e);if(t){const n=pc(t),r=P(ne.StrictMode,{children:P(Kp,{})});n.render(r)}};Xp();
diff --git a/spaces/almakedon/faster-whisper-webui/src/whisper/fasterWhisperContainer.py b/spaces/almakedon/faster-whisper-webui/src/whisper/fasterWhisperContainer.py
deleted file mode 100644
index 5bd640eeba90f7ad2c6a2795ed14e40d30e90c4c..0000000000000000000000000000000000000000
--- a/spaces/almakedon/faster-whisper-webui/src/whisper/fasterWhisperContainer.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import os
-from typing import List, Union
-
-from faster_whisper import WhisperModel, download_model
-from src.config import ModelConfig, VadInitialPromptMode
-from src.hooks.progressListener import ProgressListener
-from src.languages import get_language_from_name
-from src.modelCache import ModelCache
-from src.prompts.abstractPromptStrategy import AbstractPromptStrategy
-from src.whisper.abstractWhisperContainer import AbstractWhisperCallback, AbstractWhisperContainer
-from src.utils import format_timestamp
-
-class FasterWhisperContainer(AbstractWhisperContainer):
- def __init__(self, model_name: str, device: str = None, compute_type: str = "float16",
- download_root: str = None,
- cache: ModelCache = None, models: List[ModelConfig] = []):
- super().__init__(model_name, device, compute_type, download_root, cache, models)
-
- def ensure_downloaded(self):
- """
- Ensure that the model is downloaded. This is useful if you want to ensure that the model is downloaded before
- passing the container to a subprocess.
- """
- model_config = self._get_model_config()
-
- if os.path.isdir(model_config.url):
- model_config.path = model_config.url
- else:
- model_config.path = download_model(model_config.url, output_dir=self.download_root)
-
- def _get_model_config(self) -> ModelConfig:
- """
- Get the model configuration for the model.
- """
- for model in self.models:
- if model.name == self.model_name:
- return model
- return None
-
- def _create_model(self):
- print("Loading faster whisper model " + self.model_name + " for device " + str(self.device))
- model_config = self._get_model_config()
- model_url = model_config.url
-
- if model_config.type == "whisper":
- if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]:
- raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.")
- if model_url == "large":
- # large is an alias for large-v1
- model_url = "large-v1"
-
- device = self.device
-
- if (device is None):
- device = "auto"
-
- model = WhisperModel(model_url, device=device, compute_type=self.compute_type)
- return model
-
- def create_callback(self, language: str = None, task: str = None,
- prompt_strategy: AbstractPromptStrategy = None,
- **decodeOptions: dict) -> AbstractWhisperCallback:
- """
- Create a WhisperCallback object that can be used to transcribe audio files.
-
- Parameters
- ----------
- language: str
- The target language of the transcription. If not specified, the language will be inferred from the audio content.
- task: str
- The task - either translate or transcribe.
- prompt_strategy: AbstractPromptStrategy
- The prompt strategy to use. If not specified, the prompt from Whisper will be used.
- decodeOptions: dict
- Additional options to pass to the decoder. Must be pickleable.
-
- Returns
- -------
- A WhisperCallback object.
- """
- return FasterWhisperCallback(self, language=language, task=task, prompt_strategy=prompt_strategy, **decodeOptions)
-
-class FasterWhisperCallback(AbstractWhisperCallback):
- def __init__(self, model_container: FasterWhisperContainer, language: str = None, task: str = None,
- prompt_strategy: AbstractPromptStrategy = None,
- **decodeOptions: dict):
- self.model_container = model_container
- self.language = language
- self.task = task
- self.prompt_strategy = prompt_strategy
- self.decodeOptions = decodeOptions
-
- self._printed_warning = False
-
- def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None):
- """
- Perform the transcription of the given audio file or data.
-
- Parameters
- ----------
- audio: Union[str, np.ndarray, torch.Tensor]
- The audio file to transcribe, or the audio data as a numpy array or torch tensor.
- segment_index: int
- The index of the audio segment being transcribed.
- prompt: str
- The prompt to condition this segment on, as provided by the prompt strategy (if any).
- progress_listener: ProgressListener
- A callback to receive progress updates.
- """
- model: WhisperModel = self.model_container.get_model()
- language_code = self._lookup_language_code(self.language) if self.language else None
-
- # Copy decode options and remove options that are not supported by faster-whisper
- decodeOptions = self.decodeOptions.copy()
- verbose = decodeOptions.pop("verbose", None)
-
- logprob_threshold = decodeOptions.pop("logprob_threshold", None)
-
- patience = decodeOptions.pop("patience", None)
- length_penalty = decodeOptions.pop("length_penalty", None)
- suppress_tokens = decodeOptions.pop("suppress_tokens", None)
-
- if (decodeOptions.pop("fp16", None) is not None):
- if not self._printed_warning:
- print("WARNING: fp16 option is ignored by faster-whisper - use compute_type instead.")
- self._printed_warning = True
-
- # Fix up decode options
- if (logprob_threshold is not None):
- decodeOptions["log_prob_threshold"] = logprob_threshold
-
- decodeOptions["patience"] = float(patience) if patience is not None else 1.0
- decodeOptions["length_penalty"] = float(length_penalty) if length_penalty is not None else 1.0
-
- # See if suppress_tokens is a string - if so, convert it to a list of ints
- decodeOptions["suppress_tokens"] = self._split_suppress_tokens(suppress_tokens)
-
- initial_prompt = self.prompt_strategy.get_segment_prompt(segment_index, prompt, detected_language) \
- if self.prompt_strategy else prompt
-
- segments_generator, info = model.transcribe(audio, \
- language=language_code if language_code else detected_language, task=self.task, \
- initial_prompt=initial_prompt, \
- **decodeOptions
- )
-
- segments = []
-
- for segment in segments_generator:
- segments.append(segment)
-
- if progress_listener is not None:
- progress_listener.on_progress(segment.end, info.duration)
- if verbose:
- print("[{}->{}] {}".format(format_timestamp(segment.start, True), format_timestamp(segment.end, True),
- segment.text))
-
- text = " ".join([segment.text for segment in segments])
-
- # Convert the segments to a format that is easier to serialize
- whisper_segments = [{
- "text": segment.text,
- "start": segment.start,
- "end": segment.end,
-
- # Extra fields added by faster-whisper
- "words": [{
- "start": word.start,
- "end": word.end,
- "word": word.word,
- "probability": word.probability
- } for word in (segment.words if segment.words is not None else []) ]
- } for segment in segments]
-
- result = {
- "segments": whisper_segments,
- "text": text,
- "language": info.language if info else None,
-
- # Extra fields added by faster-whisper
- "language_probability": info.language_probability if info else None,
- "duration": info.duration if info else None
- }
-
- # If we have a prompt strategy, we need to increment the current prompt
- if self.prompt_strategy:
- self.prompt_strategy.on_segment_finished(segment_index, prompt, detected_language, result)
-
- if progress_listener is not None:
- progress_listener.on_finished()
- return result
-
- def _split_suppress_tokens(self, suppress_tokens: Union[str, List[int]]):
- if (suppress_tokens is None):
- return None
- if (isinstance(suppress_tokens, list)):
- return suppress_tokens
-
- return [int(token) for token in suppress_tokens.split(",")]
-
- def _lookup_language_code(self, language: str):
- language = get_language_from_name(language)
-
- if language is None:
- raise ValueError("Invalid language: " + language)
-
- return language.code
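
The container/callback split above is easiest to see end to end. The sketch below is illustrative only: the `ModelConfig` constructor arguments and the inherited `get_model()` behaviour live in files not shown in this diff, so treat those names as assumptions.

```python
# Hypothetical usage of FasterWhisperContainer / FasterWhisperCallback.
# Assumptions: ModelConfig(name=..., url=..., type=...) matches src.config, and
# AbstractWhisperContainer.get_model() lazily builds the model via _create_model().
from src.config import ModelConfig
from src.whisper.fasterWhisperContainer import FasterWhisperContainer

models = [ModelConfig(name="tiny", url="tiny", type="whisper")]  # assumed field names
container = FasterWhisperContainer("tiny", device="cpu", compute_type="int8", models=models)
container.ensure_downloaded()  # optional: resolve model_config.path up front

callback = container.create_callback(task="transcribe", verbose=True)
result = callback.invoke("audio.wav", segment_index=0, prompt=None, detected_language=None)
print(result["text"], result["language"], result["duration"])
```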
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/examples/paex_wmme_surround.c b/spaces/amarchheda/ChordDuplicate/portaudio/examples/paex_wmme_surround.c
deleted file mode 100644
index 55fc2551316fc00effee811c5c9ddf0ba8f49d76..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/examples/paex_wmme_surround.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/** @file paex_wmme_surround.c
- @ingroup examples_src
- @brief Use WMME-specific channelMask to request 5.1 surround sound output.
- @author Ross Bencina
-*/
-/*
- * $Id: $
- * Portable Audio I/O Library
- * Windows MME surround sound output test
- *
- * Copyright (c) 2007 Ross Bencina
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-#include <stdio.h>
-#include <math.h>
-
-#include <windows.h> /* required when using pa_win_wmme.h */
-#include <mmsystem.h> /* required when using pa_win_wmme.h */
-
-#include "portaudio.h"
-#include "pa_win_wmme.h"
-
-#define NUM_SECONDS (12)
-#define SAMPLE_RATE (44100)
-#define FRAMES_PER_BUFFER (64)
-
-#ifndef M_PI
-#define M_PI (3.14159265)
-#endif
-
-#define TABLE_SIZE (100)
-
-#define CHANNEL_COUNT (6)
-
-
-
-typedef struct
-{
- float sine[TABLE_SIZE];
- int phase;
- int currentChannel;
- int cycleCount;
-}
-paTestData;
-
-/* This routine will be called by the PortAudio engine when audio is needed.
-** It may be called at interrupt level on some machines so don't do anything
-** that could mess up the system like calling malloc() or free().
-*/
-static int patestCallback( const void *inputBuffer, void *outputBuffer,
- unsigned long framesPerBuffer,
- const PaStreamCallbackTimeInfo* timeInfo,
- PaStreamCallbackFlags statusFlags,
- void *userData )
-{
- paTestData *data = (paTestData*)userData;
- float *out = (float*)outputBuffer;
- unsigned long i,j;
-
- (void) timeInfo; /* Prevent unused variable warnings. */
- (void) statusFlags;
- (void) inputBuffer;
-
- for( i=0; i<framesPerBuffer; i++ )
- {
- for( j = 0; j < CHANNEL_COUNT; ++j ){
- if( j == data->currentChannel && data->cycleCount < 4410 ){
- *out++ = data->sine[data->phase];
- data->phase += 1 + j; // play each channel at a different pitch so they can be distinguished
- if( data->phase >= TABLE_SIZE ){
- data->phase -= TABLE_SIZE;
- }
- }else{
- *out++ = 0;
- }
- }
-
- data->cycleCount++;
- if( data->cycleCount > 44100 ){
- data->cycleCount = 0;
-
- ++data->currentChannel;
- if( data->currentChannel >= CHANNEL_COUNT )
- data->currentChannel -= CHANNEL_COUNT;
- }
- }
-
- return paContinue;
-}
-
-/*******************************************************************/
-int main(int argc, char* argv[])
-{
- PaStreamParameters outputParameters;
- PaWinMmeStreamInfo wmmeStreamInfo;
- PaStream *stream;
- PaError err;
- paTestData data;
- int i;
- int deviceIndex;
-
- printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT);
-
- err = Pa_Initialize();
- if( err != paNoError ) goto error;
-
- deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paMME ) )->defaultOutputDevice;
- if( argc == 2 ){
- sscanf( argv[1], "%d", &deviceIndex );
- }
-
- printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name );
-
- /* initialise sinusoidal wavetable */
- for( i=0; i<TABLE_SIZE; i++ )
- {
- data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
- }
-
- data.phase = 0;
- data.currentChannel = 0;
- data.cycleCount = 0;
-
- outputParameters.device = deviceIndex;
- outputParameters.channelCount = CHANNEL_COUNT;
- outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */
- outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency;
- outputParameters.hostApiSpecificStreamInfo = NULL;
-
- /* it's not strictly necessary to provide a channelMask for surround sound
- output. But if you want to be sure which channel mask PortAudio will use
- then you should supply one */
- wmmeStreamInfo.size = sizeof(PaWinMmeStreamInfo);
- wmmeStreamInfo.hostApiType = paMME;
- wmmeStreamInfo.version = 1;
- wmmeStreamInfo.flags = paWinMmeUseChannelMask;
- wmmeStreamInfo.channelMask = PAWIN_SPEAKER_5POINT1; /* request 5.1 output format */
- outputParameters.hostApiSpecificStreamInfo = &wmmeStreamInfo;
-
-
- if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported ){
- printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT );
- }else{
- printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT );
- }
-
- err = Pa_OpenStream(
- &stream,
- NULL, /* no input */
- &outputParameters,
- SAMPLE_RATE,
- FRAMES_PER_BUFFER,
- paClipOff, /* we won't output out of range samples so don't bother clipping them */
- patestCallback,
- &data );
- if( err != paNoError ) goto error;
-
- err = Pa_StartStream( stream );
- if( err != paNoError ) goto error;
-
- printf("Play for %d seconds.\n", NUM_SECONDS );
- Pa_Sleep( NUM_SECONDS * 1000 );
-
- err = Pa_StopStream( stream );
- if( err != paNoError ) goto error;
-
- err = Pa_CloseStream( stream );
- if( err != paNoError ) goto error;
-
- Pa_Terminate();
- printf("Test finished.\n");
-
- return err;
-error:
- Pa_Terminate();
- fprintf( stderr, "An error occurred while using the portaudio stream\n" );
- fprintf( stderr, "Error number: %d\n", err );
- fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
- return err;
-}
diff --git a/spaces/ardha27/rvc-models/infer_pack/transforms.py b/spaces/ardha27/rvc-models/infer_pack/transforms.py
deleted file mode 100644
index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000
--- a/spaces/ardha27/rvc-models/infer_pack/transforms.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
-def unconstrained_rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails="linear",
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == "linear":
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError("{} tails are not implemented.".format(tails))
-
- (
- outputs[inside_interval_mask],
- logabsdet[inside_interval_mask],
- ) = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound,
- right=tail_bound,
- bottom=-tail_bound,
- top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- )
-
- return outputs, logabsdet
-
-
-def rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0.0,
- right=1.0,
- bottom=0.0,
- top=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError("Input to a transform is not within its domain")
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError("Minimal bin width too large for the number of bins")
- if min_bin_height * num_bins > 1.0:
- raise ValueError("Minimal bin height too large for the number of bins")
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- ) + input_heights * (input_delta - input_derivatives)
- b = input_heights * input_derivatives - (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- )
- c = -input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (
- input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
- )
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
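
A quick way to sanity-check the spline above is to run a batch through the forward transform and then invert it. This is a minimal sketch, assuming the file is importable as `infer_pack.transforms` (as in the space's layout); the tensor shapes are arbitrary.

```python
# Minimal sketch: 8 scalar inputs through a 10-bin spline with linear tails.
import torch

from infer_pack.transforms import piecewise_rational_quadratic_transform

torch.manual_seed(0)
inputs = torch.rand(8) * 2 - 1                 # values inside the tail_bound interval
unnormalized_widths = torch.randn(8, 10)
unnormalized_heights = torch.randn(8, 10)
unnormalized_derivatives = torch.randn(8, 9)   # num_bins - 1 when tails="linear"

outputs, logabsdet = piecewise_rational_quadratic_transform(
    inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives,
    tails="linear", tail_bound=1.0,
)
recovered, _ = piecewise_rational_quadratic_transform(
    outputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives,
    inverse=True, tails="linear", tail_bound=1.0,
)
print(torch.max((inputs - recovered).abs()))   # ~0: the transform is invertible
```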
diff --git a/spaces/arixiii/open-reverse-proxy/Dockerfile b/spaces/arixiii/open-reverse-proxy/Dockerfile
deleted file mode 100644
index 6953fc05439efb70991552cf56f28365b5b6c15b..0000000000000000000000000000000000000000
--- a/spaces/arixiii/open-reverse-proxy/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM node:18
-
-WORKDIR /app
-
-RUN npm install express express-http-proxy
-
-COPY . .
-
-EXPOSE 7860
-
-CMD [ "node", "server.js" ]
\ No newline at end of file
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/phonemizer.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/phonemizer.py
deleted file mode 100644
index 727c881e1062badc57df7418aa07e7434d57335c..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/chinese_mandarin/phonemizer.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from typing import List
-
-import jieba
-import pypinyin
-
-from .pinyinToPhonemes import PINYIN_DICT
-
-
-def _chinese_character_to_pinyin(text: str) -> List[str]:
- pinyins = pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True)
- pinyins_flat_list = [item for sublist in pinyins for item in sublist]
- return pinyins_flat_list
-
-
-def _chinese_pinyin_to_phoneme(pinyin: str) -> str:
- segment = pinyin[:-1]
- tone = pinyin[-1]
- phoneme = PINYIN_DICT.get(segment, [""])[0]
- return phoneme + tone
-
-
-def chinese_text_to_phonemes(text: str, seperator: str = "|") -> str:
- tokenized_text = jieba.cut(text, HMM=False)
- tokenized_text = " ".join(tokenized_text)
- pinyined_text: List[str] = _chinese_character_to_pinyin(tokenized_text)
-
- results: List[str] = []
-
- for token in pinyined_text:
- if token[-1] in "12345": # TODO transform to is_pinyin()
- pinyin_phonemes = _chinese_pinyin_to_phoneme(token)
-
- results += list(pinyin_phonemes)
- else: # is punctuation or other
- results += list(token)
-
- return seperator.join(results)
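
For reference, the function above can be called directly once `jieba` and `pypinyin` are installed; this sketch assumes the standard coqui-TTS package layout implied by the file path.

```python
# Minimal sketch; output is a "|"-separated sequence of phonemes and tone digits.
from TTS.tts.utils.text.chinese_mandarin.phonemizer import chinese_text_to_phonemes

print(chinese_text_to_phonemes("你好，世界"))
```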
diff --git a/spaces/artificialimagination/ai_detect_v0.1/app.py b/spaces/artificialimagination/ai_detect_v0.1/app.py
deleted file mode 100644
index a2bc4e6f8578c06c53ab50a9483e9ce9227e10d1..0000000000000000000000000000000000000000
--- a/spaces/artificialimagination/ai_detect_v0.1/app.py
+++ /dev/null
@@ -1,331 +0,0 @@
-import datetime
-import os
-import subprocess
-import pathlib
-import time
-import torch
-import torchaudio
-import traceback
-
-import xgboost as xgb
-import numpy as np
-
-# from pydub import AudioSegment
-# from pydub.silence import split_on_silence
-from spleeter.separator import Separator
-from spleeter.audio.adapter import AudioAdapter
-
-from transformers import (
- Wav2Vec2Processor,
- HubertForCTC,
- AutoProcessor,
- HubertModel,
- HubertForSequenceClassification,
- Wav2Vec2FeatureExtractor
- )
-
-
-# TARGET_SR = 22050
-TARGET_SR = 16_000
-NUM_SAMPLES = 5 * TARGET_SR
-
-MODELS_OUTPUT_DIR = os.getcwd()
-
-
-# https://web.stanford.edu/~nanbhas/blog/forward-hooks-pytorch/#extracting-activations-from-a-layer
-class SaveOutputHook:
- def __init__(self):
- self.outputs = None
-
- def __call__(self, module, module_in, module_out):
- self.outputs = module_out.detach().numpy()
-
- def clear(self):
- self.outputs = None
-
-
-
-def copy_from_cloud(cloud_model_path, verbose):
- local_path = os.path.join(MODELS_OUTPUT_DIR,
- os.path.split(cloud_model_path)[-1])
- subprocess.run(
- ["gsutil","-m", "cp", "-r", cloud_model_path, local_path],
- check=True
- )
-
- if verbose:
- print(f'Copied from {cloud_model_path} to {local_path}')
-
- return local_path
-
-
-def model_from_cloud(cloud_path, verbose=False):
- local_path = copy_from_cloud(cloud_path, verbose)
- return xgb.Booster(model_file=local_path)
-
-
-class XGB_AIDetect:
-
- def __init__(
- self,
- xgb_model_filename, # path to the model file
- number_of_samples=NUM_SAMPLES,
- target_sample_rate=TARGET_SR,
- ):
- self.xgb_model_filename = xgb_model_filename
- self.target_sample_rate = target_sample_rate
- # self.transform_name = transform_name
- self.number_of_samples = number_of_samples
- # self.vocal_extractor = None
- self.model = None
- self.initialized = False
-
- if torch.cuda.is_available():
- self.device = "cuda"
- else:
- self.device = "cpu"
- return
-
- def initialize(self):
- # initialize the model
- # self.model = model_from_cloud(self.xgb_model_filename, verbose=True)
- self.model = xgb.Booster(model_file=self.xgb_model_filename)
-
- # initialize the vocal extractor
- self.vocal_extractor = Separator('spleeter:2stems')
-
- self.sid_model = HubertForSequenceClassification.from_pretrained(
- "superb/hubert-base-superb-sid")
- self.sid_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
- "superb/hubert-base-superb-sid")
- self.hook = SaveOutputHook() # for grabbing the penultimate layer of the SID model
-
- if self.model and self.sid_model and self.sid_feature_extractor:
- self.initialized = True
- else:
- raise Exception("Ai Detection improperly initialized. Please try again.")
-
- return
-
- def normalize_audio(
- self,
- audio,
- sample_rate,
- target_sample_rate=TARGET_SR,
- number_samples=NUM_SAMPLES,
- ):
- # TODO: sort this out, working ok without it because we load the audio and set the sample rate
- # resample to make sure it is what the model expects
- # resampler = torchaudio.transforms.Resample(sample_rate, target_sample_rate)
- # audio = resampler(audio)
-
- # # mix down to mono 2 -> 1 channel
- # if audio.shape[0] > 1:
- # audio = torch.mean(audio, dim=0, keepdim=True)
-
- # clip
- if audio.shape[1] > self.number_of_samples:
- audio = audio[:, :self.number_of_samples]
-
- # pad
- length_audio = audio.shape[1]
- if length_audio < self.number_of_samples:
- pad = self.number_of_samples - length_audio
- padding = (0, pad)
- audio = torch.nn.functional.pad(audio, padding)
-
- return audio
-
- def make_slices(self, audio_squeeze):
- duration = self.number_of_samples
- output = []
-
- # handle stereo
- if audio_squeeze.shape[0] == 2:
- for i in range(0, len(audio_squeeze[1]), duration):
- if (i + duration) <= len(audio_squeeze[1]):
- output.append(audio_squeeze[:2, i:i + duration])
- else:
- print("yo")
- print(f"{len(audio_squeeze[1]) - duration}")
- output.append(audio_squeeze[:2, len(audio_squeeze[1]) - duration:len(audio_squeeze[1])])
- else:
- for i in range(0, len(audio_squeeze), duration):
- if (i + duration) <= len(audio_squeeze):
- output.append(audio_squeeze[i:i + duration])
- else:
- output.append(audio_squeeze[len(audio_squeeze) - duration:len(audio_squeeze)])
- return output
-
- def extract_vox(self, audio):
- try:
- prediction = self.vocal_extractor.separate(audio)
- except:
- print('audio shape:', audio.shape)
- raise
- vox = prediction["vocals"]
- return vox
-
- # Not in use for inference
- # def strip_silences(self, data):
- # sound = AudioSegment.from_raw(data)
- # chunks = split_on_silence(
- # sound,
-
- # # split on silences longer than 1000ms (1 sec)
- # min_silence_len=100,
-
- # # anything under -16 dBFS is considered silence
- # silence_thresh=-16,
-
- # # keep 200 ms of leading/trailing silence
- # keep_silence=200
- # )
- # output = AudioSegment.empty()
- # for c in chunks:
- # output += c
- # return output
-
- def shape_audio(self, audio):
- # made for LFCC or MFCC
- pad = torch.zeros(1, 64 - 13, 214)
- pad = pad.to(self.device)
- audio = torch.cat([audio, pad], dim=1)
- pad_2 = torch.zeros(1, 64, 216 - 214)
- pad_2 = pad_2.to(self.device)
- audio = torch.cat([audio, pad_2], dim=2)
- return audio
-
- def make_embedding(self, audio):
- inputs = self.sid_feature_extractor(
- audio, sampling_rate=self.target_sample_rate, padding=True,
- return_tensors="pt")
- # print()
- handle = self.sid_model.projector.register_forward_hook(self.hook)
-
- model_output = self.sid_model(
- input_values=inputs['input_values'].squeeze(),
- attention_mask=inputs['attention_mask'].squeeze(),
- )
-
- penultimate_outputs = self.hook.outputs
- # print('penultimates shape:', penultimate_outputs.shape)
- handle.remove()
- self.hook.clear()
- return penultimate_outputs.mean(axis=0).flatten()
-
-
- def prepare(self, input_file):
- audio_loader = AudioAdapter.default()
- waveform, sr = audio_loader.load(
- input_file,
- sample_rate=self.target_sample_rate)
-
- # waveform, sr = torchaudio.load(input_file)
-
- print(f"Loaded audio at sr: {sr}")
-
- # extract vocals
- vox = self.extract_vox(waveform)
-
- # TODO
- # vox = self.strip_silences(vox)
-
- # process data
- # convert vox to tensor
- audio = torch.from_numpy(vox)
-
- # swap dims
- audio = audio.permute(1, 0)
-
- # # make mono. # jarfa: commented this out, the embeddings run on stereo
- # # if audio.shape[0] > 1:
- # audio = torch.mean(audio, dim=0, keepdim=True)
-
- # # get rid of one dim
- # audio_squeeze = torch.squeeze(audio)
-
- # audio_squeeze = torchaudio.functional.vad(
- # audio_squeeze,
- # self.target_sample_rate)
-
- # make slices here
- audio_slices = self.make_slices(audio) #(audio_squeeze)
-
- # # apply transform. # not applicable for this one
- # input_list = [self.transform(i) for i in audio_slices]
-
- # # unsqueeze to add dimension
- # input_list = [torch.unsqueeze(i, 0) for i in input_list]
-
- # if self.transform_name in ["MFCC", "LFCC"]:
- # input_list = [self.shape_audio(i) for i in input_list]
-
- # TODO: make embedding
- input_list = [
- self.make_embedding(aslice.to(self.device))
- for aslice in audio_slices
- ]
- return input_list
-
-
- def predict(self, input_list):
- batch = xgb.DMatrix(
- np.stack(input_list, axis=0)
- )
- probs = self.model.predict(batch)
- return list(probs)
-
-
- def run_prediction(self, input_file):
- start_time = time.process_time()
- input_list = self.prepare(input_file)
- predictions = self.predict(input_list)
- elapsed_time = time.process_time() - start_time
- output = {}
- output["file_name"] = input_file
- output["model"] = self.xgb_model_filename
- output["elapsed_time"] = str(datetime.timedelta(seconds=elapsed_time))
- output["predictions"] = self.pretty_response(predictions)
- print(predictions)
- return output
-
- def pretty_response(self, predictions):
- output = {}
- for i, pred in enumerate(predictions):
- secs = i * 5
- t = str(datetime.timedelta(seconds=secs))
- output[t] = str(pred)
- return output
-
-
-if __name__ == '__main__':
- # just for testing I literally downloaded this from gs://artificial_imagination/models/xgb_v0_epoch010_20230829_07_30.bin
- # and uploaded it to the local colab storage
- detect = XGB_AIDetect(
- 'models_xgb_v0_epoch010_20230829_07_30.bin')
-
- # initialize everything needed to detect from mp3 to prediction
- detect.initialize()
-
- import json
-
- import gradio as gr
-
-
- def inter_predict(local_file):
- try:
- response = detect.run_prediction(local_file)
- except Exception as e:
- traceback.print_exc(e)
- raise e
-
- return json.dumps(response, indent=2)
-
-
- demo = gr.Interface(fn=inter_predict,
- inputs=gr.Audio(source="upload", type="filepath"),
- outputs="text",
- cache_examples=True,
- )
- demo.launch()
\ No newline at end of file
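
The embedding step in `make_embedding` relies on a PyTorch forward hook to grab the SID model's penultimate (`projector`) output. A stripped-down, standalone version of that pattern, using a toy model rather than the HuBERT classifier, might look like this:

```python
# Generic forward-hook sketch (toy model; not the HuBERT SID classifier above).
import torch
import torch.nn as nn


class SaveOutputHook:
    """Stores the most recent output of the module it is attached to."""
    def __init__(self):
        self.outputs = None

    def __call__(self, module, module_in, module_out):
        self.outputs = module_out.detach().numpy()


model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
hook = SaveOutputHook()
handle = model[0].register_forward_hook(hook)  # capture the first Linear layer's output

_ = model(torch.randn(3, 4))
print(hook.outputs.shape)  # (3, 8): the intermediate activations, not the final logits
handle.remove()
```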
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/stacked_bar_chart_with_text.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/stacked_bar_chart_with_text.py
deleted file mode 100644
index 180d20871a3f8ab9161ab1cfa7ded80f7b6f1431..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/stacked_bar_chart_with_text.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Stacked Bar Chart with Text Overlay
-===================================
-This example shows how to overlay text on a stacked bar chart. For both the
-bar and text marks, we use the ``stack`` argument in the ``x`` encoding to
-cause the values to be stacked horizontally.
-"""
-# category: bar charts
-import altair as alt
-from vega_datasets import data
-
-source=data.barley()
-
-bars = alt.Chart(source).mark_bar().encode(
- x=alt.X('sum(yield):Q', stack='zero'),
- y=alt.Y('variety:N'),
- color=alt.Color('site')
-)
-
-text = alt.Chart(source).mark_text(dx=-15, dy=3, color='white').encode(
- x=alt.X('sum(yield):Q', stack='zero'),
- y=alt.Y('variety:N'),
- detail='site:N',
- text=alt.Text('sum(yield):Q', format='.1f')
-)
-
-bars + text
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/Token.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/Token.py
deleted file mode 100644
index 6f4d5e2629e544b5e75eec65f9e2fd64a9588984..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/Token.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-# Use of this file is governed by the BSD 3-clause license that
-# can be found in the LICENSE.txt file in the project root.
-#
-
-# A token has properties: text, type, line, character position in the line
-# (so we can ignore tabs), token channel, index, and source from which
-# we obtained this token.
-from io import StringIO
-
-
-class Token (object):
-
- INVALID_TYPE = 0
-
- # During lookahead operations, this "token" signifies we hit rule end ATN state
- # and did not follow it despite needing to.
- EPSILON = -2
-
- MIN_USER_TOKEN_TYPE = 1
-
- EOF = -1
-
- # All tokens go to the parser (unless skip() is called in that rule)
- # on a particular "channel". The parser tunes to a particular channel
- # so that whitespace etc... can go to the parser on a "hidden" channel.
-
- DEFAULT_CHANNEL = 0
-
-    # Anything on a channel other than DEFAULT_CHANNEL is not parsed
-    # by the parser.
-
- HIDDEN_CHANNEL = 1
-
- def __init__(self):
- self.source = None
- self.type = None # token type of the token
- self.channel = None # The parser ignores everything not on DEFAULT_CHANNEL
- self.start = None # optional; return -1 if not implemented.
- self.stop = None # optional; return -1 if not implemented.
- self.tokenIndex = None # from 0..n-1 of the token object in the input stream
- self.line = None # line=1..n of the 1st character
- self.column = None # beginning of the line at which it occurs, 0..n-1
- self._text = None # text of the token.
-
- @property
- def text(self):
- return self._text
-
-    # Explicitly set the text for this token. If {@code text} is not
- # {@code null}, then {@link #getText} will return this value rather than
- # extracting the text from the input.
- #
- # @param text The explicit text of the token, or {@code null} if the text
- # should be obtained from the input along with the start and stop indexes
- # of the token.
-
- @text.setter
- def text(self, text:str):
- self._text = text
-
-
- def getTokenSource(self):
- return self.source[0]
-
- def getInputStream(self):
- return self.source[1]
-
-class CommonToken(Token):
-
-
- # An empty {@link Pair} which is used as the default value of
- # {@link #source} for tokens that do not have a source.
- EMPTY_SOURCE = (None, None)
-
- def __init__(self, source:tuple = EMPTY_SOURCE, type:int = None, channel:int=Token.DEFAULT_CHANNEL, start:int=-1, stop:int=-1):
- super().__init__()
- self.source = source
- self.type = type
- self.channel = channel
- self.start = start
- self.stop = stop
- self.tokenIndex = -1
- if source[0] is not None:
- self.line = source[0].line
- self.column = source[0].column
- else:
- self.column = -1
-
- # Constructs a new {@link CommonToken} as a copy of another {@link Token}.
- #
- #
- # If {@code oldToken} is also a {@link CommonToken} instance, the newly
- # constructed token will share a reference to the {@link #text} field and
- # the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will
- # be assigned the result of calling {@link #getText}, and {@link #source}
- # will be constructed from the result of {@link Token#getTokenSource} and
- # {@link Token#getInputStream}.
- #
- # @param oldToken The token to copy.
- #
- def clone(self):
- t = CommonToken(self.source, self.type, self.channel, self.start, self.stop)
- t.tokenIndex = self.tokenIndex
- t.line = self.line
- t.column = self.column
- t.text = self.text
- return t
-
- @property
- def text(self):
- if self._text is not None:
- return self._text
- input = self.getInputStream()
- if input is None:
- return None
- n = input.size
- if self.start < n and self.stop < n:
- return input.getText(self.start, self.stop)
- else:
- return ""
-
- @text.setter
- def text(self, text:str):
- self._text = text
-
- def __str__(self):
- with StringIO() as buf:
- buf.write("[@")
- buf.write(str(self.tokenIndex))
- buf.write(",")
- buf.write(str(self.start))
- buf.write(":")
- buf.write(str(self.stop))
- buf.write("='")
- txt = self.text
- if txt is not None:
- txt = txt.replace("\n","\\n")
- txt = txt.replace("\r","\\r")
- txt = txt.replace("\t","\\t")
- else:
- txt = ""
- buf.write(txt)
- buf.write("',<")
- buf.write(str(self.type))
- buf.write(">")
- if self.channel > 0:
- buf.write(",channel=")
- buf.write(str(self.channel))
- buf.write(",")
- buf.write(str(self.line))
- buf.write(":")
- buf.write(str(self.column))
- buf.write("]")
- return buf.getvalue()
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/contourpy/util/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/contourpy/util/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/util.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/util.py
deleted file mode 100644
index 753ddfbdd20fdfbf9ce72d960fadf76abfbca6d7..0000000000000000000000000000000000000000
--- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/util.py
+++ /dev/null
@@ -1,277 +0,0 @@
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from ldm.util import instantiate_from_config
-
-
-
-class FourierEmbedder():
- def __init__(self, num_freqs=64, temperature=100):
-
- self.num_freqs = num_freqs
- self.temperature = temperature
- self.freq_bands = temperature ** ( torch.arange(num_freqs) / num_freqs )
-
- @ torch.no_grad()
- def __call__(self, x, cat_dim=-1):
- "x: arbitrary shape of tensor. dim: cat dim"
- out = []
- for freq in self.freq_bands:
- out.append( torch.sin( freq*x ) )
- out.append( torch.cos( freq*x ) )
- return torch.cat(out, cat_dim)
-
-
-
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if schedule == "linear":
- betas = (
- torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
- )
-
- elif schedule == "cosine":
- timesteps = (
- torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
- )
- alphas = timesteps / (1 + cosine_s) * np.pi / 2
- alphas = torch.cos(alphas).pow(2)
- alphas = alphas / alphas[0]
- betas = 1 - alphas[1:] / alphas[:-1]
- betas = np.clip(betas, a_min=0, a_max=0.999)
-
- elif schedule == "sqrt_linear":
- betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
- elif schedule == "sqrt":
- betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
- else:
- raise ValueError(f"schedule '{schedule}' unknown.")
- return betas.numpy()
-
-
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
- if ddim_discr_method == 'uniform':
- c = num_ddpm_timesteps // num_ddim_timesteps
- ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
- elif ddim_discr_method == 'quad':
- ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
- else:
- raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
- # assert ddim_timesteps.shape[0] == num_ddim_timesteps
- # add one to get the final alpha values right (the ones from first scale to data during sampling)
- steps_out = ddim_timesteps + 1
- if verbose:
- print(f'Selected timesteps for ddim sampler: {steps_out}')
- return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
- # select alphas for computing the variance schedule
- alphas = alphacums[ddim_timesteps]
- alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-    # according to the formula provided in https://arxiv.org/abs/2010.02502
- sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
- if verbose:
- print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
- print(f'For the chosen value of eta, which is {eta}, '
- f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
- return sigmas, alphas, alphas_prev
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time from t = [0,1].
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
-
-
-def extract_into_tensor(a, t, x_shape):
- b, *_ = t.shape
- out = a.gather(-1, t)
- return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
-
-
-class CheckpointFunction(torch.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
-
- with torch.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with torch.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = torch.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
-
-
-def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
- """
- Create sinusoidal timestep embeddings.
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- if not repeat_only:
- half = dim // 2
- freqs = torch.exp(
- -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
- if dim % 2:
- embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
- else:
- embedding = repeat(timesteps, 'b -> b d', d=dim)
- return embedding
-
-
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels):
- """
- Make a standard normalization layer.
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(32, channels)
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * torch.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
- #return super().forward(x).type(x.dtype)
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-class HybridConditioner(nn.Module):
-
- def __init__(self, c_concat_config, c_crossattn_config):
- super().__init__()
- self.concat_conditioner = instantiate_from_config(c_concat_config)
- self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
- def forward(self, c_concat, c_crossattn):
- c_concat = self.concat_conditioner(c_concat)
- c_crossattn = self.crossattn_conditioner(c_crossattn)
- return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
-
-
-def noise_like(shape, device, repeat=False):
- repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
- noise = lambda: torch.randn(shape, device=device)
- return repeat_noise() if repeat else noise()
\ No newline at end of file
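
The `timestep_embedding` helper above implements the standard sinusoidal timestep embedding described in its docstring. A self-contained sketch of the even-`dim`, non-repeat path, shown only to illustrate the shapes involved (assumes PyTorch is installed):

```python
import math
import torch

def sinusoidal_embedding(timesteps, dim, max_period=10000):
    # Frequencies decay geometrically from 1 down to roughly 1/max_period.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    # cos terms first, then sin terms, matching the function above.
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

emb = sinusoidal_embedding(torch.tensor([0, 1, 10]), dim=8)
print(emb.shape)  # torch.Size([3, 8])
```
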
diff --git a/spaces/avivdm1/AutoGPT/autogpt/config/singleton.py b/spaces/avivdm1/AutoGPT/autogpt/config/singleton.py
deleted file mode 100644
index 55b2aeea120bbe51ca837265fcb7fbff467e55f2..0000000000000000000000000000000000000000
--- a/spaces/avivdm1/AutoGPT/autogpt/config/singleton.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""The singleton metaclass for ensuring only one instance of a class."""
-import abc
-
-
-class Singleton(abc.ABCMeta, type):
- """
- Singleton metaclass for ensuring only one instance of a class.
- """
-
- _instances = {}
-
- def __call__(cls, *args, **kwargs):
- """Call method for the singleton metaclass."""
- if cls not in cls._instances:
- cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
- return cls._instances[cls]
-
-
-class AbstractSingleton(abc.ABC, metaclass=Singleton):
- """
- Abstract singleton class for ensuring only one instance of a class.
- """
-
- pass
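
A short usage sketch of the `Singleton` metaclass above; `Config` is a hypothetical class used only for illustration:

```python
import abc

class Singleton(abc.ABCMeta, type):
    """Metaclass that returns the same instance for every instantiation."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self, value=0):
        self.value = value

a = Config(value=1)
b = Config(value=2)     # no new instance is created; __init__ does not run again
print(a is b, a.value)  # True 1
```
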
diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/inpaint_gradio.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/inpaint_gradio.py
deleted file mode 100644
index f547c0f9789bb29c1b016edb28426b18f78f259b..0000000000000000000000000000000000000000
--- a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/inpaint_gradio.py
+++ /dev/null
@@ -1,328 +0,0 @@
-import argparse
-import os
-import re
-import time
-from contextlib import nullcontext
-from itertools import islice
-from random import randint
-
-import gradio as gr
-import numpy as np
-import torch
-from PIL import Image
-from einops import rearrange, repeat
-from omegaconf import OmegaConf
-from pytorch_lightning import seed_everything
-from torch import autocast
-from torchvision.utils import make_grid
-from tqdm import tqdm, trange
-from transformers import logging
-
-from ldmlib.util import instantiate_from_config
-from optimUtils import split_weighted_subprompts, logger
-
-logging.set_verbosity_error()
-import mimetypes
-
-mimetypes.init()
-mimetypes.add_type("application/javascript", ".js")
-
-
-def chunk(it, size):
- it = iter(it)
- return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(ckpt, verbose=False):
- print(f"Loading model from {ckpt}")
- pl_sd = torch.load(ckpt, map_location="cpu")
- if "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
- return sd
-
-
-def load_img(image, h0, w0):
- image = image.convert("RGB")
- w, h = image.size
- print(f"loaded input image of size ({w}, {h})")
- if h0 is not None and w0 is not None:
- h, w = h0, w0
-
-    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
-
- print(f"New image size ({w}, {h})")
- image = image.resize((w, h), resample=Image.LANCZOS)
- image = np.array(image).astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return 2.0 * image - 1.0
-
-
-def load_mask(mask, h0, w0, newH, newW, invert=False):
- image = mask.convert("RGB")
- w, h = image.size
- print(f"loaded input mask of size ({w}, {h})")
- if h0 is not None and w0 is not None:
- h, w = h0, w0
-
-    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
-
- print(f"New mask size ({w}, {h})")
- image = image.resize((newW, newH), resample=Image.LANCZOS)
- # image = image.resize((64, 64), resample=Image.LANCZOS)
- image = np.array(image)
-
- if invert:
- print("inverted")
- where_0, where_1 = np.where(image == 0), np.where(image == 255)
- image[where_0], image[where_1] = 255, 0
- image = image.astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return image
-
-
-def generate(
- image,
- mask_image,
- prompt,
- strength,
- ddim_steps,
- n_iter,
- batch_size,
- Height,
- Width,
- scale,
- ddim_eta,
- unet_bs,
- device,
- seed,
- outdir,
- img_format,
- turbo,
- full_precision,
-):
- if seed == "":
- seed = randint(0, 1000000)
- seed = int(seed)
- seed_everything(seed)
- sampler = "ddim"
-
- # Logging
- logger(locals(), log_csv="logs/inpaint_gradio_logs.csv")
-
- init_image = load_img(image['image'], Height, Width).to(device)
-
- model.unet_bs = unet_bs
- model.turbo = turbo
- model.cdevice = device
- modelCS.cond_stage_model.device = device
-
- if device != "cpu" and full_precision == False:
- model.half()
- modelCS.half()
- modelFS.half()
- init_image = init_image.half()
- # mask.half()
-
- tic = time.time()
- os.makedirs(outdir, exist_ok=True)
- outpath = outdir
- sample_path = os.path.join(outpath, "_".join(re.split(":| ", prompt)))[:150]
- os.makedirs(sample_path, exist_ok=True)
- base_count = len(os.listdir(sample_path))
-
- # n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
- assert prompt is not None
- data = [batch_size * [prompt]]
-
- modelFS.to(device)
-
- init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space
- init_latent = repeat(init_latent, "1 ... -> b ...", b=batch_size)
- if mask_image is None:
- mask = load_mask(image['mask'], Height, Width, init_latent.shape[2], init_latent.shape[3], True).to(device)
- else:
- image['mask']=mask_image
- mask = load_mask(mask_image, Height, Width, init_latent.shape[2], init_latent.shape[3], True).to(device)
-
- mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
- mask = repeat(mask, '1 ... -> b ...', b=batch_size)
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelFS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- if strength == 1:
- print("strength should be less than 1, setting it to 0.999")
- strength = 0.999
- assert 0.0 <= strength < 1.0, "can only work with strength in [0.0, 1.0]"
- t_enc = int(strength * ddim_steps)
- print(f"target t_enc is {t_enc} steps")
-
- if full_precision == False and device != "cpu":
- precision_scope = autocast
- else:
- precision_scope = nullcontext
-
- all_samples = []
- seeds = ""
- with torch.no_grad():
- all_samples = list()
- for _ in trange(n_iter, desc="Sampling"):
- for prompts in tqdm(data, desc="data"):
- with precision_scope("cuda"):
- modelCS.to(device)
- uc = None
- if scale != 1.0:
- uc = modelCS.get_learned_conditioning(batch_size * [""])
- if isinstance(prompts, tuple):
- prompts = list(prompts)
-
- subprompts, weights = split_weighted_subprompts(prompts[0])
- if len(subprompts) > 1:
- c = torch.zeros_like(uc)
- totalWeight = sum(weights)
- # normalize each "sub prompt" and add it
- for i in range(len(subprompts)):
- weight = weights[i]
- # if not skip_normalize:
- weight = weight / totalWeight
- c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
- else:
- c = modelCS.get_learned_conditioning(prompts)
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelCS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- # encode (scaled latent)
- z_enc = model.stochastic_encode(
- init_latent, torch.tensor([t_enc] * batch_size).to(device),
- seed, ddim_eta, ddim_steps)
-
- # decode it
- samples_ddim = model.sample(
- t_enc,
- c,
- z_enc,
- unconditional_guidance_scale=scale,
- unconditional_conditioning=uc,
- mask=mask,
- x_T=init_latent,
- sampler=sampler,
- )
-
- modelFS.to(device)
- print("saving images")
- for i in range(batch_size):
- x_samples_ddim = modelFS.decode_first_stage(samples_ddim[i].unsqueeze(0))
- x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
- all_samples.append(x_sample.to("cpu"))
- x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
- Image.fromarray(x_sample.astype(np.uint8)).save(
- os.path.join(sample_path, "seed_" + str(seed) + "_" + f"{base_count:05}.{img_format}")
- )
- seeds += str(seed) + ","
- seed += 1
- base_count += 1
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelFS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- del samples_ddim
- del x_sample
- del x_samples_ddim
- print("memory_final = ", torch.cuda.memory_allocated() / 1e6)
-
- toc = time.time()
-
- time_taken = (toc - tic) / 60.0
- grid = torch.cat(all_samples, 0)
- grid = make_grid(grid, nrow=n_iter)
- grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
-
- txt = (
- "Samples finished in "
- + str(round(time_taken, 3))
- + " minutes and exported to \n"
- + sample_path
- + "\nSeeds used = "
- + seeds[:-1]
- )
- return Image.fromarray(grid.astype(np.uint8)), image['mask'], txt
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='txt2img using gradio')
- parser.add_argument('--config_path', default="optimizedSD/v1-inference.yaml", type=str, help='config path')
- parser.add_argument('--ckpt_path', default="models/ldm/stable-diffusion-v1/model.ckpt", type=str, help='ckpt path')
- args = parser.parse_args()
- config = args.config_path
- ckpt = args.ckpt_path
- sd = load_model_from_config(f"{ckpt}")
- li, lo = [], []
- for key, v_ in sd.items():
- sp = key.split(".")
- if (sp[0]) == "model":
- if "input_blocks" in sp:
- li.append(key)
- elif "middle_block" in sp:
- li.append(key)
- elif "time_embed" in sp:
- li.append(key)
- else:
- lo.append(key)
- for key in li:
- sd["model1." + key[6:]] = sd.pop(key)
- for key in lo:
- sd["model2." + key[6:]] = sd.pop(key)
-
- config = OmegaConf.load(f"{config}")
-
- model = instantiate_from_config(config.modelUNet)
- _, _ = model.load_state_dict(sd, strict=False)
- model.eval()
-
- modelCS = instantiate_from_config(config.modelCondStage)
- _, _ = modelCS.load_state_dict(sd, strict=False)
- modelCS.eval()
-
- modelFS = instantiate_from_config(config.modelFirstStage)
- _, _ = modelFS.load_state_dict(sd, strict=False)
- modelFS.eval()
- del sd
-
- demo = gr.Interface(
- fn=generate,
- inputs=[
- gr.Image(tool="sketch", type="pil"),
- gr.Image(tool="editor", type="pil"),
- "text",
- gr.Slider(0, 0.99, value=0.99, step=0.01),
- gr.Slider(1, 1000, value=50),
- gr.Slider(1, 100, step=1),
- gr.Slider(1, 100, step=1),
- gr.Slider(64, 4096, value=512, step=64),
- gr.Slider(64, 4096, value=512, step=64),
- gr.Slider(0, 50, value=7.5, step=0.1),
- gr.Slider(0, 1, step=0.01),
- gr.Slider(1, 2, value=1, step=1),
- gr.Text(value="cuda"),
- "text",
- gr.Text(value="outputs/inpaint-samples"),
- gr.Radio(["png", "jpg"], value='png'),
- "checkbox",
- "checkbox",
- ],
- outputs=["image", "image", "text"],
- )
- demo.launch()
diff --git a/spaces/awacke1/AI.Dashboard.Maps/index.html b/spaces/awacke1/AI.Dashboard.Maps/index.html
deleted file mode 100644
index 019df6f46613420f023433b7ad23953f3a770059..0000000000000000000000000000000000000000
--- a/spaces/awacke1/AI.Dashboard.Maps/index.html
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-
-
-
- My static Space
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/spaces/awacke1/AIandSmartTools/style.css b/spaces/awacke1/AIandSmartTools/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/awacke1/AIandSmartTools/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/README.md b/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/README.md
deleted file mode 100644
index a2b3f2f229acb333b5d3094e7648914b622061bb..0000000000000000000000000000000000000000
--- a/spaces/awacke1/ActingGameMechanicsForSocialIntelligence/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ActingGameMechanicsForSocialIntelligence
-emoji: 🐠
-colorFrom: red
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/static/style.css b/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/static/style.css
deleted file mode 100644
index 7b50df8f6904c75f560224034d8aadd76656c6f8..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/static/style.css
+++ /dev/null
@@ -1,45 +0,0 @@
-body {
- --text: hsl(0 0% 15%);
- padding: 2.5rem;
- font-family: sans-serif;
- color: var(--text);
-}
-
-body.dark-theme {
- --text: hsl(0 0% 90%);
- background-color: hsl(223 39% 7%);
-}
-
-main {
- max-width: 80rem;
- text-align: center;
-}
-
-section {
- display: flex;
- flex-direction: column;
- align-items: center;
-}
-
-a {
- color: var(--text);
-}
-
-form {
- width: 30rem;
- margin: 0 auto;
-}
-
-input {
- width: 100%;
-}
-
-button {
- cursor: pointer;
-}
-
-.text-gen-output {
- min-height: 1.2rem;
- margin: 1rem;
- border: 0.5px solid grey;
-}
diff --git a/spaces/awacke1/chatGPT/utils.py b/spaces/awacke1/chatGPT/utils.py
deleted file mode 100644
index b09b072410049e2aa6f82cdd775084d8c0f7064e..0000000000000000000000000000000000000000
--- a/spaces/awacke1/chatGPT/utils.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import json, os
-from tencentcloud.common import credential
-from tencentcloud.common.profile.client_profile import ClientProfile
-from tencentcloud.common.profile.http_profile import HttpProfile
-from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
-from tencentcloud.tmt.v20180321 import tmt_client, models
-
-def get_tmt_client():
- try:
-        # Instantiate a credential object. Pass in the Tencent Cloud account SecretId and SecretKey, and keep the key pair confidential.
-        # Leaking this code may expose the SecretId and SecretKey and endanger every resource under the account. The sample below is for reference only; a more secure way of handling keys is recommended, see: https://cloud.tencent.com/document/product/1278/85305
-        # Keys can be obtained from the console at https://console.cloud.tencent.com/cam/capi
- SecretId = os.environ.get("TENCENTCLOUD_SECRET_ID")
- SecretKey = os.environ.get("TENCENTCLOUD_SECRET_KEY")
- cred = credential.Credential(SecretId, SecretKey)
-        # Instantiate an HTTP profile (optional; can be skipped if there are no special requirements)
- httpProfile = HttpProfile()
- httpProfile.endpoint = "tmt.tencentcloudapi.com"
-
-        # Instantiate a client profile (optional; can be skipped if there are no special requirements)
- clientProfile = ClientProfile()
- clientProfile.httpProfile = httpProfile
-        # Instantiate a client object for the product being requested; clientProfile is optional
- client = tmt_client.TmtClient(cred, "ap-shanghai", clientProfile)
- print(f'client_{client}')
- return client
- except TencentCloudSDKException as err:
- print(f'client_err_{err}')
- return None
-
-def getTextTrans_tmt(tmt_client, text, source='zh', target='en'):
- def is_chinese(string):
- for ch in string:
- if u'\u4e00' <= ch <= u'\u9fff':
- return True
- return False
-
- if tmt_client is None:
- return text
- if not is_chinese(text) and target == 'en':
- return text
- try:
- req = models.TextTranslateRequest()
- params = {
- "SourceText": text,
- "Source": source,
- "Target": target,
- "ProjectId": 0
- }
- req.from_json_string(json.dumps(params))
- resp = tmt_client.TextTranslate(req)
- return resp.TargetText
- except Exception as e:
- return text
\ No newline at end of file
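
A usage sketch for the translation helpers above, assuming this file is importable as `utils` and that the two Tencent Cloud environment variables hold real credentials (the placeholder values here will not authenticate; on any API failure `getTextTrans_tmt` simply returns the input text unchanged):

```python
import os

# Placeholder credentials; replace with real values or set them in the environment beforehand.
os.environ.setdefault("TENCENTCLOUD_SECRET_ID", "your-secret-id")
os.environ.setdefault("TENCENTCLOUD_SECRET_KEY", "your-secret-key")

from utils import get_tmt_client, getTextTrans_tmt

client = get_tmt_client()  # None only if the SDK raises while building the client
text = getTextTrans_tmt(client, "你好，世界", source="zh", target="en")
print(text)  # translated text with valid credentials, otherwise the original string
```
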
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/inference/infer_tool.py b/spaces/azusarang/so-vits-svc-models-ba_P/inference/infer_tool.py
deleted file mode 100644
index 985eea3ab5ad86dfcb98472a9bd17456fe8d5763..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/inference/infer_tool.py
+++ /dev/null
@@ -1,407 +0,0 @@
-import hashlib
-import io
-import json
-import logging
-import os
-import time
-from pathlib import Path
-from inference import slicer
-import gc
-
-import librosa
-import numpy as np
-# import onnxruntime
-import soundfile
-import torch
-import torchaudio
-
-import cluster
-import utils
-from models import SynthesizerTrn
-
-from diffusion.unit2mel import load_model_vocoder
-import yaml
-
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-
-
-def read_temp(file_name):
- if not os.path.exists(file_name):
- with open(file_name, "w") as f:
- f.write(json.dumps({"info": "temp_dict"}))
- return {}
- else:
- try:
- with open(file_name, "r") as f:
- data = f.read()
- data_dict = json.loads(data)
- if os.path.getsize(file_name) > 50 * 1024 * 1024:
- f_name = file_name.replace("\\", "/").split("/")[-1]
- print(f"clean {f_name}")
- for wav_hash in list(data_dict.keys()):
- if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
- del data_dict[wav_hash]
- except Exception as e:
- print(e)
-            print(f"{file_name} error, rebuilding file automatically")
- data_dict = {"info": "temp_dict"}
- return data_dict
-
-
-def write_temp(file_name, data):
- with open(file_name, "w") as f:
- f.write(json.dumps(data))
-
-
-def timeit(func):
- def run(*args, **kwargs):
- t = time.time()
- res = func(*args, **kwargs)
-        print('executing \'%s\' took %.3fs' % (func.__name__, time.time() - t))
- return res
-
- return run
-
-
-def format_wav(audio_path):
- if Path(audio_path).suffix == '.wav':
- return
- raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None)
- soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)
-
-
-def get_end_file(dir_path, end):
- file_lists = []
- for root, dirs, files in os.walk(dir_path):
- files = [f for f in files if f[0] != '.']
- dirs[:] = [d for d in dirs if d[0] != '.']
- for f_file in files:
- if f_file.endswith(end):
- file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
- return file_lists
-
-
-def get_md5(content):
- return hashlib.new("md5", content).hexdigest()
-
-def fill_a_to_b(a, b):
- if len(a) < len(b):
- for _ in range(0, len(b) - len(a)):
- a.append(a[0])
-
-def mkdir(paths: list):
- for path in paths:
- if not os.path.exists(path):
- os.mkdir(path)
-
-def pad_array(arr, target_length):
- current_length = arr.shape[0]
- if current_length >= target_length:
- return arr
- else:
- pad_width = target_length - current_length
- pad_left = pad_width // 2
- pad_right = pad_width - pad_left
- padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0))
- return padded_arr
-
-def split_list_by_n(list_collection, n, pre=0):
- for i in range(0, len(list_collection), n):
- yield list_collection[i-pre if i-pre>=0 else i: i + n]
-
-
-class F0FilterException(Exception):
- pass
-
-class Svc(object):
- def __init__(self, net_g_path, config_path,
- device=None,
- cluster_model_path="logs/44k/kmeans_10000.pt",
- nsf_hifigan_enhance = False,
- diffusion_model_path="logs/44k/diffusion/model_0.pt",
- diffusion_config_path="configs/diffusion.yaml",
- shallow_diffusion = False,
- only_diffusion = False,
- ):
- self.net_g_path = net_g_path
- self.only_diffusion = only_diffusion
- self.shallow_diffusion = shallow_diffusion
- if device is None:
- self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- # self.dev = torch.device("cpu")
- else:
- self.dev = torch.device(device)
- self.net_g_ms = None
- if not self.only_diffusion:
- self.hps_ms = utils.get_hparams_from_file(config_path)
- self.target_sample = self.hps_ms.data.sampling_rate
- self.hop_size = self.hps_ms.data.hop_length
- self.spk2id = self.hps_ms.spk
- try:
- self.speech_encoder = self.hps_ms.model.speech_encoder
- except Exception as e:
- self.speech_encoder = 'vec768l12'
-
- self.nsf_hifigan_enhance = nsf_hifigan_enhance
- if self.shallow_diffusion or self.only_diffusion:
-            if os.path.exists(diffusion_model_path) and os.path.exists(diffusion_config_path):
- self.diffusion_model,self.vocoder,self.diffusion_args = load_model_vocoder(diffusion_model_path,self.dev,config_path=diffusion_config_path)
- if self.only_diffusion:
- self.target_sample = self.diffusion_args.data.sampling_rate
- self.hop_size = self.diffusion_args.data.block_size
- self.spk2id = self.diffusion_args.spk
- self.speech_encoder = self.diffusion_args.data.encoder
- else:
-                print("No diffusion model or config found. Shallow diffusion mode will be disabled")
- self.shallow_diffusion = self.only_diffusion = False
-
- # load hubert and model
- if not self.only_diffusion:
- self.load_model()
- self.hubert_model = utils.get_speech_encoder(self.speech_encoder,device=self.dev)
- self.volume_extractor = utils.Volume_Extractor(self.hop_size)
- else:
- self.hubert_model = utils.get_speech_encoder(self.diffusion_args.data.encoder,device=self.dev)
- self.volume_extractor = utils.Volume_Extractor(self.diffusion_args.data.block_size)
-
- if os.path.exists(cluster_model_path):
- self.cluster_model = cluster.get_cluster_model(cluster_model_path)
- if self.shallow_diffusion : self.nsf_hifigan_enhance = False
- if self.nsf_hifigan_enhance:
- from modules.enhancer import Enhancer
- self.enhancer = Enhancer('nsf-hifigan', 'pretrain/nsf_hifigan/model',device=self.dev)
-
- def load_model(self):
- # get model configuration
- self.net_g_ms = SynthesizerTrn(
- self.hps_ms.data.filter_length // 2 + 1,
- self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
- **self.hps_ms.model)
- _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None)
- if "half" in self.net_g_path and torch.cuda.is_available():
- _ = self.net_g_ms.half().eval().to(self.dev)
- else:
- _ = self.net_g_ms.eval().to(self.dev)
-
-
-
- def get_unit_f0(self, wav, tran, cluster_infer_ratio, speaker, f0_filter ,f0_predictor,cr_threshold=0.05):
-
- f0_predictor_object = utils.get_f0_predictor(f0_predictor,hop_length=self.hop_size,sampling_rate=self.target_sample,device=self.dev,threshold=cr_threshold)
-
- f0, uv = f0_predictor_object.compute_f0_uv(wav)
- if f0_filter and sum(f0) == 0:
- raise F0FilterException("No voice detected")
- f0 = torch.FloatTensor(f0).to(self.dev)
- uv = torch.FloatTensor(uv).to(self.dev)
-
- f0 = f0 * 2 ** (tran / 12)
- f0 = f0.unsqueeze(0)
- uv = uv.unsqueeze(0)
-
- wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
- wav16k = torch.from_numpy(wav16k).to(self.dev)
- c = self.hubert_model.encoder(wav16k)
- c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
-
- if cluster_infer_ratio !=0:
- cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
- cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
- c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
-
- c = c.unsqueeze(0)
- return c, f0, uv
-
- def infer(self, speaker, tran, raw_path,
- cluster_infer_ratio=0,
- auto_predict_f0=False,
- noice_scale=0.4,
- f0_filter=False,
- f0_predictor='pm',
- enhancer_adaptive_key = 0,
- cr_threshold = 0.05,
- k_step = 100
- ):
-
- speaker_id = self.spk2id.get(speaker)
- if not speaker_id and type(speaker) is int:
- if len(self.spk2id.__dict__) >= speaker:
- speaker_id = speaker
- sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
- wav, sr = librosa.load(raw_path, sr=self.target_sample)
- c, f0, uv = self.get_unit_f0(wav, tran, cluster_infer_ratio, speaker, f0_filter,f0_predictor,cr_threshold=cr_threshold)
- if "half" in self.net_g_path and torch.cuda.is_available():
- c = c.half()
- with torch.no_grad():
- start = time.time()
- if not self.only_diffusion:
- audio,f0 = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)
- audio = audio[0,0].data.float()
- if self.shallow_diffusion:
- audio_mel = self.vocoder.extract(audio[None,:],self.target_sample)
- else:
- audio = torch.FloatTensor(wav).to(self.dev)
- audio_mel = None
- if self.only_diffusion or self.shallow_diffusion:
- vol = self.volume_extractor.extract(audio[None,:])[None,:,None].to(self.dev)
- f0 = f0[:,:,None]
- c = c.transpose(-1,-2)
- audio_mel = self.diffusion_model(
- c,
- f0,
- vol,
- spk_id = sid,
- spk_mix_dict = None,
- gt_spec=audio_mel,
- infer=True,
- infer_speedup=self.diffusion_args.infer.speedup,
- method=self.diffusion_args.infer.method,
- k_step=k_step)
- audio = self.vocoder.infer(audio_mel, f0).squeeze()
- if self.nsf_hifigan_enhance:
- audio, _ = self.enhancer.enhance(
- audio[None,:],
- self.target_sample,
- f0[:,:,None],
- self.hps_ms.data.hop_length,
- adaptive_key = enhancer_adaptive_key)
- use_time = time.time() - start
- print("vits use time:{}".format(use_time))
- return audio, audio.shape[-1]
-
- def clear_empty(self):
- # clean up vram
- torch.cuda.empty_cache()
-
- def unload_model(self):
- # unload model
- self.net_g_ms = self.net_g_ms.to("cpu")
- del self.net_g_ms
- if hasattr(self,"enhancer"):
- self.enhancer.enhancer = self.enhancer.enhancer.to("cpu")
- del self.enhancer.enhancer
- del self.enhancer
- gc.collect()
-
- def slice_inference(self,
- raw_audio_path,
- spk,
- tran,
- slice_db,
- cluster_infer_ratio,
- auto_predict_f0,
- noice_scale,
- pad_seconds=0.5,
- clip_seconds=0,
- lg_num=0,
- lgr_num =0.75,
- f0_predictor='pm',
- enhancer_adaptive_key = 0,
- cr_threshold = 0.05,
- k_step = 100
- ):
- wav_path = Path(raw_audio_path).with_suffix('.wav')
- chunks = slicer.cut(wav_path, db_thresh=slice_db)
- audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
- per_size = int(clip_seconds*audio_sr)
- lg_size = int(lg_num*audio_sr)
- lg_size_r = int(lg_size*lgr_num)
- lg_size_c_l = (lg_size-lg_size_r)//2
- lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
- lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
-
- audio = []
- for (slice_tag, data) in audio_data:
- print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
-            # pad
- length = int(np.ceil(len(data) / audio_sr * self.target_sample))
- if slice_tag:
- print('jump empty segment')
- _audio = np.zeros(length)
- audio.extend(list(pad_array(_audio, length)))
- continue
- if per_size != 0:
- datas = split_list_by_n(data, per_size,lg_size)
- else:
- datas = [data]
- for k,dat in enumerate(datas):
- per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) if clip_seconds!=0 else length
- if clip_seconds!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
-                # pad
- pad_len = int(audio_sr * pad_seconds)
- dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
- raw_path = io.BytesIO()
- soundfile.write(raw_path, dat, audio_sr, format="wav")
- raw_path.seek(0)
- out_audio, out_sr = self.infer(spk, tran, raw_path,
- cluster_infer_ratio=cluster_infer_ratio,
- auto_predict_f0=auto_predict_f0,
- noice_scale=noice_scale,
- f0_predictor = f0_predictor,
- enhancer_adaptive_key = enhancer_adaptive_key,
- cr_threshold = cr_threshold,
- k_step = k_step
- )
- _audio = out_audio.cpu().numpy()
- pad_len = int(self.target_sample * pad_seconds)
- _audio = _audio[pad_len:-pad_len]
- _audio = pad_array(_audio, per_length)
- if lg_size!=0 and k!=0:
- lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr_num != 1 else audio[-lg_size:]
- lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr_num != 1 else _audio[0:lg_size]
- lg_pre = lg1*(1-lg)+lg2*lg
- audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr_num != 1 else audio[0:-lg_size]
- audio.extend(lg_pre)
- _audio = _audio[lg_size_c_l+lg_size_r:] if lgr_num != 1 else _audio[lg_size:]
- audio.extend(list(_audio))
- return np.array(audio)
-
-class RealTimeVC:
- def __init__(self):
- self.last_chunk = None
- self.last_o = None
- self.chunk_len = 16000 # chunk length
- self.pre_len = 3840 # cross fade length, multiples of 640
-
- # Input and output are 1-dimensional numpy waveform arrays
-
- def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path,
- cluster_infer_ratio=0,
- auto_predict_f0=False,
- noice_scale=0.4,
- f0_filter=False):
-
- import maad
- audio, sr = torchaudio.load(input_wav_path)
- audio = audio.cpu().numpy()[0]
- temp_wav = io.BytesIO()
- if self.last_chunk is None:
- input_wav_path.seek(0)
-
- audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path,
- cluster_infer_ratio=cluster_infer_ratio,
- auto_predict_f0=auto_predict_f0,
- noice_scale=noice_scale,
- f0_filter=f0_filter)
-
- audio = audio.cpu().numpy()
- self.last_chunk = audio[-self.pre_len:]
- self.last_o = audio
- return audio[-self.chunk_len:]
- else:
- audio = np.concatenate([self.last_chunk, audio])
- soundfile.write(temp_wav, audio, sr, format="wav")
- temp_wav.seek(0)
-
- audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav,
- cluster_infer_ratio=cluster_infer_ratio,
- auto_predict_f0=auto_predict_f0,
- noice_scale=noice_scale,
- f0_filter=f0_filter)
-
- audio = audio.cpu().numpy()
- ret = maad.util.crossfade(self.last_o, audio, self.pre_len)
- self.last_chunk = audio[-self.pre_len:]
- self.last_o = audio
- return ret[self.chunk_len:2 * self.chunk_len]
-
\ No newline at end of file
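
`slice_inference` above leans on two small helpers: `split_list_by_n`, which yields fixed-size chunks with an optional backward overlap used for cross-fading, and `pad_array`, which zero-pads a slice symmetrically to a target length. A standalone sketch of both behaviours:

```python
import numpy as np

def split_list_by_n(list_collection, n, pre=0):
    # Yield chunks of length n, each extended backwards by up to `pre` samples of overlap.
    for i in range(0, len(list_collection), n):
        yield list_collection[i - pre if i - pre >= 0 else i: i + n]

def pad_array(arr, target_length):
    # Zero-pad a 1-D array symmetrically up to target_length (no padding if already long enough).
    pad_width = max(target_length - arr.shape[0], 0)
    pad_left = pad_width // 2
    return np.pad(arr, (pad_left, pad_width - pad_left), "constant")

print(list(split_list_by_n(list(range(10)), 4, pre=2)))
# [[0, 1, 2, 3], [2, 3, 4, 5, 6, 7], [6, 7, 8, 9]]
print(pad_array(np.array([1.0, 2.0, 3.0]), 7))
# [0. 0. 1. 2. 3. 0. 0.]
```
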
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/PlayCanvasLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/PlayCanvasLoader.js
deleted file mode 100644
index 00f8ced7dc1284caa3ac252e0b754b172242b6cb..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/PlayCanvasLoader.js
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- * @author Mugen87 / https://github.com/Mugen87
- */
-
-THREE.PlayCanvasLoader = function ( manager ) {
-
- this.manager = ( manager !== undefined ) ? manager : THREE.DefaultLoadingManager;
-
-};
-
-THREE.PlayCanvasLoader.prototype = {
-
- constructor: THREE.PlayCanvasLoader,
-
- load: function ( url, onLoad, onProgress, onError ) {
-
- var scope = this;
-
- var loader = new THREE.FileLoader( scope.manager );
- loader.setPath( scope.path );
- loader.load( url, function ( text ) {
-
- onLoad( scope.parse( JSON.parse( text ) ) );
-
- }, onProgress, onError );
-
- },
-
- setPath: function ( value ) {
-
- this.path = value;
- return this;
-
- },
-
- parse: function ( json ) {
-
- function parseVertices( data ) {
-
- var attributes = {};
-
- // create a buffer attribute for each array that contains vertex information
-
- for ( var name in data ) {
-
- var array = data[ name ];
-
- var type = array.type;
- var size = array.components;
-
- var attribute;
-
- switch ( type ) {
-
- case 'float32':
- attribute = new THREE.Float32BufferAttribute( array.data, size );
- break;
-
- case 'uint8':
- attribute = new THREE.Uint8BufferAttribute( array.data, size );
- break;
-
- case 'uint16':
- attribute = new THREE.Uint16BufferAttribute( array.data, size );
- break;
-
- default:
- console.log( 'THREE.PlayCanvasLoader: Array type "%s" not yet supported.', type );
-
- }
-
- attributes[ name ] = attribute;
-
- }
-
- data._attributes = attributes;
-
- }
-
- function parseMeshes( data ) {
-
- // create buffer geometry
-
- var geometry = new THREE.BufferGeometry();
-
- geometry.setIndex( data.indices );
-
- var attributes = model.vertices[ data.vertices ]._attributes;
-
- for ( var name in attributes ) {
-
- var attribute = attributes[ name ];
-
- if ( name === 'texCoord0' ) name = 'uv';
-
- geometry.addAttribute( name, attribute );
-
- }
-
- data._geometry = geometry;
-
- }
-
- function parseMeshInstances( data ) {
-
- var node = model.nodes[ data.node ];
- var mesh = model.meshes[ data.mesh ];
-
- if ( node._geometries === undefined ) {
-
- node._geometries = [];
-
- }
-
- node._geometries.push( mesh._geometry );
-
- }
-
- function parseNodes( data ) {
-
- var object = new THREE.Group();
-
- var geometries = data._geometries;
-
- if ( geometries !== undefined ) {
-
- var material = new THREE.MeshPhongMaterial();
-
- for ( var i = 0, l = geometries.length; i < l; i ++ ) {
-
- var geometry = geometries[ i ];
-
- object.add( new THREE.Mesh( geometry, material ) );
-
- }
-
- }
-
- for ( var i = 0, l = data.rotation.length; i < l; i ++ ) {
-
- data.rotation[ i ] *= Math.PI / 180;
-
- }
-
- //
-
- object.name = data.name;
-
- object.position.fromArray( data.position );
- object.quaternion.setFromEuler( new THREE.Euler().fromArray( data.rotation ) );
- object.scale.fromArray( data.scale );
-
- data._object = object;
-
- }
-
- //
-
- var model = json.model;
-
- for ( var i = 0, l = model.vertices.length; i < l; i ++ ) {
-
- parseVertices( model.vertices[ i ] );
-
- }
-
- for ( var i = 0, l = model.meshes.length; i < l; i ++ ) {
-
- parseMeshes( model.meshes[ i ] );
-
- }
-
- for ( var i = 0, l = model.meshInstances.length; i < l; i ++ ) {
-
- parseMeshInstances( model.meshInstances[ i ] );
-
- }
-
- for ( var i = 0, l = model.nodes.length; i < l; i ++ ) {
-
- parseNodes( model.nodes[ i ] );
-
- }
-
- // setup scene hierarchy
-
- for ( var i = 0, l = model.parents.length; i < l; i ++ ) {
-
- var parent = model.parents[ i ];
-
- if ( parent === - 1 ) continue;
-
- model.nodes[ parent ]._object.add( model.nodes[ i ]._object );
-
-
- }
-
- return model.nodes[ 0 ]._object;
-
- }
-
-};
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195551.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195551.py
deleted file mode 100644
index 09dff2dbf40f5889c786c1e27d513f55b2aca90a..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195551.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import os
-os.system("pip install gfpgan")
-
-#os.system("pip freeze")
-#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .")
-import random
-import gradio as gr
-from PIL import Image
-import torch
-# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg')
-# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png')
-# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg')
-# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg')
-# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg')
-
-
-import cv2
-import glob
-import numpy as np
-from basicsr.utils import imwrite
-from gfpgan import GFPGANer
-
-bg_upsampler = None
-
-
-
-# set up GFPGAN restorer
-restorer = GFPGANer(
- model_path='experiments/pretrained_models/GFPGANv1.3.pth',
- upscale=2,
- arch='clean',
- channel_multiplier=2,
- bg_upsampler=bg_upsampler)
-
-
-def inference(img):
- input_img = cv2.imread(img, cv2.IMREAD_COLOR)
- cropped_faces, restored_faces, restored_img = restorer.enhance(
- input_img, has_aligned=False, only_center_face=False, paste_back=True)
-
- #return Image.fromarray(restored_faces[0][:,:,::-1])
- return Image.fromarray(restored_img[:, :, ::-1])
-
-title = "Make cherished memories clearer"
-
-
-description = "Upload an old photo, click Submit, wait a moment, then save the image shown in Output on the right."
-article = ""
-
-gr.Interface(
- inference,
- [gr.inputs.Image(type="filepath", label="Input")],
- gr.outputs.Image(type="pil", label="Output"),
- title=title,
- description=description,
- article=article,
- examples=[
- ['lincoln.jpg'],
- ['einstein.png'],
- ['edison.jpg'],
- ['Henry.jpg'],
- ['Frida.jpg']
- ]
- ).launch(enable_queue=True,cache_examples=True,share=True)
-
-
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/setup.py b/spaces/beihai/GFPGAN-V1.3-whole-image/setup.py
deleted file mode 100644
index 474e9188aa2dc5c19614921760ce4ad99bd19c13..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/setup.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-from setuptools import find_packages, setup
-
-import os
-import subprocess
-import time
-
-version_file = 'gfpgan/version.py'
-
-
-def readme():
- with open('README.md', encoding='utf-8') as f:
- content = f.read()
- return content
-
-
-def get_git_hash():
-
- def _minimal_ext_cmd(cmd):
- # construct minimal environment
- env = {}
- for k in ['SYSTEMROOT', 'PATH', 'HOME']:
- v = os.environ.get(k)
- if v is not None:
- env[k] = v
- # LANGUAGE is used on win32
- env['LANGUAGE'] = 'C'
- env['LANG'] = 'C'
- env['LC_ALL'] = 'C'
- out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
- return out
-
- try:
- out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
- sha = out.strip().decode('ascii')
- except OSError:
- sha = 'unknown'
-
- return sha
-
-
-def get_hash():
- if os.path.exists('.git'):
- sha = get_git_hash()[:7]
- else:
- sha = 'unknown'
-
- return sha
-
-
-def write_version_py():
- content = """# GENERATED VERSION FILE
-# TIME: {}
-__version__ = '{}'
-__gitsha__ = '{}'
-version_info = ({})
-"""
- sha = get_hash()
- with open('VERSION', 'r') as f:
- SHORT_VERSION = f.read().strip()
- VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
-
- version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
- with open(version_file, 'w') as f:
- f.write(version_file_str)
-
-
-def get_version():
- with open(version_file, 'r') as f:
- exec(compile(f.read(), version_file, 'exec'))
- return locals()['__version__']
-
-
-def get_requirements(filename='requirements.txt'):
- here = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(here, filename), 'r') as f:
- requires = [line.replace('\n', '') for line in f.readlines()]
- return requires
-
-
-if __name__ == '__main__':
- write_version_py()
- setup(
- name='gfpgan',
- version=get_version(),
- description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration',
- long_description=readme(),
- long_description_content_type='text/markdown',
- author='Xintao Wang',
- author_email='xintao.wang@outlook.com',
- keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan',
- url='https://github.com/TencentARC/GFPGAN',
- include_package_data=True,
- packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
- classifiers=[
- 'Development Status :: 4 - Beta',
- 'License :: OSI Approved :: Apache Software License',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- ],
- license='Apache License Version 2.0',
- setup_requires=['cython', 'numpy'],
- install_requires=get_requirements(),
- zip_safe=False)
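
`write_version_py` above renders `gfpgan/version.py` from a string template. A hedged sketch of the rendering step for a hypothetical `VERSION` file containing `1.3.8` and a hypothetical short git hash:

```python
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""

short_version = "1.3.8"   # hypothetical contents of the VERSION file
sha = "abc1234"           # hypothetical short git hash
version_info = ", ".join(x if x.isdigit() else f'"{x}"' for x in short_version.split("."))

print(content.format("Mon Jan  1 00:00:00 2024", short_version, sha, version_info))
# renders __version__ = '1.3.8', __gitsha__ = 'abc1234', version_info = (1, 3, 8)
```
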
diff --git a/spaces/bertin-project/bertin/README.md b/spaces/bertin-project/bertin/README.md
deleted file mode 100644
index bd3e4cc5b10e65379dbbc69896aa365049ddc6b5..0000000000000000000000000000000000000000
--- a/spaces/bertin-project/bertin/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: BERTIN
-emoji: 🔥
-colorFrom: yellow
-colorTo: red
-sdk: streamlit
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/bguberfain/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py b/spaces/bguberfain/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py
deleted file mode 100644
index ec042b8ce48d193b40fd1e6311b2cc4b0c4e4086..0000000000000000000000000000000000000000
--- a/spaces/bguberfain/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import argparse
-import pickle
-import torch
-
-"""
-Usage:
-
-cd DETIC_ROOT/models/
-wget https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/resnet50_miil_21k.pth
-python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path resnet50_miil_21k.pth
-
-wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth
-python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path swin_base_patch4_window7_224_22k.pth
-
-"""
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument('--path', default='')
- args = parser.parse_args()
-
- print('Loading', args.path)
- model = torch.load(args.path, map_location="cpu")
- # import pdb; pdb.set_trace()
- if 'model' in model:
- model = model['model']
- if 'state_dict' in model:
- model = model['state_dict']
- ret = {
- "model": model,
- "__author__": "third_party",
- "matching_heuristics": True
- }
- out_path = args.path.replace('.pth', '.pkl')
- print('Saving to', out_path)
- pickle.dump(ret, open(out_path, "wb"))
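
A quick sanity check of the converted checkpoint, assuming the script above has already been run on `resnet50_miil_21k.pth` (the filename comes from the usage notes in the docstring) and produced the `.pkl` next to it:

```python
import pickle

with open("resnet50_miil_21k.pkl", "rb") as f:  # written by the conversion script above
    ckpt = pickle.load(f)

print(ckpt["__author__"], ckpt["matching_heuristics"])  # third_party True
print(len(ckpt["model"]), "entries in the converted state_dict")
```
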
diff --git a/spaces/bino-ocle/audio-intelligence-dash/app/css_components/file.css b/spaces/bino-ocle/audio-intelligence-dash/app/css_components/file.css
deleted file mode 100644
index 93058e03a2f4dd7cdad73bb9051474b4be985405..0000000000000000000000000000000000000000
--- a/spaces/bino-ocle/audio-intelligence-dash/app/css_components/file.css
+++ /dev/null
@@ -1,81 +0,0 @@
-body {
- font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Raleway, Helvetica,
- Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
-}
-
-.logo {
- width: 180px;
-}
-
-.title {
- font-weight: 600;
- text-align: left;
- color: black;
- font-size: 18px;
-}
-
-.alert,
-#component-2,
-#component-3 {
- padding: 24px;
- color: black;
- background-color: #f4f8fb;
- border: 1px solid #d6dce7;
- border-radius: 8px;
- box-shadow: 0px 6px 15px rgb(0 0 0 / 2%), 0px 2px 5px rgb(0 0 0 / 4%);
-}
-
-ol {
- list-style: disc;
-}
-
-.alert__info {
- background-color: #f4f8fb;
- color: #323552;
-}
-
-.alert__warning {
- background-color: #fffae5;
- color: #917115;
- border: 1px solid #e4cf2b;
-}
-
-#pw {
- -webkit-text-security: disc;
-}
-
-/* unvisited link */
-a:link {
- color: #52DFDF;
-}
-
-/* visited link */
-a:visited {
- color: #52DFDF;
-}
-
-/* mouse over link */
-a:hover {
- color: #52DFDF;
-}
-
-/* selected link */
-a:active {
- color: #52DFDF;
-}
-
-li {
- margin-left: 1em;
-}
-
-.apikey {
-}
-
-.entity-list {
- color: #52DFDF;
- font-size: 16px
-}
-
-.entity-elt {
- color: black
-}
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Big Fish Game Universal Crack How to Solve Common Problems and Errors.md b/spaces/bioriAsaeru/text-to-voice/Big Fish Game Universal Crack How to Solve Common Problems and Errors.md
deleted file mode 100644
index 27fb31da7b2fc5ab6dae666ee2d5bd4a9e143b5e..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Big Fish Game Universal Crack How to Solve Common Problems and Errors.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
The boy was brave and very beautiful. His tribespeople called him the Tenas Tyee7 and they loved him. Of all his wealth of fish and furs, of game and hykwa8 he gave to the boys who had none; he hunted food for the old people; he tanned skins and furs for those whose feet were feeble, whose eyes were fading, whose blood ran thin with age.
NBCUNIVERSAL TELEVISION AND STREAMING BOLSTERS ITS INVESTMENT IN CABLE PORTFOLIO WITH 33 NEW UNSCRIPTED SERIES, OVER 30 ADDICTIVE RETURNING HITS, MUST-WATCH LIVE EVENTS & A CONTINUED FOCUS ON SCRIPTEDNEW BRAVO SHOWS INCLUDE: "Life is a Ballroom (WT)," "Love Without Borders (WT)," "Love Match Atlanta (WT)," and Franchise Spinoffs "Below Deck Adventure," "Summer House Winter Charm" and "Kandi OLG Project (WT)" NEW E! SHOWS INCLUDE: "Clash of the Cover Bands (WT)" from Executive Producer Jimmy Fallon, "Jason Biggs Cash at Your Door" with Actor Jason Biggs, "We Got Love Iman & Teyana," "Showbiz Moms & Dads," and "Relatively Famous: Ranch Rules (WT)," Plus Laverne Cox Joins as New Signature E! Talent, and the "People's Choice Awards" Sets Date for Dec. 7NEW OXYGEN SHOWS AND SPECIALS INCLUDE: "911 Crisis Center," "Alaska Man Murders (WT)," Four All-New "Snapped Notorious" Specials, "Family Massacre," "New York Homicide," "The Real Murders of Atlanta," "Relationship Killer with Faith Jenkins (WT)," "The Toolbox Killer" and MoreNEW SYFY ORIGINALS INCLUDE: "Chucky" from Executive Producer Don Mancini (also airing on USA), "Day of the Dead," "Astrid & Lilly Save the World," "Bring It On: Halloween (WT)" and "Slumber Party Massacre" NEW USA NETWORK ORIGINALS INCLUDE: "Nash Bridges" Movie with Original Cast Members Don Johnson and Cheech Marin, "Chucky" from Executive Producer Don Mancini (also airing on SYFY), "America's Big Deal" with Inventor and Entrepreneur Joy Mangano and "Mud, Sweat & Beards (WT)" NEW YORK - May 13, 2021 - NBCUniversal Television and Streaming is solidifying its commitment to its cable portfolio following a year where each of its cable brands - Bravo, E!, Oxygen, SYFY, Universal Kids and USA Network - remained leaders in their respective areas of focus, including Bravo and USA once again securing positions among the top five ad-supported cable entertainment networks in the key 18-49 demo. NBCUniversal unveils a wide-ranging slate for its cable portfolio, which will collectively add 33 new unscripted shows or specials, bring back over 30 fan-favorite hits with more pick-ups to come, launch five scripted series and air three original movies in addition to multiple live events. "A key driver of success at NBCUniversal's cable networks is our ability to tap into the scope and scale of our collective portfolio while leveraging each brand's distinct audience to build unrivaled fan destinations across our networks and on Peacock," said Frances Berwick, Chairman, Entertainment Networks, NBCUniversal Television and Streaming. "Our cable networks represent a vital part of our business and we will continue to invest in our brands and IP in innovative ways, whether that be through mixing live sports and big event series on USA, introducing a new host to E!'s flagship Red Carpet, or bringing our addictive Bravo shows to life with fan experiences, our networks remain the home of culture-defining television." "With more than 40 new series and specials across the cable networks, our content team has been firing on all cylinders to ensure our brands are at the center of destination viewing," continued Susan Rovner, Chairman, Entertainment Content, NBCUniversal Television and Streaming. "Our high-profile competition shows, spinoffs of fan favorite franchises and much-anticipated scripted originals super-serve existing fans while also enticing new ones." NBCUniversal's cable portfolio is one of the strongest in the marketplace. 
Not only are Bravo and USA maintaining their positions as top-five ad supported cable entertainment networks in the key 18-49 demo, but both cablers are pacing for their best years ever in both on-demand views and livestreams. Bravo has the most series in the top 20 reality programs on cable across linear, social and VOD year-to-date, and USA's "WWE Monday Night Raw" ranks as the #1 cable entertainment program in Live viewership. Additionally, within their respective audience groups, NBCU's other cable networks - E!, Oxygen, SYFY and Universal Kids - dominated in 2020 and are on track to deliver in 2021. E! ended the year as the #1 most social cable reality network with a record-breaking 706M social engagements across platforms. Oxygen had its fourth consecutive year of true crime growth and viewers spent more time watching than ever before, ranking as the #1 cable network in audience engagement among all key demos and outpacing its top competitor by double digits. With over 350 hours of original programming for 2021, Oxygen is poised to continue this trend. SYFY is setting new records in on-demand views and livestreams, posting its best 1Q ever and pacing for its best year ever in both metrics. The network is also home to the most-watched new cable drama in the last year with breakout hit "Resident Alien." Universal Kids was the fastest growing kids' network of 2020 with double-digit year-over-year growth in their key K2-11 demo and among total views. On digital, the Universal Kids' YouTube page secured 170M organic views in 2020, up from 16M two years ago, and last quarter the page hit a major milestone as it surpassed 1M subscribers. Below please find full slate breakdowns for each network. For photos and additional show information, go to NETWORK SCHEDULE BREAKDOWNS: BRAVO As a top five cable entertainment brand with the #1 most loyal fanbase, Bravo is a premiere lifestyle and entertainment destination with sought-after programming that drives cultural conversation. Bravo will fortify its unscripted slate with new "can't miss" reality series, spinoffs of fan-favorite hits and the return of the network's top franchises. All-new spinoffs include "Below Deck Adventure," which follows yachties on an adventure-filled experience with charter guests; "Summer House Winter Charm" in which fan-favorites from the beloved "Summer House" and "Southern Charm" franchises will leave their warmer climates and come together for a winter adventure in Stowe, VT;and more. The network will also bring back top franchises "Million Dollar Listing Los Angeles," "Vanderpump Rules," all-new seasons of multiple "Real Housewives" series, among many others. New programming coming to Bravo includes: "Below Deck Adventure" - Premieres Early 2022 This new series takes wealthy thrill-seekers on the mega-yacht trip of a lifetime to experience thrilling YOLO adventures and daredevil activities all against some of the world's most beautiful backdrops. Season 1 will be set in the glacial fjords of Norway where charter guests will dog sled, heli ski and cold-water plunge their way through the day, dine on freshly caught seafood straight from the Scandinavian waters in the evening, and still get all steamed up in the hot tub by night. The exacting expectations of high-end luxury coupled with the physical demands of cold-weather adventures will push the crew to new heights and unparalleled pressures that makes this new series one-of-a-kind. 
Produced by Shed Media with Lisa Shannon and Dan Peirson serving as executive producers. Mark Cronin is also set to executive produce, along with Grace Lee serving as showrunner and executive producer. "Kandi OLG Project (WT)" - Premieres Late 2021In this new occu-series, we'll watch as Kandi, Todd and the Old Lady Gang (comprised of Mama Joyce, and Aunts Nora & Bertha) continue to build their restaurant empire while keeping their over-the-top and opinionated staff in line and making their vision come to fruition. The new series follows the staff in and out of the restaurant, witnessing how workplace slights bleed into their social lives. Everyone at OLG has a passion - be it for dancing, comedy or just to follow in Kandi's mogul footsteps, and these dreams can get in the way of running food and ensuring customers don't leave those dreaded one-star Yelp reviews. And the fact that some are single and constantly flirting with the cute clientele - and each other - doesn't exactly help things either. It's a monumental task to face, but if anyone can do it, it's Kandi! Produced by Truly Original with Steven Weinstock, Glenda Hersh, Kandi Burruss and Todd Tucker serving as executive producers. "Life is a Ballroom (WT)"- Premieres Late 2021Professional Amateur dancing is the fastest growing division of ballroom dance in the world. In this sport, known as Pro-Am, amateur dancers pay professional dance partners to compete with them in competitions around the country each week. From the producers of "90 Day Fiancé," this access driven docuseries will follow and intercut the stories of the most fascinating, talented and obsessed ballroom amateur dancers in the country. Produced by Sharp Entertainment with Matt Sharp and Dan Adler serving as executive producers. "Love Match Atlanta (WT)" - Premieres Early 2022 This all-new series explores Atlanta's most highly regarded Black matchmaking companies as they utilize distinct methodology, charm and skill to find a match for some of the city's most eligible and demanding singles. Celebrating the business of Black love, the series follows these matchmakers and their teams as they work to outmaneuver their competitors, matching high profile singles who are on the hunt for the ultimate relationship and willing to pay top dollar for it. Produced by Matador Content, a Boat Rocker Company, with Todd Lubin and Jay Peterson serving as executive producers. "Love Without Borders (WT)" - Premieres Mid 2022Singles dream of escaping their everyday lives to find their soulmate by risking everything for a chance at love with a stranger, in a strange land. Participants will leave their jobs, homes, possessions and families behind, but they don't know where in the world they're going or who they'll meet, all in the name of love! Produced by Kinetic Content with Chris Coelen, Eric Detwiler, Karrie Wolfe and Katie Griffin serving as executive producers. "Summer House Winter Charm" - Premieres this Fall When the temperature drops, the drama heats up! This all-new series follows some of Bravo's favorite "Summer House" and "Southern Charm" cast members and their friends during a two-week vacation at a ski house in Vermont. Produced by Truly Original with Steven Weinstock, Glenda Hersh, Lauren Eskelin, Jamie Jakimo, Maggie Langtry and Trish Gold serving as executive producers. 
Shows premiering on Bravo this summer include "Below Deck Mediterranean," "Family Karma," "The Real Housewives of Beverly Hills," "The Real Housewives of Potomac," "Shahs of Sunset" and the all-new "Top Chef Amateurs" and "Million Dollar Listing: Ryan's Renovation." E! As the #1 most social cable reality network, E! will continue to be a leader in pop culture programming with the return of its highest-rated new series, "The Bradshaw Bunch," which featuresfour-time NFL Super Bowl champion quarterback Terry Bradshaw and his family; the return of the only awards show for the people, by the people, the "People's Choice Awards" on Dec. 7;and more.The network also picks up five new unscripted shows, including a music showdown competition series "Clash of the Cover Bands (WT)," which hails from executive producer Jimmy Fallon; "Jason Biggs Cash at Your Door" with actor and comedian Jason Biggs; and more.E! continues to cement its position as a leader in the pop culture zeitgeist with the recent announcement of actress and LGBTQ+ advocate Laverne Cox joining the network as the new host of E!'s signature red carpet franchise, "Live from E!," beginning in 2022. Additionally, Cox will host a series of celebrity-based interview specials spotlighting trendsetters, tastemakers and those making an impact in the Hollywood community throughout next year. New programming coming to E! includes: "Clash of the Cover Bands (WT)" - Premieres this Fall Jimmy Fallon will executive produce "Clash of the Cover Bands (WT)," a new music competition series on E!. In each episode, two bands of similar musical genre (e.g. Pop Divas, Boy Bands, Heavy Metal etc.) go head-to-head to see which band delivers the most impressive cover performance for a chance to win a cash prize and bragging rights. "Clash of the Cover Bands (WT)" is produced by Universal Television Alternative Studio, a division of Universal Studio Group, and Electric Hotdog with Jimmy Fallon and Jim Juvonen serving as executive producers. "Jason Biggs Cash at Your Door" - Premieres this Fall From the producers of "Cash Cab" comes this new game show, hosted by Jason Biggs, that comes to you, in your very own home, when you least expect it. Because who doesn't want to win cash without leaving the house? Produced by Lion Television with Jason Biggs, Allison Corn, Tony Tackaberry, Stan Hsue and Adam Dolgins serving as executive producers, with the format by Ami Amir. The format was created by Matar and is distributed by All3Media. "Relatively Famous: Ranch Rules (WT)" - Premieres Early 2022Eight celebrity offspring come together to live and work as ranch hands for four weeks in Steamboat Springs, Colorado, hoping to prove to themselves, their parents, and the world that they are more than their last names. Each episode throws the cast into fish-out-of-water situations and hilarious misadventures as they work to restore and reopen Saddleback Ranch to the public after a year of shutdown and loss due to the pandemic. Produced by Fremantle with Kevin Lee, Justin Berfield, Jason Felts, Joe Simpson and Angela Rae Berg serving as executive producers. "Showbiz Moms & Dads" - Premieres Early 2022From the Emmy-winning producers of "RuPaul's Drag Race" and "Million Dollar Listing," Bravo's classic series "Showbiz Moms & Dads" is coming to E! with all-new bigger-than-life parents of budding stars. This time around, these talented kids are not just dancers, actors, models and musicians... but are social media influencers with new content platforms with millions of followers. 
And stage parents are more competitive than ever before! Produced by World of Wonder with Fenton Bailey, Randy Barbato and Tom Campbell serving as executive producers. "We Got Love Iman & Teyana" - Premieres this FallThis refreshingly authentic and completely unfiltered new docu-series follows Teyana Taylor and Iman Shumpert as they take the world by storm, all while juggling music, fashion, business and family. With their tight entourage of family and friends, Teyana and Iman are ready to continue building their empire, despite the whirlwind chaos and drama that goes along with it. Produced by STX Unscripted with Jason Goldberg, Teyana Taylor, Iman Shumpert, Shanta Conic, Michelle Kongkasuwan and Ailee O'Neil serving as executive producers. Shows premiering on E! this summer include "Botched" and the all-new "Reunion Road Trip." The "E! News" brand is the leading multi-platform publisher delivering breaking entertainment news and pop culture coverage 24/7 across linear, digital and social media. The brand's programming slate includes "Daily Pop" and "Nightly Pop" on linear; one of the top entertainment news websites with EOnline.com and a growing presence on YouTube; "E! News' The Rundown" on Snapchat; and an impressive social media presence across Facebook, Twitter and Instagram, with the latter boasting over 16 million Instagram follows on the @enews handle. OXYGEN Last year in a highly competitive market, Oxygen maintained four consecutive years of true crime growth and was the fastest growing top 30 cable entertainment network among its key F25-54 demo. With multiple returning shows, including the 30th season of the flagship series "Snapped;" the bone-chilling spinoff "Snapped: Behind Bars;" Dick Wolf's "Cold Justice" and more; plus 17 all-new series or specials, including "911 Crisis Center," which gives viewers a behind-the-scenes look at the fast-paced, high-stakes world of a dynamic 911 call center outside of Cleveland and four "Snapped Notorious" specials, the brand will continue to be a leading multiplatform destination for the genre. New programming coming to Oxygen includes: "911 Crisis Center" - Premieres this Fall This one-of-a-kind documentary series brings viewers behind-the-scenes of the fast-paced, high-stakes world of a dynamic 911 call center outside of Cleveland. This is an up-close and personal look at an amazing team of 911 dispatchers as they take on a never-ending bombardment of panic-stricken callers, and save lives. These dedicated professionals really are the unsung heroes of law enforcement. Produced by Shed Media with Dan Peirson, Lisa Shannon and Adam Kassen serving as executive producers. "Alaska Man Murders (WT)" - Premieres this Fall Investigating homicide cases is hard. Investigating homicide cases in Alaska is even harder. "Alaska Man Murders (WT)" explores the darkness that lurks within America's Last Frontier. Isolation, extreme weather, challenging terrain and other unique factors make for cases that can be incredibly difficult to crack. These are the stories of investigators who succeeded despite the odds. Produced by RIVR Media with Dee Haslam, Lori Styer and Myles Reiff serving as executive producers. "Family Massacre" - Premieres Late 2021"Family Massacre" is a gripping and powerful exploration of some of the most ruthless murders ever committed. This series follows the true and gruesome tales of the unthinkable: multiple members of the same family slain in cold blood. 
In each episode, we hear from friends and surviving relatives, those people closest to the family that was massacred, while also detailing the work of the dedicated investigators and prosecutors tasked with finding their killer and bringing them to justice. Through first-hand accounts, archival footage and cinematic recreations, we see the twists and turns of the investigation unfold and delve deep into who could have committed such a shocking crime and just how they were caught and made to answer for it. Produced by Renegade 83 with David Garfinkle, Jay Renfroe and Chris Rowe serving as Executive Producers. "Final Moments" - Premieres Early 2022This all-new series delves into heart-wrenching crimes, revealing the emotional truth of the victims leading up to their death. Each episode tracks a new investigation and features real footage, pictures and social media posts that shed light on the life of the victim and the crime. What were they doing? What might they have been thinking and feeling? What was their Final Moment? Produced by Dick Wolf's Wolf Entertainment and ITV America's Good Caper Content with Dick Wolf, Tom Thayer, Kathryn Vaughan, Jordana Hochman and Tim McConville serving as executive producers. "The Girl Who Lived (WT)" - Two-Hour Special Premieres this Fall From Executive Producer Elizabeth Smart, "The Girl Who Lived (WT)" tells the story of Kara Robinson: abducted in broad daylight from a friend's front yard, the 15-year-old is held captive and sexually assaulted for 18 harrowing hours. When she bravely engineers her own escape and leads authorities to her assailant's apartment, they uncover a series of crimes far darker and more deadly than anyone ever imagined. Produced by Marwar Junction Productions and Entertainment One with Elizabeth Smart, Kara Robinson Chamberlain, Allison Berkley, Joseph Freed, Tara Long, Geno McDermott and Carolyn Blackstone-Day serving as executive producers. "New York Homicide" - Premieres Early 2022This all-new series dives deep into some of the worst murder cases in the city's recent history. Each hour-long, self-contained episode lets viewers bear witness to the tragedy, the trauma, and the triumph of New Yorkers in the face of Gotham City's worst crimes. With exclusive access to former and current detectives, investigators guide viewers through their most complex cases, while the victim's loved ones recount the ongoing emotional impact. Archival material and dramatic re-creations immerse the audience in the twists and turns of these unforgettable capers. NEW YORK HOMICIDE. Real cases. Real victims. Real heroes. Real New Yorkers. Produced by ITV America's Good Caper Content with Kathryn Vaughan, Jordana Hochman, Brain DeCubellis and Diane and Michele Warshay serving as executive producers. "The Real Murders of Atlanta" - Premieres Early 2022"The Real Murders of Atlanta" portrays the shocking, sinful and salacious cases of homicide that highlight the boundaries between gentrified Southern dynasties, hip hop hustlers and the flashy nouveau riche of this metropolitan mecca of music, entertainment and tech. It's the dark side of the New South, where deadly battles for status and affluence emerge between those who are willing to kill for the good life and those willing to kill to keep it. Produced by 44 Blue Productions with Stephanie Drachkovitch, David Hale, Dan Snook and Robert Wise serving as Executive Producers. 
"Relationship Killer with Faith Jenkins (WT)" - Premieres Early 2022This true Crime series delves into jaw-droppingly evil stories of love that sours, and break-ups that turn downright murderous. Hosted by Divorce Court's presiding judge, Faith Jenkins, these twisted tales of relationships gone bad show what happens when breaking up means only one thing: someone has to die. Produced by Texas Crew Productions and Faith in Justice Productions with David Karabinas, Mary Bissell and Brad Bernstein serving as executive producers for Texas Crew, and Faith Jenkins serving as executive producer for Faith in Justice. "Snapped Notorious: The Cleveland Strangler" - Two-Hour Special Premieres this Fall For over two years, the Cleveland Strangler murdered eleven women and lived with their bodies decomposing inside his house. Five women managed to escape from his attacks and share details about the terror they experienced inside his house of horrors. Produced by Catalina Content with Jeff Collins, Deborah Allen and Russell Heldt serving as executive producers. "Snapped Notorious: Happy Face Killer" - Two-Hour Special Premieres this Fall The man known as the Happy Face Killer viciously strangled 8 women and dumped their bodies along the road. Crime novelist, M William Phelps, shares rare on-camera interviews and never-heard-before chilling recordings with the psychopathic killer. Produced by Catalina Content with Jeff Collins, Deborah Allen, Russell Heldt and M. William Phelps serving as executive producers. "Snapped Notorious: River Valley Killer" - Two-Hour Special Premieres this Fall From 1993 to 2000, the quiet community of Fort Smith, Arkansas was terrorized by a twisted serial killer, a deranged necrophiliac who targeted elderly and vulnerable women. He became known as The River Valley Killer. Produced by Catalina Content with Jeff Collins, Deborah Allen and Russell Heldt serving as executive producers. "The Toolbox Killer" - Two-Hour Special Premieres this Fall In his own words, America's most sadistic serial killer describes his 1979 killing spree in this 2-hour documentary. Known as "The Toolbox Killer," Lawrence Bittaker was silent about his crimes for 40 years, until he met investigator Laura Brand. Over the course of five years, Brand recorded her many conversations with Bittaker as he spoke from death row about his methods and motives, providing unique insights into the mind of a criminal sadist. Produced by Mike Mathis Productions with Mike Mathis, Matthew Testa and Laura Brand serving as executive producers. "Twisted Killers" - Premieres Early 2022The most baffling cases. The most bizarre killers. What drives acts of evil? "Twisted Killers" tells the shocking stories of some of America's darkest, most unusual murderers. Along the way, a trio of criminal experts, including former NYC DA Beth Karas, retired LAPD Homicide Detective Tracey Benjamin and Forensic Psychologist Kate Termini, provide insight and expertise on how these twisted killers were brought to justice. Produced by ITN Productions with Bruce Kennedy and Ian Russell serving as executive producers. "Untitled Carolyn Warmus Project (WT)" - Special Event Series Premieres Early 2022This true crime limited series exposes new theories about the "Fatal Attraction Killer," a case named for its similarity to the blockbuster film centered around a woman who becomes obsessed with her married lover. After spending 27 years in prison for the crime, Carolyn Warmus is speaking for the first time since her release. 
The three-hour series aims to untangle the twisted web of sex, lies and deceit that defined the infamous case. Produced by Entertainment One (eOne) and Bee Holder Productions with Tara Long, Geno McDermott, Ben Megargel and Lorri Leighton serving as executive producers for Entertainment One (eOne), and Steve Lee Jones serving as executive producer for Bee Holder Productions. Shows premiering on Oxygen this summer include "Killer Couples," the all-new series "Charmed to Death" and the all-new two-hour specials "Snapped Notorious: The Girl in the Box" and "Murdered and Missing in Montana." SYFY As a fan-focused network redefining genre programming, SYFY continues to rank among the top 10 highest-reaching cable entertainment networks across all key demos and is home to the Alan Tudyk-led dramedy "Resident Alien," which was the most-watched new cable drama in the last year. The serieswill return for its sophomore run in 2022, joining the previously announced cross-network drama "Chucky,"which will air on USA and SYFY and features a vintage Chucky doll wreaking havoc on an idyllic American town. These titles join the all-new scripted pickups "Day of the Dead" and "Astrid & Lilly Save the World." The sci-fi brand will also put a horror spin on the popular "Bring It On" film franchise with an all-new original movie, "Bring It On: Halloween (WT)," in partnership with Universal 1440 Entertainment, set for 2022, in addition to the slasher film reboot "Slumber Party Massacre," which will air as an original movie this Fall. New programming coming to SYFY includes: "Astrid & Lilly Save the World" - Premieres in 2022When outcast high school BFFs Astrid & Lilly accidentally crack open a portal to a terrifying monster dimension, they have to figure out how to save the world, if they can survive high school. Produced by Blue Ice Pictures with Lance Samuels, Daniel Iron and Armand Leo serving as executive producers. Noelle Stehman and Betsy Van Stone wrote the pilot and will also serve as executive producers. "Bring It On: Halloween (WT)" - All-New Original Movie Premieres in 2022 Held down by restrictive rules, an embattled cheerleading squad seeks the freedom of a creepy, closed school gym to practice for regionals, but when members of the squad start to disappear, the cheerleaders must unmask their assailant to save themselves. Produced by Universal 1440 Entertainment, "Bring It On: Halloween (WT)" will be released on non-theatrical platforms in 2022. Universal 1440 Entertainment is a production arm of Universal Filmed Entertainment Group (UFEG). "Chucky" - Premieres on SYFY and USA this Fall After a vintage Chucky doll turns up at a suburban yard sale, an idyllic American town is thrown into chaos as a series of horrifying murders begin to expose the town's hypocrisies and secrets. Meanwhile, the arrival of enemies - and allies - from Chucky's past threatens to expose the truth behind the killings, as well as the demon doll's untold origins, as a seemingly ordinary child who somehow became this notorious monster. Produced by UCP, the series will be executive produced by creator Don Mancini, David Kirschner, and Nick Antosca via his banner Eat the Cat, through his overall deal with the studio. Alex Hedlund and Harley Peyton will also serve as executive producers. Mancini, who penned the film franchise, will also write the adaptation, serve as showrunner and direct the first episode. 
"Day of the Dead" - Premieres this Fall "Day of the Dead" is the intense story of six strangers trying to survive the first 24 hours of an undead invasion. This ode to George A. Romero's famous flesh-eaters reminds us that sometimes all it takes to bring people together is a horde of hungry zombies trying to rip them apart. Produced by Cartel and HiTide Studios with Stan Spry, Jeff Holland and Drew Brown serving as executive producers for Cartel, and James Dudelson, Robert Dudelson, Jordan Kizwani and Matt Drake serving as executive producers for HiTide Studios. Jed Elinoff and Scott Thomas will serve as co-showrunners and executive producers and Steven Kostanski will also serve as executive producer. "Slumber Party Massacre" - All-New Original Movie Premieres this Fall A new contemporary twist-filled reimagining of the 1982 slasher cult classic just in time for Halloween. A slumber party turns into a bloodbath, as a psychotic serial killer wielding a power drill disrupts the fun. "Slumber Party Massacre" is produced in partnership with Shout! Studios with Brent Haynes, Bob Emmer, Garson Foos and Jordan Fields serving as executive producers. Danishka Esterhazy serves as director and worked off the screenplay by Suzanne Keilly. The film is also produced by Blue Ice Pictures. Premiering on SYFY this summer is "SurrealEstate." USA NETWORK Building off its more than 25-year streak as a top five cable network, USA is furthering its commitment to scripted programming this fall with the pickup of the highly coveted revival movie "Nash Bridges," which hails from Village Roadshow and features original cast members Don Johnson and Cheech Marin.This title joins the critically acclaimed drama "The Sinner" which recently announced Frances Fisher ("Watchmen"), Michael Mosley ("Ozark") and Alice Kremelberg ("The Trial of the Chicago 7") will join Bill Pullman for its fourth installment, and cross-network drama "Chucky" (with SYFY)from executive producer Don Mancini. The cabler is bolstering its unscripted lineup with the return of tentpole series "Chrisley Knows Best," "Temptation Island" and more, plus the pickup of two all-new unscripted series, "America's Big Deal" and "Mud, Sweat & Beards" (WT)." The network will also continue its over three-decade partnership with the WWE by airing "WWE Monday Night Raw," the #1 cable entertainment program in Live viewership,and the recently renewed "WWE NXT," every Monday and Tuesday, respectively, for all 52 weeks of this year. Since its shift to Tuesdays, "NXT" is posting more than 30% growth and contributing to the network's investment in live event programming. USA will also gain the rights to highly sought-after sports coverage beginning in 2022. New programming coming to USA includes: "America's Big Deal" - Premieres this Fall This groundbreaking competition series invites inventors from across the nation to sell their products LIVE on-air and compete for the chance to strike a life-changing deal with a retail giant. The mastermind behind the series is America's most celebrated entrepreneur, Joy Mangano, who is making it her personal mission to lift up America's greatest inventors and small businesses to give them the same opportunity that launched her business dynasty... the chance to make the biggest deal of their life. Tapping into One Platform Commerce, contestants will sell their wares in real time through NBCUniversal Checkout, with live sales numbers determining who stays and who goes. 
Produced by DIGA Studios with Tony DiSanto, Nick Rigg, Tommy Coriale and Alison Holloway serving as executive producers. Joy Mangano also serves as an executive producer. "Chucky" - Premieres on USA and SYFY this Fall After a vintage Chucky doll turns up at a suburban yard sale, an idyllic American town is thrown into chaos as a series of horrifying murders begin to expose the town's hypocrisies and secrets. Meanwhile, the arrival of enemies - and allies - from Chucky's past threatens to expose the truth behind the killings, as well as the demon doll's untold origins, as a seemingly ordinary child who somehow became this notorious monster. Produced by UCP, the series will be executive produced by creator Don Mancini, David Kirschner, and Nick Antosca via his banner Eat the Cat, through his overall deal with the studio. Alex Hedlund and Harley Peyton will also serve as executive producers. Mancini, who penned the film franchise, will also write the adaptation, serve as showrunner and direct the first episode. "Mud, Sweat & Beards" (WT) - Premieres in 2022On each episode of this all-new series, Donny Dust and Ray Livingston tackle the earth's most remote locations, where they'll build a new primitive paradise using their unrivaled wit and wilderness ingenuity. These best friends and current world-class survivalists will work hand-in-hand as they do everything from building elaborate shelters to tracking down natural food sources, all while combating extreme weather, hunger, predators and wicked BO. Produced by Leftfield Pictures with Shawn Witt, Gretchen Palek, Ryan Pender, Zach Green and Andrew Schechter serving as executive producers. "Nash Bridges" - Special Revival Movie Premieres this Fall"Nash Bridges" returns as a 2-hour movie for USA Network with stars Don Johnson (Nash Bridges) and Cheech Marin (Joe Rodriguez) reprising their roles. The two-hour movie brings the duo back together as elite investigators for the San Francisco Police Department Special Investigations Unit. The film will be produced by Village Roadshow Television.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/FNet Utilities Fat32 Format Tool V1.84.21l HOT.md b/spaces/bioriAsaeru/text-to-voice/FNet Utilities Fat32 Format Tool V1.84.21l HOT.md
deleted file mode 100644
index 67e7a760b5c6e1b937307a9d24565a42ea9184e1..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/FNet Utilities Fat32 Format Tool V1.84.21l HOT.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/How to Choose the Optimal Siemens Motox Geared Motor for Your Needs.md b/spaces/bioriAsaeru/text-to-voice/How to Choose the Optimal Siemens Motox Geared Motor for Your Needs.md
deleted file mode 100644
index aa3e40cea2e0138106456a0cba32c8cbc172afca..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/How to Choose the Optimal Siemens Motox Geared Motor for Your Needs.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
Our extensive range of Siemens geared motors ensures that you will find the optimal product for your needs. With the new SIMOGEAR you will benefit especially from its ability to deliver the highest level of flexibility due to our wide range of gear units, total adaptability and compact design. We also supply servo-geared motors for Motion Control applications.
-
The new SIMOGEAR Siemens geared motors deliver performance from 0.09 kW up to 55 kW. It can achieve a gear unit torque up to 8.000 Nm with helical, parallel shaft, helical bevel and worm geared gear units, additional types and sizes will follow. Due to accordance to the current measures, SIMOGEAR is compatible to many other suppliers of gear motors.
-
With MOTOX, Siemens provides the complete range of geared motors. Our portfolio comprises all commonly used types of gear units and are suitable for many drive applications. Together, the MOTOX family offers a whole world of geared motor options that definitely has the answer to your specific requirements.
-
The SIMOTICS S-1FG1 servo geared motors are compact geared motors. Compared to standard Siemens geared motors with induction machines, they have smaller dimensions, weigh less and have a higher dynamic response.
-
-
The range of types covers helical, parallel shaft, bevel and helical worm geared motors in the usual frame sizes and speed/torque classes. The SIMOTICS S-1FG1 servo geared motors use the same operating heads as the SIMOGEAR geared motors.
-
Flexible servo drive system for demanding applications. Siemens has expanded its drive portfolio for servo applications to include the SIMOTICS S-1FG1 servo geared motor that is optimally harmonized with the SINAMICS S120 inverter system.
-
Siemens is integrating its geared motors and motion control divisions more closely in the UK, and has promoted Julie Ferguson to be the new manager of its geared motors business with the aim of doubling the business within three years.
-
The Siemens Drive Technologies Division is supplementing its Motox geared motors with a new range of worm gears; they are particularly suitable for conveyor systems (Fig.). The single-stage worm geared motor of the S range is available in the three frame sizes S08, S18 and S28, in a torque range from 18 to 80 Nm and in a power range from 0.12 to 0.75 kW (4-pole). Installation can take the shaft-mounted, foot-mounted or flange-mounted form. The input shaft is available either solid or hollow. The use of high-grade materials allows high rated gear torques up to 80 Nm and high transmission ratios of up to 100 in a single stage. The worm shafts are case-hardened and ground, which makes running quieter. A strong radial force is achieved by high-grade roller bearings and greater distances between bearings. The tooth root security of the Cavex concave-profile teeth allows high load peaks. The gears are maintenance-free due to lubrication-for-life with synthetic oil, as well as high-grade bearings and seals.
-
Helical geared motors are the conventional solution for your drive application. Helical gear units are coaxial units where the Siemens gear unit output shaft is in-line with the Siemens motor shaft. A solid shaft is always used as output shaft.
-
Parallel shaft Siemens gear motors are the modern version of coaxial geared motors. As a result of their compact and short design, they take up less space than helical geared motors. Parallel shaft geared motors can either have a solid shaft - or alternatively a hollow shaft as so-called plug-on gear unit.
-
Helical worm geared motors are the favorably-priced solution for drives with angular geared motors. The efficiency is significantly better than that of pure worm geared motors due to the implementation as helical-worm geared motors.
-
Helical bevel Siemens gear motors are angular geared motors where the Siemens gear unit output shaft is rotated through 90° to the motor shaft. Helical bevel geared units can either have a solid or a hollow shaft.
-
The SIMOGEAR geared motor delivers performance from 0.09 kW up to 55 kW. It can achieve a gear unit torque up to 19 500 Nm with the helical, parallel shaft, bevel, helical worm, and worm geared motors. Due to the current measures, SIMOGEAR is compatible with many other geared motors suppliers.
-
The extensive range of geared motors ensures that you will find the optimal product for your needs. With the new SIMOGEAR, you will benefit especially from its ability to deliver the highest level of flexibility due to the wide range of gear units, total adaptability, and compact design.
-
The Simogear family includes geared motors in various designs such as helical, parallel shaft, bevel helical and worm gear units with outputs ranging from 0.09 kW to 200 kW and with output torques up to 50,000 Nm. By offering additional frame sizes, Siemens can now supply a complete portfolio of geared motor designs. With their high gear ratios in the two- and three-stage ranges, finer torque grading, high power density and outstanding efficiency, the new models are especially well-equipped to meet the requirements of modern conveyor systems. By virtue of its connection dimensions in conformity with market standards, the new motor range is completely compatible with existing installations.
-
The extensive range of servo geared motors includes a series designed especially for motion control applications. Among their distinguishing features are their excellent balance quality and compact design. Siemens proven 1FK7 and 1FT7 synchronous servomotors are also available as factory fitted motors with planetary gear units. The 1FK7 motors are additionally available with offset shaft and angle geared units in various specifications. For main motors they optionally offer switchgear units.
-
Siemens Mechanical Drives in South Africa has many examples of where it has supplied its units and couplings. On the geared motors side it can easily supply up to 1 000 units and more a month. This does not include the comprehensive service department that refurbishes both the motors and geared units. The Division employs 108 staff and 95 of these are located in the assembly, manufacturing and services departments.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/README.md b/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/README.md
deleted file mode 100644
index 1ca9c94d042ef838143a45490fe6b4556c19f3c9..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Read the docs:
-
-The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/).
-Documents in this directory are not meant to be read on github.
diff --git a/spaces/camenduru-com/terminal/README.md b/spaces/camenduru-com/terminal/README.md
deleted file mode 100644
index 1f846beaf2540732e3dd1123ad615e04dd853590..0000000000000000000000000000000000000000
--- a/spaces/camenduru-com/terminal/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Terminal
-emoji: 💻
-colorFrom: pink
-colorTo: pink
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/utils/collect_env.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/utils/collect_env.py
deleted file mode 100644
index 807b6c7e6245d0a21221b1b8d29b841ec8251761..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/utils/collect_env.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import importlib
-import numpy as np
-import os
-import re
-import subprocess
-import sys
-from collections import defaultdict
-import PIL
-import torch
-import torchvision
-from tabulate import tabulate
-
-__all__ = ["collect_env_info"]
-
-
-def collect_torch_env():
- try:
- import torch.__config__
-
- return torch.__config__.show()
- except ImportError:
- # compatible with older versions of pytorch
- from torch.utils.collect_env import get_pretty_env_info
-
- return get_pretty_env_info()
-
-
-def get_env_module():
- var_name = "DETECTRON2_ENV_MODULE"
- return var_name, os.environ.get(var_name, "")
-
-
-def detect_compute_compatibility(CUDA_HOME, so_file):
- try:
- cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump")
- if os.path.isfile(cuobjdump):
- output = subprocess.check_output(
- "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True
- )
- output = output.decode("utf-8").strip().split("\n")
- arch = []
- for line in output:
- line = re.findall(r"\.sm_([0-9]*)\.", line)[0]
- arch.append(".".join(line))
- arch = sorted(set(arch))
- return ", ".join(arch)
- else:
- return so_file + "; cannot find cuobjdump"
- except Exception:
- # unhandled failure
- return so_file
-
-
-def collect_env_info():
- has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM
- torch_version = torch.__version__
-
- # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional
- from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
-
- has_rocm = False
- if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None):
- has_rocm = True
- has_cuda = has_gpu and (not has_rocm)
-
- data = []
- data.append(("sys.platform", sys.platform)) # check-template.yml depends on it
- data.append(("Python", sys.version.replace("\n", "")))
- data.append(("numpy", np.__version__))
-
- try:
- import detectron2 # noqa
-
- data.append(
- ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__))
- )
- except ImportError:
- data.append(("detectron2", "failed to import"))
- except AttributeError:
- data.append(("detectron2", "imported a wrong installation"))
-
- try:
- import detectron2._C as _C
- except ImportError as e:
- data.append(("detectron2._C", f"not built correctly: {e}"))
-
- # print system compilers when extension fails to build
- if sys.platform != "win32": # don't know what to do for windows
- try:
-            # this is how torch/utils/cpp_extensions.py chooses the compiler
- cxx = os.environ.get("CXX", "c++")
- cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True)
- cxx = cxx.decode("utf-8").strip().split("\n")[0]
- except subprocess.SubprocessError:
- cxx = "Not found"
- data.append(("Compiler ($CXX)", cxx))
-
- if has_cuda and CUDA_HOME is not None:
- try:
- nvcc = os.path.join(CUDA_HOME, "bin", "nvcc")
- nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True)
- nvcc = nvcc.decode("utf-8").strip().split("\n")[-1]
- except subprocess.SubprocessError:
- nvcc = "Not found"
- data.append(("CUDA compiler", nvcc))
- if has_cuda and sys.platform != "win32":
- try:
- so_file = importlib.util.find_spec("detectron2._C").origin
- except (ImportError, AttributeError):
- pass
- else:
- data.append(
- ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file))
- )
- else:
- # print compilers that are used to build extension
- data.append(("Compiler", _C.get_compiler_version()))
- data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip
- if has_cuda and getattr(_C, "has_cuda", lambda: True)():
- data.append(
- ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__))
- )
-
- data.append(get_env_module())
- data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__)))
- data.append(("PyTorch debug build", torch.version.debug))
-
- if not has_gpu:
- has_gpu_text = "No: torch.cuda.is_available() == False"
- else:
- has_gpu_text = "Yes"
- data.append(("GPU available", has_gpu_text))
- if has_gpu:
- devices = defaultdict(list)
- for k in range(torch.cuda.device_count()):
- cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k)))
- name = torch.cuda.get_device_name(k) + f" (arch={cap})"
- devices[name].append(str(k))
- for name, devids in devices.items():
- data.append(("GPU " + ",".join(devids), name))
-
- if has_rocm:
- msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else ""
- data.append(("ROCM_HOME", str(ROCM_HOME) + msg))
- else:
- try:
- from torch.utils.collect_env import get_nvidia_driver_version, run as _run
-
- data.append(("Driver version", get_nvidia_driver_version(_run)))
- except Exception:
- pass
- msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else ""
- data.append(("CUDA_HOME", str(CUDA_HOME) + msg))
-
- cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
- if cuda_arch_list:
- data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list))
- data.append(("Pillow", PIL.__version__))
-
- try:
- data.append(
- (
- "torchvision",
- str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__),
- )
- )
- if has_cuda:
- try:
- torchvision_C = importlib.util.find_spec("torchvision._C").origin
- msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
- data.append(("torchvision arch flags", msg))
- except (ImportError, AttributeError):
- data.append(("torchvision._C", "Not found"))
- except AttributeError:
- data.append(("torchvision", "unknown"))
-
- try:
- import fvcore
-
- data.append(("fvcore", fvcore.__version__))
- except (ImportError, AttributeError):
- pass
-
- try:
- import iopath
-
- data.append(("iopath", iopath.__version__))
- except (ImportError, AttributeError):
- pass
-
- try:
- import cv2
-
- data.append(("cv2", cv2.__version__))
- except (ImportError, AttributeError):
- data.append(("cv2", "Not found"))
- env_str = tabulate(data) + "\n"
- env_str += collect_torch_env()
- return env_str
-
-
-def test_nccl_ops():
- num_gpu = torch.cuda.device_count()
- if os.access("/tmp", os.W_OK):
- import torch.multiprocessing as mp
-
- dist_url = "file:///tmp/nccl_tmp_file"
- print("Testing NCCL connectivity ... this should not hang.")
- mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False)
- print("NCCL succeeded.")
-
-
-def _test_nccl_worker(rank, num_gpu, dist_url):
- import torch.distributed as dist
-
- dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu)
- dist.barrier(device_ids=[rank])
-
-
-if __name__ == "__main__":
- try:
- from detectron2.utils.collect_env import collect_env_info as f
-
- print(f())
- except ImportError:
- print(collect_env_info())
-
- if torch.cuda.is_available():
- num_gpu = torch.cuda.device_count()
- for k in range(num_gpu):
- device = f"cuda:{k}"
- try:
- x = torch.tensor([1, 2.0], dtype=torch.float32)
- x = x.to(device)
- except Exception as e:
- print(
- f"Unable to copy tensor to device={device}: {e}. "
- "Your CUDA environment is broken."
- )
- if num_gpu > 1:
- test_nccl_ops()
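`collect_env_info()` returns a plain formatted string, so callers outside this file can simply print or log it. A small illustrative sketch (not part of the repo) of doing that from another script:

```python
# Illustrative only: log the environment table built by collect_env_info() above.
import logging

from detectron2.utils.collect_env import collect_env_info

logging.basicConfig(level=logging.INFO)
logging.getLogger("env_report").info("Environment:\n%s", collect_env_info())
```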
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/train_net.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/train_net.py
deleted file mode 100644
index 8a6f29715da49f524604acc7bd38bda1bab99fd5..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/train_net.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-"""
-A main training script.
-
-This script reads a given config file and runs the training or evaluation.
-It is an entry point that is made to train standard models in detectron2.
-
-In order to let one script support training of many models,
-this script contains logic that is specific to these built-in models and therefore
-may not be suitable for your own project.
-For example, your research project perhaps only needs a single "evaluator".
-
-Therefore, we recommend you use detectron2 as a library and take
-this file as an example of how to use the library.
-You may want to write your own script with your datasets and other customizations.
-"""
-
-import logging
-import os
-from collections import OrderedDict
-
-import detectron2.utils.comm as comm
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import get_cfg
-from detectron2.data import MetadataCatalog
-from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
-from detectron2.evaluation import (
- CityscapesInstanceEvaluator,
- CityscapesSemSegEvaluator,
- COCOEvaluator,
- COCOPanopticEvaluator,
- DatasetEvaluators,
- LVISEvaluator,
- PascalVOCDetectionEvaluator,
- SemSegEvaluator,
- verify_results,
-)
-from detectron2.modeling import GeneralizedRCNNWithTTA
-
-
-def build_evaluator(cfg, dataset_name, output_folder=None):
- """
- Create evaluator(s) for a given dataset.
- This uses the special metadata "evaluator_type" associated with each builtin dataset.
- For your own dataset, you can simply create an evaluator manually in your
- script and do not have to worry about the hacky if-else logic here.
- """
- if output_folder is None:
- output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
- evaluator_list = []
- evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
- if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
- evaluator_list.append(
- SemSegEvaluator(
- dataset_name,
- distributed=True,
- output_dir=output_folder,
- )
- )
- if evaluator_type in ["coco", "coco_panoptic_seg"]:
- evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
- if evaluator_type == "coco_panoptic_seg":
- evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
- if evaluator_type == "cityscapes_instance":
- return CityscapesInstanceEvaluator(dataset_name)
- if evaluator_type == "cityscapes_sem_seg":
- return CityscapesSemSegEvaluator(dataset_name)
- elif evaluator_type == "pascal_voc":
- return PascalVOCDetectionEvaluator(dataset_name)
- elif evaluator_type == "lvis":
- return LVISEvaluator(dataset_name, output_dir=output_folder)
- if len(evaluator_list) == 0:
- raise NotImplementedError(
- "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
- )
- elif len(evaluator_list) == 1:
- return evaluator_list[0]
- return DatasetEvaluators(evaluator_list)
-
-
-class Trainer(DefaultTrainer):
- """
- We use the "DefaultTrainer" which contains pre-defined default logic for
- standard training workflow. They may not work for you, especially if you
- are working on a new research project. In that case you can write your
- own training loop. You can use "tools/plain_train_net.py" as an example.
- """
-
- @classmethod
- def build_evaluator(cls, cfg, dataset_name, output_folder=None):
- return build_evaluator(cfg, dataset_name, output_folder)
-
- @classmethod
- def test_with_TTA(cls, cfg, model):
- logger = logging.getLogger("detectron2.trainer")
- # In the end of training, run an evaluation with TTA
- # Only support some R-CNN models.
- logger.info("Running inference with test-time augmentation ...")
- model = GeneralizedRCNNWithTTA(cfg, model)
- evaluators = [
- cls.build_evaluator(
- cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
- )
- for name in cfg.DATASETS.TEST
- ]
- res = cls.test(cfg, model, evaluators)
- res = OrderedDict({k + "_TTA": v for k, v in res.items()})
- return res
-
-
-def setup(args):
- """
- Create configs and perform basic setups.
- """
- cfg = get_cfg()
- cfg.merge_from_file(args.config_file)
- cfg.merge_from_list(args.opts)
- cfg.freeze()
- default_setup(cfg, args)
- return cfg
-
-
-def main(args):
- cfg = setup(args)
-
- if args.eval_only:
- model = Trainer.build_model(cfg)
- DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
- cfg.MODEL.WEIGHTS, resume=args.resume
- )
- res = Trainer.test(cfg, model)
- if cfg.TEST.AUG.ENABLED:
- res.update(Trainer.test_with_TTA(cfg, model))
- if comm.is_main_process():
- verify_results(cfg, res)
- return res
-
- """
- If you'd like to do anything fancier than the standard training logic,
- consider writing your own training loop (see plain_train_net.py) or
- subclassing the trainer.
- """
- trainer = Trainer(cfg)
- trainer.resume_or_load(resume=args.resume)
- if cfg.TEST.AUG.ENABLED:
- trainer.register_hooks(
- [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
- )
- return trainer.train()
-
-
-if __name__ == "__main__":
- args = default_argument_parser().parse_args()
- print("Command Line Args:", args)
- launch(
- main,
- args.num_gpus,
- num_machines=args.num_machines,
- machine_rank=args.machine_rank,
- dist_url=args.dist_url,
- args=(args,),
- )
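The `build_evaluator` docstring above notes that, for a custom dataset, an evaluator can simply be created manually instead of relying on the `evaluator_type` heuristics. A minimal sketch of that route, where `"my_dataset_val"` and the output path are placeholder names:

```python
# Sketch of creating an evaluator manually for a custom dataset, as suggested
# in build_evaluator's docstring. "my_dataset_val" is a placeholder dataset name.
import os

from detectron2.evaluation import COCOEvaluator, DatasetEvaluators

output_folder = os.path.join("./output", "inference")
evaluator = DatasetEvaluators([
    COCOEvaluator("my_dataset_val", output_dir=output_folder),
])
```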
diff --git a/spaces/changkeyculing/chatgpt-detector-single/app.py b/spaces/changkeyculing/chatgpt-detector-single/app.py
deleted file mode 100644
index e46b34655f2d46f78c6c602c47fd748a2c0e5a1a..0000000000000000000000000000000000000000
--- a/spaces/changkeyculing/chatgpt-detector-single/app.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import os
-import gradio as gr
-from transformers import pipeline
-
-auth_token = "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"
-pipeline_en = pipeline(task="text-classification", model="changkeyculing/en-gpt-detect",use_auth_token=auth_token)
-pipeline_zh = pipeline(task="text-classification", model="changkeyculing/zh-gpt-detect",use_auth_token=auth_token)
-
-
-
-def predict_en(text):
- res = pipeline_en(text)[0]
- return res['label'],res['score']
-
-def predict_zh(text):
- res = pipeline_zh(text)[0]
- return res['label'],res['score']
-
-
-
-
-with gr.Blocks() as demo:
- gr.Markdown("""
- ## ChatGPT Detector 🔬 (Single-text version)
- Visit our project on Github: [chatgpt-comparison-detection project](https://github.com/Hello-SimpleAI/chatgpt-comparison-detection)
- 欢迎在 Github 上关注我们的 [ChatGPT 对比与检测项目](https://github.com/Hello-SimpleAI/chatgpt-comparison-detection)
-
- We provide three kinds of detectors, all bilingual / 我们提供了三个版本的检测器,且都支持中英文:
- - [**QA version / 问答版**](https://huggingface.co/spaces/Hello-SimpleAI/chatgpt-detector-qa)
- detect whether an **answer** is generated by ChatGPT for a certain **question**, using PLM-based classifiers / 判断某个**问题的回答**是否由ChatGPT生成,使用基于PTM的分类器来开发;
- [Single-text version / 独立文本版 (👈 Current / 当前使用)](https://huggingface.co/spaces/Hello-SimpleAI/chatgpt-detector-single)
- detect whether a piece of text is ChatGPT generated, using PLM-based classifiers / 判断**单条文本**是否由ChatGPT生成,使用基于PTM的分类器来开发;
- - [Linguistic version / 语言学版](https://huggingface.co/spaces/Hello-SimpleAI/chatgpt-detector-ling)
- detect whether a piece of text is ChatGPT generated, using linguistic features / 判断**单条文本**是否由ChatGPT生成,使用基于语言学特征的模型来开发;
-
-
- """)
- with gr.Tab("English"):
- gr.Markdown("""
- Note: Providing more text to the `Text` box can make the prediction more accurate!
- """)
- t1 = gr.Textbox(lines=5, label='Text',value="There are a few things that can help protect your credit card information from being misused when you give it to a restaurant or any other business:\n\nEncryption: Many businesses use encryption to protect your credit card information when it is being transmitted or stored. This means that the information is transformed into a code that is difficult for anyone to read without the right key.")
- button1 = gr.Button("🤖 Predict!")
- label1 = gr.Textbox(lines=1, label='Predicted Label 🎃')
- score1 = gr.Textbox(lines=1, label='Prob')
- with gr.Tab("中文版"):
- gr.Markdown("""
- 注意: 在`文本`栏中输入更多的文本,可以让预测更准确哦!
- """)
- t2 = gr.Textbox(lines=5, label='文本',value="对于OpenAI大力出奇迹的工作,自然每个人都有自己的看点。我自己最欣赏的地方是ChatGPT如何解决 “AI校正(Alignment)“这个问题。这个问题也是我们课题组这两年在探索的学术问题之一。")
- button2 = gr.Button("🤖 预测!")
- label2 = gr.Textbox(lines=1, label='预测结果 🎃')
- score2 = gr.Textbox(lines=1, label='模型概率')
-
- button1.click(predict_en, inputs=[t1], outputs=[label1,score1], api_name='predict_en')
- button2.click(predict_zh, inputs=[t2], outputs=[label2,score2], api_name='predict_zh')
-
-
-
-demo.launch()
\ No newline at end of file
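For reference, the two predict functions above are thin wrappers around a `text-classification` pipeline. Below is a minimal sketch of the same call with a public checkpoint; the model name is an assumption, since the Space itself loads private models behind an auth token.

```python
# Minimal sketch of the call behind predict_en(); the checkpoint name is assumed.
from transformers import pipeline

detector = pipeline(task="text-classification",
                    model="Hello-SimpleAI/chatgpt-detector-roberta")
result = detector("Encryption protects your card data in transit and at rest.")[0]
print(result["label"], round(result["score"], 3))  # predicted label plus model probability
```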
diff --git a/spaces/chansung/LLaMA-7B/gen.py b/spaces/chansung/LLaMA-7B/gen.py
deleted file mode 100644
index 534f57f7ce5ed64b5155cb29b164fdbc3d1b7beb..0000000000000000000000000000000000000000
--- a/spaces/chansung/LLaMA-7B/gen.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from typing import Tuple
-
-import os
-import time
-import json
-from pathlib import Path
-
-import torch
-from fairscale.nn.model_parallel.initialize import initialize_model_parallel
-from llama.generation import LLaMA
-from llama.model import ModelArgs, Transformer
-from llama.tokenizer import Tokenizer
-
-from google.cloud import storage
-
-bucket_name = os.environ.get("GCS_BUCKET")
-
-llama_weight_path = "weights/llama"
-tokenizer_weight_path = "weights/tokenizer"
-
-def setup_model_parallel() -> Tuple[int, int]:
- local_rank = int(os.environ.get("LOCAL_RANK", -1))
- world_size = int(os.environ.get("WORLD_SIZE", -1))
-
- torch.distributed.init_process_group("nccl")
- initialize_model_parallel(world_size)
- torch.cuda.set_device(local_rank)
-
- # seed must be the same in all processes
- torch.manual_seed(1)
- return local_rank, world_size
-
-def download_pretrained_models(
- ckpt_path: str,
- tokenizer_path: str
-):
- os.makedirs(llama_weight_path)
- os.makedirs(tokenizer_weight_path)
-
- storage_client = storage.Client.create_anonymous_client()
- bucket = storage_client.bucket(bucket_name)
-
- blobs = bucket.list_blobs(prefix=f"{ckpt_path}/")
- for blob in blobs:
- filename = blob.name.split("/")[1]
- blob.download_to_filename(f"{llama_weight_path}/{filename}")
-
- blobs = bucket.list_blobs(prefix=f"{tokenizer_path}/")
- for blob in blobs:
- filename = blob.name.split("/")[1]
- blob.download_to_filename(f"{tokenizer_weight_path}/{filename}")
-
-def get_pretrained_models(
- ckpt_path: str,
- tokenizer_path: str,
- local_rank: int,
- world_size: int) -> LLaMA:
-
- download_pretrained_models(ckpt_path, tokenizer_path)
-
- start_time = time.time()
- checkpoints = sorted(Path(llama_weight_path).glob("*.pth"))
-
- llama_ckpt_path = checkpoints[local_rank]
- print("Loading")
- checkpoint = torch.load(llama_ckpt_path, map_location="cpu")
- with open(Path(llama_weight_path) / "params.json", "r") as f:
- params = json.loads(f.read())
-
- model_args: ModelArgs = ModelArgs(max_seq_len=512, max_batch_size=1, **params)
- tokenizer = Tokenizer(model_path=f"{tokenizer_weight_path}/tokenizer.model")
- model_args.vocab_size = tokenizer.n_words
- torch.set_default_tensor_type(torch.cuda.HalfTensor)
- model = Transformer(model_args).cuda().half()
- torch.set_default_tensor_type(torch.FloatTensor)
- model.load_state_dict(checkpoint, strict=False)
-
- generator = LLaMA(model, tokenizer)
- print(f"Loaded in {time.time() - start_time:.2f} seconds")
- return generator
-
-def get_output(
- generator: LLaMA,
- prompt: str,
- max_gen_len: int = 256,
- temperature: float = 0.8,
- top_p: float = 0.95):
-
- prompts = [prompt]
- results = generator.generate(
- prompts,
- max_gen_len=max_gen_len,
- temperature=temperature,
- top_p=top_p
- )
-
- return results
\ No newline at end of file
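For reference, a hypothetical sketch of how the helpers in this `gen.py` would be wired together by the Space's app code; the GCS prefixes and the prompt are illustrative assumptions, not values confirmed by the file.

```python
# Hypothetical glue code for the helpers defined in gen.py above.
# "llama-7b" and "tokenizer" are assumed GCS prefixes, not confirmed values.
import gen

local_rank, world_size = gen.setup_model_parallel()
generator = gen.get_pretrained_models("llama-7b", "tokenizer", local_rank, world_size)
outputs = gen.get_output(generator, prompt="Explain model parallelism in one sentence.",
                         max_gen_len=64)
if local_rank == 0:  # only rank 0 prints in a model-parallel run
    print(outputs[0])
```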
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/exps/default/yolox_tiny.py b/spaces/chendl/compositional_test/multimodal/YOLOX/exps/default/yolox_tiny.py
deleted file mode 100644
index 5220de2f2e6760d5c9a966d5dd397aad721fc60a..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/exps/default/yolox_tiny.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import os
-
-from yolox.exp import Exp as MyExp
-
-
-class Exp(MyExp):
- def __init__(self):
- super(Exp, self).__init__()
- self.depth = 0.33
- self.width = 0.375
- self.input_size = (416, 416)
- self.mosaic_scale = (0.5, 1.5)
- self.random_size = (10, 20)
- self.test_size = (416, 416)
- self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
- self.enable_mixup = False
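For reference, YOLOX experiment files like this one are consumed through the `Exp` object. A minimal sketch follows, assuming a YOLOX install and that the upstream `yolox.exp.Exp` base class supplies `get_model()`.

```python
# Minimal sketch: instantiate the tiny experiment and build its model.
# Assumes this file is importable as `yolox_tiny` and YOLOX is installed.
from yolox_tiny import Exp

exp = Exp()
print(exp.exp_name, exp.depth, exp.width, exp.input_size)  # yolox_tiny 0.33 0.375 (416, 416)
model = exp.get_model()  # inherited from the upstream yolox.exp.Exp base class
model.eval()
```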
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/distributed_ray_retriever.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/distributed_ray_retriever.py
deleted file mode 100644
index dd5baaf726116f8569228af74c221c67b477d1cb..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/distributed_ray_retriever.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import logging
-import random
-
-import ray
-
-from transformers import RagConfig, RagRetriever, RagTokenizer
-from transformers.models.rag.retrieval_rag import CustomHFIndex
-
-
-logger = logging.getLogger(__name__)
-
-
-class RayRetriever:
- def __init__(self):
- self.initialized = False
-
- def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
- if not self.initialized:
- self.retriever = RagRetriever(
- config,
- question_encoder_tokenizer=question_encoder_tokenizer,
- generator_tokenizer=generator_tokenizer,
- index=index,
- init_retrieval=False,
- )
- self.initialized = True
-
- def init_retrieval(self):
- self.retriever.index.init_index()
-
- def retrieve(self, question_hidden_states, n_docs):
- doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
- return doc_ids, retrieved_doc_embeds
-
-
-class RagRayDistributedRetriever(RagRetriever):
- """
- A distributed retriever built on top of the ``Ray`` API, a library
- for building distributed applications (https://docs.ray.io/en/master/).
- During training, all training workers initialize their own
- instance of a `RagRayDistributedRetriever`, and each instance of
- this distributed retriever shares a common set of Retrieval Ray
- Actors (https://docs.ray.io/en/master/walkthrough.html#remote-classes-actors)
- that load the index on separate processes. Ray
- handles the communication between the `RagRayDistributedRetriever`
- instances and the remote Ray actors. If training is done in a
- non-distributed setup, the index will simply be loaded in the same
- process as the training worker and Ray will not be used.
-
- Args:
- config (:class:`~transformers.RagConfig`):
- The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build.
- question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`):
- The tokenizer that was used to tokenize the question.
- It is used to decode the question and then use the generator_tokenizer.
- generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`):
- The tokenizer used for the generator part of the RagModel.
- retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors.
- These actor classes run on remote processes and are responsible for performing the index lookup.
- index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration):
- If specified, use this index instead of the one built using the configuration
- """
-
- def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
- if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
- raise ValueError(
- "When using Ray for distributed fine-tuning, "
- "you'll need to provide the paths instead, "
- "as the dataset and the index are loaded "
- "separately. More info in examples/rag/use_own_knowledge_dataset.py "
- )
- super().__init__(
- config,
- question_encoder_tokenizer=question_encoder_tokenizer,
- generator_tokenizer=generator_tokenizer,
- index=index,
- init_retrieval=False,
- )
- self.retrieval_workers = retrieval_workers
- if len(self.retrieval_workers) > 0:
- ray.get(
- [
- worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
- for worker in self.retrieval_workers
- ]
- )
-
- def init_retrieval(self):
- """
- Retriever initialization function, needs to be called from the
- training process. This function triggers retrieval initialization
- for all retrieval actors if using distributed setting, or loads
- index into current process if training is not distributed.
- """
- logger.info("initializing retrieval")
-
- if len(self.retrieval_workers) > 0:
- ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
- else:
- # Non-distributed training. Load index into this same process.
- self.index.init_index()
-
- def retrieve(self, question_hidden_states, n_docs):
- """
- Retrieves documents for specified ``question_hidden_states``. If
- running training with multiple workers, a random retrieval actor is
- selected to perform the index lookup and return the result.
-
- Args:
- question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`):
- A batch of query vectors to retrieve with.
- n_docs (:obj:`int`):
- The number of docs retrieved per query.
-
- Output:
- retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)`):
- The retrieval embeddings of the retrieved docs per query.
- doc_ids (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`):
- The ids of the documents in the index.
- doc_dicts (:obj:`List[dict]`):
- The retrieved document dictionaries (one per query) for the retrieved ids.
- """
- if len(self.retrieval_workers) > 0:
- # Select a random retrieval actor.
- random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
- doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
- else:
- doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
- return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
-
- @classmethod
- def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
- return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
-
- @classmethod
- def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
- config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
- rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
- question_encoder_tokenizer = rag_tokenizer.question_encoder
- generator_tokenizer = rag_tokenizer.generator
- if indexed_dataset is not None:
- config.index_name = "custom"
- index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
- else:
- index = cls._build_index(config)
- return cls(
- config,
- question_encoder_tokenizer=question_encoder_tokenizer,
- generator_tokenizer=generator_tokenizer,
- retrieval_workers=actor_handles,
- index=index,
- )
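For reference, a hypothetical construction sketch modeled on the RAG/Ray fine-tuning example this file belongs to: the retrieval actors are created first and their handles handed to `from_pretrained`. The actor count and checkpoint name below are assumptions.

```python
# Hypothetical wiring for RagRayDistributedRetriever; the worker count and the
# "facebook/rag-token-nq" checkpoint are assumptions for illustration.
import ray
from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever

ray.init()
workers = [ray.remote(RayRetriever).remote() for _ in range(2)]  # one actor per retrieval worker

retriever = RagRayDistributedRetriever.from_pretrained(
    "facebook/rag-token-nq",
    actor_handles=workers,
)
retriever.init_retrieval()  # loads the index on the remote actors
```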
diff --git a/spaces/chompionsawelo/whisper_transcribe/ui/lang_setting.py b/spaces/chompionsawelo/whisper_transcribe/ui/lang_setting.py
deleted file mode 100644
index 25e3005a26c2d93bf56c7755f97bdf923836ec58..0000000000000000000000000000000000000000
--- a/spaces/chompionsawelo/whisper_transcribe/ui/lang_setting.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from ui.ui_component import *
-
-
-def change_lang(input):
- # Change language function
- global current_ui_lang
- current_ui_lang = get_ui_dict(input)
- print(f"Change language to {available_ui_lang[input]}")
- return [
- # Top
- top_markdown.update(
- current_ui_lang["top_markdown"]),
- input_url.update(
- label=current_ui_lang["input_url_label"], info=current_ui_lang["input_url_info"]),
- url_download_button.update(
- current_ui_lang["download_button_value"]),
- input_video.update(
- label=current_ui_lang["input_video_label"]),
- start_time.update(
- label=current_ui_lang["start_time_label"]),
- end_time.update(
- label=current_ui_lang["end_time_label"]),
- lang_radio.update(
- choices=current_ui_lang["lang_radio_choices"], value=None, label=current_ui_lang["lang_radio_label"], info=current_ui_lang["lang_radio_info"],),
- model_dropdown.update(
- choices=current_ui_lang["model_dropdown_choices"], value=None, label=current_ui_lang["model_dropdown_label"], info=current_ui_lang["model_dropdown_info"]),
- start_button.update(
- current_ui_lang["start_button_value"]),
-
- # Middle
- middle_markdown.update(
- current_ui_lang["middle_markdown"]),
- adjust_speaker.update(
- label=current_ui_lang["adjust_speaker_value"]),
- prev_button.update(
- current_ui_lang["prev_button_value"]),
- next_button.update(
- current_ui_lang["next_button_value"]),
- adjust_button.update(
- current_ui_lang["adjust_button_value"]),
-
- # Bottom
- bottom_markdown.update(
- current_ui_lang["bottom_markdown"]),
- output_video.update(
- label=current_ui_lang["output_video_label"]),
- download_video_subtitle_button.update(
- current_ui_lang["download_video_button_value"]),
- output_transcribe.update(
- label=current_ui_lang["output_transcribe_label"]),
-
- # Summary
- summary_markdown.update(
- current_ui_lang["summary_markdown"]),
- summary_button.update(
- current_ui_lang["summary_button_value"]),
- output_summary.update(
- label=current_ui_lang["output_summary_label"]),
- ]
-
-
-# comp_to_update and the list returned by change_lang must always match in length and order
-comp_to_update = [
- top_markdown,
- input_url,
- url_download_button,
- input_video,
- start_time,
- end_time,
- lang_radio,
- model_dropdown,
- start_button,
- middle_markdown,
- adjust_speaker,
- prev_button,
- next_button,
- adjust_button,
- bottom_markdown,
- output_video,
- download_video_subtitle_button,
- output_transcribe,
- summary_markdown,
- summary_button,
- output_summary,
-]
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/_magics.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/_magics.py
deleted file mode 100644
index 7fe6131182952ff30bf63543de528657f7ba77a2..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/_magics.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Magic functions for rendering vega-lite specifications
-"""
-__all__ = ["vegalite"]
-
-import json
-import warnings
-
-import IPython
-from IPython.core import magic_arguments
-import pandas as pd
-from toolz import curried
-
-from altair.vegalite import v5 as vegalite_v5
-
-try:
- import yaml
-
- YAML_AVAILABLE = True
-except ImportError:
- YAML_AVAILABLE = False
-
-
-RENDERERS = {
- "vega-lite": {
- "5": vegalite_v5.VegaLite,
- },
-}
-
-
-TRANSFORMERS = {
- "vega-lite": {
- "5": vegalite_v5.data_transformers,
- },
-}
-
-
-def _prepare_data(data, data_transformers):
- """Convert input data to data for use within schema"""
- if data is None or isinstance(data, dict):
- return data
- elif isinstance(data, pd.DataFrame):
- return curried.pipe(data, data_transformers.get())
- elif isinstance(data, str):
- return {"url": data}
- else:
- warnings.warn("data of type {} not recognized".format(type(data)), stacklevel=1)
- return data
-
-
-def _get_variable(name):
- """Get a variable from the notebook namespace."""
- ip = IPython.get_ipython()
- if ip is None:
- raise ValueError(
- "Magic command must be run within an IPython "
- "environemnt, in which get_ipython() is defined."
- )
- if name not in ip.user_ns:
- raise NameError(
- "argument '{}' does not match the "
- "name of any defined variable".format(name)
- )
- return ip.user_ns[name]
-
-
-@magic_arguments.magic_arguments()
-@magic_arguments.argument(
- "data",
- nargs="?",
- help="local variablename of a pandas DataFrame to be used as the dataset",
-)
-@magic_arguments.argument("-v", "--version", dest="version", default="v5")
-@magic_arguments.argument("-j", "--json", dest="json", action="store_true")
-def vegalite(line, cell):
- """Cell magic for displaying vega-lite visualizations in CoLab.
-
- %%vegalite [dataframe] [--json] [--version='v5']
-
- Visualize the contents of the cell using Vega-Lite, optionally
- specifying a pandas DataFrame object to be used as the dataset.
-
- If --json is passed, the input is parsed as JSON rather than YAML.
- """
- args = magic_arguments.parse_argstring(vegalite, line)
- existing_versions = {"v5": "5"}
- version = existing_versions[args.version]
- assert version in RENDERERS["vega-lite"]
- VegaLite = RENDERERS["vega-lite"][version]
- data_transformers = TRANSFORMERS["vega-lite"][version]
-
- if args.json:
- spec = json.loads(cell)
- elif not YAML_AVAILABLE:
- try:
- spec = json.loads(cell)
- except json.JSONDecodeError as err:
- raise ValueError(
- "%%vegalite: spec is not valid JSON. "
- "Install pyyaml to parse spec as yaml"
- ) from err
- else:
- spec = yaml.load(cell, Loader=yaml.SafeLoader)
-
- if args.data is not None:
- data = _get_variable(args.data)
- spec["data"] = _prepare_data(data, data_transformers)
-
- return VegaLite(spec)
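For reference, `_prepare_data` above handles three input shapes. A small illustrative sketch of each branch follows; the DataFrame and URL string are placeholders, and `_prepare_data` refers to the helper defined in this module.

```python
# Illustrative exercise of _prepare_data's dict / str / DataFrame branches.
import pandas as pd
from altair.vegalite import v5 as vegalite_v5

transformers_ = vegalite_v5.data_transformers
df = pd.DataFrame({"x": [1, 2, 3]})

print(_prepare_data({"values": [{"x": 1}]}, transformers_))  # dicts pass through unchanged
print(_prepare_data("data/cars.json", transformers_))        # strings become {"url": ...}
print(_prepare_data(df, transformers_))                      # DataFrames run through the active transformer
```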
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/image/png.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/image/png.py
deleted file mode 100644
index 4e899fa5c448bfbe38d27a20b50315287901de97..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/image/png.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# encoding: utf-8
-
-from __future__ import absolute_import, division, print_function
-
-from .constants import MIME_TYPE, PNG_CHUNK_TYPE
-from .exceptions import InvalidImageStreamError
-from .helpers import BIG_ENDIAN, StreamReader
-from .image import BaseImageHeader
-
-
-class Png(BaseImageHeader):
- """
- Image header parser for PNG images
- """
- @property
- def content_type(self):
- """
- MIME content type for this image, unconditionally `image/png` for
- PNG images.
- """
- return MIME_TYPE.PNG
-
- @property
- def default_ext(self):
- """
- Default filename extension, always 'png' for PNG images.
- """
- return 'png'
-
- @classmethod
- def from_stream(cls, stream):
- """
- Return a |Png| instance having header properties parsed from image in
- *stream*.
- """
- parser = _PngParser.parse(stream)
-
- px_width = parser.px_width
- px_height = parser.px_height
- horz_dpi = parser.horz_dpi
- vert_dpi = parser.vert_dpi
-
- return cls(px_width, px_height, horz_dpi, vert_dpi)
-
-
-class _PngParser(object):
- """
- Parses a PNG image stream to extract the image properties found in its
- chunks.
- """
- def __init__(self, chunks):
- super(_PngParser, self).__init__()
- self._chunks = chunks
-
- @classmethod
- def parse(cls, stream):
- """
- Return a |_PngParser| instance containing the header properties
- parsed from the PNG image in *stream*.
- """
- chunks = _Chunks.from_stream(stream)
- return cls(chunks)
-
- @property
- def px_width(self):
- """
- The number of pixels in each row of the image.
- """
- IHDR = self._chunks.IHDR
- return IHDR.px_width
-
- @property
- def px_height(self):
- """
- The number of stacked rows of pixels in the image.
- """
- IHDR = self._chunks.IHDR
- return IHDR.px_height
-
- @property
- def horz_dpi(self):
- """
- Integer dots per inch for the width of this image. Defaults to 72
- when not present in the file, as is often the case.
- """
- pHYs = self._chunks.pHYs
- if pHYs is None:
- return 72
- return self._dpi(pHYs.units_specifier, pHYs.horz_px_per_unit)
-
- @property
- def vert_dpi(self):
- """
- Integer dots per inch for the height of this image. Defaults to 72
- when not present in the file, as is often the case.
- """
- pHYs = self._chunks.pHYs
- if pHYs is None:
- return 72
- return self._dpi(pHYs.units_specifier, pHYs.vert_px_per_unit)
-
- @staticmethod
- def _dpi(units_specifier, px_per_unit):
- """
- Return dots per inch value calculated from *units_specifier* and
- *px_per_unit*.
- """
- if units_specifier == 1 and px_per_unit:
- return int(round(px_per_unit * 0.0254))
- return 72
-
-
-class _Chunks(object):
- """
- Collection of the chunks parsed from a PNG image stream
- """
- def __init__(self, chunk_iterable):
- super(_Chunks, self).__init__()
- self._chunks = list(chunk_iterable)
-
- @classmethod
- def from_stream(cls, stream):
- """
- Return a |_Chunks| instance containing the PNG chunks in *stream*.
- """
- chunk_parser = _ChunkParser.from_stream(stream)
- chunks = [chunk for chunk in chunk_parser.iter_chunks()]
- return cls(chunks)
-
- @property
- def IHDR(self):
- """
- IHDR chunk in PNG image
- """
- match = lambda chunk: chunk.type_name == PNG_CHUNK_TYPE.IHDR # noqa
- IHDR = self._find_first(match)
- if IHDR is None:
- raise InvalidImageStreamError('no IHDR chunk in PNG image')
- return IHDR
-
- @property
- def pHYs(self):
- """
- pHYs chunk in PNG image, or |None| if not present
- """
- match = lambda chunk: chunk.type_name == PNG_CHUNK_TYPE.pHYs # noqa
- return self._find_first(match)
-
- def _find_first(self, match):
- """
- Return first chunk in stream order returning True for function
- *match*.
- """
- for chunk in self._chunks:
- if match(chunk):
- return chunk
- return None
-
-
-class _ChunkParser(object):
- """
- Extracts chunks from a PNG image stream
- """
- def __init__(self, stream_rdr):
- super(_ChunkParser, self).__init__()
- self._stream_rdr = stream_rdr
-
- @classmethod
- def from_stream(cls, stream):
- """
- Return a |_ChunkParser| instance that can extract the chunks from the
- PNG image in *stream*.
- """
- stream_rdr = StreamReader(stream, BIG_ENDIAN)
- return cls(stream_rdr)
-
- def iter_chunks(self):
- """
- Generate a |_Chunk| subclass instance for each chunk in this parser's
- PNG stream, in the order encountered in the stream.
- """
- for chunk_type, offset in self._iter_chunk_offsets():
- chunk = _ChunkFactory(chunk_type, self._stream_rdr, offset)
- yield chunk
-
- def _iter_chunk_offsets(self):
- """
- Generate a (chunk_type, chunk_offset) 2-tuple for each of the chunks
- in the PNG image stream. Iteration stops after the IEND chunk is
- returned.
- """
- chunk_offset = 8
- while True:
- chunk_data_len = self._stream_rdr.read_long(chunk_offset)
- chunk_type = self._stream_rdr.read_str(4, chunk_offset, 4)
- data_offset = chunk_offset + 8
- yield chunk_type, data_offset
- if chunk_type == 'IEND':
- break
- # incr offset for chunk len long, chunk type, chunk data, and CRC
- chunk_offset += (4 + 4 + chunk_data_len + 4)
-
-
-def _ChunkFactory(chunk_type, stream_rdr, offset):
- """
- Return a |_Chunk| subclass instance appropriate to *chunk_type* parsed
- from *stream_rdr* at *offset*.
- """
- chunk_cls_map = {
- PNG_CHUNK_TYPE.IHDR: _IHDRChunk,
- PNG_CHUNK_TYPE.pHYs: _pHYsChunk,
- }
- chunk_cls = chunk_cls_map.get(chunk_type, _Chunk)
- return chunk_cls.from_offset(chunk_type, stream_rdr, offset)
-
-
-class _Chunk(object):
- """
- Base class for specific chunk types. Also serves as the default chunk
- type.
- """
- def __init__(self, chunk_type):
- super(_Chunk, self).__init__()
- self._chunk_type = chunk_type
-
- @classmethod
- def from_offset(cls, chunk_type, stream_rdr, offset):
- """
- Return a default _Chunk instance that only knows its chunk type.
- """
- return cls(chunk_type)
-
- @property
- def type_name(self):
- """
- The chunk type name, e.g. 'IHDR', 'pHYs', etc.
- """
- return self._chunk_type
-
-
-class _IHDRChunk(_Chunk):
- """
- IHDR chunk, contains the image dimensions
- """
- def __init__(self, chunk_type, px_width, px_height):
- super(_IHDRChunk, self).__init__(chunk_type)
- self._px_width = px_width
- self._px_height = px_height
-
- @classmethod
- def from_offset(cls, chunk_type, stream_rdr, offset):
- """
- Return an _IHDRChunk instance containing the image dimensions
- extracted from the IHDR chunk in *stream* at *offset*.
- """
- px_width = stream_rdr.read_long(offset)
- px_height = stream_rdr.read_long(offset, 4)
- return cls(chunk_type, px_width, px_height)
-
- @property
- def px_width(self):
- return self._px_width
-
- @property
- def px_height(self):
- return self._px_height
-
-
-class _pHYsChunk(_Chunk):
- """
- pHYs chunk, contains the image dpi information
- """
- def __init__(self, chunk_type, horz_px_per_unit, vert_px_per_unit,
- units_specifier):
- super(_pHYsChunk, self).__init__(chunk_type)
- self._horz_px_per_unit = horz_px_per_unit
- self._vert_px_per_unit = vert_px_per_unit
- self._units_specifier = units_specifier
-
- @classmethod
- def from_offset(cls, chunk_type, stream_rdr, offset):
- """
- Return a _pHYsChunk instance containing the image resolution
- extracted from the pHYs chunk in *stream* at *offset*.
- """
- horz_px_per_unit = stream_rdr.read_long(offset)
- vert_px_per_unit = stream_rdr.read_long(offset, 4)
- units_specifier = stream_rdr.read_byte(offset, 8)
- return cls(
- chunk_type, horz_px_per_unit, vert_px_per_unit, units_specifier
- )
-
- @property
- def horz_px_per_unit(self):
- return self._horz_px_per_unit
-
- @property
- def vert_px_per_unit(self):
- return self._vert_px_per_unit
-
- @property
- def units_specifier(self):
- return self._units_specifier
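For reference, a minimal usage sketch of the parser above; the file name is a placeholder, and the `px_width`/`horz_dpi` accessors are assumed to come from the `BaseImageHeader` superclass.

```python
# Hypothetical usage of the Png header parser; "example.png" is a placeholder path.
from docx.image.png import Png

with open("example.png", "rb") as stream:
    png = Png.from_stream(stream)

print(png.px_width, png.px_height)  # pixel dimensions parsed from the IHDR chunk
print(png.horz_dpi, png.vert_dpi)   # DPI from the pHYs chunk, defaulting to 72
```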
diff --git a/spaces/cihyFjudo/fairness-paper-search/Kahin Hai Mera Pyar 720p Full Mo .md b/spaces/cihyFjudo/fairness-paper-search/Kahin Hai Mera Pyar 720p Full Mo .md
deleted file mode 100644
index 752804a3b8cf994b16c5ee0a4d2eeb8d81a2d71e..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Kahin Hai Mera Pyar 720p Full Mo .md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Skandal Seks Di Pejabat Risda (video Part 02) EXCLUSIVE.md b/spaces/cihyFjudo/fairness-paper-search/Skandal Seks Di Pejabat Risda (video Part 02) EXCLUSIVE.md
deleted file mode 100644
index be9a010193fe38e667bdd97c0f5c51b76f251764..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Skandal Seks Di Pejabat Risda (video Part 02) EXCLUSIVE.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Download Game Yeager Hunter Legend: A Guide for Beginners
-
If you are looking for a thrilling action role-playing game that lets you hunt monsters, explore an alien world, and customize your character, then you should check out Yeager Hunter Legend. This game is developed by IGG.COM, the same company behind popular titles like Lords Mobile and Castle Clash. In this game, you play as Yeager, an elite hunter from the Vyderan clan who is sent to retrieve a stolen relic from the mysterious planet of Ekors. Along the way, you will encounter ferocious beasts, ancient civilizations, and dark secrets.
-
Yeager Hunter Legend is available for Android, iOS, and PC devices. You can download it for free from the Google Play Store, App Store, or BlueStacks emulator. However, before you do that, you might want to read this guide first. In this article, we will show you how to download and install the game on your device, how to choose your weapon and customize your character, how to hunt monsters and explore the world of Ekors, how to play with other players and join alliances, and some tips and tricks to get stronger and have more fun. Let's get started!
How to Download and Install Yeager Hunter Legend on Your Device
-
Depending on what device you are using, there are different steps you need to follow to download and install Yeager Hunter Legend. Here are the instructions for each platform:
-
Android
-
If you have an Android device, you can download Yeager Hunter Legend from the Google Play Store. Here are the steps:
-
-
Open the Google Play Store app on your device.
-
Search for "Yeager Hunter Legend" in the search bar.
-
Tap on the game icon from the results.
-
Tap on "Install" to start downloading the game.
-
Wait for the download to finish. The game will automatically install on your device.
-
Tap on "Open" or find the game icon on your home screen or app drawer.
-
Enjoy playing Yeager Hunter Legend!
-
-
Before you download the game, make sure you have enough storage space on your device. The game requires about 1.5 GB of free space to run smoothly. You also need to have a stable internet connection to play the game online. The game is compatible with Android devices running version 4.4 or higher. The recommended system requirements are 3 GB of RAM and a quad-core processor.
-
iOS
-
If you have an iOS device, you can download Yeager Hunter Legend from the App Store. Here are the steps:
-
-
Open the App Store app on your device.
-
Search for "Yeager Hunter Legend" in the search bar.
-
Tap on the game icon from the results.
-
Tap on "Get" to start downloading the game.
-
Enter your Apple ID password or use Touch ID or Face ID to confirm the download.
-
Wait for the download to finish. The game will automatically install on your device.
-
Tap on "Open" or find the game icon on your home screen or app library.
-
Enjoy playing Yeager Hunter Legend!
-
-
Before you download the game, make sure you have enough storage space on your device. The game requires about 1.6 GB of free space to run smoothly. You also need to have a stable internet connection to play the game online. The game is compatible with iOS devices running version 10.0 or higher. The recommended system requirements are iPhone 7 or newer, iPad Air 2 or newer, or iPod touch 7th generation or newer.
-
PC
-
If you have a PC, you can download Yeager Hunter Legend from the BlueStacks emulator. BlueStacks is a software that allows you to play Android games on your PC. Here are the steps:
Open BlueStacks and sign in with your Google account.
-
Search for "Yeager Hunter Legend" in the search bar.
-
Click on the game icon from the results.
-
Click on "Install" to start downloading the game.
-
Wait for the download to finish. The game will automatically install on your PC.
-
Click on "Open" or find the game icon on your BlueStacks home screen.
-
Enjoy playing Yeager Hunter Legend!
-
-
Before you download the game, make sure you have enough storage space on your PC. The game requires about 2 GB of free space to run smoothly. You also need to have a stable internet connection to play the game online. The game is compatible with Windows 7 or higher and Mac OS X 10.11 or higher. The recommended system requirements are 4 GB of RAM and an Intel or AMD processor.
-
How to download Yeager: Hunter Legend on Android
-Yeager: Hunter Legend best weapon class and skills
-Yeager: Hunter Legend review and gameplay tips
-Yeager: Hunter Legend APK download for free
-Yeager: Hunter Legend monster hunting guide and strategies
-Yeager: Hunter Legend forge and upgrade equipment tutorial
-Yeager: Hunter Legend secrets and hidden locations
-Yeager: Hunter Legend Kallarite and Crystal hack
-Yeager: Hunter Legend iOS download and installation
-Yeager: Hunter Legend ancient seals and sigils guide
-Yeager: Hunter Legend discord server and community
-Yeager: Hunter Legend multiplayer mode and alliance features
-Yeager: Hunter Legend cinematic trailer and story
-Yeager: Hunter Legend comparison with other RPG games
-Yeager: Hunter Legend cheats and codes
-Yeager: Hunter Legend mod APK download and features
-Yeager: Hunter Legend PC version and emulator
-Yeager: Hunter Legend best beasts to hunt and materials to farm
-Yeager: Hunter Legend subscription details and benefits
-Yeager: Hunter Legend latest update and patch notes
-Yeager: Hunter Legend official Facebook page and news
-Yeager: Hunter Legend system requirements and compatibility
-Yeager: Hunter Legend weapons school and movesets guide
-Yeager: Hunter Legend planet Ekors map and exploration tips
-Yeager: Hunter Legend graphics settings and performance optimization
-Yeager: Hunter Legend customer service and support
-Yeager: Hunter Legend fan art and wallpapers
-Yeager: Hunter Legend lore and backstory
-Yeager: Hunter Legend achievements and rewards
-Yeager: Hunter Legend Easter eggs and references
-Yeager: Hunter Legend bugs and glitches report
-Yeager: Hunter Legend voice actors and motion capture technology
-Yeager: Hunter Legend Vyderan clan and Empire history
-Yeager: Hunter Legend character customization and appearance options
-Yeager: Hunter Legend data privacy and security practices
-Yeager: Hunter Legend ratings and reviews from users
-Yeager: Hunter Legend in-app purchases and prices
-Yeager: Hunter Legend screenshots and videos
-Yeager: Hunter Legend future plans and development roadmap
-Yeager: Hunter Legend FAQs and common issues
-
How to Choose Your Weapon and Customize Your Character
-
After you download and install Yeager Hunter Legend, you can start creating your own hunter character and choose your weapon class. There are five weapon classes in the game, each with its own advantages and disadvantages. You can also customize your character's appearance, name, and gender. Here are some details about each weapon class and how to customize your character:
-
Weapon Classes
-
The five weapon classes in Yeager Hunter Legend are Hunting Sword, Force Hammer, Fury Blades, Flux Blaster, and Eidolon Spear. Each weapon class has its own moveset, abilities, and sigils that affect your combat style and performance. Here is a brief overview of each weapon class:
-
-
Weapon Class
Description
-
Hunting Sword
A balanced weapon that can deal quick slashes and powerful thrusts. It has a high mobility and can use sigils to enhance its attacks or heal itself.
-
Force Hammer
A heavy weapon that can deal massive damage and stun enemies with its swings and slams. It has a low mobility but can use sigils to increase its defense or unleash shockwaves.
-
Fury Blades
A dual-wielding weapon that can deal rapid strikes and combos with its blades. It has a medium mobility and can use sigils to boost its speed or unleash elemental attacks.
-
Flux Blaster
A ranged weapon that can deal precise shots and explosive blasts with its gun. It has a medium mobility and can use sigils to switch between different ammo types or activate special modes. tr>
Eidolon Spear
A versatile weapon that can deal swift stabs and wide sweeps with its spear. It has a high mobility and can use sigils to summon an eidolon that fights alongside you or transforms your weapon.
-
-
You can switch between different weapon classes at any time in the game. You can also learn different weapon schools that give you access to different moves and abilities for each weapon class. You can unlock new weapon schools by completing quests, hunting monsters, or buying them with in-game currency.
-
Character Creation
-
After you choose your weapon class, you can customize your character's appearance, name, and gender. You can choose from various options for your character's face, hair, eyes, skin, and outfit. You can also enter a name for your character and select a male or female voice. You can change your character's appearance and name at any time in the game by visiting the barber shop or the name changer in the main hub.
-
Equipment
-
As you progress in the game, you will be able to forge, upgrade, and equip different gear for your character. Gear includes weapons, armor, accessories, and sigils. Gear can improve your character's stats, such as attack, defense, speed, and health. Gear can also have special effects, such as elemental damage, critical chance, or resistance.
-
You can forge new gear by using Kallar-infused beast parts that you obtain from hunting monsters. You can upgrade your gear by using more beast parts or other materials. You can equip up to four pieces of gear at a time: one weapon, one armor, one accessory, and one sigil. You can also have multiple sets of gear that you can switch between depending on the situation.
-
Ancient Seals
-
Ancient seals are artifacts that grant you hunting skills and bonuses. Hunting skills are special abilities that you can use in combat, such as healing, buffing, debuffing, or attacking. Bonuses are passive effects that enhance your stats or performance, such as increased damage, reduced cooldowns, or extra rewards.
-
You can equip up to three ancient seals at a time: one primary seal, one secondary seal, and one tertiary seal. The primary seal determines your hunting skill and its level. The secondary seal determines the bonus effect and its level. The tertiary seal determines the sigil effect and its level.
-
You can obtain ancient seals by completing quests, hunting monsters, or buying them with in-game currency. You can also fuse two ancient seals of the same type to create a new one with a higher level or a different effect.
-
How to Hunt Monsters and Explore the World of Ekors
-
One of the main features of Yeager Hunter Legend is hunting monsters and exploring the world of Ekors. Ekors is a planet full of diverse environments, such as forests, canyons, deserts, volcanoes, and ruins. Each environment has its own monsters, secrets, and challenges.
-
To hunt monsters and explore Ekors, you need to accept quests from the quest board or NPCs in the main hub. Quests will assign you a specific monster to hunt or a specific area to explore. You can also choose to hunt or explore freely without any quest objectives.
-
Once you accept a quest or choose a destination, you will be transported to the hunting grounds or the exploration zone. There you will encounter various monsters that you can fight or avoid. You will also find various items that you can collect or interact with.
-
Combat System
-
The combat system in Yeager Hunter Legend is fast-paced and dynamic. You can use various moves and abilities to attack enemies or defend yourself. You can also use combos to chain your attacks and deal more damage. You can also use sigils to activate special effects or modes that enhance your combat abilities.
-
The combat system is different for each weapon class and weapon school. You need to learn the moves and abilities of your weapon and how to use them effectively. You also need to pay attention to your stamina, health, and sigil gauges. Stamina is used for performing moves and abilities. Health is your life force that decreases when you take damage. Sigil is your energy that allows you to use sigils.
-
You can replenish your stamina, health, and sigil by using items, skills, or sigils. You can also replenish them by resting at a campsite or returning to the main hub. You can carry up to 10 items at a time, such as potions, bombs, traps, or whistles. You can use items by tapping on their icons on the screen or by assigning them to quick slots.
-
To fight enemies, you need to target them by tapping on them or by using the auto-target feature. You can switch targets by swiping left or right on the screen or by using the target switch button. You can also lock on a target by tapping on the lock button. You can move around by using the virtual joystick on the left side of the screen. You can attack by tapping on the attack button on the right side of the screen. You can perform different attacks by tapping, holding, or swiping the attack button. You can also use special moves or abilities by tapping on their icons on the right side of the screen.
-
To defend yourself, you need to dodge, block, or parry enemy attacks. You can dodge by tapping on the dodge button on the right side of the screen. You can block by holding the block button on the right side of the screen. You can parry by timing your block right before an enemy attack hits you. Dodging, blocking, and parrying consume stamina and have different effects depending on your weapon class and weapon school.
-
To use sigils, you need to tap on the sigil button on the right side of the screen. Sigils are special effects or modes that enhance your combat abilities for a limited time. Sigils consume sigil energy and have different effects depending on your weapon class, weapon school, and ancient seal.
-
Monster Types
-
The world of Ekors is inhabited by various types of monsters that you can hunt or encounter in your quests or exploration. Monsters have different abilities, behaviors, and weaknesses that you need to learn and exploit. Monsters also drop different parts that you can use to forge or upgrade your gear.
-
There are four main types of monsters in Yeager Hunter Legend: beasts, reptiles, insects, and ancients. Each type has its own subtypes that have specific characteristics and traits. Here is a brief overview of each type and some examples of each subtype:
-
-
Type
Subtype
Example
-
Beasts
Furry mammals that are agile and ferocious.
Wolvar: A wolf-like beast that hunts in packs and uses its claws and fangs to attack.
-
Feathered birds that are swift and cunning.
Roc: A giant eagle-like beast that flies in the sky and uses its talons and beak to attack.
-
Horned ungulates that are sturdy and powerful.
Bullhorn: A bull-like beast that charges at its enemies and uses its horns and hooves to attack.
-
Reptiles
Scaled lizards that are stealthy and venomous.
Viper: A snake-like reptile that slithers on the ground and uses its fangs and tail to attack.
-
Armored turtles that are defensive and explosive.
Blastoise: A turtle-like reptile that hides in its shell and uses its cannons and mines to attack.
-
Spiked crocodiles that are aggressive and durable.
Crocus: A crocodile-like reptile that lurks in the water and uses its jaws and spikes to attack.
-
Insects
Winged bugs that are nimble and annoying.
Beezle: A bee-like insect that buzzes in the air and uses its stinger and swarm to attack. td>
-
Segmented worms that are flexible and corrosive.
Acidworm: A worm-like insect that burrows in the ground and uses its acid and tentacles to attack.
-
Crustacean crabs that are hardy and clawed.
Crabster: A crab-like insect that scuttles on the land and uses its pincers and shells to attack.
-
Ancients
Dragon-like creatures that are majestic and elemental.
Drake: A dragon-like ancient that breathes fire and uses its wings and claws to attack.
-
Giant-like creatures that are colossal and destructive.
Titan: A giant-like ancient that causes earthquakes and uses its fists and rocks to attack.
-
Mech-like creatures that are advanced and technological.
Cyber: A mech-like ancient that shoots lasers and uses its gears and missiles to attack.
-
-
You can learn more about each monster type and subtype by checking the monster encyclopedia in the game. The monster encyclopedia will show you the monster's name, appearance, description, stats, abilities, weaknesses, drops, and locations. You can also see your hunting record for each monster, such as how many times you have hunted it, how long it took you to hunt it, and what rewards you got from it.
-
Exploration
-
Besides hunting monsters, you can also explore the world of Ekors and discover its secrets and wonders. Ekors is a vast and diverse planet with different environments, such as forests, canyons, deserts, volcanoes, and ruins. Each environment has its own features, such as plants, animals, weather, terrain, and structures.
-
You can explore Ekors by using the map feature in the game. The map will show you the different regions of Ekors and the different areas within each region. You can also see your current location, your quest objectives, your allies' locations, and your enemies' locations. You can also mark points of interest on the map, such as campsites, resources, items, or secrets.
-
You can navigate through Ekors by using the virtual joystick on the left side of the screen. You can also use the sprint button on the right side of the screen to run faster. You can also use the jump button on the right side of the screen to jump over obstacles or gaps. You can also use the glide button on the right side of the screen to glide in the air with your jetpack. You can also use the interact button on the right side of the screen to interact with objects or NPCs in the environment.
-
As you explore Ekors, you will find various items that you can collect or interact with. These items include resources, such as plants, minerals, or beast parts; consumables, such as potions, bombs, or traps; equipment, such as weapons, armor, or accessories; ancient seals, such as artifacts that grant hunting skills or bonuses; secrets, such as hidden items, lore, or quests; and NPCs, such as allies or enemies that you can talk to or fight with.
-
How to Play with Other Players and Join Alliances
-
Another feature of Yeager Hunter Legend is playing with other players and joining alliances. You can team up with other hunters from around the world and take on bigger challenges together. You can also join or create an alliance and participate in events with your alliance members. Here are some details about how to play with other players and join alliances:
-
Co-op Mode
-
Co-op mode is a mode where you can team up with other players and hunt monsters or explore Ekors together. You can play co-op mode by using the co-op feature in the game. The co-op feature will allow you to join or create a co-op room where you can invite or find other players to play with. You can also use the quick match feature to join a random co-op room with other players who have similar levels or preferences as you.
-
You can play co-op mode with up to four players at a time. You can communicate with your co-op partners by using the chat feature or the voice chat feature in the game. You can also use emotes or stickers to express yourself or convey messages. You can also see your co-op partners' names , health, and sigil gauges on the screen. You can also see their weapon class, weapon school, and ancient seal icons on the screen.
-
When you play co-op mode, you will share the same quest objectives, rewards, and items with your co-op partners. You will also share the same monster health, stamina, and sigil gauges with your co-op partners. You can help your co-op partners by healing them, buffing them, or reviving them when they are down. You can also cooperate with your co-op partners by using combos, skills, or sigils that complement each other.
-
Alliance System
-
Alliance system is a system where you can join or create an alliance and participate in events with your alliance members. You can access the alliance system by using the alliance feature in the game. The alliance feature will allow you to join or create an alliance where you can invite or find other players to join. You can also use the alliance chat feature to communicate with your alliance members.
-
You can join or create an alliance with up to 50 players at a time. You can see your alliance name, logo, level, rank, and members on the screen. You can also see your alliance contribution, reputation, and rewards on the screen. You can contribute to your alliance by completing quests, hunting monsters, or donating resources or items. You can earn reputation and rewards by participating in alliance events, such as raids, wars, or tournaments.
-
When you join or create an alliance, you will be able to access exclusive features and benefits that are only available for alliance members. These include:
-
-
Alliance camp: A special campsite where you can rest, heal, and interact with your alliance members.
-
Alliance shop: A special shop where you can buy rare items or services with alliance currency.
-
Alliance vault: A special storage where you can deposit or withdraw resources or items with your alliance members.
-
Alliance quests: Special quests that are only available for alliance members and offer higher rewards and challenges.
-
Alliance skills: Special skills that are only available for alliance members and offer passive bonuses or active effects for your character.
-
-
Tips and Tricks to Get Stronger and Have More Fun
-
Now that you know how to download and install Yeager Hunter Legend, how to choose your weapon and customize your character, how to hunt monsters and explore Ekors, and how to play with other players and join alliances, you might want to know some tips and tricks to get stronger and have more fun in the game. Here are some of them:
-
Main Quests and Side Quests
-
Main quests are quests that advance the main story of the game. They will introduce you to new characters, locations, and events in the game. They will also reward you with experience points, Kallarite, crystals, gear, ancient seals, and other items. You can access main quests by using the quest feature in the game. The quest feature will show you the current main quest that you need to complete and its objectives. You can also see the previous main quests that you have completed and their summaries.
-
Side quests are quests that are not related to the main story of the game. They will give you more information about the world of Ekors and its inhabitants. They will also reward you with experience points, Kallarite, crystals, gear, ancient seals , and other items. You can access side quests by talking to NPCs in the main hub or in the hunting grounds or exploration zones. The NPCs will have a yellow exclamation mark above their heads if they have a side quest for you. You can also see the available side quests and their objectives by using the quest feature in the game.
-
We recommend that you complete both main quests and side quests as much as possible. They will help you level up your character, improve your gear, unlock new features and benefits, and learn more about the game. They will also make your gameplay more fun and varied.
-
Commissions and Bounties
-
Commissions are daily tasks that you can complete to earn extra resources and items. They are similar to side quests, but they are more simple and repetitive. They will ask you to do things like hunting a certain number of monsters, collecting a certain amount of resources, or forging a certain piece of gear. You can access commissions by using the commission feature in the game. The commission feature will show you the available commissions and their objectives and rewards. You can also see the progress and status of your commissions.
-
Bounties are weekly challenges that you can complete to earn extra crystals and ancient seals. They are similar to commissions, but they are more difficult and rewarding. They will ask you to do things like hunting a specific monster, completing a specific quest, or achieving a specific goal. You can access bounties by using the bounty feature in the game. The bounty feature will show you the available bounties and their objectives and rewards. You can also see the progress and status of your bounties.
-
We recommend that you complete both commissions and bounties as much as possible. They will help you earn more resources and items that you can use to upgrade your character and gear. They will also make your gameplay more challenging and satisfying.
-
In-game Currency
-
In-game currency is the money that you can use to buy items or services in the game. There are two types of in-game currency: Kallarite and crystals. Kallarite is the common currency that you can earn by completing quests, hunting monsters, or selling items. Crystals are the premium currency that you can buy with real money or earn by completing bounties or achievements.
-
You can use Kallarite to buy items or services from various shops in the game, such as the weapon shop, the armor shop, the accessory shop, the sigil shop, the item shop, the forge shop, or the barber shop. You can also use Kallarite to upgrade your gear or ancient seals at the forge shop or the seal shop.
-
You can use crystals to buy premium items or services from various shops in the game, such as the crystal shop, the ancient seal shop, or the alliance shop. You can also use crystals to buy extra slots for your gear, ancient seals, or inventory at the storage shop. You can also use crystals to speed up your forging or upgrading process at the forge shop or the seal shop.
-
We recommend that you spend your in-game currency wisely and save it for important purchases or upgrades. You should also try to earn more in-game currency by completing quests, hunting monsters, selling items, completing bounties, or achieving achievements.
-
In-app Purchases
-
In-app purchases are optional features that you can buy with real money to enhance your gameplay experience. There are two types of in-app purchases: premium items and subscriptions. Premium items are one-time purchases that give you access to exclusive items or benefits in the game. Subscriptions are recurring purchases that give you access to exclusive features or benefits in the game for a limited time.
-
Premium items include things like:
-
-
Starter pack: A bundle of items that help you start your adventure in Yeager Hunter Legend.
-
Booster pack: A bundle of items that boost your character's stats or performance for a limited time.
-
Cosmetic pack: A bundle of items that change your character's appearance or style.
-
Limited pack: A bundle of items that are only available for a limited time or quantity.
-
-
Subscriptions include things like:
-
-
VIP membership: A monthly subscription that gives you access to exclusive features and benefits in Yeager Hunter Legend.
-
Alliance membership: A monthly subscription that gives you access to exclusive features and benefits for your alliance in Yeager Hunter Legend.
-
Season pass: A seasonal subscription that gives you access to exclusive quests, rewards, and events in Yeager Hunter Legend.
-
-
We recommend that you only buy in-app purchases if you really want to support the game developers or if you really want to enjoy the game more. You should also be aware of the terms and conditions of each in-app purchase and how to cancel them if you want to. You should also be responsible and not spend more than you can afford.
-
Conclusion
-
Yeager Hunter Legend is a game that offers you a lot of fun and excitement. You can hunt monsters, explore Ekors, customize your character, choose your weapon, play with other players, join alliances, and more. You can also learn some tips and tricks to get stronger and have more fun in the game. If you are looking for a thrilling action role-playing game that lets you hunt monsters, explore an alien world, and customize your character, then you should download Yeager Hunter Legend today and start your adventure!
-
Do you have any questions or comments about Yeager Hunter Legend? Do you have any suggestions or feedback for the game developers? Do you want to share your hunting stories or screenshots with other players? If so, feel free to leave a comment below or visit the official website, Facebook page, or Discord server of Yeager Hunter Legend. We would love to hear from you!
-
FAQs
-
Here are some frequently asked questions and answers about Yeager Hunter Legend:
-
-
What is the difference between Kallar and Ekors?
-Kallar is the name of the planet where the Vyderan clan lives. Ekors is the name of the planet where the stolen relic is located and where the game takes place.
-
What is the difference between hunting grounds and exploration zones?
-Hunting grounds are areas where you can hunt specific monsters for quests or rewards. Exploration zones are areas where you can explore freely and find secrets or items.
-
What is the difference between weapon classes and weapon schools?
-Weapon classes are the types of weapons that you can use in the game, such as Hunting Sword, Force Hammer, Fury Blades, Flux Blaster, or Eidolon Spear. Weapon schools are the subtypes of weapons that have different movesets and abilities for each weapon class, such as Fire Sword, Ice Hammer, Lightning Blades, Plasma Blaster, or Wind Spear.
-
What is the difference between ancient seals and sigils?
-Ancient seals are artifacts that grant you hunting skills and bonuses. Hunting skills are special abilities that you can use in combat, such as healing, buffing, debuffing, or attacking. Bonuses are passive effects that enhance your stats or performance, such as increased damage, reduced cooldowns, or extra rewards. Sigils are special effects or modes that enhance your combat abilities for a limited time. Sigils consume sigil energy and have different effects depending on your weapon class, weapon school, and ancient seal.
-
What is the difference between Kallarite and crystals?
-Kallarite is the common currency that you can earn by completing quests, hunting monsters, or selling items. Crystals are the premium currency that you can buy with real money or earn by completing bounties or achievements.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/FIFA Mobile APK Para Hilesi Futbol Yldzlarnz Toplayn ve Dnya Kupasna Hazrlann.md b/spaces/congsaPfin/Manga-OCR/logs/FIFA Mobile APK Para Hilesi Futbol Yldzlarnz Toplayn ve Dnya Kupasna Hazrlann.md
deleted file mode 100644
index 9b76759a1426bab56be767d09da3140c323bb3c3..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/FIFA Mobile APK Para Hilesi Futbol Yldzlarnz Toplayn ve Dnya Kupasna Hazrlann.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-
FIFA Mobile APK Para Hilesi 2022 Apk Dayı: How to Get Unlimited Money in FIFA Mobile
-
If you are a fan of soccer games, you probably have heard of FIFA Mobile, the official mobile game of the FIFA World Cup 2022™. FIFA Mobile is a popular soccer game that lets you build your ultimate team of soccer stars, compete in various modes, and relive the world's greatest soccer tournament. But what if you want to get unlimited money in FIFA Mobile without spending real money? That's where FIFA Mobile APK Para Hilesi 2022 Apk Dayı comes in.
-
Introduction
-
FIFA Mobile is a free-to-play soccer game that is available for iOS and Android devices. It features over 15,000 authentic soccer players from over 600 teams, including world-class talent like Kylian Mbappé, Christian Pulisic, Vinicius Jr, and Son Heung-min. You can play through the entire tournament with any of the 32 qualified national teams, or rewrite history and take control of 15 non-qualified national teams. You can also compete against other players in pvp modes, such as Head-to-Head, VS Attack, and Manager Mode.
However, as with most free-to-play games, FIFA Mobile also has a currency system that limits your progress and enjoyment. You need coins and gems to buy player packs, upgrade your players, unlock new modes, and more. While you can earn some coins and gems by playing the game, they are not enough to get you the best players and teams. That's why some players resort to using FIFA Mobile APK Para Hilesi 2022 Apk Dayı, a modded version of FIFA Mobile that gives you unlimited money in the game.
-
FIFA Mobile APK Para Hilesi 2022 Apk Dayı is a modified version of FIFA Mobile that has been hacked by a third-party developer. It allows you to access a hidden feature in the game that lets you generate unlimited coins and gems. With this feature, you can buy any player pack you want, upgrade your players to their maximum potential, unlock all modes and features, and more. You can also enjoy the game with enhanced graphics and gameplay, thanks to the new engine that supports up to 60 fps.
-
By using FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you can have more fun and excitement in playing FIFA Mobile. You can build your dream team of soccer legends, such as Paolo Maldini, Ronaldinho, Zidane, Beckham, Ronaldo, and more. You can also dominate your opponents in pvp modes with your superior team and skills. You can relive the world's greatest soccer tournament with any team you want, even if they are not qualified for the World Cup. You can experience realistic soccer simulation with new stadiums, SFX, commentary, and more.
-
How to Download and Install FIFA Mobile APK Para Hilesi 2022 Apk Dayı
-
If you want to try out FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you need to download and install it on your device. However, before you do that, you need to take some precautions to avoid any problems or risks. Here are the steps to download and install FIFA Mobile APK Para Hilesi 2022 Apk Dayı:
-
-
First, you need to uninstall the original version of FIFA Mobile from your device. This is because the modded version will not work if you have the original version installed. To uninstall FIFA Mobile, go to your device settings, find the app, and tap on uninstall.
-
Second, you need to enable the installation of apps from unknown sources on your device. This is because the modded version is not available on the official app stores, and you need to download it from a third-party website. To enable the installation of apps from unknown sources, go to your device settings, find the security option, and toggle on the unknown sources option.
-
Third, you need to download the FIFA Mobile APK Para Hilesi 2022 Apk Dayı file from a reliable and trusted website. There are many websites that claim to offer the modded version of FIFA Mobile, but some of them may contain viruses or malware that can harm your device or steal your data. To avoid this, you need to do some research and find a reputable website that has positive reviews and feedback from other users. One such website is [Apk Dayı], which is a Turkish website that offers various modded games and apps for free.
-
Fourth, you need to locate the downloaded file on your device and tap on it to start the installation process. You may see a warning message that says the file may harm your device, but you can ignore it and proceed with the installation. The installation process may take a few minutes, depending on your device and internet speed.
-
Fifth, you need to launch the modded version of FIFA Mobile and enjoy the game with unlimited money. You may need to grant some permissions to the app, such as access to your storage, camera, microphone, etc. You may also need to sign in with your Google Play Games account or create a new one if you don't have one.
-
-
These are the steps to download and install FIFA Mobile APK Para Hilesi 2022 Apk Dayı on your device. However, you should be aware that using the modded version of FIFA Mobile may have some drawbacks and risks. For example:
-
-
You may not be able to access some features or modes that require an internet connection or a server verification, such as live events, leaderboards, tournaments, etc.
-
You may face some compatibility or performance issues with your device or game version, such as crashes, glitches, errors, etc.
-
You may violate the terms and conditions of FIFA Mobile and EA Sports, which may result in a ban or suspension of your account or device.
-
You may expose your device or data to potential threats or attacks from hackers or malware that may be hidden in the modded file or website.
-
-
Therefore, you should use FIFA Mobile APK Para Hilesi 2022 Apk Dayı at your own risk and discretion. We are not responsible for any consequences or damages that may arise from using the modded version of FIFA Mobile.
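
If you do decide to proceed, one basic precaution worth adding to the steps above is checking that the file you downloaded matches the checksum published by the download site, when the site provides one. Below is a minimal Python sketch of that check; the filename and the expected SHA-256 value are placeholders, not real values from any site:

```python
import hashlib

# Compare a downloaded file against a checksum published by the download site.
# Both the filename and the expected hash below are placeholders.
apk_path = "fifa_mobile_mod.apk"
expected_sha256 = "0000000000000000000000000000000000000000000000000000000000000000"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large APKs do not need to fit in memory.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(apk_path)
print("SHA-256:", actual)
print("Matches published checksum:", actual == expected_sha256)
```

If the two hashes do not match, the file was corrupted or altered in transit and should not be installed.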
-
How to Use FIFA Mobile APK Para Hilesi 2022 Apk Dayı
-
Once you have downloaded and installed FIFA Mobile APK Para Hilesi 2022 Apk Dayı on your device, you can start using it and enjoy the game with unlimited money. Here are some tips on how to use FIFA Mobile APK Para Hilesi 2022 Apk Dayı:
-
How to access the unlimited money feature in the game
-
The main feature of FIFA Mobile APK Para Hilesi 2022 Apk Dayı is that it gives you unlimited coins and gems in the game. To access this feature, you need to go to the store section in the game menu. There, you will see that all the player packs and bundles are free and have no limit. You can buy as many packs as you want without spending any real money. You can also see that your coin and gem balance is always maxed out at 99999999. You can use these coins and gems to buy anything else in the game, such as upgrades, modes, items, etc.
-
fifa mobile mod apk sınırsız para hilesi 2022
-fifa mobile 2022 apk dayı indir ücretsiz
-fifa mobile apk para hilesi nasıl yapılır 2022
-fifa mobile 18.1.03 mod apk unlocked all menu
-fifa mobile world cup 2022 apk download
-fifa mobile apk para hilesi 2022 güncel
-fifa mobile apk dayı hileli oyun indir
-fifa mobile mod apk unlimited money 2022
-fifa mobile apk para hilesi yapma programı
-fifa mobile 2022 apk dayı kurulumu
-fifa mobile apk para hilesi 2022 ios
-fifa mobile apk dayı son sürüm indir
-fifa mobile mod apk menu hileli 2022
-fifa mobile apk para hilesi 2022 android
-fifa mobile apk dayı yorumları ve puanları
-fifa mobile mod apk free download 2022
-fifa mobile apk para hilesi 2022 online
-fifa mobile apk dayı güvenilir mi
-fifa mobile mod apk latest version 2022
-fifa mobile apk para hilesi 2022 video
-fifa mobile apk dayı sorunları ve çözümleri
-fifa mobile mod apk no root 2022
-fifa mobile apk para hilesi 2022 türkçe
-fifa mobile apk dayı destek ve iletişim
-fifa mobile mod apk hack 2022
-
How to use the money to build your ultimate team and compete in various modes
-
With unlimited money in FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you can build your ultimate team of soccer stars without any restrictions. You can buy any player pack you want and get the best players in the game, such as Lionel Messi, Cristiano Ronaldo, Neymar, Mbappé, and more. You can also upgrade your players to their maximum ratings and skills, and customize their appearance, kits, badges, etc. You can also create your own formations and tactics, and optimize your team chemistry and performance.
-
With your ultimate team, you can compete in various modes in FIFA Mobile APK Para Hilesi 2022 Apk Dayı. You can play through the entire World Cup 2022 tournament with any team you want, even if they are not qualified. You can also play against other players in pvp modes, such as Head-to-Head, VS Attack, and Manager Mode. You can also participate in live events, tournaments, seasons, and more. You can win rewards, trophies, and glory with your team.
How to enjoy the game with enhanced graphics and gameplay
-
Another feature of FIFA Mobile APK Para Hilesi 2022 Apk Dayı is that it improves the graphics and gameplay of FIFA Mobile. Thanks to the new engine that supports up to 60 fps, you can enjoy smooth and realistic soccer simulation on your device. You can also experience new stadiums, SFX, commentary, animations, and more. You can also adjust the graphics settings according to your device and preference. With enhanced graphics and gameplay, you can enjoy FIFA Mobile APK Para Hilesi 2022 Apk Dayı more than ever. You can feel the thrill and excitement of playing soccer on your device. You can also immerse yourself in the atmosphere and emotion of the World Cup 2022 tournament. You can also appreciate the details and quality of the game.
-
Tips and Tricks for FIFA Mobile APK Para Hilesi 2022 Apk Dayı
-
While FIFA Mobile APK Para Hilesi 2022 Apk Dayı gives you unlimited money and other advantages in FIFA Mobile, you still need some tips and tricks to make the most out of it. Here are some tips and tricks for FIFA Mobile APK Para Hilesi 2022 Apk Dayı:
-
How to optimize your team chemistry and performance
-
Team chemistry is an important factor in FIFA Mobile that affects your team's performance on the pitch. Team chemistry is determined by various factors, such as player nationality, league, club, position, formation, etc. The higher your team chemistry, the better your team will play together. To optimize your team chemistry in FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you need to consider these factors when building your team. You need to choose players that have a high compatibility with each other based on their attributes. You also need to choose a formation that suits your play style and strategy. You also need to adjust your tactics and instructions according to your opponent and situation.
-
How to score goals and win matches with ease
-
Scoring goals is the main objective of soccer games, and FIFA Mobile is no exception. To score goals and win matches in FIFA Mobile APK Para Hilesi 2022 Apk Dayı, you need to master some skills and techniques. Here are some of them:
-
-
Use the sprint button wisely. The sprint button allows you to run faster with the ball, but it also reduces your control and accuracy. Use it only when you have enough space or when you need to outrun your defender.
-
Use the skill moves effectively. The skill moves allow you to perform various tricks and feints with the ball, such as roulette, rainbow flick, heel-to-heel flick, etc. Use them to confuse or beat your defender or goalkeeper.
-
Use the shoot button correctly. The shoot button allows you to take a shot at the goal, but it also depends on various factors, such as power, angle, distance, etc. Use it only when you have a clear chance or when you are close to the goal.
-
Use the pass button smartly. The pass button allows you to pass the ball to your teammate or cross it into the box. Use it to create chances or opportunities for yourself or your teammates.
-
Use the switch button properly. The switch button allows you to change the player you are controlling on the pitch. Use it to select the best player for each situation or scenario.
-
-
These are some of the skills and techniques that can help you score goals and win matches in FIFA Mobile APK Para Hilesi 2022 Apk Dayı.
-
How to avoid bans and other issues while using the modded version
-
As mentioned earlier, using FIFA Mobile APK Para Hilesi 2022 Apk Dayı may have some drawbacks and risks. One of them is that you may get banned or suspended by FIFA Mobile or EA Sports for violating their terms and conditions. To avoid this, you need to follow some precautions and tips while using the modded version. Here are some of them:
-
-
Do not use the modded version on your main account or device. Use a secondary account or device that you don't care about losing or getting banned.
-
Do not use the modded version on online modes or features that require an internet connection or a server verification, such as live events, leaderboards, tournaments, etc. Use the modded version only on offline modes or features that do not require an internet connection or a server verification, such as World Cup, VS Attack, Manager Mode, etc.
-
Do not use the modded version excessively or carelessly. Use it moderately and reasonably. Do not buy too many player packs, upgrade too many players, unlock too many modes, etc. Do not win too many matches, score too many goals, or dominate too many opponents, etc.
-
Do not brag or boast about using the modded version to other players or on social media. Keep it a secret and do not share it with anyone. Do not provoke or challenge other players who are using the original version of FIFA Mobile.
-
-
These are some of the precautions and tips that can help you avoid bans and other issues while using FIFA Mobile APK Para Hilesi 2022 Apk Dayı.
-
Conclusion
-
FIFA Mobile APK Para Hilesi 2022 Apk Dayı is a modded version of FIFA Mobile that gives you unlimited money in the game. It allows you to buy any player pack you want, upgrade your players to their maximum potential, unlock all modes and features, and more. It also improves the graphics and gameplay of FIFA Mobile with a new engine that supports up to 60 fps. It lets you enjoy FIFA Mobile more than ever with your ultimate team of soccer stars.
-
However, FIFA Mobile APK Para Hilesi 2022 Apk Dayı also has some drawbacks and risks. It may not work on some features or modes that require an internet connection or a server verification. It may cause some compatibility or performance issues with your device or game version. It may violate the terms and conditions of FIFA Mobile and EA Sports, which may result in a ban or suspension of your account or device. It may expose your device or data to potential threats or attacks from hackers or malware.
-
Therefore, you should use FIFA Mobile APK Para Hilesi 2022 Apk Dayı at your own risk and discretion. We are not responsible for any consequences or damages that may arise from using the modded version of FIFA Mobile. If you want to try it out, you can follow the steps and tips we have provided in this article. But if you want to play FIFA Mobile safely and legally, you can download the original version of FIFA Mobile from the official app stores.
-
We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and have a great day!
-
FAQs
-
Here are some frequently asked questions about FIFA Mobile APK Para Hilesi 2022 Apk Dayı:
-
Q: Is FIFA Mobile APK Para Hilesi 2022 Apk Dayı safe to use?
-
A: FIFA Mobile APK Para Hilesi 2022 Apk Dayı is not safe to use, as it is a modded version of FIFA Mobile that has been hacked by a third-party developer. It may contain viruses or malware that can harm your device or steal your data. It may also violate the terms and conditions of FIFA Mobile and EA Sports, which may result in a ban or suspension of your account or device.
-
Q: Is FIFA Mobile APK Para Hilesi 2022 Apk Dayı legal to use?
-
A: FIFA Mobile APK Para Hilesi 2022 Apk Dayı is not legal to use, as it is a modded version of FIFA Mobile that has been hacked by a third-party developer. It infringes the intellectual property rights of FIFA Mobile and EA Sports, which may result in legal action against you.
-
Q: How can I download FIFA Mobile APK Para Hilesi 2022 Apk Dayı?
-
A: You can download FIFA Mobile APK Para Hilesi 2022 Apk Dayı from a reliable and trusted website that offers various modded games and apps for free. One such website is [Apk Dayı], which is a Turkish website that has positive reviews and feedback from other users.
-
Q: How can I install FIFA Mobile APK Para Hilesi 2022 Apk Dayı?
-
A: You can install FIFA Mobile APK Para Hilesi 2022 Apk Dayı by following these steps:
-
-
Uninstall the original version of FIFA Mobile from your device.
-
Enable the installation of apps from unknown sources on your device.
-
Download the FIFA Mobile APK Para Hilesi 2022 Apk Dayı file from a reliable and trusted website.
-
Locate the downloaded file on your device and tap on it to start the installation process.
-
Launch the modded version of FIFA Mobile and enjoy the game with unlimited money.
-
-
Q: How can I use FIFA Mobile APK Para Hilesi 2022 Apk Dayı?
-
A: You can use FIFA Mobile APK Para Hilesi 2022 Apk Dayı by following these tips:
-
-
Access the unlimited money feature in the game by going to the store section in the game menu.
-
Use the money to buy any player pack you want, upgrade your players to their maximum potential, unlock all modes and features, and more.
-
Build your ultimate team of soccer stars and compete in various modes in the game.
-
Enjoy the game with enhanced graphics and gameplay with a new engine that supports up to 60 fps.
-
Avoid bans and other issues while using the modded version by following some precautions and tips.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Grow Your Business with APK Shopee Merchant Features Benefits and Tips.md b/spaces/congsaPfin/Manga-OCR/logs/Grow Your Business with APK Shopee Merchant Features Benefits and Tips.md
deleted file mode 100644
index 8b891aec63d6036a4b31e7c753381982c1fecbd5..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Grow Your Business with APK Shopee Merchant Features Benefits and Tips.md
+++ /dev/null
@@ -1,161 +0,0 @@
-
-
Download APK Shopee Merchant: A Guide for Android Users
-
If you are an online seller who wants to grow your business with Shopee, you might be interested in downloading APK Shopee Merchant. This is a practical and reliable application that helps you manage your business more easily with Shopee, the no. 1 online shopping platform in Indonesia, anytime and anywhere.
-
But what is Shopee Merchant, and what is an APK file? And why would you want to download it instead of getting it from Google Play? In this article, we will answer these questions and show you how to download and use APK Shopee Merchant on your Android device.
Shopee Merchant is an app that allows you to join ShopeePay and ShopeeFood easily in one app. ShopeePay is a digital payment service that lets you accept payments from customers using QR codes or phone numbers. ShopeeFood is a food delivery service that lets you sell your food products to hungry customers in your area.
-
As a merchant, you will get the following benefits from using Shopee Merchant:
-
-
Self-registration: You can sign up as a seller on Shopee without any hassle or fees.
-
Supporting features: You can access various features that help you manage your inventory, orders, payments, promotions, and customer service.
-
Integrated wallet: You can receive and withdraw your earnings directly from your ShopeePay wallet.
-
Self-promo creation: You can create and customize your own promotional materials, such as banners, flyers, and stickers, to attract more customers.
-
Analytics and insights: You can monitor your business performance and get useful tips and suggestions to improve your sales.
-
-
With Shopee Merchant, you can enjoy the convenience and security of selling online with Shopee, the leading e-commerce platform in Southeast Asia and Taiwan.
-
What is an APK file?
-
An APK file is a file format that stands for Android Package Kit. It is used to distribute and install applications on Android devices. An APK file contains all the components of an app, such as the code, resources, assets, certificates, and manifest.
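
Because an APK is packaged as a standard ZIP archive, you can inspect those components yourself from a computer. Here is a minimal Python sketch that lists what a downloaded APK bundles; the filename is a placeholder for whichever APK you have on disk:

```python
import zipfile

# An APK is a ZIP archive; list the files it bundles.
# "shopee_merchant.apk" is a placeholder for any locally downloaded APK.
apk_path = "shopee_merchant.apk"

with zipfile.ZipFile(apk_path) as apk:
    for info in apk.infolist():
        print(f"{info.file_size:>10}  {info.filename}")

    # Entries you should expect to see in any valid APK:
    #   AndroidManifest.xml  - the app manifest (binary XML)
    #   classes.dex          - the compiled app code
    #   resources.arsc       - compiled resources
    #   META-INF/            - the signing certificates
    names = apk.namelist()
    print("Has manifest:", "AndroidManifest.xml" in names)
    print("Is signed:", any(n.startswith("META-INF/") for n in names))
```

Seeing AndroidManifest.xml, classes.dex, and a META-INF/ folder in the listing is a quick sanity check that the file really is an Android app package and not a renamed archive of something else.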
-
How to download apk shopee partner app for android
-Shopee partner apk latest version free download
-Benefits of using shopee partner app for shopeepay and shopeefood merchant
-Shopee partner app review and rating by users
-Tips and tricks to manage your business with shopee partner app
-Shopee partner app download size and compatibility
-How to join shopeepay and shopeefood easily with shopee partner app
-How to track your wallet balance and transaction history with shopee partner app
-How to organize your menu and create promotion with shopee partner app
-How to update your information and menu with shopee partner app
-Shopee partner app vs other apps for online shopping platform merchants
-How to contact shopee customer service through shopee partner app
-How to register and verify your account with shopee partner app
-How to use shopee partner app offline mode
-How to sync your data across devices with shopee partner app
-How to backup and restore your data with shopee partner app
-How to enable notifications and alerts with shopee partner app
-How to customize your settings and preferences with shopee partner app
-How to troubleshoot common issues with shopee partner app
-How to uninstall and reinstall shopee partner app
-How to get the best deals and discounts with shopee partner app
-How to increase your sales and revenue with shopee partner app
-How to attract more customers and reviews with shopee partner app
-How to improve your ranking and visibility with shopee partner app
-How to integrate your social media accounts with shopee partner app
-How to access analytics and reports with shopee partner app
-How to use QR code scanner and generator with shopee partner app
-How to accept multiple payment methods with shopee partner app
-How to manage your inventory and orders with shopee partner app
-How to handle refunds and cancellations with shopee partner app
-How to join the shopee community and network with other merchants with shopee partner app
-How to participate in contests and events with shopee partner app
-How to earn rewards and points with shopee partner app
-How to redeem vouchers and coupons with shopee partner app
-How to share feedback and suggestions with shopee partner app
-Shopee partner apk modded version download link
-Shopee partner apk cracked version download link
-Shopee partner apk premium version download link
-Shopee partner apk pro version download link
-Shopee partner apk hacked version download link
-Shopee partner apk old version download link
-Shopee partner apk beta version download link
-Shopee partner apk original version download link
-Shopee partner apk mirror version download link
-Shopee partner apk alternative version download link
-
An APK file can be opened on Android devices by using a file manager app or a web browser. However, before installing an APK file, you need to enable the option to allow installation of apps from unknown sources in your device settings. This is because APK files are not verified by Google Play, which is the official app store for Android devices.
-
Why download APK Shopee Merchant?
-
Access the latest version of the app
-
One of the reasons why you might want to download APK Shopee Merchant is to access the latest version of the app. Sometimes, the app updates are not available on Google Play due to various reasons, such as compatibility issues, regional restrictions, or technical errors. By downloading the APK file from a reliable source, you can get the most updated version of Shopee Merchant, which may have new features, bug fixes, or performance improvements.
-
Install the app on unsupported devices
-
Another reason why you might want to download APK Shopee Merchant is to install the app on devices that are not supported by Google Play. Some devices may not be compatible with Google Play due to their hardware specifications, software versions, or manufacturer policies. Some devices may also have limited storage space that prevents them from downloading large apps from Google Play. By downloading the APK file from a website, you can install Shopee Merchant on any device that runs on Android OS, as long as it meets the minimum requirements of the app.
-
Avoid regional restrictions
-
A third reason why you might want to download APK Shopee Merchant is to avoid regional restrictions. Some apps may not be available or accessible in certain regions due to legal regulations, licensing agreements, or censorship policies. For example, Shopee Merchant may not be available in some countries where Shopee does not operate or where online selling is prohibited or regulated. By downloading the APK file from a website, you can bypass these restrictions and use Shopee Merchant wherever you are.
-
How to download APK Shopee Merchant?
-
Find a reliable source
-
The first step to download APK Shopee Merchant is to find a reliable source that offers the APK file for download. There are many websites that provide APK files for various apps, but not all of them are trustworthy or safe. Some websites may contain malware, viruses, or fake files that can harm your device or steal your data.
-
To find a reliable source, you should look for the following criteria:
-
-
The website has a good reputation and positive reviews from other users.
-
The website has a secure connection (HTTPS) and a valid certificate.
-
The website provides clear and accurate information about the APK file, such as the name, size, version, developer, and permissions.
-
The website does not require you to register, pay, or complete surveys to download the APK file.
-
The website does not have excessive ads or pop-ups that interfere with your browsing experience.
-
-
One example of a reliable source that offers APK Shopee Merchant for download is [APKPure], which is one of the most popular and trusted websites for downloading APK files.
-
Enable unknown sources
-
The second step to download APK Shopee Merchant is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play. To do this, follow these steps:
-
-
Go to your device settings and tap on Security or Privacy.
-
Find the option that says Unknown sources or Install unknown apps and toggle it on.
-
A warning message will appear asking you to confirm your action. Tap on OK or Allow.
-
-
Note that this option may vary depending on your device model and Android version.
Download and install the file
-
The third step to download APK Shopee Merchant is to download and install the file on your device. To do this, follow these steps:
-
-
Go to the website that offers the APK file for download and tap on the download button or link.
-
A pop-up window will appear asking you to confirm your download. Tap on OK or Download.
-
Wait for the download to complete. You can check the progress on your notification bar or your download folder.
-
Once the download is finished, tap on the APK file to open it. You may need to use a file manager app to locate it on your device.
-
A prompt will appear asking you to install the app. Tap on Install or Next.
-
Wait for the installation to complete. You can check the progress on your screen or your notification bar.
-
Once the installation is finished, tap on Open or Done.
-
-
Congratulations! You have successfully downloaded and installed APK Shopee Merchant on your device. You can now start using the app to manage your business with Shopee.
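
As an alternative to tapping through a file manager, the same sideload can be done from a computer with ADB (Android Debug Bridge). A short Python sketch is below; it assumes the Android platform-tools are installed so the adb command is on your PATH, USB debugging is enabled on the phone, and the APK path is a placeholder:

```python
import subprocess

# Sideload an APK over USB with ADB instead of a file manager on the phone.
# Assumes the "adb" binary (Android platform-tools) is on PATH and USB
# debugging is enabled on the device. The path below is a placeholder.
apk_path = "shopee_merchant.apk"

# Show connected devices first so a failed install is easier to diagnose.
subprocess.run(["adb", "devices"], check=True)

# "-r" replaces the app if an older build is already installed.
result = subprocess.run(
    ["adb", "install", "-r", apk_path],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```

adb typically prints Success when the package installs; any other output usually names the reason the install was rejected.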
-
How to use APK Shopee Merchant?
-
Register as a merchant
-
The first step to use APK Shopee Merchant is to register as a merchant on Shopee. To do this, follow these steps:
-
-
Open the app and tap on Sign Up or Register.
-
Select your country and enter your phone number. Tap on Next or Send OTP.
-
Enter the one-time password (OTP) that you received via SMS. Tap on Next or Verify.
-
Create a password and a username for your account. Tap on Next or Register.
-
Fill in your personal information, such as your name, email address, and date of birth. Tap on Next or Continue.
-
Select the type of business you want to run, such as food, beverage, or others. Tap on Next or Continue.
-
Fill in your business information, such as your business name, address, category, and description. Tap on Next or Continue.
-
Upload your identity document, such as your ID card, passport, or driver's license. Tap on Next or Continue.
-
Upload your business document, such as your business license, tax number, or bank statement. Tap on Next or Continue.
-
Review and confirm your information and documents. Tap on Submit or Finish.
-
-
Your registration is now complete. You will receive a confirmation message from Shopee within 24 hours. Once your account is verified, you can start selling on ShopeePay and ShopeeFood.
-
Manage your business
-
The second step to use APK Shopee Merchant is to manage your business using the app. To do this, you can access various features and functions that help you with the following tasks:
-
-
| Task | Feature | Description |
| --- | --- | --- |
| Create and edit your menu | Menu | You can add, edit, delete, or arrange your products in different categories and subcategories. You can also set the prices, discounts, stock availability, and delivery options for each product. |
| Track your orders and payments | Orders | You can view, accept, reject, or cancel your orders from customers. You can also update the status of your orders, such as preparing, ready, or delivered. You can also view the payment details and history of each order. |
| Promote your products | Promotions | You can create and manage various types of promotions for your products, such as vouchers, flash sales, free shipping, or bundle deals. You can also set the duration, budget, and target audience for each promotion. |
| Communicate with customers | Chat | You can chat with your customers directly from the app. You can send and receive text messages, images, videos, voice notes, or stickers. You can also use quick replies or templates to answer common questions or requests. |
-
-
With these features, you can manage your business more efficiently and effectively with Shopee Merchant.
-
Grow your sales
-
The third step to use APK Shopee Merchant is to grow your sales using the app. To do this, you can access various features and benefits that help you with the following goals:
-
-
| Goal | Feature | Benefit |
| --- | --- | --- |
| Increase your visibility | Self-promo creation | You can create and customize your own promotional materials, such as banners, flyers, and stickers, to attract more customers. You can also print or share them on social media platforms. |
| Improve your reputation | Ratings and reviews | You can collect and display ratings and reviews from your customers on your menu page. You can also respond to them and thank them for their feedback. This can help you build trust and loyalty among your customers. |
| Expand your market | Regional expansion | You can expand your market to other regions where Shopee operates, such as Malaysia, Singapore, Thailand, Vietnam, Philippines, or Taiwan. You can also adjust your menu and prices according to the local preferences and demand. |
| Optimize your performance | Analytics and insights | You can monitor your business performance and get useful tips and suggestions to improve your sales. You can also access various reports and statistics, such as sales volume, revenue, customer behavior, and market trends. |
-
-
With these features and benefits, you can grow your sales and customer satisfaction with Shopee Merchant.
-
Conclusion
-
In conclusion, downloading APK Shopee Merchant is a smart and convenient way to manage your business with Shopee on your Android device. You can access the latest version of the app, install it on unsupported devices, and avoid regional restrictions. You can also register as a merchant, manage your business, and grow your sales using various features and benefits that Shopee Merchant offers. If you are an online seller who wants to join ShopeePay and ShopeeFood easily in one app, you should download APK Shopee Merchant today and start selling more with Shopee.
-
FAQs
-
Here are some frequently asked questions that you might have about downloading APK Shopee Merchant:
-
-
Is it safe to download APK files from unknown sources?
-
It depends on the source that you download the APK file from. Some sources may be reliable and safe, while others may be malicious or fraudulent. To ensure your safety, you should only download APK files from reputable and trusted websites, such as [APKPure]. You should also scan the APK file with an antivirus app before installing it on your device.
-
How can I update my APK Shopee Merchant app?
-
You can update your APK Shopee Merchant app by downloading the latest version of the APK file from the same source that you downloaded it from. You can also check for updates within the app by tapping on the menu icon and selecting Settings > About > Check for updates.
-
What if I encounter problems or errors while using the app?
-
If you encounter any problems or errors while using the app, you can try the following solutions:
-
-
Clear the cache and data of the app by going to your device settings > Apps > Shopee Merchant > Storage > Clear cache / Clear data.
-
Uninstall and reinstall the app by deleting the APK file from your device and downloading it again from the website.
-
Contact Shopee for support or feedback by tapping on the menu icon and selecting Help Center > Contact Us.
-
-
Can I use APK Shopee Merchant on other operating systems besides Android?
-
No, you cannot use APK Shopee Merchant on other operating systems besides Android. APK files are only compatible with Android devices. If you want to use Shopee Merchant on other devices, such as iOS or Windows, you will need to download the app from their respective app stores or use the web version of Shopee Merchant.
-
How can I contact Shopee for support or feedback?
-
You can contact Shopee for support or feedback by tapping on the menu icon and selecting Help Center > Contact Us. You can also email them at [merchant.support@shopee.com] or call them at [1500 407]. They are available 24/7 to assist you with any issues or inquiries that you may have.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Kiss of War Mod Apk A Unique Strategy Game with Historical Maps and Weapons.md b/spaces/congsaPfin/Manga-OCR/logs/Kiss of War Mod Apk A Unique Strategy Game with Historical Maps and Weapons.md
deleted file mode 100644
index 4a51f1d9ad54dda1bade088bf392bf41e1aaf235..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Kiss of War Mod Apk A Unique Strategy Game with Historical Maps and Weapons.md
+++ /dev/null
@@ -1,105 +0,0 @@
-
-
Kiss of War Hacked APK: What Is It and How to Get It?
-
Kiss of War is a war strategy game set in the late modern period. It tells a story about a group of charming women with different pasts fighting against the invaders with allies. You will play as a commander in the game, train powerful troops, recruit beautiful agents as officers, rally other commanders in the world, and conquer this land.
-
The game features vivid war scenes based on actual geography of Europe, real-time multiplayer combat, multiple countries to select, and a wide selection of weapons and vehicles. The game has received positive reviews from millions of players who enjoy its immersive gameplay and stunning graphics.
However, some players may find it hard to progress in the game due to its challenging levels and limited resources. That's why some players resort to using a hacked APK to get an edge over their opponents. A hacked APK is a modified version of the original game that gives you access to unlimited resources, unlocked officers, enhanced combat, and other features that are not available in the official version.
-
Using a hacked APK can be tempting, but it also comes with some risks. You may encounter malware, viruses, or spyware that can harm your device or steal your personal information. You may also face legal issues or bans from the game developers if they detect your cheating activities. Therefore, you need to be careful and responsible when using a hacked APK.
-
Features of Kiss of War Hacked APK
-
Unlimited Resources
-
One of the main features of Kiss of War hacked APK is that it gives you unlimited resources such as money, gold, food, steel, oil, energy, and more. These resources are essential for building and upgrading your base, training and producing your troops, researching new technologies, and recruiting new officers.
-
With unlimited resources, you don't have to worry about running out of them or spending real money to buy them. You can easily max out your buildings, troops, officers, and researches without any limitations. You can also use them to buy items from the shop or participate in events.
-
Unlocked Officers
-
Another feature of Kiss of War hacked APK is that it unlocks all the officers in the game. Officers are characters that lead your army and boost your power. They have different skills, talents, traits, and stories that make them unique and charming.
-
The game has three types of officers: legendary, epic, and elite. Legendary officers are the most powerful and rare ones that can only be obtained from elite recruitment events or spending a lot of gold. Epic officers are the second most powerful and rare ones that can be obtained from normal recruitment events or spending some gold. Elite officers are the least powerful and common ones that can be obtained from free recruitment events or spending a little gold.
-
With unlocked officers, you don't have to wait for the events or spend any gold to get them. You can have all the officers in the game at your disposal and use them to form your dream team. You can also level up, upgrade, and customize your officers without any restrictions.
-
Enhanced Combat
-
The third feature of Kiss of War hacked APK is that it enhances your combat performance and experience in the game. The game offers various modes of combat such as campaign, arena, alliance war, world war, and more. You will face different enemies and challenges in each mode and need to use your skills and strategies to win.
-
kiss of war mod apk latest version 2022
-kiss of war hack mod apk unlimited money and gold
-kiss of war mod apk download for android
-kiss of war cheat mod apk free download
-kiss of war modded apk with female army
-kiss of war hacked version apk online
-kiss of war mod apk no root required
-kiss of war hack apk full unlocked
-kiss of war mod apk offline mode
-kiss of war cheat apk with unlimited resources
-kiss of war modded version apk 2023 update
-kiss of war hack apk for ios devices
-kiss of war mod apk with fascinated battles
-kiss of war cheat mod apk with sophisticated weapons
-kiss of war modded apk with realistic graphics
-kiss of war hacked apk with strategic depth
-kiss of war mod apk with 18th century Europe map
-kiss of war hack mod apk with powerful commanders
-kiss of war mod apk free shopping and upgrades
-kiss of war cheat apk with easy controls
-kiss of war modded version apk with multiplayer mode
-kiss of war hacked version apk with unlimited gems and coins
-kiss of war mod apk with original soundtracks and effects
-kiss of war hack apk with anti-ban feature
-kiss of war mod apk with daily rewards and missions
-kiss of war cheat mod apk with fast loading and installation
-kiss of war modded apk with no ads and pop-ups
-kiss of war hacked apk with auto-update feature
-kiss of war mod apk with customisable settings and options
-kiss of war hack mod apk with smooth gameplay and performance
-kiss of war mod apk with different languages support
-kiss of war cheat apk with social media integration
-kiss of war modded version apk with leaderboards and achievements
-kiss of war hacked version apk with unlimited energy and stamina
-kiss of war mod apk with various game modes and levels
-kiss of war hack apk with VIP features and benefits
-kiss of war mod apk with friendly user interface and design
-kiss of war cheat mod apk with high compatibility and security
-kiss of war modded apk with regular bug fixes and improvements
-kiss of war hacked apk with awesome reviews and ratings
-
With enhanced combat, you can get better control over your troops and strategies in the game. You can adjust the speed, formation, direction, and target of your troops according to the situation. You can also use special skills and items to boost your power and damage. You can win battles against real players and invaders with ease and earn rewards and glory.
-
How to Download and Install Kiss of War Hacked APK
-
Requirements
-
Before you download and install Kiss of War hacked APK, you need to make sure that your device meets the minimum system requirements for running the hacked APK. These are:
-
-
Android 4.4 or higher
-
At least 2 GB of RAM
-
At least 1 GB of free storage space
-
A stable internet connection
-
-
You also need to enable some permissions and settings on your device to allow the installation of the hacked APK. These are:
-
-
Allow installation from unknown sources
-
Disable antivirus or firewall software
-
Backup your original game data
-
-
Steps
-
After you have checked the requirements and enabled the permissions and settings, you can follow these steps to download and install Kiss of War hacked APK on your device:
-
-
Find a reliable source for downloading the hacked APK. You can search online for websites or forums that offer the latest version of Kiss of War hacked APK. Make sure that the source is trustworthy and has positive feedback from other users.
-
Verify and install the hacked APK on your device. After you have downloaded the hacked APK file, you need to scan it with a security tool to make sure that it is safe and virus-free. Then, you need to tap on the file and follow the instructions to install it on your device.
-
Launch and enjoy the hacked APK on your device. After you have installed the hacked APK on your device, you need to launch it from your app drawer or home screen. You will see a new icon with a different name than the original game. Tap on it and enjoy the features of Kiss of War hacked APK.
-
-
Conclusion
-
Kiss of War is a war strategy game that lets you play as a commander in a world full of charming women fighting against invaders. The game offers various features such as vivid war scenes, real-time multiplayer combat, multiple countries to select, and a wide selection of weapons and vehicles.
-
If you want to get an edge over your opponents in the game, you can use a hacked APK to get unlimited resources, unlocked officers, enhanced combat, and other features that are not available in the official version. However, you need to be careful and responsible when using a hacked APK as it may come with some risks such as malware, viruses, spyware, legal issues, or bans.
-
If you are interested in trying out Kiss of War hacked APK, you can follow our guide on how to download and install it on your device. Make sure that you meet the requirements and enable the permissions and settings before installing it. Also, make sure that you find a reliable source for downloading it and verify it before installing it.
-
We hope that this article has helped you understand what Kiss of War hacked APK is and how to get it. If you have any questions or feedback, please feel free to leave a comment below.
-
FAQs
-
-
Q: Is Kiss of War hacked APK safe to use?
-
A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that gives you access to features that are not available in the official version. Therefore, it may not be safe to use as it may contain malware, viruses, spyware, or other harmful programs that can damage your device or steal your personal information. Therefore, you need to be careful and responsible when using a hacked APK and only download it from a reliable source and scan it with a security tool before installing it.
-
Q: Is Kiss of War hacked APK legal to use?
-
A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that violates the terms of service and the intellectual property rights of the game. Therefore, it may not be legal to use as it may infringe the rights of the game developers or publishers and expose you to legal issues or bans. Therefore, you need to be careful and responsible when using a hacked APK and only use it for personal and educational purposes and not for commercial or malicious purposes.
-
Q: Is Kiss of War hacked APK compatible with all devices?
-
A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that may not be compatible with all devices or versions of the game. Therefore, it may not work properly or cause errors or crashes on some devices or versions of the game. Therefore, you need to be careful and responsible when using a hacked APK and only use it on devices that meet the minimum system requirements and have the latest version of the game installed.
-
Q: How to update Kiss of War hacked APK?
-
A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that may not receive regular updates or support from the game developers or publishers. Therefore, it may not have the latest features or fixes that are available in the official version of the game. Therefore, you need to be careful and responsible when using a hacked APK and only download it from a reliable source that provides the latest version of the hacked APK.
-
Q: How to uninstall Kiss of War hacked APK?
-
A: Kiss of War hacked APK is not an official product of the game developers or publishers. It is a modified version of the original game that may not be easy to uninstall from your device. Therefore, you need to be careful and responsible when using a hacked APK and only install it on devices that you can easily restore or reset if needed. To uninstall Kiss of War hacked APK from your device, you can follow these steps:
-
-
Go to your device settings and find the app manager or applications option.
-
Find and select Kiss of War hacked APK from the list of apps installed on your device.
-
Tap on the uninstall button and confirm your action.
-
Wait for the app to be removed from your device.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Los Angeles Crimes 1.6 APK Unlimited Ammo Realistic Graphics and More.md b/spaces/congsaPfin/Manga-OCR/logs/Los Angeles Crimes 1.6 APK Unlimited Ammo Realistic Graphics and More.md
deleted file mode 100644
index b44779d1beae9dc7f4c6a41969dea0555bd25bb6..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Los Angeles Crimes 1.6 APK Unlimited Ammo Realistic Graphics and More.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-
Los Angeles Crimes 1.6 APK: A Sandbox Game for Android
-
If you are looking for a fun and realistic sandbox game for your Android device, you might want to check out Los Angeles Crimes 1.6 APK. This is a fan-made game inspired by the GTA series, where you can roam freely in a huge city, engage in various activities, and interact with other players online. In this article, we will tell you everything you need to know about Los Angeles Crimes, including its features, how to download and install it, why you should play it, and some tips and tricks to help you enjoy it more.
Los Angeles Crimes is an action game developed by Mohammad Alizadeh, an independent developer from Iran. It is not affiliated with Rockstar Games or GTA in any way, but it does share some similarities with them. Los Angeles Crimes is a sandbox game, which means that you can do whatever you want in the game world, without any limitations or objectives. You can drive cars, bikes, boats, helicopters, and planes, shoot guns, fight with fists or melee weapons, play soccer or car race, join team deathmatch or zombie survival modes, or just explore the city and its surroundings.
-
Features of Los Angeles Crimes
-
Los Angeles Crimes has many features that make it an enjoyable and immersive game for Android users. Some of these features are:
-
-
Free roam mode: You can wander around the city and its outskirts, which are based on real locations in Los Angeles. You can find various landmarks, such as the Hollywood sign, the Santa Monica pier, the Griffith Observatory, and more. You can also enter some buildings, such as shops, hotels, restaurants, and apartments.
-
Multiplayer mode: You can create or join online games with up to 10 players per server. You can chat with other players using text or voice messages. You can also choose from different game modes, such as team deathmatch, zombie survival, car race, soccer, or free roam.
-
Active ragdoll and realistic physics: The game uses a physics engine that simulates the movements and interactions of objects and characters in a realistic way. You can see the effects of gravity, collisions, explosions, bullet impacts, and more. The game also uses an active ragdoll system that makes the characters react to forces and injuries in a natural way.
-
LAN support: You can play with your friends on a local network without using the internet. You just need to connect your devices to the same Wi-Fi network and create or join a LAN game.
-
PS4 controller support: You can use a PS4 controller to play the game via Bluetooth. You can customize the controller settings in the options menu.
-
-
How to download and install Los Angeles Crimes 1.6 APK
-
To download and install Los Angeles Crimes 1.6 APK on your Android device, you need to follow these steps:
-
-
Go to APKCombo, FileHippo, or any other trusted website that offers the latest version of Los Angeles Crimes APK.
-
Download the APK file (416 MB) to your device.
-
Enable the installation of apps from unknown sources in your device settings.
-
Locate the downloaded APK file in your file manager and tap on it to install it.
-
Wait for the installation process to finish and launch the game from your app drawer.
-
-
Why play Los Angeles Crimes?
-
Los Angeles Crimes is a game that offers you a lot of freedom and fun. You can play it for various reasons, such as:
-
los angeles crimes mod apk unlimited ammo
-los angeles crimes online multiplayer apk
-los angeles crimes android game download
-los angeles crimes gta 5 mod apk
-los angeles crimes apk obb latest version
-los angeles crimes ragdoll physics apk
-los angeles crimes open world game apk
-los angeles crimes offline mode apk
-los angeles crimes apk pure download
-los angeles crimes cheats and hacks apk
-los angeles crimes free roam apk
-los angeles crimes zombie mode apk
-los angeles crimes realistic graphics apk
-los angeles crimes beta 1.6 apk
-los angeles crimes full version apk
-los angeles crimes voice chat apk
-los angeles crimes mod menu apk
-los angeles crimes car mods apk
-los angeles crimes fps mode apk
-los angeles crimes new update 1.6 apk
-los angeles crimes best settings apk
-los angeles crimes map editor apk
-los angeles crimes how to install apk
-los angeles crimes system requirements apk
-los angeles crimes gameplay review apk
-los angeles crimes tips and tricks apk
-los angeles crimes skins and outfits apk
-los angeles crimes weapons and vehicles apk
-los angeles crimes missions and quests apk
-los angeles crimes bugs and glitches apk
-los angeles crimes fan made videos apk
-los angeles crimes discord server apk
-los angeles crimes developer contact apk
-los angeles crimes rating and feedback apk
-los angeles crimes alternatives and similar games apk
-los angeles crimes download for pc windows 10 apk
-los angeles crimes emulator for mac os apk
-los angeles crimes compatible devices list apk
-los angeles crimes file size and storage space apk
-los angeles crimes safe and secure download link apk
-
Explore a vast open world
-
One of the main attractions of Los Angeles Crimes is its open world, which is based on real locations in Los Angeles. You can explore the city and its outskirts, which are full of details and surprises. You can find different types of vehicles, such as cars, bikes, boats, helicopters, and planes, and drive them wherever you want. You can also discover hidden places, such as underground tunnels, rooftops, or secret bases. You can also interact with various objects and NPCs, such as vending machines, ATMs, pedestrians, animals, and more.
-
Create and join multiplayer games
-
Another reason to play Los Angeles Crimes is its multiplayer mode, which allows you to play with other players online. You can create or join online games with up to 10 players per server. You can chat with other players using text or voice messages. You can also choose from different game modes, such as team deathmatch, zombie survival, car race, soccer, or free roam. You can cooperate or compete with other players in these modes, and have fun together.
-
Customize your character and vehicles
-
A third reason to play Los Angeles Crimes is its customization options. You can customize your character's appearance, clothes, accessories, and weapons. You can also customize your vehicles' color, wheels, engine, suspension, and more. You can save your customizations and use them in any game mode. You can also share your customizations with other players online.
-
Tips and tricks for Los Angeles Crimes
-
If you want to enjoy Los Angeles Crimes more, you might want to follow these tips and tricks:
-
Use the map and radar to navigate
-
The game has a map and a radar that show you the layout of the city and the locations of important things. You can use the map to see the whole city and zoom in or out. You can also set waypoints on the map to guide you to your destination. You can use the radar to see the nearby vehicles, players, weapons, crates, police, gangs, and more. You can also see the health and armor bars of yourself and other players on the radar.
-
Collect weapons and ammo from crates
-
The game has various weapons that you can use to fight or defend yourself. You can find weapons and ammo in crates that are scattered around the city. You can also buy weapons from shops or get them from other players. You can carry up to four weapons at a time: one melee weapon, one handgun, one shotgun or rifle, and one heavy weapon or explosive. You can switch between your weapons using the weapon wheel.
-
Avoid the police and gangs
-
The game has police and gangs that will chase you if you commit crimes or enter their territory. The police will try to arrest you or shoot you if you resist. The gangs will try to kill you or rob you if you cross them. You can see the police's wanted level and the gangs' hostility level on the radar. You can lower these levels by hiding or escaping from them. You can also fight back if you have enough weapons and ammo.
-
Conclusion
-
Los Angeles Crimes is a sandbox game for Android that lets you do whatever you want in a huge city inspired by GTA. You can explore the city and its surroundings, create or join multiplayer games online or offline, customize your character and vehicles, and have fun with realistic physics and ragdoll effects. If you are looking for a fun and realistic sandbox game for your Android device, you might want to download Los Angeles Crimes 1.6 APK from APKCombo, FileHippo, or any other trusted website that offers it.
-
Summary of the main points
-
-
Los Angeles Crimes is a fan-made sandbox game inspired by GTA for Android devices.
-
You can do whatever you want in the game world without any limitations or objectives.
-
You can explore a vast open world based on real locations in Los Angeles.
-
You can create or join multiplayer games with up to 10 players per server online or offline.
-
You can customize your character's appearance, clothes, accessories, and weapons.
-
You can customize your vehicles' color, wheels, engine, suspension, and more.
-
You can enjoy realistic physics and ragdoll effects in the game.
-
You can download Los Angeles Crimes 1.6 APK from APKCombo, FileHippo, or another trusted website that offers it.
-
How can I contact the developer of Los Angeles Crimes?
-
You can contact the developer by email at [alizadeh.mohammad@gmail.com](mailto:alizadeh.mohammad@gmail.com). You can also follow him on his Instagram account, @mohammad_alizadeh_, or visit his website, lac-game.com, where you can find more information about the game, such as the changelog, the FAQ, the forum, and more.
-
How can I support the development of Los Angeles Crimes?
-
You can support the development of Los Angeles Crimes by donating to the developer via PayPal or Patreon. You can find the links to these platforms on his website: lac-game.com. You can also support him by rating and reviewing the game on the website that you downloaded it from, and by sharing it with your friends and family.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Raees Full Movie Download in HD 720p Shah Rukh Khans Best Performance as a Crime Lord.md b/spaces/congsaPfin/Manga-OCR/logs/Raees Full Movie Download in HD 720p Shah Rukh Khans Best Performance as a Crime Lord.md
deleted file mode 100644
index b45b9d4bf30273f8d3509d0e6515804c7b47f03e..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Raees Full Movie Download in HD 720p Shah Rukh Khans Best Performance as a Crime Lord.md
+++ /dev/null
@@ -1,87 +0,0 @@
-
-
Raees Full Movie HD Download 2017 YouTube: How to Watch the Bollywood Blockbuster Online
-
Raees is a 2017 Bollywood crime drama film starring Shah Rukh Khan, Mahira Khan and Nawazuddin Siddiqui. The film is directed by Rahul Dholakia and produced by Red Chillies Entertainment and Excel Entertainment. The film is based on the life of Abdul Latif, a notorious bootlegger and gangster who operated in Gujarat in the 1980s and 1990s.
-
The film was released on 25 January 2017 and received positive reviews from critics and audiences alike. The film was praised for its performances, direction, music, action and cinematography. The film also faced some controversies due to its portrayal of a Muslim protagonist and its release date clashing with another film, Kaabil.
The film was a commercial success and became one of the highest-grossing Bollywood films of 2017. The film also won several awards and nominations, including five Filmfare Awards.
-
How to Watch Raees Full Movie HD Online
-
If you are looking for ways to watch Raees full movie HD online, you have several options to choose from. Here are some of the best ways to watch the film legally and safely:
-
Netflix
-
Netflix is one of the most popular streaming platforms in the world and it has Raees available for its subscribers. You can watch Raees full movie HD on Netflix with a monthly subscription fee of $8.99 for the basic plan, $13.99 for the standard plan or $17.99 for the premium plan. You can also get a free trial for 30 days if you are a new user. Netflix also allows you to download the film for offline viewing on your devices.
-
YouTube
-
YouTube is another popular platform where you can watch Raees full movie HD online. You can rent or buy the film on YouTube for $3.99 or $12.99 respectively. You can also watch the film for free with ads if it is available in your region, or ad-free with a YouTube Premium subscription, which costs $11.99 per month or $6.99 per month for students. YouTube also allows you to download the film for offline viewing on your devices.
-
Amazon Prime Video
-
Amazon Prime Video is another option where you can watch Raees full movie HD online. You can rent or buy the film on Amazon Prime Video for $3.99 or $12.99 respectively. You can also watch the film for free if you have an Amazon Prime membership, which costs $12.99 per month or $119 per year. Amazon Prime Video also allows you to download the film for offline viewing on your devices.
-
Raees Full Movie HD Download 2017 YouTube: Pros and Cons
-
Watching Raees full movie HD online has its pros and cons. Here are some of them:
-
raees 2017 full movie watch online hd free
-raees shahrukh khan full movie download hd 720p
-raees full movie hd 1080p download filmywap
-raees full movie online with english subtitles
-raees full movie hd download utorrent
-raees full movie netflix watch online free
-raees full movie hindi 2017 youtube hd
-raees full movie download mp4 hd 480p
-raees full movie hd download pagalworld
-raees full movie online dailymotion hd
-raees full movie download filmyzilla hd 720p
-raees full movie watch online hotstar hd
-raees full movie download in hindi hd 1080p
-raees full movie online streaming hd free
-raees full movie download worldfree4u hd
-raees full movie youtube video hd 2017
-raees full movie download moviespyhd net
-raees full movie online amazon prime video hd
-raees full movie download khatrimaza hd 720p
-raees full movie watch online zee5 hd free
-raees full movie download bolly4u hd 480p
-raees full movie online mx player hd free
-raees full movie download skymovies hd 1080p
-raees full movie watch online sonyliv hd free
-raees full movie download coolmoviez hd 720p
-raees full movie online eros now hd free
-raees full movie download movierulz hd 480p
-raees full movie watch online voot hd free
-raees full movie download tamilrockers hd 1080p
-raees full movie online jiocinema hd free
-raees full movie download rdxhd hd 720p
-raees full movie watch online airtel xstream hd free
-raees full movie download okjatt hd 480p
-raees full movie watch online viu hd free
-raees full movie download filmyhit hd 1080p
-raees full movie online hungama play hd free
-raees full movie download mp4moviez hd 720p
-raees full movie watch online altbalaji hd free
-raees full movie download extramovies hd 480p
-raees full movie watch online youtube premium hd free
-
Pros
-
-
You can watch the film anytime and anywhere with an internet connection.
-
You can enjoy the film in high quality and with subtitles.
-
You can save money and time by not going to the theater.
-
You can avoid piracy and support the filmmakers.
-
-
Cons
-
-
You may experience buffering or lagging issues depending on your internet speed and device.
-
You may miss out on the theater experience and the social aspect of watching a film with others.
-
You may face geo-restrictions or unavailability of the film on some platforms in some regions.
-
You may need to pay extra fees or subscriptions to access some platforms.
-
-
Conclusion
-
Raees is a Bollywood crime drama film that tells the story of a bootlegger who rises to become a powerful figure in Gujarat. The film stars Shah Rukh Khan, Mahira Khan and Nawazuddin Siddiqui in lead roles and is directed by Rahul Dholakia. The film was released in 2017 and became a hit among critics and audiences alike.
-
If you want to watch Raees full movie HD online, you have several options to choose from, such as Netflix, YouTube and Amazon Prime Video. Each option has its pros and cons, so you should weigh them carefully before deciding which one suits you best. You should also avoid illegal downloads and torrents, as they are harmful to your device and the film industry. You should always watch the film legally and safely on the platforms that have the rights to stream it.
-
FAQs
-
Here are some frequently asked questions about Raees full movie HD download 2017 YouTube:
-
Q: Is Raees based on a true story?
-
A: Raees is loosely based on the life of Abdul Latif, a notorious bootlegger and gangster who operated in Gujarat in the 1980s and 1990s. However, the filmmakers have denied any direct connection and have stated that the film is a fictional story.
-
Q: Who is the female lead in Raees?
-
A: The female lead in Raees is Mahira Khan, a Pakistani actress who made her Bollywood debut with this film. She plays the role of Aasiya, the wife of Raees.
-
Q: What is the meaning of Raees?
-
A: Raees is an Arabic word that means "rich" or "wealthy". It is also a common name for boys in Muslim communities. In the film, Raees is the name of the protagonist, who is a wealthy and influential bootlegger.
-
Q: What is the box office collection of Raees?
-
A: Raees was a commercial success and grossed over ₹308 crore worldwide. It became the highest-grossing Bollywood film of 2017 until it was surpassed by Baahubali 2: The Conclusion.
-
Q: Where can I find the songs of Raees?
-
A: The songs of Raees are composed by Ram Sampath, JAM8 and Kalyanji-Anandji. The lyrics are written by Javed Akhtar, Amitabh Bhattacharya, Mayur Puri and others. The songs are available on various music platforms such as Spotify, Gaana, JioSaavn, Wynk and YouTube Music.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Secrets Behind Love Catcher in Bali How to Download and Enjoy the Show.md b/spaces/congsaPfin/Manga-OCR/logs/The Secrets Behind Love Catcher in Bali How to Download and Enjoy the Show.md
deleted file mode 100644
index b3c013b4ff25fefb227f595ac00139f3ca34ed4c..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/The Secrets Behind Love Catcher in Bali How to Download and Enjoy the Show.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
Download Love Catcher in Bali
-
If you are looking for a dating show that will keep you on the edge of your seat, then you should check out Love Catcher in Bali. This is the fourth season of the popular Korean reality show Love Catcher, where participants have to choose between love or money. In this article, we will tell you what Love Catcher in Bali is all about, why you should watch it, and how to download it.
-
What is Love Catcher in Bali?
-
Love Catcher in Bali is a dating show that takes place on the beautiful island of Bali, Indonesia. The show features eight regular members who live together for two weeks and try to find their true love. However, there is a twist. Among the eight members, there are four love catchers and four money catchers. The love catchers are looking for genuine romance, while the money catchers are looking for a cash prize of 50 million won (about $42,000). The catch is that no one knows who is who.
-
The show follows the members as they go on dates, play games, and face various missions and tests. At the end of each episode, they have to vote for their partner. If both partners are love catchers, they can leave together as a couple. If one partner is a love catcher and the other is a money catcher, the money catcher can take all the money and leave alone. If both partners are money catchers, they get nothing and have to stay until the end.
-
The show is hosted by five celebrities who provide commentary and analysis on the members' actions and interactions. They are comedienne Jang Do Yeon, actor Joo Woo Jae, TV personality Jun Hyun Moo, singer Gabee, and idol Kim Yo Han. They also interact with the members through video calls and surprise visits.
-
How to download love catcher in bali episodes
-Watch love catcher in bali online free
-Love catcher in bali eng sub download
-Love catcher in bali season 4 recap
-Love catcher in bali cast and crew
-Love catcher in bali ratings and reviews
-Love catcher in bali full episodes download
-Love catcher in bali episode 1 eng sub
-Love catcher in bali finale spoilers
-Love catcher in bali behind the scenes
-Download love catcher in bali with subtitles
-Love catcher in bali best moments and clips
-Love catcher in bali theme song download
-Love catcher in bali couples update
-Love catcher in bali filming location
-Download love catcher in bali on TVING
-Love catcher in bali episode guide and summary
-Love catcher in bali netflix release date
-Love catcher in bali trailer and teaser
-Love catcher in bali reunion show download
-Download love catcher in bali season 1 2 3
-Love catcher in bali korean dating show
-Love catcher in bali money or love game
-Love catcher in bali kim yo han and lee yu jeong
-Download love catcher in bali hd quality
-Love catcher in bali fan theories and predictions
-Love catcher in bali ost download mp3
-Love catcher in bali merchandise and products
-Download love catcher in bali on android or ios
-Love catcher in bali memes and jokes
-
Why you should watch Love Catcher in Bali?
-
There are many reasons why you should watch Love Catcher in Bali, but here are some of the most compelling ones:
-
It's a thrilling psychological love game
-
Love Catcher in Bali is not your typical dating show. It's a game of deception, manipulation, and strategy. The members have to hide their true identities and intentions, while trying to figure out who is who. They have to balance their feelings and their goals, while facing the risk of betrayal and disappointment. The show keeps you guessing who is a love catcher and who is a money catcher, and who will end up with whom. You will find yourself rooting for some couples, while suspecting others.
-
It's set in a beautiful island
-
Love Catcher in Bali is also a visual treat. The show takes advantage of the stunning scenery and attractions of Bali, one of the most popular tourist destinations in the world. The members get to enjoy the sun, the sea, the sand, and the culture of the island. They visit various places, such as temples, beaches, waterfalls, markets, and restaurants. They also experience various activities, such as surfing, yoga, cooking, and dancing. The show makes you want to pack your bags and go to Bali yourself.
-
It's full of romance and drama
-
Of course, Love Catcher in Bali is also a show about love. The show features eight attractive and charming members who have different personalities and backgrounds. They form various couples, some of which are sweet and adorable, while others are fiery and intense. They have chemistry and sparks, but they also have conflicts and misunderstandings. They make you laugh, cry, swoon, and scream. The show will make you feel all kinds of emotions.
-
How to download Love Catcher in Bali?
-
If you are interested in watching Love Catcher in Bali, you might be wondering how to download it. Well, there are several options that you can choose from, depending on your preference and convenience. Here are some of them:
-
Option 1: TVING
-
The easiest way to watch and download Love Catcher in Bali is to use TVING, a Korean streaming service that offers various content, including dramas, movies, variety shows, sports, and more. TVING is the official platform that broadcasts Love Catcher in Bali, so you can watch it with high quality and subtitles. You can also download the episodes to watch offline later.
-
To use TVING, you need to create an account and subscribe to a plan. There are different plans that you can choose from, depending on your budget and needs. The cheapest plan costs 7,900 won (about $6.60) per month. You can pay with your credit card or PayPal.
-
To watch and download Love Catcher in Bali on TVING, follow these steps:
-
-
Go to the TVING website or download the TVING app on your device.
-
Login with your account.
-
Search for Love Catcher in Bali or go to this link: [text].
-
Select the episode that you want to watch or download.
-
If you want to watch it online, click on the play button.
-
If you want to download it offline, click on the download button.
-
Enjoy watching Love Catcher in Bali.
-
-
Option 2: Dailymotion
-
An alternative way to watch and download Love Catcher in Bali is to use Dailymotion, a video-sharing platform that hosts various content from around the world. Dailymotion allows users to upload and share videos for free. You can find Love Catcher in Bali episodes on Dailymotion uploaded by other users who have recorded them from TV or other sources.
-
To use Dailymotion, you don't need to create an account or pay anything. However, you might encounter some drawbacks, such as low quality, ads, incomplete episodes, or broken links. You also need to be careful about spoilers or fake videos.
-
To watch and download Love Catcher in Bali on Dailymotion, follow these steps:
-
-
Go to the Dailymotion website or download the Dailymotion app on your device.
-
Search for Love Catcher in Bali or go to this link: [text].
-
Select the episode that you want to watch or download.
-
If you want to watch it online, click on the play button.
-
If you want to download it offline, click on the three dots icon and select "Download".
-
Enjoy watching Love Catcher in Bali.
-
-
Option 3: MyDramaList
-
A third way to watch and download Love Catcher in Bali is to use MyDramaList, a website for Asian drama fans. MyDramaList provides information, reviews, ratings, recommendations, and discussions about various Asian dramas, movies, and shows. You can also find links to watch and download them from different sources.
-
To use MyDramaList, you need to create an account and join the community. You can also create your own watchlist, rate and review the shows that you have watched, and interact with other fans. You can access MyDramaList for free, but you can also support the site by becoming a premium member or donating.
-
To watch and download Love Catcher in Bali on MyDramaList, follow these steps:
-
-
Go to the MyDramaList website or download the MyDramaList app on your device.
-
Login with your account.
-
Search for Love Catcher in Bali or go to this link: [text].
-
Select the episode that you want to watch or download.
-
Scroll down to the "Watch Online" section and choose a source that suits you. Some of the sources are TVING, Dailymotion, YouTube, Viki, etc.
-
If you want to watch it online, click on the link and follow the instructions on the source website or app.
-
If you want to download it offline, look for a download option on the source website or app. If there is no download option, you can use a third-party tool or extension to download the video.
-
Enjoy watching Love Catcher in Bali.
-
-
Conclusion
-
Love Catcher in Bali is a dating show that will keep you entertained and intrigued. It's a psychological love game that tests the members' honesty and sincerity. It's set in a beautiful island that showcases the beauty and culture of Bali. It's full of romance and drama that will make you feel all kinds of emotions. If you are looking for a show that will make your heart race and your mind wonder, then you should watch Love Catcher in Bali.
-
You can watch and download Love Catcher in Bali from various options, such as TVING, Dailymotion, or MyDramaList. Choose the option that works best for you and enjoy watching this thrilling show. You won't regret it!
-
FAQs
-
Q1: When did Love Catcher in Bali air?
-
A1: Love Catcher in Bali aired from November 18, 2022 to January 6, 2023.
-
Q2: How many episodes are there in Love Catcher in Bali?
-
A2: There are eight episodes in Love Catcher in Bali.
-
Q3: Who are the hosts of Love Catcher in Bali?
-
A3: The hosts of Love Catcher in Bali are Jang Do Yeon, Joo Woo Jae, Jun Hyun Moo, Gabee, and Kim Yo Han.
-
Q4: Who are the regular members of Love Catcher in Bali?
-
A4: The regular members of Love Catcher in Bali are Lee Yu Jeong, Choi Tae Eun, Hong Seung Yeon, Kim Su Ji, Lee Seung Hyun, Park Ji Won, Kim Min Jae, and Lee Ji Eun.
-
Q5: Who won Love Catcher in Bali?
-
A5: Spoiler alert! The winners of Love Catcher in Bali were Choi Tae Eun and Kim Su Ji.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Avast antivirus license key 2038 The ultimate guide to download and install.md b/spaces/contluForse/HuggingGPT/assets/Avast antivirus license key 2038 The ultimate guide to download and install.md
deleted file mode 100644
index 9a2681c52567510700addca54748df697d831257..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Avast antivirus license key 2038 The ultimate guide to download and install.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
Avast Antivirus Crack software is well known worldwide for its robust and powerful antivirus and other anti-malware programs. The Avast Antivirus license key combines a variety of distinct cyber security technologies to offer real-time protection against new and existing risks.
The Avast Premier 2021 License File Serial Number likewise mixes different cyber security technologies to provide real-time protection against existing and new threats. The user interface of the Avast Antivirus Crack is lightweight and has all the options the user needs to safeguard their workstations. The interface has four tabs: Scan Computer, Summary, Real-time Shields, and Care.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Cv Pcc A Irmandade Do Crime Download Pdf.md b/spaces/contluForse/HuggingGPT/assets/Cv Pcc A Irmandade Do Crime Download Pdf.md
deleted file mode 100644
index 317a8a0e7c398b6ec44dd7763becf1eec28f2dea..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Cv Pcc A Irmandade Do Crime Download Pdf.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/contluForse/HuggingGPT/assets/Deewaar Hindi Full Movie HD Download for Free The Film that Changed the Face of Indian Cinema.md b/spaces/contluForse/HuggingGPT/assets/Deewaar Hindi Full Movie HD Download for Free The Film that Changed the Face of Indian Cinema.md
deleted file mode 100644
index deabb992b6999e7a8ad7480b17a2df4a9fda2265..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Deewaar Hindi Full Movie HD Download for Free The Film that Changed the Face of Indian Cinema.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Download unlimited Kishore Kumar for Shashi Kapoor movies and videos here. Kishore Kumar for Shashi Kapoor is available in HD, 3GP, and MP4 320p, and you can easily download more videos and movies from sites such as Tamilrockers, Movierulz, Tamilgun, Filmywap, and Pagalworld.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Download Alien Covenant (English) - Where to Find the Best Deals and Offers for the Movie.md b/spaces/contluForse/HuggingGPT/assets/Download Alien Covenant (English) - Where to Find the Best Deals and Offers for the Movie.md
deleted file mode 100644
index ed81d74694af1ea33bbf95784a63f3ecfd1e75e9..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Download Alien Covenant (English) - Where to Find the Best Deals and Offers for the Movie.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-787 Web TORRENT JOKER 2019 movie download full HD 720p Hindi Filmywap ... DOWNLOAD Chhapaak 2020 Hindi 720p PreDVD DHURALA 2020 marathi 720p PreDVD x264 AAC 1. ... A teen girl finds herself besieged by an evil supernatural force after she played Ouija with two classmates. ... Running time 1 45 00. 4d29de3e1b
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Fiat EPER V84 05.2014 Multilanguage.md b/spaces/diacanFperku/AutoGPT/Fiat EPER V84 05.2014 Multilanguage.md
deleted file mode 100644
index e48b3b11e65072cbee4dd6dfc875ef3ec2c677e1..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Fiat EPER V84 05.2014 Multilanguage.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
The Fiat ePER program provides catalogues of parts for Fiat cars and minivans. ePER is a catalogue of spare parts and includes a search engine for spare parts. EPER [06.2013.06.03].
ePER is the catalogue of spare parts for the cars. The program is built around a search engine and is also used as a graphical catalogue of spare parts. ePER is based on web services, which supply the list of models and their details (v20w32nt.dll). ePER offers an automatic search engine not only for spare parts but also for engines, and it runs on Windows XP SP2 and newer.
-
Fiat ePER includes the search engine for spare parts for the cars and for the engines. The catalogue of spare parts is based on the web service for parts, and the program uses a file system for the web-service list of parts. In this file system you can include the details of the parts lists and carry over the catalogue of parts; setting up such a file system may require running the standard installer as an administrator.
-
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Goldplay Gp-1005 Driver Indirl ((FULL)).md b/spaces/diacanFperku/AutoGPT/Goldplay Gp-1005 Driver Indirl ((FULL)).md
deleted file mode 100644
index 0a7e5d9bb6c0a5fdea35a23fb60608af42f13a22..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Goldplay Gp-1005 Driver Indirl ((FULL)).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Download the solution MATLAB code; the solution is also available as a PDF. Some of the questions, statements, and formulas of the present assignment are as follows. The assignment consists of two parts: part A contains five short-quiz questions with multiple-choice answers and part B contains several programming assignments (matlab_data_sets). The heat transfer problem is studied in chapter 3 of the online book, which deals with the modeling of real-world applications; heat transfer solutions for small and large plants, the effect of temperature on the specific heat of a system, and the heat conduction of a material are all covered in this chapter. One exercise asks you to find the errors in the given code; with more experience you will be able to obtain a clear picture of the functions of the data types. In the conduction problem, the value of the temperature at the outside boundary is 100 degrees, and a branch in the middle of the copper tubing is heated to 900 degrees and held constant at that temperature for 50 minutes. The thermo module covers heat transfer for building science materials and structures, and the heat transfer coefficient hc is related to the convection coefficient. The analysis can be run simply using the analyze button on the toolbar (see Figure 3.13). 4fefd39f24
-
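The fragment above sketches a heat-conduction assignment (an outside boundary held at 100 degrees and a point in the copper tubing heated to 900 degrees for 50 minutes) without showing any code. Purely as a hedged illustration of that kind of setup, here is a minimal explicit finite-difference sketch of 1-D transient conduction; the rod length, diffusivity, grid spacing, and time step are assumptions chosen for the example, not values taken from the original assignment, and this is not the MATLAB solution the text refers to.

```python
import numpy as np

# Assumed parameters for illustration only (not from the original assignment).
alpha = 1.11e-4             # thermal diffusivity of copper, m^2/s
length = 1.0                # assumed rod length, m
nx = 51                     # grid points
dx = length / (nx - 1)
dt = 0.4 * dx ** 2 / alpha  # FTCS explicit scheme is stable for dt <= 0.5 * dx**2 / alpha

T = np.full(nx, 100.0)      # start the rod at the 100-degree boundary temperature
mid = nx // 2               # the "branch in the middle" that gets heated
heated_seconds = 50 * 60    # heater held on for 50 minutes

t = 0.0
while t < heated_seconds:
    # Explicit update of the interior nodes from the 1-D heat equation.
    T[1:-1] += alpha * dt / dx ** 2 * (T[2:] - 2 * T[1:-1] + T[:-2])
    T[0] = T[-1] = 100.0    # outside boundary fixed at 100 degrees
    T[mid] = 900.0          # heated point held at 900 degrees while the heater is on
    t += dt

print(f"Peak temperature after 50 minutes: {T.max():.1f} degrees")
print(f"Temperature a quarter of the way along the rod: {T[nx // 4]:.1f} degrees")
```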
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Queen Discography 19672009 Mp3 320 Kbps.md b/spaces/diacanFperku/AutoGPT/Queen Discography 19672009 Mp3 320 Kbps.md
deleted file mode 100644
index 783d650627d7eb5b033911c4c7ba040b800636e6..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Queen Discography 19672009 Mp3 320 Kbps.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
Queen Discography 1967-2009 Mp3 320 Kbps: How to Download and Enjoy the Music of the Rock Legends
-
-
Queen is one of the most legendary and influential rock bands of all time. Their music has captivated millions of fans around the world for decades. Their discography includes 15 studio albums, 10 live albums, and countless singles and compilations. If you are a fan of Queen or want to discover their amazing music, you might want to download their discography in high-quality Mp3 format.
In this article, we will show you how to download Queen Discography 1967-2009 Mp3 320 Kbps, the most complete and comprehensive collection of their music in the best possible quality. We will also give you some information about their albums and songs, and some tips on how to enjoy their music.
-
-
How to Download Queen Discography 1967-2009 Mp3 320 Kbps
-
-
There are many websites and platforms where you can download Queen Discography 1967-2009 Mp3 320 Kbps, but not all of them are reliable or safe. Some of them might contain viruses, malware, or low-quality files. Some of them might require you to pay or register before accessing the download links. Some of them might have incomplete or inaccurate information about the albums and songs.
-
-
To avoid these problems, we recommend using a trusted and reputable torrent website that offers Queen Discography 1967-2009 Mp3 320 Kbps as a magnet link or a torrent file. A torrent website is a website where users can share and download files using a peer-to-peer network. A magnet link is a URL that contains the information needed to download the files from other users. A torrent file is a small file that contains the same information as a magnet link.
-
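To make that distinction a little more concrete, here is a minimal Python sketch that inspects the parts of a magnet link before you hand it to a torrent client. The link below is a made-up example, not a real download, and the field names follow the usual magnet URI convention (xt for the content hash, dn for the display name, tr for trackers).

```python
from urllib.parse import urlparse, parse_qs

# A made-up magnet link, used purely for illustration.
magnet = (
    "magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567"
    "&dn=Example+Album+Collection"
    "&tr=udp%3A%2F%2Ftracker.example.org%3A1337"
)

# A magnet link is just a URI: its query string carries everything a
# torrent client needs to identify the content and find peers for it.
fields = parse_qs(urlparse(magnet).query)

print("content hash:", fields["xt"][0])       # identifies the exact files to download
print("display name:", fields["dn"][0])       # human-readable name shown in the client
print("trackers    :", fields.get("tr", []))  # servers that help locate other peers
```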
-
-
Some of the benefits of using a torrent website are:
-
-
You can download large files faster and more efficiently.
-
You can pause and resume your downloads at any time.
-
You can check the quality and authenticity of the files before downloading them.
-
You can access a wide variety of content from different sources.
-
-
-
Some of the drawbacks of using a torrent website are:
-
-
You might need to install a torrent client software on your device to download the files.
-
You might need to use a VPN service to protect your privacy and security online.
-
You might need to follow some rules and etiquette when using the torrent website and the peer-to-peer network.
-
-
-
One of the best torrent websites that offers Queen Discography 1967-2009 Mp3 320 Kbps is SolidTorrents. SolidTorrents is a modern and user-friendly torrent search engine that indexes millions of torrents from various sources. It has a simple and intuitive interface that allows you to search for any content you want. It also has some useful features such as filters, categories, ratings, comments, and more.
-
-
To download Queen Discography 1967-2009 Mp3 320 Kbps from SolidTorrents, follow these steps:
-
-
Go to https://solidtorrents.to/ on your web browser.
-
Type "Queen Discography 1967-2009 Mp3 320 Kbps" in the search box and hit enter.
-
You will see a list of results that match your query. Look for the one that has the most seeders, leechers, file size, and downloads. Seeders are users who have the complete file and are sharing it with others. Leechers are users who are downloading the file but have not completed it yet. File size is the total amount of data that you need to download. Downloads are the number of times that the file has been downloaded by other users.
-
Click on the result that you want to download. You will see more details about the file, such as its name, description, files list, trackers list, and more info.
-
Click on either "Torrent Download" or "Magnet Download" to start downloading the file. If you click on "Torrent Download", you will need to save the torrent file on your device and open it with your torrent client software. If you click on "Magnet Download", you will need to copy the magnet link URL and paste it on your torrent client software.
-
Wait for your download to finish. Depending on your internet speed and the availability of seeders, this might take some time.
-
Enjoy listening to Queen Discography 1967-2009 Mp3 320 Kbps on your device!
-
-
-
How to Enjoy Queen Discography 1967-2009 Mp3 320 Kbps
-
-
Now that you have downloaded Queen Discography 1967-2009 Mp3 320 Kbps, you might be wondering how to enjoy their music to the fullest. Here are some tips that might help you:
-
-
Use a good quality headphones or speakers to listen to their songs. You will appreciate their sound quality better if you use a device that can deliver clear and crisp audio.
-
Listen to their albums in chronological order. You will be able to appreciate their musical evolution and diversity better if you listen to their albums from their debut in 1973 to their last in 2008.
-
Read the lyrics and sing along with their songs. You will be able to understand their messages and emotions better if you read the words that they wrote and sang.
-
Watch their live performances and documentaries online. You will be able to see their charisma and energy on stage better if you watch them perform live in front of thousands of fans. You will also be able to learn more about their history and achievements if you watch documentaries that feature interviews, footage, and anecdotes about them.
-
Join online communities and forums dedicated to Queen fans. You will be able to interact with other fans who share your passion and enthusiasm for Queen. You will also be able to discover new facts, trivia, opinions, and recommendations about them.
-
-
-
Conclusion
-
-
Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided.
-
-
We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!
-
Some Highlights of Queen Discography 1967-2009 Mp3 320 Kbps
-
-
Queen Discography 1967-2009 Mp3 320 Kbps contains all the albums and songs that Queen released during their career. Each album and song has its own story and significance, but here are some of the highlights that you should not miss:
-
-
Queen (1973): The debut album that introduced Queen to the world. It features some of their early classics, such as "Keep Yourself Alive", "Liar", and "Seven Seas of Rhye". It also showcases their diverse influences, from hard rock to progressive rock to glam rock.
-
Sheer Heart Attack (1974): The third album that marked a breakthrough for Queen. It features their first international hit, "Killer Queen", as well as other fan favorites, such as "Brighton Rock", "Now I'm Here", and "Stone Cold Crazy". It also demonstrates their versatility and experimentation, from ballads to metal to music hall.
-
A Night at the Opera (1975): The fourth album that is widely regarded as their masterpiece. It features their most famous song, "Bohemian Rhapsody", as well as other classics, such as "You're My Best Friend", "'39", and "Love of My Life". It also showcases their ambition and creativity, from opera to folk to rockabilly.
-
News of the World (1977): The sixth album that is one of their best-selling albums. It features two of their most iconic anthems, "We Will Rock You" and "We Are the Champions", as well as other gems, such as "Spread Your Wings", "It's Late", and "My Melancholy Blues". It also reflects their adaptation and simplification, from complex arrangements to catchy hooks.
-
The Game (1980): The eighth album that is their most successful album in the US. It features two of their biggest hits, "Another One Bites the Dust" and "Crazy Little Thing Called Love", as well as other tracks, such as "Play the Game", "Save Me", and "Dragon Attack". It also marks their first use of synthesizers, from rock to funk to disco.
-
The Works (1984): The eleventh album that is one of their most underrated albums. It features one of their most popular songs, "Radio Ga Ga", as well as other singles, such as "I Want to Break Free", "It's a Hard Life", and "Hammer to Fall". It also represents their comeback and resilience, from commercial failure to critical acclaim.
-
Innuendo (1991): The fourteenth album that is their final album with Freddie Mercury. It features one of their most poignant songs, "The Show Must Go On", as well as other masterpieces, such as "Innuendo", "I'm Going Slightly Mad", and "These Are the Days of Our Lives". It also displays their courage and dignity, from illness to legacy.
-
-
-
Conclusion
-
-
Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided. You will also be able to discover some of the highlights of their discography that you should not miss.
-
-
We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!
-
Some Tips on How to Organize and Manage Queen Discography 1967-2009 Mp3 320 Kbps
-
-
Queen Discography 1967-2009 Mp3 320 Kbps is a large and comprehensive collection of music that might take up a lot of space and time on your device. You might want to organize and manage it in a way that makes it easier and more convenient for you to access and enjoy. Here are some tips that might help you:
-
-
Create a separate folder for Queen Discography 1967-2009 Mp3 320 Kbps on your device. You can name it whatever you want, but make sure it is easy to find and remember.
-
Within the folder, create subfolders for each album that Queen released. You can name them according to the album title and year of release, such as "1973 - Queen" or "1980 - The Game".
-
Within each subfolder, place the Mp3 files of the songs that belong to that album. You can name them according to the track number and title, such as "01 - Keep Yourself Alive.mp3" or "11 - Bohemian Rhapsody.mp3". A small scripted sketch of this layout follows the list below.
-
Add album art and metadata to each Mp3 file. Album art is the image that represents the album cover. Metadata is the information that describes the file, such as artist name, album name, song title, genre, etc. You can use a software or an online tool to add album art and metadata to your Mp3 files.
-
Create playlists for your favorite songs or albums. Playlists are collections of songs that you can play in a specific order or randomly. You can create playlists based on your mood, preference, occasion, theme, etc. You can use a software or an online tool to create playlists for your Mp3 files.
-
Backup your Queen Discography 1967-2009 Mp3 320 Kbps folder regularly. Backup is the process of copying your files to another location or device for safekeeping. You might want to backup your folder in case something happens to your original device or files, such as damage, loss, theft, corruption, etc. You can use a software or an online tool to backup your folder.
-
-
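As a concrete sketch of the folder-and-file layout described in the list above, the short Python script below sorts loose Mp3 files into one subfolder per album and renames each file by track number and title. It assumes the third-party mutagen library is installed (pip install mutagen) and that your files already carry basic tags; the source and library paths are placeholders to replace with your own.

```python
import shutil
from pathlib import Path

import mutagen  # third-party tag reader: pip install mutagen

SOURCE = Path("Downloads/queen_mp3s")      # placeholder: folder with the loose files
LIBRARY = Path("Music/Queen Discography")  # placeholder: the organized library folder


def organize(source: Path, library: Path) -> None:
    for mp3 in sorted(source.glob("*.mp3")):
        tags = mutagen.File(str(mp3), easy=True) or {}

        album = tags.get("album", ["Unknown Album"])[0].replace("/", "-")
        year = tags.get("date", [""])[0][:4]
        title = tags.get("title", [mp3.stem])[0].replace("/", "-")
        track = tags.get("tracknumber", ["0"])[0].split("/")[0]

        # One subfolder per album, e.g. "1975 - A Night at the Opera".
        folder = library / (f"{year} - {album}" if year else album)
        folder.mkdir(parents=True, exist_ok=True)

        # One file per track, e.g. "11 - Bohemian Rhapsody.mp3".
        target = folder / f"{int(track):02d} - {title}.mp3"
        shutil.move(str(mp3), str(target))
        print(f"{mp3.name} -> {target}")


if __name__ == "__main__":
    organize(SOURCE, LIBRARY)
```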
-
Conclusion
-
-
Queen Discography 1967-2009 Mp3 320 Kbps is a must-have for any rock fan who wants to enjoy the music of one of the greatest bands of all time. By following this guide, you will be able to download their discography in high-quality Mp3 format from a reliable torrent website. You will also be able to enjoy their songs in various ways by using some tips that we provided. You will also be able to discover some of the highlights of their discography that you should not miss. You will also be able to organize and manage their discography in a way that makes it easier and more convenient for you to access and enjoy.
-
-
We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/training/training.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/training/training.py
deleted file mode 100644
index c3264fc1796aafb654ff3cc0176904aa5f02d68b..0000000000000000000000000000000000000000
--- a/spaces/diagaiwei/ir_chinese_medqa/colbert/training/training.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import time
-import torch
-import random
-import torch.nn as nn
-import numpy as np
-
-from transformers import AdamW, get_linear_schedule_with_warmup
-from colbert.infra import ColBERTConfig
-from colbert.training.rerank_batcher import RerankBatcher
-
-from colbert.utils.amp import MixedPrecisionManager
-from colbert.training.lazy_batcher import LazyBatcher
-from colbert.parameters import DEVICE
-
-from colbert.modeling.colbert import ColBERT
-from colbert.modeling.reranker.electra import ElectraReranker
-
-from colbert.utils.utils import print_message
-from colbert.training.utils import print_progress, manage_checkpoints
-
-
-
-def train(config: ColBERTConfig, triples, queries=None, collection=None):
- config.checkpoint = config.checkpoint or 'bert-base-uncased'
-
- if config.rank < 1:
- config.help()
-
- random.seed(12345)
- np.random.seed(12345)
- torch.manual_seed(12345)
- torch.cuda.manual_seed_all(12345)
-
- assert config.bsize % config.nranks == 0, (config.bsize, config.nranks)
- config.bsize = config.bsize // config.nranks
-
- print("Using config.bsize =", config.bsize, "(per process) and config.accumsteps =", config.accumsteps)
-
- if collection is not None:
- if config.reranker:
- reader = RerankBatcher(config, triples, queries, collection, (0 if config.rank == -1 else config.rank), config.nranks)
- else:
- reader = LazyBatcher(config, triples, queries, collection, (0 if config.rank == -1 else config.rank), config.nranks)
- else:
- raise NotImplementedError()
-
- if not config.reranker:
- colbert = ColBERT(name=config.checkpoint, colbert_config=config)
- else:
- colbert = ElectraReranker.from_pretrained(config.checkpoint)
-
- colbert = colbert.to(DEVICE)
- colbert.train()
-
- colbert = torch.nn.parallel.DistributedDataParallel(colbert, device_ids=[config.rank],
- output_device=config.rank,
- find_unused_parameters=True)
-
- optimizer = AdamW(filter(lambda p: p.requires_grad, colbert.parameters()), lr=config.lr, eps=1e-8)
- optimizer.zero_grad()
-
- scheduler = None
- if config.warmup is not None:
- print(f"#> LR will use {config.warmup} warmup steps and linear decay over {config.maxsteps} steps.")
- scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.warmup,
- num_training_steps=config.maxsteps)
-
- warmup_bert = config.warmup_bert
- if warmup_bert is not None:
- set_bert_grad(colbert, False)
-
- amp = MixedPrecisionManager(config.amp)
- labels = torch.zeros(config.bsize, dtype=torch.long, device=DEVICE)
-
- start_time = time.time()
- train_loss = None
- train_loss_mu = 0.999
-
- start_batch_idx = 0
-
- # if config.resume:
- # assert config.checkpoint is not None
- # start_batch_idx = checkpoint['batch']
-
- # reader.skip_to_batch(start_batch_idx, checkpoint['arguments']['bsize'])
-
- for batch_idx, BatchSteps in zip(range(start_batch_idx, config.maxsteps), reader):
- if (warmup_bert is not None) and warmup_bert <= batch_idx:
- set_bert_grad(colbert, True)
- warmup_bert = None
-
- this_batch_loss = 0.0
-
- for batch in BatchSteps:
- with amp.context():
- try:
- queries, passages, target_scores = batch
- encoding = [queries, passages]
-                except ValueError:  # the batch packs a single encoding together with target scores
- encoding, target_scores = batch
- encoding = [encoding.to(DEVICE)]
-
- scores = colbert(*encoding)
-
- if config.use_ib_negatives:
- scores, ib_loss = scores
-
- scores = scores.view(-1, config.nway)
-
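-                # Distillation vs. plain ranking loss: if the reader provides teacher scores
-                # (and they are not ignored), train with KL-divergence against the softened
-                # teacher distribution over the nway candidates; otherwise use cross-entropy
-                # against index 0, i.e. the first (positive) passage of each example.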
- if len(target_scores) and not config.ignore_scores:
- target_scores = torch.tensor(target_scores).view(-1, config.nway).to(DEVICE)
- target_scores = target_scores * config.distillation_alpha
- target_scores = torch.nn.functional.log_softmax(target_scores, dim=-1)
-
- log_scores = torch.nn.functional.log_softmax(scores, dim=-1)
- loss = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)(log_scores, target_scores)
- else:
- loss = nn.CrossEntropyLoss()(scores, labels[:scores.size(0)])
-
- if config.use_ib_negatives:
- if config.rank < 1:
- print('\t\t\t\t', loss.item(), ib_loss.item())
-
- loss += ib_loss
-
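-                # Gradient accumulation: the loss is scaled so that config.accumsteps
-                # micro-batches add up to one effective optimizer step (applied in amp.step).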
- loss = loss / config.accumsteps
-
- if config.rank < 1:
- print_progress(scores)
-
- amp.backward(loss)
-
- this_batch_loss += loss.item()
-
- train_loss = this_batch_loss if train_loss is None else train_loss
- train_loss = train_loss_mu * train_loss + (1 - train_loss_mu) * this_batch_loss
-
- amp.step(colbert, optimizer, scheduler)
-
- if config.rank < 1:
- print_message(batch_idx, train_loss)
- manage_checkpoints(config, colbert, optimizer, batch_idx+1, savepath=None)
-
- if config.rank < 1:
- print_message("#> Done with all triples!")
- ckpt_path = manage_checkpoints(config, colbert, optimizer, batch_idx+1, savepath=None, consumed_all_triples=True)
-
- return ckpt_path # TODO: This should validate and return the best checkpoint, not just the last one.
-
-
-
-def set_bert_grad(colbert, value):
- try:
- for p in colbert.bert.parameters():
- assert p.requires_grad is (not value)
- p.requires_grad = value
- except AttributeError:
- set_bert_grad(colbert.module, value)
diff --git a/spaces/digitalxingtong/Taffy-Bert-VITS2/start.bat b/spaces/digitalxingtong/Taffy-Bert-VITS2/start.bat
deleted file mode 100644
index 418d21233dbf720b0dd09821904d9d6a31b123a2..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Taffy-Bert-VITS2/start.bat
+++ /dev/null
@@ -1,2 +0,0 @@
-set PYTHON=venv\python.exe
-start cmd /k "set PYTHON=%PYTHON%"
\ No newline at end of file
diff --git a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/ssd_head.py b/spaces/dineshreddy/WALT/mmdet/models/dense_heads/ssd_head.py
deleted file mode 100644
index 145622b64e3f0b3f7f518fc61a2a01348ebfa4f3..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/ssd_head.py
+++ /dev/null
@@ -1,265 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import xavier_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (build_anchor_generator, build_assigner,
- build_bbox_coder, build_sampler, multi_apply)
-from ..builder import HEADS
-from ..losses import smooth_l1_loss
-from .anchor_head import AnchorHead
-
-
-# TODO: add loss evaluator for SSD
-@HEADS.register_module()
-class SSDHead(AnchorHead):
- """SSD head used in https://arxiv.org/abs/1512.02325.
-
- Args:
- num_classes (int): Number of categories excluding the background
- category.
- in_channels (int): Number of channels in the input feature map.
- anchor_generator (dict): Config dict for anchor generator
- bbox_coder (dict): Config of bounding box coder.
- reg_decoded_bbox (bool): If true, the regression loss would be
- applied directly on decoded bounding boxes, converting both
- the predicted boxes and regression targets to absolute
- coordinates format. Default False. It should be `True` when
- using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
- train_cfg (dict): Training config of anchor head.
- test_cfg (dict): Testing config of anchor head.
- """ # noqa: W605
-
- def __init__(self,
- num_classes=80,
- in_channels=(512, 1024, 512, 256, 256, 256),
- anchor_generator=dict(
- type='SSDAnchorGenerator',
- scale_major=False,
- input_size=300,
- strides=[8, 16, 32, 64, 100, 300],
- ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
- basesize_ratio_range=(0.1, 0.9)),
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- clip_border=True,
- target_means=[.0, .0, .0, .0],
- target_stds=[1.0, 1.0, 1.0, 1.0],
- ),
- reg_decoded_bbox=False,
- train_cfg=None,
- test_cfg=None):
- super(AnchorHead, self).__init__()
- self.num_classes = num_classes
- self.in_channels = in_channels
- self.cls_out_channels = num_classes + 1 # add background class
- self.anchor_generator = build_anchor_generator(anchor_generator)
- num_anchors = self.anchor_generator.num_base_anchors
-
- reg_convs = []
- cls_convs = []
- for i in range(len(in_channels)):
- reg_convs.append(
- nn.Conv2d(
- in_channels[i],
- num_anchors[i] * 4,
- kernel_size=3,
- padding=1))
- cls_convs.append(
- nn.Conv2d(
- in_channels[i],
- num_anchors[i] * (num_classes + 1),
- kernel_size=3,
- padding=1))
- self.reg_convs = nn.ModuleList(reg_convs)
- self.cls_convs = nn.ModuleList(cls_convs)
-
- self.bbox_coder = build_bbox_coder(bbox_coder)
- self.reg_decoded_bbox = reg_decoded_bbox
- self.use_sigmoid_cls = False
- self.cls_focal_loss = False
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
-        # set sampling=False for anchor_target
- self.sampling = False
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- # SSD sampling=False so use PseudoSampler
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
- self.fp16_enabled = False
-
- def init_weights(self):
- """Initialize weights of the head."""
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- xavier_init(m, distribution='uniform', bias=0)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple:
- cls_scores (list[Tensor]): Classification scores for all scale
- levels, each is a 4D-tensor, the channels number is
- num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
- levels, each is a 4D-tensor, the channels number is
- num_anchors * 4.
- """
- cls_scores = []
- bbox_preds = []
- for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
- self.cls_convs):
- cls_scores.append(cls_conv(feat))
- bbox_preds.append(reg_conv(feat))
- return cls_scores, bbox_preds
-
- def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
- bbox_targets, bbox_weights, num_total_samples):
- """Compute loss of a single image.
-
- Args:
-            cls_score (Tensor): Box scores for each image
- Has shape (num_total_anchors, num_classes).
- bbox_pred (Tensor): Box energies / deltas for each image
- level with shape (num_total_anchors, 4).
- anchors (Tensor): Box reference for each scale level with shape
- (num_total_anchors, 4).
- labels (Tensor): Labels of each anchors with shape
- (num_total_anchors,).
- label_weights (Tensor): Label weights of each anchor with shape
- (num_total_anchors,)
-            bbox_targets (Tensor): BBox regression targets of each anchor with
- shape (num_total_anchors, 4).
- bbox_weights (Tensor): BBox regression loss weights of each anchor
- with shape (num_total_anchors, 4).
- num_total_samples (int): If sampling, num total samples equal to
- the number of total anchors; Otherwise, it is the number of
- positive anchors.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
-
- loss_cls_all = F.cross_entropy(
- cls_score, labels, reduction='none') * label_weights
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- pos_inds = ((labels >= 0) &
- (labels < self.num_classes)).nonzero().reshape(-1)
- neg_inds = (labels == self.num_classes).nonzero().view(-1)
-
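-        # Online hard negative mining: keep every positive anchor, but only the
-        # neg_pos_ratio hardest negatives (those with the largest classification loss).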
- num_pos_samples = pos_inds.size(0)
- num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
- if num_neg_samples > neg_inds.size(0):
- num_neg_samples = neg_inds.size(0)
- topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
- loss_cls_pos = loss_cls_all[pos_inds].sum()
- loss_cls_neg = topk_loss_cls_neg.sum()
- loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
-
- if self.reg_decoded_bbox:
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
- # is applied directly on the decoded bounding boxes, it
- # decodes the already encoded coordinates to absolute format.
- bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
-
- loss_bbox = smooth_l1_loss(
- bbox_pred,
- bbox_targets,
- bbox_weights,
- beta=self.train_cfg.smoothl1_beta,
- avg_factor=num_total_samples)
- return loss_cls[None], loss_bbox
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute losses of the head.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level
- Has shape (N, num_anchors * num_classes, H, W)
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level with shape (N, num_anchors * 4, H, W)
- gt_bboxes (list[Tensor]): each item are the truth boxes for each
- image in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
-
- device = cls_scores[0].device
-
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- cls_reg_targets = self.get_targets(
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=1,
- unmap_outputs=False)
- if cls_reg_targets is None:
- return None
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
- num_total_pos, num_total_neg) = cls_reg_targets
-
- num_images = len(img_metas)
- all_cls_scores = torch.cat([
- s.permute(0, 2, 3, 1).reshape(
- num_images, -1, self.cls_out_channels) for s in cls_scores
- ], 1)
- all_labels = torch.cat(labels_list, -1).view(num_images, -1)
- all_label_weights = torch.cat(label_weights_list,
- -1).view(num_images, -1)
- all_bbox_preds = torch.cat([
- b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
- for b in bbox_preds
- ], -2)
- all_bbox_targets = torch.cat(bbox_targets_list,
- -2).view(num_images, -1, 4)
- all_bbox_weights = torch.cat(bbox_weights_list,
- -2).view(num_images, -1, 4)
-
- # concat all level anchors to a single tensor
- all_anchors = []
- for i in range(num_images):
- all_anchors.append(torch.cat(anchor_list[i]))
-
- # check NaN and Inf
- assert torch.isfinite(all_cls_scores).all().item(), \
- 'classification scores become infinite or NaN!'
- assert torch.isfinite(all_bbox_preds).all().item(), \
-            'bbox predictions become infinite or NaN!'
-
- losses_cls, losses_bbox = multi_apply(
- self.loss_single,
- all_cls_scores,
- all_bbox_preds,
- all_anchors,
- all_labels,
- all_label_weights,
- all_bbox_targets,
- all_bbox_weights,
- num_total_samples=num_total_pos)
- return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
diff --git a/spaces/dmvaldman/ICLR2023/app.py b/spaces/dmvaldman/ICLR2023/app.py
deleted file mode 100644
index 851d326f9ddfb280efde5d483489fa7452bf73d9..0000000000000000000000000000000000000000
--- a/spaces/dmvaldman/ICLR2023/app.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import gradio as gr
-
-from paper_list import PaperList
-
-DESCRIPTION = '# ICLR 2023 Paper Submissions'
-NOTES = '''
-- [ICLR 2023](https://openreview.net/group?id=ICLR.cc/2023/Conference)
-- [List of submitted papers](https://docs.google.com/spreadsheets/d/1dQMjjetud2edTEREdLiuD4giC244lxY67ZxaL7NiMUc/edit#gid=1277917086)
-'''
-
-
-def main():
- paper_list = PaperList()
-
- with gr.Blocks(css='style.css') as demo:
- gr.Markdown(DESCRIPTION)
-
- search_box = gr.Textbox(
- label='Search Title',
- placeholder=
- 'You can search for titles with regular expressions. e.g. (?
-
-
-
-
-
-
-
-
-