diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/README.md b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/README.md
deleted file mode 100644
index a287cdb7a4a3a52a17adb6d0a01e064fa21b3b54..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# DeepAI Wrapper
-Written by [ading2210](https://github.com/ading2210/).
-
-## Examples:
-These functions are generators which yield strings containing the newly generated text.
-
-### Completion:
-```python
-for chunk in deepai.Completion.create("Who are you?"):
-    print(chunk, end="", flush=True)
-print()
-```
-
-### Chat Completion:
-Use the same format for the messages as you would for the [official OpenAI API](https://platform.openai.com/docs/guides/chat/introduction).
-```python
-messages = [
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Who won the world series in 2020?"},
- {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
- {"role": "user", "content": "Where was it played?"}
-]
-for chunk in deepai.ChatCompletion.create(messages):
-    print(chunk, end="", flush=True)
-print()
-```
\ No newline at end of file
diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/chatpdf/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/unfinished/chatpdf/__init__.py
deleted file mode 100644
index 30dc1d3e60365e97957dbfe6d702b1d5b2e39d01..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/chatpdf/__init__.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import requests
-import json
-
-from queue import Queue, Empty
-from threading import Thread
-from json import loads
-from re import findall
-
-
-class Completion:
-    # Shared state between the background request thread and the `create` generator.
-    stream_completed = False
-    message_queue = Queue()
-
-    @staticmethod
-    def request(prompt: str):
-        '''TODO: add authentication and upload the PDF from a URL or local file,
-        then obtain the atoken and chat ID.
-        '''
-
- token = "your_token_here"
- chat_id = "your_chat_id_here"
-
- url = "https://chat-pr4yueoqha-ue.a.run.app/"
-
- payload = json.dumps({
- "v": 2,
- "chatSession": {
- "type": "join",
- "chatId": chat_id
- },
- "history": [
- {
- "id": "VNsSyJIq_0",
- "author": "p_if2GPSfyN8hjDoA7unYe",
- "msg": "",
- "time": 1682672009270
- },
- {
- "id": "Zk8DRUtx_6",
- "author": "uplaceholder",
- "msg": prompt,
- "time": 1682672181339
- }
- ]
- })
-
- # TODO: fix headers, use random user-agent, streaming response, etc
- headers = {
- 'authority': 'chat-pr4yueoqha-ue.a.run.app',
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'atoken': token,
- 'content-type': 'application/json',
- 'origin': 'https://www.chatpdf.com',
- 'referer': 'https://www.chatpdf.com/',
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
- }
-
-        response = requests.request(
-            "POST", url, headers=headers, data=payload).text
-        # hand the (non-streaming) response text to the consumer in `create`
-        Completion.message_queue.put(response)
-        Completion.stream_completed = True
-        return {'response': response}
-
- @staticmethod
- def create(prompt: str):
- Thread(target=Completion.request, args=[prompt]).start()
-
-        while not Completion.stream_completed or not Completion.message_queue.empty():
-            try:
-                message = Completion.message_queue.get(timeout=0.01)
-                # NOTE: `regex`, `part1` and `part2` are referenced here but are still
-                # undefined in this unfinished module; they are meant to extract and
-                # re-wrap the streamed JSON chunks.
-                for chunk in findall(Completion.regex, message):
-                    yield loads(Completion.part1 + chunk + Completion.part2)['delta']
-
- except Empty:
- pass
-
- @staticmethod
- def handle_stream_response(response):
- Completion.message_queue.put(response.decode())
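-
-
-# Illustrative usage sketch (not part of the original file): once a valid atoken,
-# chat ID and the chunk-parsing attributes above are filled in, the streaming
-# generator would be consumed like this.
-if __name__ == '__main__':
-    for delta in Completion.create('Summarize the uploaded PDF in two sentences.'):
-        print(delta, end='', flush=True)
-    print()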
diff --git a/spaces/101-5/gpt4free/testing/wewordle/README.md b/spaces/101-5/gpt4free/testing/wewordle/README.md
deleted file mode 100644
index ec2289c21fe0d4041d8189f3e67343cf8daa401d..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/testing/wewordle/README.md
+++ /dev/null
@@ -1 +0,0 @@
Originally from the website https://chat-gpt.com/chat (see https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431). I got the API https://wewordle.org/gptapi/v1/web/turbo, but it is rate-limited, so I reverse engineered their Android app and found https://wewordle.org/gptapi/v1/android/turbo; randomizing the user ID bypasses the limit.
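
A rough sketch of that idea is below. Only the endpoint URL and the randomized user ID come from the note above; the request body fields are assumptions and would need to be confirmed against the app's actual traffic.

```python
import random
import string

import requests

def random_user_id(length: int = 16) -> str:
    # fresh pseudo-random ID per request, to sidestep the per-user limit
    return "".join(random.choices(string.ascii_letters + string.digits, k=length))

payload = {
    # assumed field names -- adjust to whatever the Android app actually sends
    "user": random_user_id(),
    "messages": [{"role": "user", "content": "Hello, who are you?"}],
}
resp = requests.post("https://wewordle.org/gptapi/v1/android/turbo", json=payload, timeout=30)
print(resp.status_code, resp.text[:200])
```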
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Everything You Need to Know About X Particles Download for Cinema 4D.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Everything You Need to Know About X Particles Download for Cinema 4D.md
deleted file mode 100644
index 9a6066e0e3e3c24e1d5e57bc13ff899c79d32062..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Everything You Need to Know About X Particles Download for Cinema 4D.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
X Particles Download: How to Get the Best Plugin for Cinema 4D
-
X Particles is a powerful and versatile plugin for Cinema 4D that allows you to create stunning particle effects and simulations. Whether you want to create fire, smoke, fluids, cloth, trails, or abstract art, X Particles has the tools you need to bring your vision to life.
-
But how can you download X Particles for your Cinema 4D projects? In this article, we will show you the best way to download and install X Particles, as well as some tips and tricks to make the most of this amazing plugin.
-
How to Download X Particles
-
The first step is to visit the official website of the plugin: https://insydium.ltd/products/x-particles/. Here you can find all the information about the features, pricing, and system requirements of X Particles.
-
To download X Particles, you need to purchase a license from the website. You can choose between a perpetual license or a subscription license, depending on your budget and needs. A perpetual license gives you lifetime access to X Particles and all the updates for the current version, while a subscription license gives you access to X Particles and all the updates for as long as you pay the monthly or yearly fee.
-
Once you have purchased a license, you will receive an email with a link to download X Particles. You can also log in to your account on the website and access the download link from there. The download file is a zip file that contains the plugin files and an installer.
-
How to Install X Particles
-
The next step to get X Particles download is to install the plugin on your Cinema 4D software. To do this, you need to follow these steps:
-
-
Unzip the download file and run the installer. The installer will guide you through the installation process and ask you to enter your license key.
-
Choose the Cinema 4D version that you want to install X Particles on. You can install X Particles on multiple versions of Cinema 4D if you have them on your computer.
-
Choose the location where you want to install X Particles. The default location is the plugins folder of your Cinema 4D installation.
-
Click on Install and wait for the installation to finish.
-
Restart Cinema 4D and check if X Particles is available in your plugins menu.
-
-
Congratulations! You have successfully installed X Particles on your Cinema 4D software. Now you can start creating amazing particle effects and simulations with X Particles.
-
How to Use X Particles
-
X Particles is a very intuitive and user-friendly plugin that lets you create particle effects and simulations with ease. You can use X Particles in two ways: by using the built-in presets or by creating your own custom setups.
-
The built-in presets are ready-made particle effects that you can apply to any object or scene in Cinema 4D. You can find them in the content browser of Cinema 4D under the X Particles folder. There are hundreds of presets available for different types of effects, such as fire, smoke, fluids, cloth, trails, and more. You can simply drag and drop a preset onto your object or scene and adjust the parameters as you like.
-
The custom setups are particle effects that you can create from scratch using the various tools and modifiers of X Particles. You can find them in the objects menu of Cinema 4D under the X-Particles menu. There are four main types of objects that you can use to create custom setups: emitters, generators, modifiers, and questions & actions.
-
-
-
Emitters are objects that emit particles from a source point or area. You can control the number, size, shape, color, speed, direction, and lifespan of the particles using the emitter settings.
-
Generators are objects that create geometry from particles. You can use generators to create meshes, splines, trails, or sprites from particles.
-
Modifiers are objects that affect particles in various ways. You can use modifiers to add forces, collisions, deformations, dynamics, fields, or shaders to particles.
-
Questions & actions are objects that control the behavior of particles based on certain conditions. A question tests a particle property, and the actions linked to it change the particles when that test is met.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Nancy Drew Games Full Version The History and Legacy of Nancy Drew.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Nancy Drew Games Full Version The History and Legacy of Nancy Drew.md
deleted file mode 100644
index 60d560f508d6f4a0824ca7a2b0fc450f792bf6bf..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Nancy Drew Games Full Version The History and Legacy of Nancy Drew.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
Free Download Nancy Drew Games Full Version
-
If you are a fan of mystery, adventure, and puzzle-solving games, you might have heard of Nancy Drew games. These are a series of video games based on the popular books by Carolyn Keene, featuring the teenage detective Nancy Drew. In this article, we will tell you what Nancy Drew games are, why they are popular, and how to download them for free. We will also give you a list of the top 5 Nancy Drew games to play in 2021.
-
What are Nancy Drew games?
-
Nancy Drew games are point-and-click adventure games that put you in the shoes of Nancy Drew, a young sleuth who travels around the world and solves various mysteries. The games are developed by Her Interactive and have been released since 1998. There are currently 33 games in the main series, plus some spin-offs and remakes. The games are suitable for players of all ages and genders, as they offer different difficulty levels and modes.
-
Why are they popular?
-
Nancy Drew games are popular because they combine engaging stories, immersive environments, challenging puzzles, and educational elements. The games let you explore different cultures, locations, and historical periods, while learning about topics such as art, science, literature, and more. The games also have a loyal fan base that enjoys the characters, the humor, and the references to the original books. The games have won several awards and have been praised by critics and players alike.
-
How to download them for free?
-
If you want to download Nancy Drew games for free, you have a few options. One is to use a torrent site or a file-sharing platform that hosts the game files. However, this is not recommended, as it is illegal and risky. You might end up downloading viruses or malware that can harm your computer or compromise your personal data. Another option is to use a free trial or a demo version of the game. This way, you can play the game for a limited time or with limited features, without paying anything. However, this is also not ideal, as you might miss out on some content or experience glitches or bugs. The best option is to use a legitimate site that offers free downloads of Nancy Drew games. For example, you can use GameTop.com, which is a safe and reliable site that has a large collection of Nancy Drew games that you can download for free. You can choose from different genres and themes, such as mystery, horror, romance, and more. You can also enjoy high-quality graphics and sound effects, as well as full compatibility with your Windows PC.
-
Top 5 Nancy Drew Games to Play in 2021
-
Now that you know how to download Nancy Drew games for free, you might be wondering which ones to play first. To help you decide, we have compiled a list of the top 5 Nancy Drew games to play in 2021. These are based on our personal preferences and opinions, as well as on user ratings and reviews.
-
The Silent Spy
-
Plot
-
In this game, you play as Nancy Drew who travels to Scotland to investigate the mysterious death of her mother, who was a spy. You will have to uncover secrets from your mother's past, while avoiding danger from an unknown enemy. You will also have to deal with your father's disapproval and your boyfriend's jealousy.
-
Features
-
-
A thrilling story that mixes espionage and family drama.
-
A beautiful setting that showcases the Scottish culture and landscape.
-
A variety of puzzles that test your logic, memory, and creativity.
-
A choice-based system that affects the outcome of the game.
-
A spy gadget kit that includes a phone, a camera, a lock pick, and more.
-
-
The Haunting of Castle Malloy
-
Plot
-
In this game, you play as Nancy Drew who travels to Ireland to attend the wedding of her friend Kyler Mallory. However, things go wrong when the groom disappears on the eve of the wedding. You will have to find out what happened to him, while exploring the haunted castle and its surroundings. You will also have to deal with legends of banshees, fairies, and leprechauns.
-
Features
-
-
A spooky story that mixes mystery and folklore.
-
A stunning setting that showcases the Irish culture and landscape.
-
A variety of puzzles that test your observation, deduction, and coordination.
-
A jet pack that lets you fly around the castle grounds.
-
A sheep-shearing mini-game that is fun and challenging.
-
-
Ghost of Thornton Hall
-
Plot
-
In this game, you play as Nancy Drew who travels to Georgia to investigate the disappearance of Jessalyn Thornton, who vanished at her family's estate on the eve of her wedding. You will have to explore the supposedly haunted Thornton Hall and find out whether the ghost of Charlotte Thornton is behind the disappearance. You will also have to deal with dark family secrets and rivalries.
-
Features
-
-
A creepy story that mixes horror and family drama.
-
A gloomy setting that showcases the Southern Gothic style and atmosphere.
-
A variety of puzzles that test your courage, intuition, and skill.
-
A ghost-hunting device that lets you communicate with the spirits.
-
A phone charm that changes color depending on your mood.
-
-
The Captive Curse
-
Plot
-
In this game, you play as Nancy Drew who travels to Germany to investigate a series of attacks at a castle. You will have to find out who or what is behind the attacks, while staying at the castle as a guest. You will also have to deal with legends of a monster, a curse, and a hidden treasure.
-
Features
-
-
A captivating story that mixes fantasy and history.
-
A magnificent setting that showcases the German culture and landscape.
-
A variety of puzzles that test your knowledge, logic, and patience.
-
A board game that lets you play against other characters.
-
A costume trunk that lets you dress up as different characters.
-
-
Shadow at the Water's Edge
-
Plot
-
In this game, you play as Nancy Drew who travels to Japan to teach English at a school. You will have to find out why the students are scared of staying at a nearby inn, while staying there yourself. You will also have to deal with the culture shock, the language barrier, and the secrets of your host family.
-
-
Features
-
-
A chilling story that mixes mystery and culture.
-
A colorful setting that showcases the Japanese culture and landscape.
-
A variety of puzzles that test your memory, math, and origami skills.
-
A digital camera that lets you take pictures and edit them.
-
A bento box that lets you make your own lunch.
-
-
Conclusion
-
Nancy Drew games are a great way to enjoy mystery, adventure, and puzzle-solving games. They offer engaging stories, immersive environments, challenging puzzles, and educational elements. They are suitable for players of all ages and genders, as they offer different difficulty levels and modes. You can download them for free from GameTop.com, which is a safe and reliable site that has a large collection of Nancy Drew games. You can also try out some of the top 5 Nancy Drew games to play in 2021, such as The Silent Spy, The Haunting of Castle Malloy, Ghost of Thornton Hall, The Captive Curse, and Shadow at the Water's Edge. We hope you have fun playing these games and solving these mysteries!
-
FAQs
-
Here are some frequently asked questions about Nancy Drew games:
-
-
Q: How long does it take to finish a Nancy Drew game?
-
A: It depends on the game, the difficulty level, and your playing style. On average, it takes about 10 hours to finish a Nancy Drew game.
-
Q: Can I play Nancy Drew games on my Mac or mobile device?
-
A: Some Nancy Drew games are compatible with Mac or mobile devices, but not all of them. You can check the system requirements for each game on the official website or on GameTop.com.
-
Q: Can I play Nancy Drew games with my friends or family?
-
A: Yes, you can play Nancy Drew games with your friends or family. Some games have a multiplayer mode that lets you cooperate or compete with other players online or offline. You can also share your progress and achievements with other players on social media or on the official forum.
-
Q: What is the order of the Nancy Drew games?
-
A: The order of the Nancy Drew games is based on their release date. The first game in the main series is Secrets Can Kill (1998), and the latest game is Midnight in Salem (2019). You can find the complete list of the Nancy Drew games on Wikipedia or on GameTop.com.
-
Q: What is the best Nancy Drew game?
-
A: There is no definitive answer to this question, as different players might have different preferences and opinions. However, some of the most popular and highly rated Nancy Drew games are The Final Scene (2001), Curse of Blackmoor Manor (2004), Last Train to Blue Moon Canyon (2005), Warnings at Waverly Academy (2009), and Sea of Darkness (2015).
-
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Disney Characters 3d Models Free Download Maya.md b/spaces/1gistliPinn/ChatGPT4/Examples/Disney Characters 3d Models Free Download Maya.md
deleted file mode 100644
index cd738ff199650303e00622a7e93fb4f2d6d18b3d..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Disney Characters 3d Models Free Download Maya.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-3D movie character models download, free movie character 3d models and 3d objects for computer graphics ... Li Shang from Disney Mulan 3d preview.
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Evermotion - 3D People V.1 - C4D.rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Evermotion - 3D People V.1 - C4D.rar.md
deleted file mode 100644
index 1f22a4bf48348e0e118cca67d62889655e9035e9..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Evermotion - 3D People V.1 - C4D.rar.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
Evermotion - 3D People V.1 - C4D.rar: A Review of the 3D Models Collection for Cinema 4D
-
Are you looking for realistic and high-quality 3D models of people for your Cinema 4D projects? Do you want to create stunning and dynamic scenes with 3D human characters? Do you want to save time and money by using ready-made and shadered models of people? If you answered yes to any of these questions, then you may be interested in Evermotion - 3D People V.1 - C4D.rar. This is a collection of 50 highly detailed and shadered models of people for Cinema 4D. In this article, we will review the features, benefits and drawbacks of Evermotion - 3D People V.1 - C4D.rar.
What is Evermotion - 3D People V.1 - C4D.rar?
-
Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the Evermotion product range, which is a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. Evermotion - 3D People V.1 - C4D.rar contains 50 models of different ages, genders, ethnicities and poses. Each model is shadered and ready to render in Cinema 4D. The models are compatible with Scanline, V-Ray and Mental Ray render engines. The models are also optimized for low polygon count and fast rendering.
-
What are the features of Evermotion - 3D People V.1 - C4D.rar?
-
Evermotion - 3D People V.1 - C4D.rar has many features that make it a valuable and versatile collection of 3D models of people for Cinema 4D. Some of them are:
-
-
It contains 50 highly detailed and shadered models of people for Cinema 4D.
-
It covers different ages, genders, ethnicities and poses.
-
It is shadered and ready to render in Cinema 4D.
-
It is compatible with Scanline, V-Ray and Mental Ray render engines.
-
It is optimized for low polygon count and fast rendering.
-
It includes a catalog with previews and information about each model.
-
It includes a download link with a .rar file that contains the models in .c4d format.
-
-
What are the benefits of Evermotion - 3D People V.1 - C4D.rar?
-
Evermotion - 3D People V.1 - C4D.rar has many benefits that make it a worthwhile investment for Cinema 4D users. Some of them are:
-
-
It can save you time by using ready-made and shadered models of people for your Cinema 4D projects.
-
It can save you money by using high-quality models of people that are cheaper than hiring or creating your own.
-
It can enhance your creativity by giving you a variety of models of people to choose from and combine in your scenes.
-
It can improve your realism by using realistic and natural models of people that match your scenes and lighting.
-
It can increase your performance by using optimized models of people that do not slow down your rendering or editing.
-
-
What are the drawbacks of Evermotion - 3D People V.1 - C4D.rar?
-
Evermotion - 3D People V.1 - C4D.rar has some drawbacks that you should be aware of before buying it. Some of them are:
-
-
-
It requires a Cinema 4D software to use it. You cannot use it with other 3D software or applications.
-
It may not suit your specific needs or preferences. You may not find the exact model or pose that you want in the collection.
-
It may not be updated or supported by Evermotion in the future. You may not get new models or fixes for the existing ones.
-
It may not be legal or ethical to use it for some purposes or projects. You may need to check the license terms and conditions before using it.
-
-
Conclusion
-
Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the Evermotion product range, which is a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time, money and creativity by using ready-made and realistic models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use it for some purposes or projects. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.
-
How to use Evermotion - 3D People V.1 - C4D.rar?
-
Using Evermotion - 3D People V.1 - C4D.rar is very easy and simple. You just need to follow these steps:
-
-
Download Evermotion - 3D People V.1 - C4D.rar from the link provided in this article or from the official website of Evermotion.
-
Extract the .rar file using a software like WinRAR or 7-Zip.
-
Open Cinema 4D and create a new project or open an existing one.
-
Go to File > Merge and browse to the folder where you extracted the .rar file.
-
Select the model of your choice from the list and click Open.
-
The model will be imported into your scene with all the shaders and textures applied.
-
You can adjust the position, rotation, scale and other parameters of the model as you wish.
-
You can also add lights, cameras, animations and other elements to your scene.
-
When you are satisfied with your scene, you can render it using your preferred render engine.
-
-
What are the alternatives to Evermotion - 3D People V.1 - C4D.rar?
-
If you are not satisfied with Evermotion - 3D People V.1 - C4D.rar or you want to try other collections of 3D models of people for Cinema 4D, you have some alternatives to choose from. Some of them are:
-
-
Viz-People: This is a company that offers high-quality 3D models of people, cars, furniture and other objects for various 3D software and applications. They have a free non-commercial version of their HDRI collection that contains 10 high-resolution spherical environmental maps.
-
Dosch Design: This is a company that provides high-quality 3D models, textures, HDRI and sound effects for various 3D software and applications. They have a collection of 3D people that contains over 100 realistic and fully textured models of people in different poses and clothing styles.
-
Renderpeople: This is a company that specializes in creating realistic and lifelike 3D models of people for various 3D software and applications. They have a collection of 3D people that contains over 5000 models of people in different poses, clothing styles, ethnicities and ages.
-
-
-
How to download Evermotion - 3D People V.1 - C4D.rar?
-
If you want to download Evermotion - 3D People V.1 - C4D.rar, you have two options. You can either buy it from the official website of Evermotion or you can download it from a third-party link provided in this article. Both options have their advantages and disadvantages. Let's see them in detail.
-
If you buy Evermotion - 3D People V.1 - C4D.rar from the official website of Evermotion, you will get the following benefits:
-
-
You will get the original and updated version of the collection.
-
You will get a secure and easy payment method.
-
You will get a download link with a .rar file that contains the models in .c4d format.
-
You will get access to the catalog with previews and information about each model.
-
You will get support and updates from Evermotion in case of any issues or questions.
-
-
However, buying Evermotion - 3D People V.1 - C4D.rar from the official website of Evermotion also has some drawbacks:
-
-
You will have to pay a certain amount of money to buy the collection.
-
You will have to register an account on Evermotion website and provide your personal information.
-
You will have to agree to the license terms and conditions of Evermotion before using the collection.
-
-
If you download Evermotion - 3D People V.1 - C4D.rar from a third-party link provided in this article, you will get the following benefits:
-
-
You will get the collection for free without paying any money.
-
You will get the collection instantly without waiting for any delivery.
-
You will not have to register an account or provide any personal information on any website.
-
-
However, downloading Evermotion - 3D People V.1 - C4D.rar from a third-party link also has some drawbacks:
-
-
You may not get the original or updated version of the collection.
-
You may not get access to the catalog or any support or updates from Evermotion.
-
You may get a virus or malware along with the collection that can harm your computer or steal your data.
-
You may violate some laws or terms of service by downloading pirated or illegal content.
-
-
Is Evermotion - 3D People V.1 - C4D.rar worth it?
-
The answer to this question depends on your needs and preferences. If you are looking for realistic and high-quality 3D models of people for your Cinema 4D projects, Evermotion - 3D People V.1 - C4D.rar may be worth it. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time and money by using ready-made and shadered models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use it for some purposes or projects. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.
-
Conclusion
-
Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the Evermotion product range, which is a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time, money and creativity by using ready-made and realistic models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use it for some purposes or projects. You can either buy it from the official website of Evermotion or download it from a third-party link provided in this article. Both options have their advantages and disadvantages. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.
-
-
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/Dockerfile b/spaces/1line/AutoGPT/Dockerfile
deleted file mode 100644
index 8396154998f32a50d55c199a674b638d5cf7bda2..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# Use an official Python base image from the Docker Hub
-FROM python:3.10-slim
-
-# Install git
-RUN apt-get -y update
-RUN apt-get -y install git chromium-driver
-
-# Install Xvfb and other dependencies for headless browser testing
-RUN apt-get update \
- && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates
-
-# Install Firefox / Chromium
-RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
- && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
- && apt-get update \
- && apt-get install -y chromium firefox-esr
-
-# Set environment variables
-ENV PIP_NO_CACHE_DIR=yes \
- PYTHONUNBUFFERED=1 \
- PYTHONDONTWRITEBYTECODE=1
-
-# Create a non-root user and set permissions
-RUN useradd --create-home appuser
-WORKDIR /home/appuser
-RUN chown appuser:appuser /home/appuser
-USER appuser
-
-# Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements.txt .
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
- pip install --no-cache-dir --user -r requirements.txt
-
-# Copy the application files
-COPY --chown=appuser:appuser autogpt/ ./autogpt
-
-# Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt"]
diff --git a/spaces/1phancelerku/anime-remove-background/Emsa-Register-Dll-Tool-Crack.md b/spaces/1phancelerku/anime-remove-background/Emsa-Register-Dll-Tool-Crack.md
deleted file mode 100644
index 4b0a7c0ca7cf045ab33b7debb709f65df4278dac..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Emsa-Register-Dll-Tool-Crack.md
+++ /dev/null
@@ -1,84 +0,0 @@
-## Emsa Register Dll Tool Crack
-
-**Click Here 🌟 [https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2txiC0&sa=D&sntz=1&usg=AOvVaw2tOYkFopTq9fhcDyUqgUmE](https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2txiC0&sa=D&sntz=1&usg=AOvVaw2tOYkFopTq9fhcDyUqgUmE)**
-
-# How to Use EMSA Register DLL Tool to Register and Unregister ActiveX Files
-
-
-
-EMSA Register DLL Tool is a free and multipurpose tool for Windows that allows you to register and unregister ActiveX files, such as dll, ocx and exe files. ActiveX files are components that enable various functions and features in Windows applications. Sometimes, you may need to manually register or unregister these files if they are corrupted, missing or causing errors. In this article, we will show you how to use EMSA Register DLL Tool to perform these tasks easily and quickly.
-
-
-
-## Download and Install EMSA Register DLL Tool
-
-
-
-The first step is to download and install EMSA Register DLL Tool from the official website of Emsai Industrial[^1^]. The tool is compatible with Windows 98/ME/NT/2000/XP/2003. The installation process is simple and straightforward. Just follow the instructions on the screen and choose the destination folder for the tool.
-
-
-
-## Enable Shell Extensions
-
-
-
-One of the features of EMSA Register DLL Tool is that it integrates with the Windows Explorer context menu, which means you can right-click on any ActiveX file and choose to register or unregister it. To enable this feature, you need to enable shell extensions in the tool. To do this, launch the tool and go to the Options & Help tab. Check the box that says "Enable Shell Extensions" and click OK. You may need to restart your computer for the changes to take effect.
-
-
-
-## Register or Unregister ActiveX Files
-
-
-
-There are two ways to register or unregister ActiveX files with EMSA Register DLL Tool. One way is to use the tool's GUI, and the other way is to use the Windows Explorer context menu.
-
-
-
-To use the tool's GUI, launch the tool and go to the Reg & Tool File Info tab. Click on the Browse button and select the ActiveX file you want to register or unregister. The tool will display detailed information about the file, such as its name, type, version, description, etc. You can also compare two ActiveX files with identical filenames by using the File Comparison tab. To register or unregister the file, click on the appropriate button at the bottom of the window. You will see a confirmation message if the operation is successful.
-
-
-
-To use the Windows Explorer context menu, locate the ActiveX file you want to register or unregister in your file system. Right-click on it and select Register or Unregister from the menu. You will see a confirmation message if the operation is successful.
-
-
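-Under the hood, registering or unregistering an ActiveX file calls the `DllRegisterServer`/`DllUnregisterServer` entry points in that file, which is the same thing Windows' built-in `regsvr32` utility does. As a rough illustration (not part of EMSA's own tooling), the equivalent operation can be scripted like this, with the file path being a placeholder:
-
-```python
-import subprocess
-
-def register_activex(path: str, unregister: bool = False) -> bool:
-    """Register or unregister a dll/ocx silently via regsvr32."""
-    args = ["regsvr32", "/s"]
-    if unregister:
-        args.append("/u")
-    args.append(path)
-    return subprocess.run(args).returncode == 0
-
-print(register_activex(r"C:\path\to\example.ocx"))
-```
-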
-
-## Generate an ActiveX Report
-
-
-
-Another feature of EMSA Register DLL Tool is that it can generate a report of all ActiveX files in a folder. This can be useful if you want to check the status of multiple files at once. To generate an ActiveX report, right-click on any folder that contains ActiveX files and select ActiveX Report from the menu. The tool will scan the folder for ActiveX files and create a text file (output.txt) containing the report in the same folder. The report will also be opened automatically for viewing. The report will show information such as file name, type, registration status, GUID, etc.
-
-
-
-## Conclusion
-
-
-
-EMSA Register DLL Tool is a handy tool for anyone who needs to register or unregister ActiveX files in Windows. It provides extensive information about these files and allows you to perform these tasks easily and quickly. It also integrates with the Windows Explorer context menu for convenience. You can download EMSA Register DLL Tool for free from Emsai Industrial's website[^1^] and try it out yourself.
-
-
-
-
-
-
diff --git a/spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py
deleted file mode 100644
index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py
+++ /dev/null
@@ -1,16 +0,0 @@
-class F0Predictor(object):
- def compute_f0(self, wav, p_len):
- """
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length]
- """
- pass
-
- def compute_f0_uv(self, wav, p_len):
- """
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
- """
- pass
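-
-
-# Illustrative only (not part of the original interface file): a minimal concrete
-# predictor sketch. It assumes the `pyworld` package is installed and that the
-# hop_length / sampling_rate defaults below match the caller's configuration.
-import numpy as np
-import pyworld
-
-
-class DioF0PredictorSketch(F0Predictor):
-    def __init__(self, hop_length=160, sampling_rate=16000, f0_min=50, f0_max=1100):
-        self.hop_length = hop_length
-        self.sampling_rate = sampling_rate
-        self.f0_min = f0_min
-        self.f0_max = f0_max
-
-    def compute_f0(self, wav, p_len):
-        x = wav.astype(np.double)
-        # coarse f0 track from DIO, refined with StoneMask
-        f0, t = pyworld.dio(
-            x,
-            self.sampling_rate,
-            f0_floor=self.f0_min,
-            f0_ceil=self.f0_max,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(x, f0, t, self.sampling_rate)
-        # resample the track to exactly p_len frames, as the docstrings above require
-        return np.interp(np.linspace(0, len(f0) - 1, p_len), np.arange(len(f0)), f0)
-
-    def compute_f0_uv(self, wav, p_len):
-        f0 = self.compute_f0(wav, p_len)
-        uv = (f0 > 0).astype(np.float32)  # DIO marks unvoiced frames with f0 == 0
-        return f0, uv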
diff --git a/spaces/AIFILMS/Pix2Pix-Video/style.css b/spaces/AIFILMS/Pix2Pix-Video/style.css
deleted file mode 100644
index 3cf565d3e03852436a405cf632d1d22433bb4087..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/Pix2Pix-Video/style.css
+++ /dev/null
@@ -1,101 +0,0 @@
-#col-container {max-width: 820px; margin-left: auto; margin-right: auto;}
-#duplicate-container{
- display: flex;
- justify-content: space-between;
- align-items: center;
- line-height: 1em;
- flex-direction: row-reverse;
- font-size:1em;
-}
-a, a:hover, a:visited {
- text-decoration-line: underline;
- font-weight: 600;
- color: #1f2937 !important;
-}
-
-.dark a, .dark a:hover, .dark a:visited {
- color: #f3f4f6 !important;
-}
-
-.footer {
- margin-bottom: 45px;
- margin-top: 10px;
- text-align: center;
- border-bottom: 1px solid #e5e5e5;
-}
-
-.footer>p {
- font-size: .8rem!important;
- display: inline-block;
- padding: 0 10px;
- transform: translateY(26px);
- background: white;
-}
-.dark .footer {
- border-color: #303030;
-}
-.dark .footer>p {
- background: #0b0f19;
-}
-
-div#may-like-container > p {
- font-size: .8em;
- margin-bottom: 4px;
-}
-
-.animate-spin {
- animation: spin 1s linear infinite;
-}
-
-@keyframes spin {
- from {
- transform: rotate(0deg);
- }
- to {
- transform: rotate(360deg);
- }
-}
-
-#share-btn-container {
- display: flex;
- padding-left: 0.5rem !important;
- padding-right: 0.5rem !important;
- background-color: #000000;
- justify-content: center;
- align-items: center;
- border-radius: 9999px !important;
- max-width: 13rem;
-}
-
-#share-btn-container:hover {
- background-color: #060606;
-}
-
-#share-btn {
- all: initial;
- color: #ffffff;
- font-weight: 600;
- cursor:pointer;
- font-family: 'IBM Plex Sans', sans-serif;
- margin-left: 0.5rem !important;
- padding-top: 0.5rem !important;
- padding-bottom: 0.5rem !important;
- right:0;
-}
-
-#share-btn * {
- all: unset;
-}
-
-#share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
-}
-
-#share-btn-container .wrap {
- display: none !important;
-}
-
-#share-btn-container.hidden {
- display: none!important;
-}
\ No newline at end of file
diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/__init__.py b/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/__init__.py
deleted file mode 100644
index ee3709846823b7c4b71b22da0e24d63d805528a8..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from .camera import (Camera, PerspectiveCamera, OrthographicCamera,
- IntrinsicsCamera)
-from .light import Light, PointLight, DirectionalLight, SpotLight
-from .sampler import Sampler
-from .texture import Texture
-from .material import Material, MetallicRoughnessMaterial
-from .primitive import Primitive
-from .mesh import Mesh
-from .node import Node
-from .scene import Scene
-from .renderer import Renderer
-from .viewer import Viewer
-from .offscreen import OffscreenRenderer
-from .version import __version__
-from .constants import RenderFlags, TextAlign, GLTF
-
-__all__ = [
- 'Camera', 'PerspectiveCamera', 'OrthographicCamera', 'IntrinsicsCamera',
- 'Light', 'PointLight', 'DirectionalLight', 'SpotLight',
- 'Sampler', 'Texture', 'Material', 'MetallicRoughnessMaterial',
- 'Primitive', 'Mesh', 'Node', 'Scene', 'Renderer', 'Viewer',
- 'OffscreenRenderer', '__version__', 'RenderFlags', 'TextAlign',
- 'GLTF'
-]
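-
-
-# Illustrative usage sketch (not part of the original file): render a simple mesh
-# offscreen with the classes re-exported above. It assumes `trimesh` and `numpy`
-# are available and that a headless GL platform (e.g. PYOPENGL_PLATFORM=egl) works.
-if __name__ == '__main__':
-    import numpy as np
-    import trimesh
-
-    scene = Scene(ambient_light=0.3 * np.ones(3))
-    scene.add(Mesh.from_trimesh(trimesh.creation.icosphere(radius=0.5)))
-    scene.add(DirectionalLight(color=np.ones(3), intensity=3.0))
-
-    camera_pose = np.eye(4)
-    camera_pose[2, 3] = 2.0  # pull the camera back along +Z so the sphere is in view
-    scene.add(PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)
-
-    renderer = OffscreenRenderer(viewport_width=400, viewport_height=400)
-    color, depth = renderer.render(scene)
-    print(color.shape, depth.shape)
-    renderer.delete()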
diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/model.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/model.py
deleted file mode 100644
index 5263368a5e74d9d07840399469ca12a54e7fecbc..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/model.py
+++ /dev/null
@@ -1,295 +0,0 @@
-import functools
-
-import torch
-import torch.nn as nn
-
-
-class ActNorm(nn.Module):
- def __init__(self, num_features, logdet=False, affine=True,
- allow_reverse_init=False):
- assert affine
- super().__init__()
- self.logdet = logdet
- self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
- self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
- self.allow_reverse_init = allow_reverse_init
-
- self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
-
- def initialize(self, input):
- with torch.no_grad():
- flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
- mean = (
- flatten.mean(1)
- .unsqueeze(1)
- .unsqueeze(2)
- .unsqueeze(3)
- .permute(1, 0, 2, 3)
- )
- std = (
- flatten.std(1)
- .unsqueeze(1)
- .unsqueeze(2)
- .unsqueeze(3)
- .permute(1, 0, 2, 3)
- )
-
- self.loc.data.copy_(-mean)
- self.scale.data.copy_(1 / (std + 1e-6))
-
- def forward(self, input, reverse=False):
- if reverse:
- return self.reverse(input)
- if len(input.shape) == 2:
- input = input[:, :, None, None]
- squeeze = True
- else:
- squeeze = False
-
- _, _, height, width = input.shape
-
- if self.training and self.initialized.item() == 0:
- self.initialize(input)
- self.initialized.fill_(1)
-
- h = self.scale * (input + self.loc)
-
- if squeeze:
- h = h.squeeze(-1).squeeze(-1)
-
- if self.logdet:
- log_abs = torch.log(torch.abs(self.scale))
- logdet = height * width * torch.sum(log_abs)
- logdet = logdet * torch.ones(input.shape[0]).to(input)
- return h, logdet
-
- return h
-
- def reverse(self, output):
- if self.training and self.initialized.item() == 0:
- if not self.allow_reverse_init:
- raise RuntimeError(
- "Initializing ActNorm in reverse direction is "
- "disabled by default. Use allow_reverse_init=True to enable."
- )
- else:
- self.initialize(output)
- self.initialized.fill_(1)
-
- if len(output.shape) == 2:
- output = output[:, :, None, None]
- squeeze = True
- else:
- squeeze = False
-
- h = output / self.scale - self.loc
-
- if squeeze:
- h = h.squeeze(-1).squeeze(-1)
- return h
-
-def weights_init(m):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- nn.init.normal_(m.weight.data, 0.0, 0.02)
- elif classname.find('BatchNorm') != -1:
- nn.init.normal_(m.weight.data, 1.0, 0.02)
- nn.init.constant_(m.bias.data, 0)
-
-
-class NLayerDiscriminator(nn.Module):
- """Defines a PatchGAN discriminator as in Pix2Pix
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
- """
- def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
- """Construct a PatchGAN discriminator
- Parameters:
- input_nc (int) -- the number of channels in input images
- ndf (int) -- the number of filters in the last conv layer
- n_layers (int) -- the number of conv layers in the discriminator
- norm_layer -- normalization layer
- """
- super(NLayerDiscriminator, self).__init__()
- if not use_actnorm:
- norm_layer = nn.BatchNorm2d
- else:
- norm_layer = ActNorm
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
- use_bias = norm_layer.func != nn.BatchNorm2d
- else:
- use_bias = norm_layer != nn.BatchNorm2d
-
- kw = 4
- padw = 1
- sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
- nf_mult = 1
- nf_mult_prev = 1
- for n in range(1, n_layers): # gradually increase the number of filters
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n, 8)
- sequence += [
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n_layers, 8)
- sequence += [
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
- # output 1 channel prediction map
- sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
- self.main = nn.Sequential(*sequence)
-
- def forward(self, input):
- """Standard forward."""
- return self.main(input)
-
-class NLayerDiscriminator1dFeats(NLayerDiscriminator):
- """Defines a PatchGAN discriminator as in Pix2Pix
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
- """
- def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
- """Construct a PatchGAN discriminator
- Parameters:
- input_nc (int) -- the number of channels in input feats
- ndf (int) -- the number of filters in the last conv layer
- n_layers (int) -- the number of conv layers in the discriminator
- norm_layer -- normalization layer
- """
- super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
-
- if not use_actnorm:
- norm_layer = nn.BatchNorm1d
- else:
- norm_layer = ActNorm
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm has affine parameters
- use_bias = norm_layer.func != nn.BatchNorm1d
- else:
- use_bias = norm_layer != nn.BatchNorm1d
-
- kw = 4
- padw = 1
- sequence = [nn.Conv1d(input_nc, input_nc//2, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
- nf_mult = input_nc//2
- nf_mult_prev = 1
- for n in range(1, n_layers): # gradually decrease the number of filters
- nf_mult_prev = nf_mult
- nf_mult = max(nf_mult_prev // (2 ** n), 8)
- sequence += [
- nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
- norm_layer(nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- nf_mult_prev = nf_mult
- nf_mult = max(nf_mult_prev // (2 ** n), 8)
- sequence += [
- nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
- norm_layer(nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
- nf_mult_prev = nf_mult
- nf_mult = max(nf_mult_prev // (2 ** n), 8)
- sequence += [
- nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
- norm_layer(nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
- # output 1 channel prediction map
- sequence += [nn.Conv1d(nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
- self.main = nn.Sequential(*sequence)
-
-
-class NLayerDiscriminator1dSpecs(NLayerDiscriminator):
- """Defines a PatchGAN discriminator as in Pix2Pix
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
- """
- def __init__(self, input_nc=80, ndf=64, n_layers=3, use_actnorm=False):
- """Construct a PatchGAN discriminator
- Parameters:
- input_nc (int) -- the number of channels in input specs
- ndf (int) -- the number of filters in the last conv layer
- n_layers (int) -- the number of conv layers in the discriminator
- norm_layer -- normalization layer
- """
- super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
-
- if not use_actnorm:
- norm_layer = nn.BatchNorm1d
- else:
- norm_layer = ActNorm
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm has affine parameters
- use_bias = norm_layer.func != nn.BatchNorm1d
- else:
- use_bias = norm_layer != nn.BatchNorm1d
-
- kw = 4
- padw = 1
- sequence = [nn.Conv1d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
- nf_mult = 1
- nf_mult_prev = 1
-        for n in range(1, n_layers):  # gradually increase the number of filters
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n, 8)
- sequence += [
- nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n_layers, 8)
- sequence += [
- nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
- # output 1 channel prediction map
- sequence += [nn.Conv1d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
- self.main = nn.Sequential(*sequence)
-
- def forward(self, input):
- """Standard forward."""
- # (B, C, L)
- input = input.squeeze(1)
- input = self.main(input)
- return input
-
-
-if __name__ == '__main__':
- import torch
-
- ## FEATURES
- disc_in_channels = 2048
- disc_num_layers = 2
- use_actnorm = False
- disc_ndf = 64
- discriminator = NLayerDiscriminator1dFeats(input_nc=disc_in_channels, n_layers=disc_num_layers,
- use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
- inputs = torch.rand((6, 2048, 212))
- outputs = discriminator(inputs)
- print(outputs.shape)
-
- ## AUDIO
- disc_in_channels = 1
- disc_num_layers = 3
- use_actnorm = False
- disc_ndf = 64
- discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
- use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
- inputs = torch.rand((6, 1, 80, 848))
- outputs = discriminator(inputs)
- print(outputs.shape)
-
- ## IMAGE
- disc_in_channels = 3
- disc_num_layers = 3
- use_actnorm = False
- disc_ndf = 64
- discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
- use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
- inputs = torch.rand((6, 3, 256, 256))
- outputs = discriminator(inputs)
- print(outputs.shape)
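-
-    ## ACTNORM (illustrative check, not from the original file): a freshly built
-    ## ActNorm layer should initialize from its first batch and preserve the shape
-    actnorm = ActNorm(num_features=16, logdet=True)
-    inputs = torch.rand((6, 16, 32, 32))
-    outputs, logdet = actnorm(inputs)
-    print(outputs.shape, logdet.shape)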
diff --git a/spaces/AILab-CVC/SEED-Bench_Leaderboard/constants.py b/spaces/AILab-CVC/SEED-Bench_Leaderboard/constants.py
deleted file mode 100644
index d7b953f0e66dbc002b0bae8b88dd724a06578cfa..0000000000000000000000000000000000000000
--- a/spaces/AILab-CVC/SEED-Bench_Leaderboard/constants.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# this is .py for store constants
-MODEL_INFO = ["Model Type", "Model", "Language Model"]
-TASK_INFO = ["Scene Understanding", "Instance Identity", "Instance Attributes", "Instance Localization", "Instance Counting", "Spatial Relation", "Instance Interaction", "Visual Reasoning", "Text Recognition", "Avg. Img", "Action Recognition", "Action Prediction", "Procedure Understanding", "Avg. Video", "Avg. All"]
-TASK_INFO_v2 = ["Avg. All", "Avg. Img", "Avg. Video", "Scene Understanding", "Instance Identity", "Instance Attributes", "Instance Localization", "Instance Counting", "Spatial Relation", "Instance Interaction", "Visual Reasoning", "Text Recognition", "Action Recognition", "Action Prediction", "Procedure Understanding"]
-
-AVG_INFO = ["Avg. All", "Avg. Img", "Avg. Video"]
-DATA_TITILE_TYPE = ["markdown", "markdown", "markdown", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number"]
-CSV_DIR = "./file/result.csv"
-
-# COLUMN_NAMES = MODEL_INFO + TASK_INFO
-COLUMN_NAMES = MODEL_INFO + TASK_INFO_v2
-
-DATA_NUM = [3158, 1831, 4649, 978, 2447, 657, 97, 331, 85, 1740, 2077, 1192]
-
-UNTUNED_MODEL_RESULTS = '''LLM & Flan-T5 & Flan-T5-XL &23.0 &29.0 &32.8 &31.8 &20.5 &31.8 &33.0 &18.2 &19.4 &23.2 &34.9 &25.4 \\
- LLM & Vicuna & Vicuna-7B &23.4 &30.7 &29.7 &30.9 &30.8 &28.6 &29.8 &18.5 &13.4 &27.3 &34.5 &23.8 \\
- LLM & LLaMA & LLaMA-7B &26.3 &27.4 &26.2 &28.3 &25.1 &28.8 &19.2 &37.0 & 9.0 &33.0 &23.1 &26.2 \\
- ImageLLM & BLIP-2 & Flan-T5-XL &59.1 &53.9 &49.2 &42.3 &43.2 &36.7 &55.7 &45.6 &25.9 &32.6 &47.5 &24.0 \\
- ImageLLM & InstructBLIP & Flan-T5-XL &60.3 &58.5 &63.4 &40.6 &58.4 &38.7 &51.6 &45.9 &25.9 &33.1 &49.1 &27.1 \\
- ImageLLM & InstructBLIP-Vicuna & Vicuna-7B &60.2 &58.9 &65.6 &43.6 &57.2 &40.3 &52.6 &47.7 &43.5 &34.5 &49.6 &23.1 \\
- ImageLLM & LLaVA & LLaMA-7B &42.7 &34.9 &33.5 &28.4 &41.9 &30.8 &27.8 &46.8 &27.7 &29.7 &21.4 &19.1 \\
- ImageLLM & MiniGPT-4 & Flan-T5-XL &56.3 &49.2 &45.8 &37.9 &45.3 &32.6 &47.4 &57.1 &11.8 &38.2 &24.5 &27.1 \\
- ImageLLM & VPGTrans & LLaMA-7B &51.9 &44.1 &39.9 &36.1 &33.7 &36.4 &32.0 &53.2 &30.6 &39.5 &24.3 &31.9 \\
- ImageLLM & MultiModal-GPT & LLaMA-7B &43.6 &37.9 &31.5 &30.8 &27.3 &30.1 &29.9 &51.4 &18.8 &36.9 &25.8 &24.0 \\
- ImageLLM & Otter & LLaMA-7B &44.9 &38.6 &32.2 &30.9 &26.3 &31.8 &32.0 &51.4 &31.8 &37.9 &27.2 &24.8 \\
- ImageLLM & OpenFlamingo & LLaMA-7B &43.9 &38.1 &31.3 &30.1 &27.3 &30.6 &29.9 &50.2 &20.0 &37.2 &25.4 &24.2 \\
- ImageLLM & LLaMA-Adapter V2 & LLaMA-7B &45.2 &38.5 &29.3 &33.0 &29.7 &35.5 &39.2 &52.0 &24.7 &38.6 &18.5 &19.6 \\
- ImageLLM & GVT & Vicuna-7B &41.7 &35.5 &31.8 &29.5 &36.2 &32.0 &32.0 &51.1 &27.1 &33.9 &25.4 &23.0 \\
- ImageLLM & mPLUG-Owl & LLaMA-7B &49.7 &45.3 &32.5 &36.7 &27.3 &32.7 &44.3 &54.7 &28.8 &26.7 &17.9 &26.5 \\
- VideoLLM & VideoChat & Vicuna-7B &47.1 &43.8 &34.9 &40.0 &32.8 &34.6 &42.3 &50.5 &17.7 &34.9 &36.4 &27.3 \\
- VideoLLM & Video-ChatGPT & LLaMA-7B &37.2 &31.4 &33.2 &28.4 &35.5 &29.5 &23.7 &42.3 &25.9 &27.6 &21.3 &21.1 \\
- VideoLLM & Valley & LLaMA-13B &39.3 &32.9 &31.6 &27.9 &24.2 &30.1 &27.8 &43.8 &11.8 &31.3 &23.2 &20.7 \\'''
-
-
-LEADERBORAD_INTRODUCTION = """# SEED-Bench Leaderboard
-
- Welcome to the leaderboard of the SEED-Bench! 🏆
- SEED-Bench consists of 19K multiple-choice questions with accurate human annotations for evaluating Multimodal LLMs, covering 12 evaluation dimensions including both the spatial and temporal understanding.
- Please refer to [our paper](https://arxiv.org/abs/2307.16125) for more details.
- """
-
-SUBMIT_INTRODUCTION = """# Submit Introduction
- 1. Obtain JSON file from our [github repository](https://github.com/AILab-CVC/SEED-Bench#leaderboard-submit) after evaluation. For example, you can obtain InstructBLIP's JSON file as results/results.json after running
- ```shell
- python eval.py --model instruct_blip --anno_path SEED-Bench.json --output-dir results
- ```
- 2. If you want to update a model's performance by uploading new results, please ensure 'Revision Model Name' matches the model name shown in the leaderboard. For example, to modify InstructBLIP's performance, fill in 'InstructBLIP' in 'Revision Model Name'.
- 3. Please provide the correct link to your model's repository for each submission.
- 4. For the evaluation dimension, you can choose "All/Image/Video", and the results of dimensions that are not evaluated will be set to zero.
- 5. After clicking 'Submit Eval', you can click 'Refresh' to obtain the latest result in the leaderboard.
-
- ## Submit Example
- For example, if you want to upload InstructBLIP's result to the leaderboard, you need to:
- 1. Fill in 'InstructBLIP' in 'Model Name' if this is your first submission (you can leave 'Revision Model Name' blank).
- 2. Fill in 'InstructBLIP' in 'Revision Model Name' if you want to update your result (you can leave 'Model Name' blank).
- 3. Select 'ImageLLM' in 'Model Type'.
- 4. Fill in 'https://github.com/salesforce/LAVIS' in 'Model Link'.
- 5. Select 'Flan-T5-XL' in 'LLM Type'.
- 6. Select 'All' in 'Evaluation Dimension'.
- 7. Upload results.json.
- 8. Click the 'Submit Eval' button.
- 9. Click 'Refresh' to see the updated leaderboard.
-"""
-
-TABLE_INTRODUCTION = """In the table below, we summarize the per-task performance of all the models.
- We use accuracy (%) as the primary evaluation metric for each task.
- """
-
-LEADERBORAD_INFO = """
- Based on powerful Large Language Models (LLMs), recent generative Multimodal Large Language Models (MLLMs) have gained prominence as a pivotal research area, exhibiting remarkable capability for both comprehension and generation.
- In this work, we address the evaluation of generative comprehension in MLLMs as a preliminary step towards a comprehensive assessment of generative models, by introducing a benchmark named SEED-Bench.
- SEED-Bench consists of 19K multiple-choice questions with accurate human annotations (6x larger than existing benchmarks), spanning 12 evaluation dimensions that include the comprehension of both the image and video modalities.
- We develop an advanced pipeline for generating multiple-choice questions that target specific evaluation dimensions, integrating both automatic filtering and manual verification processes.
- Multiple-choice questions with ground-truth options derived from human annotation enable an objective and efficient assessment of model performance, eliminating the need for human or GPT intervention during evaluation.
- We further evaluate the performance of 18 models across all 12 dimensions, covering both spatial and temporal understanding.
- By revealing the limitations of existing MLLMs through evaluation results, we aim for SEED-Bench to provide insights for motivating future research.
-"""
-
-
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""@article{li2023seed,
- title={SEED-Bench: Benchmarking Multimodal LLMs with Generative Comprehension},
- author={Li, Bohao and Wang, Rui and Wang, Guangzhi and Ge, Yuying and Ge, Yixiao and Shan, Ying},
- journal={arXiv preprint arXiv:2307.16125},
- year={2023}
-}"""
diff --git a/spaces/AIQuest/lungCancerVgg19/app.py b/spaces/AIQuest/lungCancerVgg19/app.py
deleted file mode 100644
index 15b5b4b16d538626b570b57a3e174af563c2a3c2..0000000000000000000000000000000000000000
--- a/spaces/AIQuest/lungCancerVgg19/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Custom helper that preprocesses the input image to shape (150, 150, 3)
-
-import numpy as np
-from tensorflow.keras.preprocessing import image
-from PIL import Image
-import gradio as gr
-from keras.models import load_model
-
-def custom_Image_preprocessing(image_data, target_size=(150, 150)):
- img = image.array_to_img(image_data, data_format='channels_last')
- img = img.resize(target_size) # Resize the image if needed
- img_arr = image.img_to_array(img)
- img_arr = img_arr * 1./255
- img_arr = np.expand_dims(img_arr, axis=0)
- return img_arr
-
-# Function that runs the trained model on a user-supplied image
-
-def image_predict(image_path):
- model = load_model("Second_model.h5")
- image_preprocess = custom_Image_preprocessing(image_path)
- result = model.predict(image_preprocess)  # sigmoid output: probability of the positive (cancer) class
- if result[0][0] <= 0.5:
-     # report the negative-class confidence, i.e. 1 - P(positive)
-     return 'Negative', round((1 - result[0][0]) * 100, 2), '%'
- else:
-     return 'Positive', round(result[0][0] * 100, 2), '%'
-
-
-# Define Gradio interface
-input_component = gr.components.Image(label = "Upload the X-Ray")
-output_component = gr.components.Textbox(label = "Result")
-interface = gr.Interface(fn=image_predict, inputs=input_component, outputs=output_component, title="Lung Cancer X-Ray Classification", description="This web app analyzes chest X-ray images and predicts whether they show symptoms of lung cancer.")
-interface.launch()
-
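-# --- Usage sketch (illustrative; not part of the original app) ---
-# A minimal sketch of calling the classifier directly, without the Gradio UI.
-# "sample_xray.png" is a hypothetical local image; "Second_model.h5" is the
-# weights file already referenced above and is assumed to be present.
-#
-#   from PIL import Image
-#   import numpy as np
-#
-#   sample = np.asarray(Image.open("sample_xray.png").convert("RGB"))
-#   label, confidence, unit = image_predict(sample)  # -> ('Positive' or 'Negative', score, '%')
-#   print(label, confidence, unit)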
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AlekseyKorshuk/gai-project/README.md b/spaces/AlekseyKorshuk/gai-project/README.md
deleted file mode 100644
index 9c5e885ea53405183fb926e2469b1505b4c25e5a..0000000000000000000000000000000000000000
--- a/spaces/AlekseyKorshuk/gai-project/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Gai Project
-emoji: 📈
-colorFrom: blue
-colorTo: blue
-sdk: gradio
-sdk_version: 3.50.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/korean.py b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/korean.py
deleted file mode 100644
index edee07429a450c55e3d8e246997faaa1e0b89cc9..0000000000000000000000000000000000000000
--- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/korean.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import re
-from jamo import h2j, j2hcj
-import ko_pron
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (ipa, lazy ipa) pairs:
-_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('t͡ɕ','ʧ'),
- ('d͡ʑ','ʥ'),
- ('ɲ','n^'),
- ('ɕ','ʃ'),
- ('ʷ','w'),
- ('ɭ','l`'),
- ('ʎ','ɾ'),
- ('ɣ','ŋ'),
- ('ɰ','ɯ'),
- ('ʝ','j'),
- ('ʌ','ə'),
- ('ɡ','g'),
- ('\u031a','#'),
- ('\u0348','='),
- ('\u031e',''),
- ('\u0320',''),
- ('\u0339','')
-]]
-
-
-def latin_to_hangul(text):
- for regex, replacement in _latin_to_hangul:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def divide_hangul(text):
- text = j2hcj(h2j(text))
- for regex, replacement in _hangul_divided:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def hangul_number(num, sino=True):
- '''Reference https://github.com/Kyubyong/g2pK'''
- num = re.sub(',', '', num)
-
- if num == '0':
- return '영'
- if not sino and num == '20':
- return '스무'
-
- digits = '123456789'
- names = '일이삼사오육칠팔구'
- digit2name = {d: n for d, n in zip(digits, names)}
-
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
- spelledout = []
- for i, digit in enumerate(num):
- i = len(num) - i - 1
- if sino:
- if i == 0:
- name = digit2name.get(digit, '')
- elif i == 1:
- name = digit2name.get(digit, '') + '십'
- name = name.replace('일십', '십')
- else:
- if i == 0:
- name = digit2mod.get(digit, '')
- elif i == 1:
- name = digit2dec.get(digit, '')
- if digit == '0':
- if i % 4 == 0:
- last_three = spelledout[-min(3, len(spelledout)):]
- if ''.join(last_three) == '':
- spelledout.append('')
- continue
- else:
- spelledout.append('')
- continue
- if i == 2:
- name = digit2name.get(digit, '') + '백'
- name = name.replace('일백', '백')
- elif i == 3:
- name = digit2name.get(digit, '') + '천'
- name = name.replace('일천', '천')
- elif i == 4:
- name = digit2name.get(digit, '') + '만'
- name = name.replace('일만', '만')
- elif i == 5:
- name = digit2name.get(digit, '') + '십'
- name = name.replace('일십', '십')
- elif i == 6:
- name = digit2name.get(digit, '') + '백'
- name = name.replace('일백', '백')
- elif i == 7:
- name = digit2name.get(digit, '') + '천'
- name = name.replace('일천', '천')
- elif i == 8:
- name = digit2name.get(digit, '') + '억'
- elif i == 9:
- name = digit2name.get(digit, '') + '십'
- elif i == 10:
- name = digit2name.get(digit, '') + '백'
- elif i == 11:
- name = digit2name.get(digit, '') + '천'
- elif i == 12:
- name = digit2name.get(digit, '') + '조'
- elif i == 13:
- name = digit2name.get(digit, '') + '십'
- elif i == 14:
- name = digit2name.get(digit, '') + '백'
- elif i == 15:
- name = digit2name.get(digit, '') + '천'
- spelledout.append(name)
- return ''.join(elem for elem in spelledout)
-
-
-def number_to_hangul(text):
- '''Reference https://github.com/Kyubyong/g2pK'''
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
- for token in tokens:
- num, classifier = token
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
- spelledout = hangul_number(num, sino=False)
- else:
- spelledout = hangul_number(num, sino=True)
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
- # digit by digit for remaining digits
- digits = '0123456789'
- names = '영일이삼사오육칠팔구'
- for d, n in zip(digits, names):
- text = text.replace(d, n)
- return text
-
-
-def korean_to_lazy_ipa(text):
- text = latin_to_hangul(text)
- text = number_to_hangul(text)
- text = re.sub('[\uac00-\ud7af]+', lambda x: ko_pron.romanise(x.group(0), 'ipa').split('] ~ [')[0], text)
- for regex, replacement in _ipa_to_lazy_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def korean_to_ipa(text):
- text = korean_to_lazy_ipa(text)
- return text.replace('ʧ','tʃ').replace('ʥ','dʑ')
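-
-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# A minimal sketch of the helpers above, assuming `jamo` and `ko_pron` are installed.
-# The first two expected outputs follow directly from the tables defined in this file:
-#   latin_to_hangul('abc')     # -> '에이비시'
-#   number_to_hangul('2개')     # -> '두개' (native Korean numeral before the classifier '개')
-#   korean_to_ipa('한국어')      # -> an IPA-style romanisation produced via ko_pron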
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index b0672b687ade8d554b71fdf0bc54de9f024fa30c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
- '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
-]
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/sync_bn.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/sync_bn.py
deleted file mode 100644
index c9b016fcbe860989c56cd1040034bcfa60e146d2..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/sync_bn.py
+++ /dev/null
@@ -1,279 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.distributed as dist
-import torch.nn.functional as F
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.module import Module
-from torch.nn.parameter import Parameter
-
-from annotator.uniformer.mmcv.cnn import NORM_LAYERS
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
- 'sync_bn_forward_mean', 'sync_bn_forward_var', 'sync_bn_forward_output',
- 'sync_bn_backward_param', 'sync_bn_backward_data'
-])
-
-
-class SyncBatchNormFunction(Function):
-
- @staticmethod
- def symbolic(g, input, running_mean, running_var, weight, bias, momentum,
- eps, group, group_size, stats_mode):
- return g.op(
- 'mmcv::MMCVSyncBatchNorm',
- input,
- running_mean,
- running_var,
- weight,
- bias,
- momentum_f=momentum,
- eps_f=eps,
- group_i=group,
- group_size_i=group_size,
- stats_mode=stats_mode)
-
- @staticmethod
- def forward(self, input, running_mean, running_var, weight, bias, momentum,
- eps, group, group_size, stats_mode):
- self.momentum = momentum
- self.eps = eps
- self.group = group
- self.group_size = group_size
- self.stats_mode = stats_mode
-
- assert isinstance(
- input, (torch.HalfTensor, torch.FloatTensor,
- torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \
- f'only support Half or Float Tensor, but {input.type()}'
- output = torch.zeros_like(input)
- input3d = input.flatten(start_dim=2)
- output3d = output.view_as(input3d)
- num_channels = input3d.size(1)
-
- # ensure mean/var/norm/std are initialized as zeros
- # ``torch.empty()`` does not guarantee that
- mean = torch.zeros(
- num_channels, dtype=torch.float, device=input3d.device)
- var = torch.zeros(
- num_channels, dtype=torch.float, device=input3d.device)
- norm = torch.zeros_like(
- input3d, dtype=torch.float, device=input3d.device)
- std = torch.zeros(
- num_channels, dtype=torch.float, device=input3d.device)
-
- batch_size = input3d.size(0)
- if batch_size > 0:
- ext_module.sync_bn_forward_mean(input3d, mean)
- batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype)
- else:
- # skip updating mean and leave it as zeros when the input is empty
- batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype)
-
- # synchronize mean and the batch flag
- vec = torch.cat([mean, batch_flag])
- if self.stats_mode == 'N':
- vec *= batch_size
- if self.group_size > 1:
- dist.all_reduce(vec, group=self.group)
- total_batch = vec[-1].detach()
- mean = vec[:num_channels]
-
- if self.stats_mode == 'default':
- mean = mean / self.group_size
- elif self.stats_mode == 'N':
- mean = mean / total_batch.clamp(min=1)
- else:
- raise NotImplementedError
-
- # leave var as zeros when the input is empty
- if batch_size > 0:
- ext_module.sync_bn_forward_var(input3d, mean, var)
-
- if self.stats_mode == 'N':
- var *= batch_size
- if self.group_size > 1:
- dist.all_reduce(var, group=self.group)
-
- if self.stats_mode == 'default':
- var /= self.group_size
- elif self.stats_mode == 'N':
- var /= total_batch.clamp(min=1)
- else:
- raise NotImplementedError
-
- # if the total batch size over all the ranks is zero,
- # we should not update the statistics in the current batch
- update_flag = total_batch.clamp(max=1)
- momentum = update_flag * self.momentum
- ext_module.sync_bn_forward_output(
- input3d,
- mean,
- var,
- weight,
- bias,
- running_mean,
- running_var,
- norm,
- std,
- output3d,
- eps=self.eps,
- momentum=momentum,
- group_size=self.group_size)
- self.save_for_backward(norm, std, weight)
- return output
-
- @staticmethod
- @once_differentiable
- def backward(self, grad_output):
- norm, std, weight = self.saved_tensors
- grad_weight = torch.zeros_like(weight)
- grad_bias = torch.zeros_like(weight)
- grad_input = torch.zeros_like(grad_output)
- grad_output3d = grad_output.flatten(start_dim=2)
- grad_input3d = grad_input.view_as(grad_output3d)
-
- batch_size = grad_input3d.size(0)
- if batch_size > 0:
- ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight,
- grad_bias)
-
- # all reduce
- if self.group_size > 1:
- dist.all_reduce(grad_weight, group=self.group)
- dist.all_reduce(grad_bias, group=self.group)
- grad_weight /= self.group_size
- grad_bias /= self.group_size
-
- if batch_size > 0:
- ext_module.sync_bn_backward_data(grad_output3d, weight,
- grad_weight, grad_bias, norm, std,
- grad_input3d)
-
- return grad_input, None, None, grad_weight, grad_bias, \
- None, None, None, None, None
-
-
-@NORM_LAYERS.register_module(name='MMSyncBN')
-class SyncBatchNorm(Module):
- """Synchronized Batch Normalization.
-
- Args:
- num_features (int): number of features/channels in the input tensor
- eps (float, optional): a value added to the denominator for numerical
- stability. Defaults to 1e-5.
- momentum (float, optional): the value used for the running_mean and
- running_var computation. Defaults to 0.1.
- affine (bool, optional): whether to use learnable affine parameters.
- Defaults to True.
- track_running_stats (bool, optional): whether to track the running
- mean and variance during training. When set to False, this
- module does not track such statistics, and initializes statistics
- buffers ``running_mean`` and ``running_var`` as ``None``. When
- these buffers are ``None``, this module always uses batch
- statistics in both training and eval modes. Defaults to True.
- group (int, optional): synchronization of stats happens within
- each process group individually. By default it is synchronization
- across the whole world. Defaults to None.
- stats_mode (str, optional): The statistical mode. Available options
- includes ``'default'`` and ``'N'``. Defaults to 'default'.
- When ``stats_mode=='default'``, it computes the overall statistics
- using those from each worker with equal weight, i.e., the
- statistics are synchronized and simply divided by ``group``. This
- mode will produce inaccurate statistics when empty tensors occur.
- When ``stats_mode=='N'``, it computes the overall statistics using
- the total number of batches in each worker, ignoring the number of
- groups, i.e., the statistics are synchronized and then divided by
- the total batch size ``N``. This mode is beneficial when empty tensors
- occur during training, as it averages the total mean by the real
- number of batches.
- """
-
- def __init__(self,
- num_features,
- eps=1e-5,
- momentum=0.1,
- affine=True,
- track_running_stats=True,
- group=None,
- stats_mode='default'):
- super(SyncBatchNorm, self).__init__()
- self.num_features = num_features
- self.eps = eps
- self.momentum = momentum
- self.affine = affine
- self.track_running_stats = track_running_stats
- group = dist.group.WORLD if group is None else group
- self.group = group
- self.group_size = dist.get_world_size(group)
- assert stats_mode in ['default', 'N'], \
- f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
- self.stats_mode = stats_mode
- if self.affine:
- self.weight = Parameter(torch.Tensor(num_features))
- self.bias = Parameter(torch.Tensor(num_features))
- else:
- self.register_parameter('weight', None)
- self.register_parameter('bias', None)
- if self.track_running_stats:
- self.register_buffer('running_mean', torch.zeros(num_features))
- self.register_buffer('running_var', torch.ones(num_features))
- self.register_buffer('num_batches_tracked',
- torch.tensor(0, dtype=torch.long))
- else:
- self.register_buffer('running_mean', None)
- self.register_buffer('running_var', None)
- self.register_buffer('num_batches_tracked', None)
- self.reset_parameters()
-
- def reset_running_stats(self):
- if self.track_running_stats:
- self.running_mean.zero_()
- self.running_var.fill_(1)
- self.num_batches_tracked.zero_()
-
- def reset_parameters(self):
- self.reset_running_stats()
- if self.affine:
- self.weight.data.uniform_() # pytorch use ones_()
- self.bias.data.zero_()
-
- def forward(self, input):
- if input.dim() < 2:
- raise ValueError(
- f'expected at least 2D input, got {input.dim()}D input')
- if self.momentum is None:
- exponential_average_factor = 0.0
- else:
- exponential_average_factor = self.momentum
-
- if self.training and self.track_running_stats:
- if self.num_batches_tracked is not None:
- self.num_batches_tracked += 1
- if self.momentum is None: # use cumulative moving average
- exponential_average_factor = 1.0 / float(
- self.num_batches_tracked)
- else: # use exponential moving average
- exponential_average_factor = self.momentum
-
- if self.training or not self.track_running_stats:
- return SyncBatchNormFunction.apply(
- input, self.running_mean, self.running_var, self.weight,
- self.bias, exponential_average_factor, self.eps, self.group,
- self.group_size, self.stats_mode)
- else:
- return F.batch_norm(input, self.running_mean, self.running_var,
- self.weight, self.bias, False,
- exponential_average_factor, self.eps)
-
- def __repr__(self):
- s = self.__class__.__name__
- s += f'({self.num_features}, '
- s += f'eps={self.eps}, '
- s += f'momentum={self.momentum}, '
- s += f'affine={self.affine}, '
- s += f'track_running_stats={self.track_running_stats}, '
- s += f'group_size={self.group_size},'
- s += f'stats_mode={self.stats_mode})'
- return s
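-
-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# A minimal sketch, assuming torch.distributed is already initialised (e.g. via a
-# DDP launcher). 'default' averages per-worker statistics with equal weight, while
-# 'N' weights them by per-worker batch size, which is safer when some workers may
-# receive empty batches:
-#   bn = SyncBatchNorm(num_features=64, stats_mode='N').cuda()
-#   y = bn(torch.randn(8, 64, 32, 32, device='cuda'))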
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/_functions.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/_functions.py
deleted file mode 100644
index 9b5a8a44483ab991411d07122b22a1d027e4be8e..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/_functions.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-from torch.nn.parallel._functions import _get_stream
-
-
-def scatter(input, devices, streams=None):
- """Scatters tensor across multiple GPUs."""
- if streams is None:
- streams = [None] * len(devices)
-
- if isinstance(input, list):
- chunk_size = (len(input) - 1) // len(devices) + 1
- outputs = [
- scatter(input[i], [devices[i // chunk_size]],
- [streams[i // chunk_size]]) for i in range(len(input))
- ]
- return outputs
- elif isinstance(input, torch.Tensor):
- output = input.contiguous()
- # TODO: copy to a pinned buffer first (if copying from CPU)
- stream = streams[0] if output.numel() > 0 else None
- if devices != [-1]:
- with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
- output = output.cuda(devices[0], non_blocking=True)
- else:
- # unsqueeze the first dimension thus the tensor's shape is the
- # same as those scattered with GPU.
- output = output.unsqueeze(0)
- return output
- else:
- raise Exception(f'Unknown type {type(input)}.')
-
-
-def synchronize_stream(output, devices, streams):
- if isinstance(output, list):
- chunk_size = len(output) // len(devices)
- for i in range(len(devices)):
- for j in range(chunk_size):
- synchronize_stream(output[i * chunk_size + j], [devices[i]],
- [streams[i]])
- elif isinstance(output, torch.Tensor):
- if output.numel() != 0:
- with torch.cuda.device(devices[0]):
- main_stream = torch.cuda.current_stream()
- main_stream.wait_stream(streams[0])
- output.record_stream(main_stream)
- else:
- raise Exception(f'Unknown type {type(output)}.')
-
-
-def get_input_device(input):
- if isinstance(input, list):
- for item in input:
- input_device = get_input_device(item)
- if input_device != -1:
- return input_device
- return -1
- elif isinstance(input, torch.Tensor):
- return input.get_device() if input.is_cuda else -1
- else:
- raise Exception(f'Unknown type {type(input)}.')
-
-
-class Scatter:
-
- @staticmethod
- def forward(target_gpus, input):
- input_device = get_input_device(input)
- streams = None
- if input_device == -1 and target_gpus != [-1]:
- # Perform CPU to GPU copies in a background stream
- streams = [_get_stream(device) for device in target_gpus]
-
- outputs = scatter(input, target_gpus, streams)
- # Synchronize with the copy stream
- if streams is not None:
- synchronize_stream(outputs, target_gpus, streams)
-
- return tuple(outputs)
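-
-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# Scatter.forward copies CPU inputs to the target GPUs on background streams
-# and synchronises before returning. A minimal sketch, assuming two visible GPUs:
-#   chunks = Scatter.forward([0, 1], [torch.randn(4, 3), torch.randn(4, 3)])
-#   # -> a tuple with one tensor per target GPU (cuda:0 and cuda:1)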
diff --git a/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/visualize.py b/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/visualize.py
deleted file mode 100644
index 02d2ef0ef0abd463d168401a4057f177af900322..0000000000000000000000000000000000000000
--- a/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/visualize.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import cv2
-import gc
-import requests
-from io import BytesIO
-import base64
-from scipy import misc
-from PIL import Image
-from matplotlib.axes import Axes
-from matplotlib.figure import Figure
-from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
-from typing import Tuple
-
-import torch
-from fastai.core import *
-from fastai.vision import *
-
-from .filters import IFilter, MasterFilter, ColorizerFilter
-from .generators import gen_inference_deep, gen_inference_wide
-
-
-
-# class LoadedModel
-class ModelImageVisualizer:
- def __init__(self, filter: IFilter, results_dir: str = None):
- self.filter = filter
- self.results_dir = None if results_dir is None else Path(results_dir)
- if self.results_dir is not None:
-     self.results_dir.mkdir(parents=True, exist_ok=True)
-
- def _clean_mem(self):
- torch.cuda.empty_cache()
- # gc.collect()
-
- def _open_pil_image(self, path: Path) -> Image:
- return Image.open(path).convert('RGB')
-
- def _get_image_from_url(self, url: str) -> Image:
- response = requests.get(url, timeout=30, headers={'Accept': '*/*;q=0.8'})
- img = Image.open(BytesIO(response.content)).convert('RGB')
- return img
-
- def plot_transformed_image_from_url(
- self,
- url: str,
- path: str = 'test_images/image.png',
- results_dir:Path = None,
- figsize: Tuple[int, int] = (20, 20),
- render_factor: int = None,
-
- display_render_factor: bool = False,
- compare: bool = False,
- post_process: bool = True,
- watermarked: bool = True,
- ) -> Path:
- img = self._get_image_from_url(url)
- img.save(path)
- return self.plot_transformed_image(
- path=path,
- results_dir=results_dir,
- figsize=figsize,
- render_factor=render_factor,
- display_render_factor=display_render_factor,
- compare=compare,
- post_process = post_process,
- watermarked=watermarked,
- )
-
- def plot_transformed_image(
- self,
- path: str,
- results_dir:Path = None,
- figsize: Tuple[int, int] = (20, 20),
- render_factor: int = None,
- display_render_factor: bool = False,
- compare: bool = False,
- post_process: bool = True,
- watermarked: bool = True,
- ) -> Path:
- path = Path(path)
- if results_dir is None:
- results_dir = Path(self.results_dir)
- result = self.get_transformed_image(
- path, render_factor, post_process=post_process,watermarked=watermarked
- )
- orig = self._open_pil_image(path)
- if compare:
- self._plot_comparison(
- figsize, render_factor, display_render_factor, orig, result
- )
- else:
- self._plot_solo(figsize, render_factor, display_render_factor, result)
-
- orig.close()
- result_path = self._save_result_image(path, result, results_dir=results_dir)
- result.close()
- return result_path
-
- def plot_transformed_pil_image(
- self,
- input_image: Image,
- figsize: Tuple[int, int] = (20, 20),
- render_factor: int = None,
- display_render_factor: bool = False,
- compare: bool = False,
- post_process: bool = True,
- ) -> Image:
-
- result = self.get_transformed_pil_image(
- input_image, render_factor, post_process=post_process
- )
-
- if compare:
- self._plot_comparison(
- figsize, render_factor, display_render_factor, input_image, result
- )
- else:
- self._plot_solo(figsize, render_factor, display_render_factor, result)
-
- return result
-
- def _plot_comparison(
- self,
- figsize: Tuple[int, int],
- render_factor: int,
- display_render_factor: bool,
- orig: Image,
- result: Image,
- ):
- fig, axes = plt.subplots(1, 2, figsize=figsize)
- self._plot_image(
- orig,
- axes=axes[0],
- figsize=figsize,
- render_factor=render_factor,
- display_render_factor=False,
- )
- self._plot_image(
- result,
- axes=axes[1],
- figsize=figsize,
- render_factor=render_factor,
- display_render_factor=display_render_factor,
- )
-
- def _plot_solo(
- self,
- figsize: Tuple[int, int],
- render_factor: int,
- display_render_factor: bool,
- result: Image,
- ):
- fig, axes = plt.subplots(1, 1, figsize=figsize)
- self._plot_image(
- result,
- axes=axes,
- figsize=figsize,
- render_factor=render_factor,
- display_render_factor=display_render_factor,
- )
-
- def _save_result_image(self, source_path: Path, image: Image, results_dir = None) -> Path:
- if results_dir is None:
- results_dir = Path(self.results_dir)
- result_path = results_dir / source_path.name
- image.save(result_path)
- return result_path
-
- def get_transformed_image(
- self, path: Path, render_factor: int = None, post_process: bool = True,
- watermarked: bool = True,
- ) -> Image:
- self._clean_mem()
- orig_image = self._open_pil_image(path)
- filtered_image = self.filter.filter(
- orig_image, orig_image, render_factor=render_factor,post_process=post_process
- )
-
- return filtered_image
-
- def get_transformed_pil_image(
- self, input_image: Image, render_factor: int = None, post_process: bool = True,
- ) -> Image:
- self._clean_mem()
- filtered_image = self.filter.filter(
- input_image, input_image, render_factor=render_factor,post_process=post_process
- )
-
- return filtered_image
-
- def _plot_image(
- self,
- image: Image,
- render_factor: int,
- axes: Axes = None,
- figsize=(20, 20),
- display_render_factor = False,
- ):
- if axes is None:
- _, axes = plt.subplots(figsize=figsize)
- axes.imshow(np.asarray(image) / 255)
- axes.axis('off')
- if render_factor is not None and display_render_factor:
- plt.text(
- 10,
- 10,
- 'render_factor: ' + str(render_factor),
- color='white',
- backgroundcolor='black',
- )
-
- def _get_num_rows_columns(self, num_images: int, max_columns: int) -> Tuple[int, int]:
- columns = min(num_images, max_columns)
- rows = num_images // columns
- rows = rows if rows * columns == num_images else rows + 1
- return rows, columns
-
-
-def get_image_colorizer(
- root_folder: Path = Path('./'), render_factor: int = 35, artistic: bool = True
-) -> ModelImageVisualizer:
- if artistic:
- return get_artistic_image_colorizer(root_folder=root_folder, render_factor=render_factor)
- else:
- return get_stable_image_colorizer(root_folder=root_folder, render_factor=render_factor)
-
-
-def get_stable_image_colorizer(
- root_folder: Path = Path('./'),
- weights_name: str = 'ColorizeStable_gen',
- results_dir='output',
- render_factor: int = 35
-) -> ModelImageVisualizer:
- learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
- filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
- vis = ModelImageVisualizer(filtr, results_dir=results_dir)
- return vis
-
-
-def get_artistic_image_colorizer(
- root_folder: Path = Path('./'),
- weights_name: str = 'ColorizeArtistic_gen',
- results_dir='output',
- render_factor: int = 35
-) -> ModelImageVisualizer:
- learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
- filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
- vis = ModelImageVisualizer(filtr, results_dir=results_dir)
- return vis
\ No newline at end of file
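-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# A minimal sketch, assuming the pretrained DeOldify weights referenced above
-# (e.g. ColorizeArtistic_gen) are available under `root_folder` and that a local
-# file "old_photo.jpg" exists (both names are illustrative):
-#   colorizer = get_image_colorizer(artistic=True, render_factor=35)
-#   result_path = colorizer.plot_transformed_image('old_photo.jpg', render_factor=35, compare=True)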
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py
deleted file mode 100644
index 9a89a838b9a5cb264e9ae9d269fbedca6e2d6333..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from pip._internal.distributions.base import AbstractDistribution
-from pip._internal.distributions.sdist import SourceDistribution
-from pip._internal.distributions.wheel import WheelDistribution
-from pip._internal.req.req_install import InstallRequirement
-
-
-def make_distribution_for_install_requirement(
- install_req: InstallRequirement,
-) -> AbstractDistribution:
- """Returns a Distribution for the given InstallRequirement"""
- # Editable requirements will always be source distributions. They use the
- # legacy logic until we create a modern standard for them.
- if install_req.editable:
- return SourceDistribution(install_req)
-
- # If it's a wheel, it's a WheelDistribution
- if install_req.is_wheel:
- return WheelDistribution(install_req)
-
- # Otherwise, a SourceDistribution
- return SourceDistribution(install_req)
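-
-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# A sketch of the dispatch above, assuming pip's internal helper
-# `install_req_from_line` (an internal API that may change between pip versions):
-#   from pip._internal.req.constructors import install_req_from_line
-#   dist = make_distribution_for_install_requirement(install_req_from_line("requests"))
-#   # -> a SourceDistribution here, or a WheelDistribution for a wheel link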
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
deleted file mode 100644
index 917fa065b3c7feccdef5bc666a5109c855217260..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""This is a subpackage because the directory is on sys.path for _in_process.py
-
-The subpackage should stay as empty as possible to avoid shadowing modules that
-the backend might import.
-"""
-
-import importlib.resources as resources
-
-try:
- resources.files
-except AttributeError:
- # Python 3.8 compatibility
- def _in_proc_script_path():
- return resources.path(__package__, '_in_process.py')
-else:
- def _in_proc_script_path():
- return resources.as_file(
- resources.files(__package__).joinpath('_in_process.py'))
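-
-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# Both branches return a context manager that yields a filesystem path to the
-# in-process script:
-#   with _in_proc_script_path() as script:
-#       print(script)  # e.g. .../pyproject_hooks/_in_process/_in_process.py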
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/status.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/status.py
deleted file mode 100644
index 09eff405ec194ee2884f203cb48c5df54ff0b9c7..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/status.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from types import TracebackType
-from typing import Optional, Type
-
-from .console import Console, RenderableType
-from .jupyter import JupyterMixin
-from .live import Live
-from .spinner import Spinner
-from .style import StyleType
-
-
-class Status(JupyterMixin):
- """Displays a status indicator with a 'spinner' animation.
-
- Args:
- status (RenderableType): A status renderable (str or Text typically).
- console (Console, optional): Console instance to use, or None for global console. Defaults to None.
- spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
- spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
- speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
- refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
- """
-
- def __init__(
- self,
- status: RenderableType,
- *,
- console: Optional[Console] = None,
- spinner: str = "dots",
- spinner_style: StyleType = "status.spinner",
- speed: float = 1.0,
- refresh_per_second: float = 12.5,
- ):
- self.status = status
- self.spinner_style = spinner_style
- self.speed = speed
- self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed)
- self._live = Live(
- self.renderable,
- console=console,
- refresh_per_second=refresh_per_second,
- transient=True,
- )
-
- @property
- def renderable(self) -> Spinner:
- return self._spinner
-
- @property
- def console(self) -> "Console":
- """Get the Console used by the Status objects."""
- return self._live.console
-
- def update(
- self,
- status: Optional[RenderableType] = None,
- *,
- spinner: Optional[str] = None,
- spinner_style: Optional[StyleType] = None,
- speed: Optional[float] = None,
- ) -> None:
- """Update status.
-
- Args:
- status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None.
- spinner (Optional[str], optional): New spinner or None for no change. Defaults to None.
- spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None.
- speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None.
- """
- if status is not None:
- self.status = status
- if spinner_style is not None:
- self.spinner_style = spinner_style
- if speed is not None:
- self.speed = speed
- if spinner is not None:
- self._spinner = Spinner(
- spinner, text=self.status, style=self.spinner_style, speed=self.speed
- )
- self._live.update(self.renderable, refresh=True)
- else:
- self._spinner.update(
- text=self.status, style=self.spinner_style, speed=self.speed
- )
-
- def start(self) -> None:
- """Start the status animation."""
- self._live.start()
-
- def stop(self) -> None:
- """Stop the spinner animation."""
- self._live.stop()
-
- def __rich__(self) -> RenderableType:
- return self.renderable
-
- def __enter__(self) -> "Status":
- self.start()
- return self
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- self.stop()
-
-
-if __name__ == "__main__": # pragma: no cover
-
- from time import sleep
-
- from .console import Console
-
- console = Console()
- with console.status("[magenta]Covid detector booting up") as status:
- sleep(3)
- console.log("Importing advanced AI")
- sleep(3)
- console.log("Advanced Covid AI Ready")
- sleep(3)
- status.update(status="[bold blue] Scanning for Covid", spinner="earth")
- sleep(3)
- console.log("Found 10,000,000,000 copies of Covid32.exe")
- sleep(3)
- status.update(
- status="[bold red]Moving Covid32.exe to Trash",
- spinner="bouncingBall",
- spinner_style="yellow",
- )
- sleep(5)
- console.print("[bold green]Covid deleted successfully")
diff --git a/spaces/Audio-AGI/AudioSep/models/CLAP/training/params.py b/spaces/Audio-AGI/AudioSep/models/CLAP/training/params.py
deleted file mode 100644
index 0cc1a0e2d982e900988cf5a4b24b2e59b093537b..0000000000000000000000000000000000000000
--- a/spaces/Audio-AGI/AudioSep/models/CLAP/training/params.py
+++ /dev/null
@@ -1,563 +0,0 @@
-import argparse
-
-
-def get_default_params(model_name):
- # Params from paper (https://arxiv.org/pdf/2103.00020.pdf)
- model_name = model_name.lower()
- if "vit" in model_name:
- return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
- else:
- return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
-
-
-def parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--train-data",
- type=str,
- default=None,
- help="Path to h5 filewith training data",
- )
- parser.add_argument(
- "--val-data",
- type=str,
- default=None,
- help="Path to h5 file with validation data",
- )
- parser.add_argument(
- "--freeze-text",
- default=False,
- action="store_true",
- help="if you need to freeze the text encoder, make this True",
- )
- parser.add_argument(
- "--freeze-text-after",
- type=int,
- default=-1,
- help="if you need to freeze the text encoder after (include) epoch x, set this param to x. Set -1 to disable it",
- )
- parser.add_argument(
- "--train-ipc",
- type=str,
- default=None,
- help="Path to npy file of the number of instance per class in training data",
- )
- parser.add_argument(
- "--val-ipc",
- type=str,
- default=None,
- help="Path to npy file of the number of instance per class in validation data",
- )
- parser.add_argument(
- "--train-num-samples",
- type=int,
- default=None,
- help="Number of samples in dataset. Required for webdataset if not available in info file.",
- )
- parser.add_argument(
- "--val-num-samples",
- type=int,
- default=None,
- help="Number of samples in dataset. Useful for webdataset if not available in info file.",
- )
- parser.add_argument(
- "--dataset-type",
- choices=["webdataset", "csv", "auto", "toy"],
- default="auto",
- help="Which type of dataset to process.",
- )
- parser.add_argument(
- "--csv-separator",
- type=str,
- default="\t",
- help="For csv-like datasets, which separator to use.",
- )
- parser.add_argument(
- "--csv-img-key",
- type=str,
- default="filepath",
- help="For csv-like datasets, the name of the key for the image paths.",
- )
- parser.add_argument(
- "--csv-caption-key",
- type=str,
- default="title",
- help="For csv-like datasets, the name of the key for the captions.",
- )
- parser.add_argument(
- "--imagenet-val",
- type=str,
- default=None,
- help="Path to imagenet val set for conducting zero shot evaluation.",
- )
- parser.add_argument(
- "--imagenet-v2",
- type=str,
- default=None,
- help="Path to imagenet v2 for conducting zero shot evaluation.",
- )
- parser.add_argument(
- "--datasetnames",
- nargs="+",
- default=None,
- help="If loading webdataset, spedify the dataset names to load. Can be some of these: Clotho, audioset, audiocaps, BBCSoundEffects",
- )
- parser.add_argument(
- "--full-train-dataset",
- nargs="+",
- default=None,
- help="Which dataset will be trained with all the subsets. (train+test)",
- )
- parser.add_argument(
- "--exclude-eval-dataset",
- nargs="+",
- default=None,
- help="Which dataset will be excluded with evaluation",
- )
- parser.add_argument(
- "--datasetinfos",
- nargs="+",
- default=None,
- help="If loading webdataset, spedify the dataset types to load. Can be some of these: train, test, valid, unbalanced_train, balanced_train, eval",
- )
- parser.add_argument(
- "--dataset-proportion",
- type=float,
- default=1.0,
- help="How much proportion of dataset we want to train.",
- )
- parser.add_argument(
- "--remotedata",
- default=False,
- action="store_true",
- help="if the dataset is remote, set this flag",
- )
- parser.add_argument(
- "--class-label-path",
- type=str,
- default=None,
- help="The path of the class label pickle or csv.",
- )
- parser.add_argument(
- "--datasetpath",
- type=str,
- default="/mnt/audio_clip/webdataset_tar",
- help="The path to the dataset",
- )
- parser.add_argument(
- "--logs",
- type=str,
- default="./logs/",
- help="Where to store tensorboard logs. Use None to avoid storing logs.",
- )
- parser.add_argument(
- "--log-local",
- action="store_true",
- default=False,
- help="log files on local master, otherwise global master only.",
- )
- parser.add_argument(
- "--name",
- type=str,
- default=None,
- help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
- )
- parser.add_argument(
- "--workers", type=int, default=1, help="Number of workers per GPU."
- )
- parser.add_argument(
- "--batch-size", type=int, default=64, help="Batch size per GPU."
- )
- parser.add_argument(
- "--epochs", type=int, default=32, help="Number of epochs to train for."
- )
- parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
- parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
- parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
- parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
- parser.add_argument("--momentum", type=float, default=None, help="SGD epsilon.")
- parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
-
- parser.add_argument(
- "--split-opt",
- action="store_true",
- default=False,
- help="Use this flag to skip the learning rate decay.",
- )
- parser.add_argument(
- "--lr-pretrained", type=float, default=None, help="Learning rate for text."
- )
- parser.add_argument(
- "--beta1-pretrained", type=float, default=None, help="Adam beta 1 for text."
- )
- parser.add_argument(
- "--beta2-pretrained", type=float, default=None, help="Adam beta 2 for text."
- )
- parser.add_argument(
- "--eps-pretrained", type=float, default=None, help="Adam epsilon for text."
- )
- parser.add_argument(
- "--wd-pretrained", type=float, default=0.2, help="Weight decay for text."
- )
- parser.add_argument(
- "--momentum-pretrained", type=float, default=0.9, help="Momentum for text."
- )
- parser.add_argument(
- "--lr-new", type=float, default=None, help="Learning rate for audio."
- )
- parser.add_argument(
- "--beta1-new", type=float, default=None, help="Adam beta 1 for audio."
- )
- parser.add_argument(
- "--beta2-new", type=float, default=None, help="Adam beta 2 for audio."
- )
- parser.add_argument(
- "--eps-new", type=float, default=None, help="Adam epsilon for audio."
- )
- parser.add_argument(
- "--wd-new", type=float, default=0.2, help="Weight decay for audio."
- )
- parser.add_argument(
- "--momentum-new", type=float, default=0.9, help="Momentum for audio."
- )
- parser.add_argument(
- "--warmup", type=int, default=10000, help="Number of steps to warmup for."
- )
- parser.add_argument(
- "--use-bn-sync",
- default=False,
- action="store_true",
- help="Whether to use batch norm sync.",
- )
- parser.add_argument(
- "--skip-scheduler",
- action="store_true",
- default=False,
- help="Use this flag to skip the learning rate decay.",
- )
- parser.add_argument(
- "--save-frequency", type=int, default=1, help="How often to save checkpoints."
- )
- parser.add_argument(
- "--save-top-performance",
- type=int,
- default=0,
- help="Save the top x performance weights if the value >0",
- )
- parser.add_argument(
- "--save-most-recent",
- action="store_true",
- default=False,
- help="Always save the most recent model trained to epoch_latest.pt.",
- )
- parser.add_argument(
- "--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
- )
- parser.add_argument(
- "--val-frequency",
- type=int,
- default=1,
- help="How often to run evaluation with val data.",
- )
- parser.add_argument(
- "--resume",
- default=None,
- type=str,
- help="path to latest checkpoint (default: none)",
- )
- parser.add_argument(
- "--precision",
- choices=["amp", "fp16", "fp32"],
- default="amp",
- help="Floating point precision.",
- )
- parser.add_argument(
- "--amodel",
- type=str,
- default="RN50",
- help="Name of the audio backbone to use.",
- )
- parser.add_argument(
- "--tmodel",
- type=str,
- default="transformer",
- help="Name of the text backbone to use. Can be [transformer, bert, roberta, bart]",
- )
- parser.add_argument(
- "--pretrained-audio",
- default="",
- type=str,
- help="Use a pretrained audio model weights for the audio encoder of CLAP",
- )
- parser.add_argument(
- "--pretrained-text",
- default="",
- type=str,
- help="Use a pretrained text model weights for the text encoder of CLAP",
- )
- parser.add_argument(
- "--pretrained",
- default="",
- type=str,
- help="Use a pretrained CLIP model weights with the specified tag or file path.",
- )
- parser.add_argument(
- "--pretrained-image",
- default=False,
- action="store_true",
- help="Load imagenet pretrained weights for image tower backbone if available.",
- )
- parser.add_argument(
- "--lock-image",
- default=False,
- action="store_true",
- help="Lock full image tower by disabling gradients.",
- )
- parser.add_argument(
- "--lock-image-unlocked-groups",
- type=int,
- default=0,
- help="Leave last n image tower layer groups unlocked.",
- )
- parser.add_argument(
- "--lock-image-freeze-bn-stats",
- default=False,
- action="store_true",
- help="Freeze BatchNorm running stats in image tower for any locked layers.",
- )
- parser.add_argument(
- "--local-loss",
- default=False,
- action="store_true",
- help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)",
- )
- parser.add_argument(
- "--gather-with-grad",
- default=False,
- action="store_true",
- help="enable full distributed gradient for feature gather",
- )
- parser.add_argument(
- "--force-quick-gelu",
- default=False,
- action="store_true",
- help="Force use of QuickGELU activation for non-OpenAI transformer models.",
- )
- parser.add_argument(
- "--torchscript",
- default=False,
- action="store_true",
- help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'",
- )
- parser.add_argument(
- "--trace",
- default=False,
- action="store_true",
- help="torch.jit.trace the model for inference / eval only",
- )
- # arguments for distributed training
- parser.add_argument(
- "--dist-url",
- default="env://",
- type=str,
- help="url used to set up distributed training",
- )
- parser.add_argument(
- "--dist-backend", default="nccl", type=str, help="distributed backend"
- )
- parser.add_argument(
- "--report-to",
- default="",
- type=str,
- help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']",
- )
- parser.add_argument(
- "--wandb-notes", default="", type=str, help="Notes if logging with wandb"
- )
- parser.add_argument(
- "--C", type=float, default=3.16, help="inverse regularizer for logistic reg."
- )
- parser.add_argument(
- "--debug",
- default=False,
- action="store_true",
- help="If true, more information is logged.",
- )
- parser.add_argument(
- "--copy-codebase",
- default=False,
- action="store_true",
- help="If true, we copy the entire base on the log diretory, and execute from there.",
- )
- parser.add_argument(
- "--horovod",
- default=False,
- action="store_true",
- help="Use horovod for distributed training.",
- )
- parser.add_argument(
- "--ddp-static-graph",
- default=False,
- action="store_true",
- help="Enable static graph optimization for DDP in PyTorch >= 1.11.",
- )
- parser.add_argument(
- "--no-set-device-rank",
- default=False,
- action="store_true",
- help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
- )
- parser.add_argument("--seed", type=int, default=4242, help="Default random seed.")
-
- parser.add_argument(
- "--top-k-checkpoint-select-dataset",
- type=str,
- default="all",
- help="The dataset of selecting top-k checkpoint.",
- )
-
- # @R10, @R@5, @R1, mAP@10
- parser.add_argument(
- "--top-k-checkpoint-select-metric",
- type=str,
- default="_R@10",
- help="The metric for selecting top-k checkpoint.",
- )
- parser.add_argument(
- "--openai-model-cache-dir",
- type=str,
- default="~/.cache/clip",
- help="Directory to download OpenAI models.",
- )
- parser.add_argument(
- "--optimizer",
- type=str,
- default="adamw",
- help="can be AdamW or SGD",
- )
- parser.add_argument(
- "--parallel-eval",
- default=False,
- action="store_true",
- help="Eval in parallel (multi-GPU, multi-node).",
- )
-
- parser.add_argument(
- "--no-eval",
- default=False,
- action="store_true",
- help="Training without evaluation.",
- )
-
- parser.add_argument(
- "--lp-mlp",
- default=False,
- action="store_true",
- help="Linear Probe using MLP layer or not.",
- )
-
- parser.add_argument(
- "--lp-freeze",
- default=False,
- action="store_true",
- help="Linear Probe using Freeze CLAP or not",
- )
-
- parser.add_argument(
- "--lp-act",
- default="None",
- type=str,
- help="Options are ['relu','elu','prelu','softmax','sigmoid']",
- )
-
- parser.add_argument(
- "--lp-loss", type=str, default="bce", help="Loss func of Linear Probe."
- )
-
- parser.add_argument(
- "--lp-metrics",
- type=str,
- default="map,mauc,acc",
- help="Metrics of Linear Probe.",
- )
-
- parser.add_argument(
- "--lp-lr", type=float, default=1e-4, help="learning rate of linear probe"
- )
- parser.add_argument(
- "--kappa",
- type=float,
- default=0,
- help="the kappa in the weighted contrastive loss, default is to turn off the weighted contrastive loss",
- )
-
- parser.add_argument(
- "--data-filling",
- type=str,
- default="pad",
- help="type of data filling when the audio length is shorter than the max length."
- "Can be one of the following: repeat, repeatpad, pad",
- )
- parser.add_argument(
- "--data-truncating",
- type=str,
- default="rand_trunc",
- help="type of data truncation when the audio length is longer than the max length."
- "Can be one of the following: rand_trunc, fusion",
- )
-
- parser.add_argument(
- "--clap-mlploss",
- default=False,
- action="store_true",
- help="Using MLP loss for CLAP model or not",
- )
-
- parser.add_argument(
- "--wandb-id",
- type=str,
- default=None,
- help="the id of wandb experiment to restore.",
- )
-
- parser.add_argument(
- "--sleep", type=float, default=0, help="sleep n seconds before start training"
- )
-
- # variable length processing
- parser.add_argument(
- "--enable-fusion",
- default=False,
- action="store_true",
- help="Enable feature funsion for variable-length data",
- )
-
- parser.add_argument(
- "--fusion-type",
- type=str,
- default="None",
- help="Type is among ['channel_map', 'daf_1d','aff_1d','iaff_1d','daf_2d','aff_2d','iaff_2d']",
- )
-
- parser.add_argument(
- "--mixup",
- default=False,
- action="store_true",
- help="Enable mixup in finetuning training.",
- )
- parser.add_argument(
- "--text-augment-selection",
- type=str,
- default=None,
- help="For selecting levels of augmented text. Type is among ['all', 'augment_only', 'none']",
- )
-
- args = parser.parse_args()
-
- # If some params are not passed, we use the default values based on model name.
- default_params = get_default_params(args.amodel)
- for name, val in default_params.items():
- if getattr(args, name) is None:
- setattr(args, name, val)
-
- return args
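-
-
-# --- Usage sketch (illustrative; not part of the original module) ---
-# parse_args() falls back to the paper defaults from get_default_params() for any
-# of lr / beta1 / beta2 / eps that are left unset, keyed on the --amodel name:
-#   args = parse_args()            # e.g. with no --lr/--beta1 on the command line
-#   print(args.lr, args.beta1)     # -> 0.0005 0.9 for the default "RN50" audio backbone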
diff --git a/spaces/AutoGeneralAI/ChatGPT/README.md b/spaces/AutoGeneralAI/ChatGPT/README.md
deleted file mode 100644
index 7a0ac55518dad931d297f92e036995fa7c574e11..0000000000000000000000000000000000000000
--- a/spaces/AutoGeneralAI/ChatGPT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ChatGPT
-emoji: 🐢
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_roi_pooler.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_roi_pooler.py
deleted file mode 100644
index b93b7ae6ca633fa8686c86f82e7d55ac5ca62bc7..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_roi_pooler.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import unittest
-import torch
-
-from detectron2.modeling.poolers import ROIPooler
-from detectron2.structures import Boxes, RotatedBoxes
-from detectron2.utils.testing import random_boxes
-
-logger = logging.getLogger(__name__)
-
-
-class TestROIPooler(unittest.TestCase):
- def _test_roialignv2_roialignrotated_match(self, device):
- pooler_resolution = 14
- canonical_level = 4
- canonical_scale_factor = 2 ** canonical_level
- pooler_scales = (1.0 / canonical_scale_factor,)
- sampling_ratio = 0
-
- N, C, H, W = 2, 4, 10, 8
- N_rois = 10
- std = 11
- mean = 0
- feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
-
- features = [feature.to(device)]
-
- rois = []
- rois_rotated = []
- for _ in range(N):
- boxes = random_boxes(N_rois, W * canonical_scale_factor)
- rotated_boxes = torch.zeros(N_rois, 5)
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
- rois.append(Boxes(boxes).to(device))
- rois_rotated.append(RotatedBoxes(rotated_boxes).to(device))
-
- roialignv2_pooler = ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type="ROIAlignV2",
- )
-
- roialignv2_out = roialignv2_pooler(features, rois)
-
- roialignrotated_pooler = ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type="ROIAlignRotated",
- )
-
- roialignrotated_out = roialignrotated_pooler(features, rois_rotated)
-
- self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4))
-
- def test_roialignv2_roialignrotated_match_cpu(self):
- self._test_roialignv2_roialignrotated_match(device="cpu")
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_roialignv2_roialignrotated_match_cuda(self):
- self._test_roialignv2_roialignrotated_match(device="cuda")
-
- def _test_scriptability(self, device):
- pooler_resolution = 14
- canonical_level = 4
- canonical_scale_factor = 2 ** canonical_level
- pooler_scales = (1.0 / canonical_scale_factor,)
- sampling_ratio = 0
-
- N, C, H, W = 2, 4, 10, 8
- N_rois = 10
- std = 11
- mean = 0
- feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
-
- features = [feature.to(device)]
-
- rois = []
- for _ in range(N):
- boxes = random_boxes(N_rois, W * canonical_scale_factor)
-
- rois.append(Boxes(boxes).to(device))
-
- roialignv2_pooler = ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type="ROIAlignV2",
- )
-
- roialignv2_out = roialignv2_pooler(features, rois)
- scripted_roialignv2_out = torch.jit.script(roialignv2_pooler)(features, rois)
- self.assertTrue(torch.equal(roialignv2_out, scripted_roialignv2_out))
-
- def test_scriptability_cpu(self):
- self._test_scriptability(device="cpu")
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_scriptability_gpu(self):
- self._test_scriptability(device="cuda")
-
- def test_no_images(self):
- N, C, H, W = 0, 32, 32, 32
- feature = torch.rand(N, C, H, W) - 0.5
- features = [feature]
- pooler = ROIPooler(
- output_size=14, scales=(1.0,), sampling_ratio=0.0, pooler_type="ROIAlignV2"
- )
- output = pooler.forward(features, [])
- self.assertEqual(output.shape, (0, C, 14, 14))
-
- def test_roi_pooler_tracing(self):
- class Model(torch.nn.Module):
- def __init__(self, roi):
- super(Model, self).__init__()
- self.roi = roi
-
- def forward(self, x, boxes):
- return self.roi(x, [Boxes(boxes)])
-
- pooler_resolution = 14
- canonical_level = 4
- canonical_scale_factor = 2 ** canonical_level
- pooler_scales = (1.0 / canonical_scale_factor, 0.5 / canonical_scale_factor)
- sampling_ratio = 0
-
- N, C, H, W = 1, 4, 10, 8
- N_rois = 10
- std = 11
- mean = 0
- feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
- feature = [feature, feature]
-
- rois = random_boxes(N_rois, W * canonical_scale_factor)
- # Add one larger box so that this level has only one box.
- # This may trigger the bug https://github.com/pytorch/pytorch/issues/49852
- # that we shall workaround.
- rois = torch.cat([rois, torch.tensor([[0, 0, 448, 448]])])
-
- model = Model(
- ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type="ROIAlign",
- )
- )
-
- with torch.no_grad():
- func = torch.jit.trace(model, (feature, rois))
- o = func(feature, rois)
- self.assertEqual(o.shape, (11, 4, 14, 14))
- o = func(feature, rois[:5])
- self.assertEqual(o.shape, (5, 4, 14, 14))
- o = func(feature, random_boxes(20, W * canonical_scale_factor))
- self.assertEqual(o.shape, (20, 4, 14, 14))
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/Ayush113/cricket_matchups/README.md b/spaces/Ayush113/cricket_matchups/README.md
deleted file mode 100644
index 1e6d8cd43cb685d4515400c39bdc14f298dac516..0000000000000000000000000000000000000000
--- a/spaces/Ayush113/cricket_matchups/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Cricket Matchups
-emoji: 🏃
-colorFrom: purple
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.46.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Bart92/RVC_HF/Fixes/local_fixes.py b/spaces/Bart92/RVC_HF/Fixes/local_fixes.py
deleted file mode 100644
index 8a418076eee6f65fe06eb0f607061796b839c1ee..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/Fixes/local_fixes.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import os
-import sys
-import time
-import shutil
-import requests
-import zipfile
-
-def insert_new_line(file_name, line_to_find, text_to_insert):
- lines = []
- with open(file_name, 'r', encoding='utf-8') as read_obj:
- lines = read_obj.readlines()
- already_exists = False
- with open(file_name + '.tmp', 'w', encoding='utf-8') as write_obj:
- for i in range(len(lines)):
- write_obj.write(lines[i])
- if lines[i].strip() == line_to_find:
- # If next line exists and starts with sys.path.append, skip
- if i+1 < len(lines) and lines[i+1].strip().startswith("sys.path.append"):
- print('It was already fixed! Skipping line insertion...')
- already_exists = True
- break
- else:
- write_obj.write(text_to_insert + '\n')
- # If no existing sys.path.append line was found, replace the original file
- if not already_exists:
- os.replace(file_name + '.tmp', file_name)
- return True
- else:
- # If existing line was found, delete temporary file
- os.remove(file_name + '.tmp')
- return False
-
-def replace_in_file(file_name, old_text, new_text):
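- # Replaces old_text with new_text in file_name; returns True if a replacement was made, False otherwise.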
- with open(file_name, 'r', encoding='utf-8') as file:
- file_contents = file.read()
-
- if old_text in file_contents:
- file_contents = file_contents.replace(old_text, new_text)
- with open(file_name, 'w', encoding='utf-8') as file:
- file.write(file_contents)
- return True
-
- return False
-
-if __name__ == "__main__":
- current_path = os.getcwd()
- file_name = os.path.join(current_path, "infer", "modules", "train", "extract", "extract_f0_print.py")
- line_to_find = 'import numpy as np, logging'
- text_to_insert = "sys.path.append(r'" + current_path + "')"
-
-
- success_1 = insert_new_line(file_name, line_to_find, text_to_insert)
- if success_1:
- print('The first operation was successful!')
- else:
- print('The first operation was skipped because it was already fixed!')
-
- file_name = 'infer-web.py'
- old_text = 'with gr.Blocks(theme=gr.themes.Soft()) as app:'
- new_text = 'with gr.Blocks() as app:'
-
- success_2 = replace_in_file(file_name, old_text, new_text)
- if success_2:
- print('The second operation was successful!')
- else:
- print('The second operation was omitted because it was already fixed!')
-
- print('Local corrections successful! You should now be able to infer and train locally in Applio RVC Fork.')
-
- time.sleep(5)
-
-def find_torchcrepe_directory(directory):
- """
- Recursively searches for the topmost folder named 'torchcrepe' within a directory.
- Returns the path of the directory found or None if none is found.
- """
- for root, dirs, files in os.walk(directory):
- if 'torchcrepe' in dirs:
- return os.path.join(root, 'torchcrepe')
- return None
-
-def download_and_extract_torchcrepe():
- url = 'https://github.com/maxrmorrison/torchcrepe/archive/refs/heads/master.zip'
- temp_dir = 'temp_torchcrepe'
- destination_dir = os.getcwd()
-
- try:
- torchcrepe_dir_path = os.path.join(destination_dir, 'torchcrepe')
-
- if os.path.exists(torchcrepe_dir_path):
- print("Skipping the torchcrepe download. The folder already exists.")
- return
-
- # Download the file
- print("Starting torchcrepe download...")
- response = requests.get(url)
-
- # Raise an error if the GET request was unsuccessful
- response.raise_for_status()
- print("Download completed.")
-
- # Save the downloaded file
- zip_file_path = os.path.join(temp_dir, 'master.zip')
- os.makedirs(temp_dir, exist_ok=True)
- with open(zip_file_path, 'wb') as file:
- file.write(response.content)
- print(f"Zip file saved to {zip_file_path}")
-
- # Extract the zip file
- print("Extracting content...")
- with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
- zip_file.extractall(temp_dir)
- print("Extraction completed.")
-
- # Locate the torchcrepe folder and move it to the destination directory
- torchcrepe_dir = find_torchcrepe_directory(temp_dir)
- if torchcrepe_dir:
- shutil.move(torchcrepe_dir, destination_dir)
- print(f"Moved the torchcrepe directory to {destination_dir}!")
- else:
- print("The torchcrepe directory could not be located.")
-
- except Exception as e:
- print("Torchcrepe not successfully downloaded", e)
-
- # Clean up temporary directory
- if os.path.exists(temp_dir):
- shutil.rmtree(temp_dir)
-
-# Run the function
-download_and_extract_torchcrepe()
-
-temp_dir = 'temp_torchcrepe'
-
-if os.path.exists(temp_dir):
- shutil.rmtree(temp_dir)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/base.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/base.py
deleted file mode 100644
index cafb79fb3dcf43744393e2964056fe32c350bbc1..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/base.py
+++ /dev/null
@@ -1,688 +0,0 @@
-import csv
-import email.message
-import functools
-import json
-import logging
-import pathlib
-import re
-import zipfile
-from typing import (
- IO,
- TYPE_CHECKING,
- Any,
- Collection,
- Container,
- Dict,
- Iterable,
- Iterator,
- List,
- NamedTuple,
- Optional,
- Tuple,
- Union,
-)
-
-from pip._vendor.packaging.requirements import Requirement
-from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet
-from pip._vendor.packaging.utils import NormalizedName
-from pip._vendor.packaging.version import LegacyVersion, Version
-
-from pip._internal.exceptions import NoneMetadataError
-from pip._internal.locations import site_packages, user_site
-from pip._internal.models.direct_url import (
- DIRECT_URL_METADATA_NAME,
- DirectUrl,
- DirectUrlValidationError,
-)
-from pip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here.
-from pip._internal.utils.egg_link import egg_link_path_from_sys_path
-from pip._internal.utils.misc import is_local, normalize_path
-from pip._internal.utils.packaging import safe_extra
-from pip._internal.utils.urls import url_to_path
-
-from ._json import msg_to_json
-
-if TYPE_CHECKING:
- from typing import Protocol
-else:
- Protocol = object
-
-DistributionVersion = Union[LegacyVersion, Version]
-
-InfoPath = Union[str, pathlib.PurePath]
-
-logger = logging.getLogger(__name__)
-
-
-class BaseEntryPoint(Protocol):
- @property
- def name(self) -> str:
- raise NotImplementedError()
-
- @property
- def value(self) -> str:
- raise NotImplementedError()
-
- @property
- def group(self) -> str:
- raise NotImplementedError()
-
-
-def _convert_installed_files_path(
- entry: Tuple[str, ...],
- info: Tuple[str, ...],
-) -> str:
- """Convert a legacy installed-files.txt path into modern RECORD path.
-
- The legacy format stores paths relative to the info directory, while the
- modern format stores paths relative to the package root, e.g. the
- site-packages directory.
-
- :param entry: Path parts of the installed-files.txt entry.
- :param info: Path parts of the egg-info directory relative to package root.
- :returns: The converted entry.
-
- For best compatibility with symlinks, this does not use ``abspath()`` or
- ``Path.resolve()``, but tries to work with path parts:
-
- 1. While ``entry`` starts with ``..``, remove the equal amounts of parts
- from ``info``; if ``info`` is empty, start appending ``..`` instead.
- 2. Join the two directly.
- """
- while entry and entry[0] == "..":
- if not info or info[-1] == "..":
- info += ("..",)
- else:
- info = info[:-1]
- entry = entry[1:]
- return str(pathlib.Path(*info, *entry))
-
-
-class RequiresEntry(NamedTuple):
- requirement: str
- extra: str
- marker: str
-
-
-class BaseDistribution(Protocol):
- @classmethod
- def from_directory(cls, directory: str) -> "BaseDistribution":
- """Load the distribution from a metadata directory.
-
- :param directory: Path to a metadata directory, e.g. ``.dist-info``.
- """
- raise NotImplementedError()
-
- @classmethod
- def from_metadata_file_contents(
- cls,
- metadata_contents: bytes,
- filename: str,
- project_name: str,
- ) -> "BaseDistribution":
- """Load the distribution from the contents of a METADATA file.
-
- This is used to implement PEP 658 by generating a "shallow" dist object that can
- be used for resolution without downloading or building the actual dist yet.
-
- :param metadata_contents: The contents of a METADATA file.
- :param filename: File name for the dist with this metadata.
- :param project_name: Name of the project this dist represents.
- """
- raise NotImplementedError()
-
- @classmethod
- def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution":
- """Load the distribution from a given wheel.
-
- :param wheel: A concrete wheel definition.
- :param name: File name of the wheel.
-
- :raises InvalidWheel: Whenever loading of the wheel causes a
- :py:exc:`zipfile.BadZipFile` exception to be thrown.
- :raises UnsupportedWheel: If the wheel is a valid zip, but malformed
- internally.
- """
- raise NotImplementedError()
-
- def __repr__(self) -> str:
- return f"{self.raw_name} {self.version} ({self.location})"
-
- def __str__(self) -> str:
- return f"{self.raw_name} {self.version}"
-
- @property
- def location(self) -> Optional[str]:
- """Where the distribution is loaded from.
-
- A string value is not necessarily a filesystem path, since distributions
- can be loaded from other sources, e.g. arbitrary zip archives. ``None``
- means the distribution is created in-memory.
-
- Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
- this is a symbolic link, we want to preserve the relative path between
- it and files in the distribution.
- """
- raise NotImplementedError()
-
- @property
- def editable_project_location(self) -> Optional[str]:
- """The project location for editable distributions.
-
- This is the directory where pyproject.toml or setup.py is located.
- None if the distribution is not installed in editable mode.
- """
- # TODO: this property is relatively costly to compute, memoize it ?
- direct_url = self.direct_url
- if direct_url:
- if direct_url.is_local_editable():
- return url_to_path(direct_url.url)
- else:
- # Search for an .egg-link file by walking sys.path, as it was
- # done before by dist_is_editable().
- egg_link_path = egg_link_path_from_sys_path(self.raw_name)
- if egg_link_path:
- # TODO: get project location from second line of egg_link file
- # (https://github.com/pypa/pip/issues/10243)
- return self.location
- return None
-
- @property
- def installed_location(self) -> Optional[str]:
- """The distribution's "installed" location.
-
- This should generally be a ``site-packages`` directory. This is
- usually ``dist.location``, except for legacy develop-installed packages,
- where ``dist.location`` is the source code location, and this is where
- the ``.egg-link`` file is.
-
- The returned location is normalized (in particular, with symlinks removed).
- """
- raise NotImplementedError()
-
- @property
- def info_location(self) -> Optional[str]:
- """Location of the .[egg|dist]-info directory or file.
-
- Similarly to ``location``, a string value is not necessarily a
- filesystem path. ``None`` means the distribution is created in-memory.
-
- For a modern .dist-info installation on disk, this should be something
- like ``{location}/{raw_name}-{version}.dist-info``.
-
- Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
- this is a symbolic link, we want to preserve the relative path between
- it and other files in the distribution.
- """
- raise NotImplementedError()
-
- @property
- def installed_by_distutils(self) -> bool:
- """Whether this distribution is installed with legacy distutils format.
-
- A distribution installed with "raw" distutils not patched by setuptools
- uses one single file at ``info_location`` to store metadata. We need to
- treat this specially on uninstallation.
- """
- info_location = self.info_location
- if not info_location:
- return False
- return pathlib.Path(info_location).is_file()
-
- @property
- def installed_as_egg(self) -> bool:
- """Whether this distribution is installed as an egg.
-
- This usually indicates the distribution was installed by (older versions
- of) easy_install.
- """
- location = self.location
- if not location:
- return False
- return location.endswith(".egg")
-
- @property
- def installed_with_setuptools_egg_info(self) -> bool:
- """Whether this distribution is installed with the ``.egg-info`` format.
-
- This usually indicates the distribution was installed with setuptools
- with an old pip version or with ``single-version-externally-managed``.
-
- Note that this ensures the metadata store is a directory. distutils can
- also install an ``.egg-info``, but as a file, not a directory. This
- property is *False* for that case. Also see ``installed_by_distutils``.
- """
- info_location = self.info_location
- if not info_location:
- return False
- if not info_location.endswith(".egg-info"):
- return False
- return pathlib.Path(info_location).is_dir()
-
- @property
- def installed_with_dist_info(self) -> bool:
- """Whether this distribution is installed with the "modern format".
-
- This indicates a "modern" installation, e.g. storing metadata in the
- ``.dist-info`` directory. This applies to installations made by
- setuptools (but through pip, not directly), or anything using the
- standardized build backend interface (PEP 517).
- """
- info_location = self.info_location
- if not info_location:
- return False
- if not info_location.endswith(".dist-info"):
- return False
- return pathlib.Path(info_location).is_dir()
-
- @property
- def canonical_name(self) -> NormalizedName:
- raise NotImplementedError()
-
- @property
- def version(self) -> DistributionVersion:
- raise NotImplementedError()
-
- @property
- def setuptools_filename(self) -> str:
- """Convert a project name to its setuptools-compatible filename.
-
- This is a copy of ``pkg_resources.to_filename()`` for compatibility.
- """
- return self.raw_name.replace("-", "_")
-
- @property
- def direct_url(self) -> Optional[DirectUrl]:
- """Obtain a DirectUrl from this distribution.
-
- Returns None if the distribution has no `direct_url.json` metadata,
- or if `direct_url.json` is invalid.
- """
- try:
- content = self.read_text(DIRECT_URL_METADATA_NAME)
- except FileNotFoundError:
- return None
- try:
- return DirectUrl.from_json(content)
- except (
- UnicodeDecodeError,
- json.JSONDecodeError,
- DirectUrlValidationError,
- ) as e:
- logger.warning(
- "Error parsing %s for %s: %s",
- DIRECT_URL_METADATA_NAME,
- self.canonical_name,
- e,
- )
- return None
-
- @property
- def installer(self) -> str:
- try:
- installer_text = self.read_text("INSTALLER")
- except (OSError, ValueError, NoneMetadataError):
- return "" # Fail silently if the installer file cannot be read.
- for line in installer_text.splitlines():
- cleaned_line = line.strip()
- if cleaned_line:
- return cleaned_line
- return ""
-
- @property
- def requested(self) -> bool:
- return self.is_file("REQUESTED")
-
- @property
- def editable(self) -> bool:
- return bool(self.editable_project_location)
-
- @property
- def local(self) -> bool:
- """If distribution is installed in the current virtual environment.
-
- Always True if we're not in a virtualenv.
- """
- if self.installed_location is None:
- return False
- return is_local(self.installed_location)
-
- @property
- def in_usersite(self) -> bool:
- if self.installed_location is None or user_site is None:
- return False
- return self.installed_location.startswith(normalize_path(user_site))
-
- @property
- def in_site_packages(self) -> bool:
- if self.installed_location is None or site_packages is None:
- return False
- return self.installed_location.startswith(normalize_path(site_packages))
-
- def is_file(self, path: InfoPath) -> bool:
- """Check whether an entry in the info directory is a file."""
- raise NotImplementedError()
-
- def iter_distutils_script_names(self) -> Iterator[str]:
- """Find distutils 'scripts' entries metadata.
-
- If 'scripts' is supplied in ``setup.py``, distutils records those in the
- installed distribution's ``scripts`` directory, a file for each script.
- """
- raise NotImplementedError()
-
- def read_text(self, path: InfoPath) -> str:
- """Read a file in the info directory.
-
- :raise FileNotFoundError: If ``path`` does not exist in the directory.
- :raise NoneMetadataError: If ``path`` exists in the info directory, but
- cannot be read.
- """
- raise NotImplementedError()
-
- def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
- raise NotImplementedError()
-
- def _metadata_impl(self) -> email.message.Message:
- raise NotImplementedError()
-
- @functools.lru_cache(maxsize=1)
- def _metadata_cached(self) -> email.message.Message:
- # When we drop python 3.7 support, move this to the metadata property and use
- # functools.cached_property instead of lru_cache.
- metadata = self._metadata_impl()
- self._add_egg_info_requires(metadata)
- return metadata
-
- @property
- def metadata(self) -> email.message.Message:
- """Metadata of distribution parsed from e.g. METADATA or PKG-INFO.
-
- This should return an empty message if the metadata file is unavailable.
-
- :raises NoneMetadataError: If the metadata file is available, but does
- not contain valid metadata.
- """
- return self._metadata_cached()
-
- @property
- def metadata_dict(self) -> Dict[str, Any]:
- """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO.
-
- This should return an empty dict if the metadata file is unavailable.
-
- :raises NoneMetadataError: If the metadata file is available, but does
- not contain valid metadata.
- """
- return msg_to_json(self.metadata)
-
- @property
- def metadata_version(self) -> Optional[str]:
- """Value of "Metadata-Version:" in distribution metadata, if available."""
- return self.metadata.get("Metadata-Version")
-
- @property
- def raw_name(self) -> str:
- """Value of "Name:" in distribution metadata."""
- # The metadata should NEVER be missing the Name: key, but if it somehow
- # does, fall back to the known canonical name.
- return self.metadata.get("Name", self.canonical_name)
-
- @property
- def requires_python(self) -> SpecifierSet:
- """Value of "Requires-Python:" in distribution metadata.
-
- If the key does not exist or contains an invalid value, an empty
- SpecifierSet should be returned.
- """
- value = self.metadata.get("Requires-Python")
- if value is None:
- return SpecifierSet()
- try:
- # Convert to str to satisfy the type checker; this can be a Header object.
- spec = SpecifierSet(str(value))
- except InvalidSpecifier as e:
- message = "Package %r has an invalid Requires-Python: %s"
- logger.warning(message, self.raw_name, e)
- return SpecifierSet()
- return spec
-
- def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
- """Dependencies of this distribution.
-
- For modern .dist-info distributions, this is the collection of
- "Requires-Dist:" entries in distribution metadata.
- """
- raise NotImplementedError()
-
- def iter_provided_extras(self) -> Iterable[str]:
- """Extras provided by this distribution.
-
- For modern .dist-info distributions, this is the collection of
- "Provides-Extra:" entries in distribution metadata.
- """
- raise NotImplementedError()
-
- def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]:
- try:
- text = self.read_text("RECORD")
- except FileNotFoundError:
- return None
- # This extra Path-str cast normalizes entries.
- return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines()))
-
- def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]:
- try:
- text = self.read_text("installed-files.txt")
- except FileNotFoundError:
- return None
- paths = (p for p in text.splitlines(keepends=False) if p)
- root = self.location
- info = self.info_location
- if root is None or info is None:
- return paths
- try:
- info_rel = pathlib.Path(info).relative_to(root)
- except ValueError: # info is not relative to root.
- return paths
- if not info_rel.parts: # info *is* root.
- return paths
- return (
- _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts)
- for p in paths
- )
-
- def iter_declared_entries(self) -> Optional[Iterator[str]]:
- """Iterate through file entries declared in this distribution.
-
- For modern .dist-info distributions, this is the files listed in the
- ``RECORD`` metadata file. For legacy setuptools distributions, this
- comes from ``installed-files.txt``, with entries normalized to be
- compatible with the format used by ``RECORD``.
-
- :return: An iterator for listed entries, or None if the distribution
- contains neither ``RECORD`` nor ``installed-files.txt``.
- """
- return (
- self._iter_declared_entries_from_record()
- or self._iter_declared_entries_from_legacy()
- )
-
- def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]:
- """Parse a ``requires.txt`` in an egg-info directory.
-
- This is an INI-ish format where an egg-info stores dependencies. A
- section name describes an extra and/or environment markers, while each entry
- is an arbitrary string (not a key-value pair) representing a dependency
- as a requirement string (no markers).
-
- There is a construct in ``importlib.metadata`` called ``Sectioned`` that
- does mostly the same, but the format is currently considered private.
- """
- try:
- content = self.read_text("requires.txt")
- except FileNotFoundError:
- return
- extra = marker = "" # Section-less entries don't have markers.
- for line in content.splitlines():
- line = line.strip()
- if not line or line.startswith("#"): # Comment; ignored.
- continue
- if line.startswith("[") and line.endswith("]"): # A section header.
- extra, _, marker = line.strip("[]").partition(":")
- continue
- yield RequiresEntry(requirement=line, extra=extra, marker=marker)
-
- def _iter_egg_info_extras(self) -> Iterable[str]:
- """Get extras from the egg-info directory."""
- known_extras = {""}
- for entry in self._iter_requires_txt_entries():
- if entry.extra in known_extras:
- continue
- known_extras.add(entry.extra)
- yield entry.extra
-
- def _iter_egg_info_dependencies(self) -> Iterable[str]:
- """Get distribution dependencies from the egg-info directory.
-
- To ease parsing, this converts a legacy dependency entry into a PEP 508
- requirement string. Like ``_iter_requires_txt_entries()``, there is code
- in ``importlib.metadata`` that does mostly the same, but does not do exactly
- what we need.
-
- Namely, ``importlib.metadata`` does not normalize the extra name before
- putting it into the requirement string, which causes marker comparison
- to fail because the dist-info format does normalize. This is consistent across
- all currently available PEP 517 backends, although not standardized.
- """
- for entry in self._iter_requires_txt_entries():
- if entry.extra and entry.marker:
- marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"'
- elif entry.extra:
- marker = f'extra == "{safe_extra(entry.extra)}"'
- elif entry.marker:
- marker = entry.marker
- else:
- marker = ""
- if marker:
- yield f"{entry.requirement} ; {marker}"
- else:
- yield entry.requirement
-
- def _add_egg_info_requires(self, metadata: email.message.Message) -> None:
- """Add egg-info requires.txt information to the metadata."""
- if not metadata.get_all("Requires-Dist"):
- for dep in self._iter_egg_info_dependencies():
- metadata["Requires-Dist"] = dep
- if not metadata.get_all("Provides-Extra"):
- for extra in self._iter_egg_info_extras():
- metadata["Provides-Extra"] = extra
-
-
-class BaseEnvironment:
- """An environment containing distributions to introspect."""
-
- @classmethod
- def default(cls) -> "BaseEnvironment":
- raise NotImplementedError()
-
- @classmethod
- def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment":
- raise NotImplementedError()
-
- def get_distribution(self, name: str) -> Optional["BaseDistribution"]:
- """Given a requirement name, return the installed distributions.
-
- The name may not be normalized. The implementation must canonicalize
- it for lookup.
- """
- raise NotImplementedError()
-
- def _iter_distributions(self) -> Iterator["BaseDistribution"]:
- """Iterate through installed distributions.
-
- This function should be implemented by subclasses, but never called
- directly. Use the public ``iter_all_distributions()`` instead, which
- implements additional logic to make sure the distributions are valid.
- """
- raise NotImplementedError()
-
- def iter_all_distributions(self) -> Iterator[BaseDistribution]:
- """Iterate through all installed distributions without any filtering."""
- for dist in self._iter_distributions():
- # Make sure the distribution actually comes from a valid Python
- # packaging distribution. Pip's AdjacentTempDirectory leaves folders
- # e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The
- # valid project name pattern is taken from PEP 508.
- project_name_valid = re.match(
- r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
- dist.canonical_name,
- flags=re.IGNORECASE,
- )
- if not project_name_valid:
- logger.warning(
- "Ignoring invalid distribution %s (%s)",
- dist.canonical_name,
- dist.location,
- )
- continue
- yield dist
-
- def iter_installed_distributions(
- self,
- local_only: bool = True,
- skip: Container[str] = stdlib_pkgs,
- include_editables: bool = True,
- editables_only: bool = False,
- user_only: bool = False,
- ) -> Iterator[BaseDistribution]:
- """Return a list of installed distributions.
-
- This is based on ``iter_all_distributions()`` with additional filtering
- options. Note that ``iter_installed_distributions()`` without arguments
- is *not* equal to ``iter_all_distributions()``, since some of the
- configurations exclude packages by default.
-
- :param local_only: If True (default), only return installations
- local to the current virtualenv, if in a virtualenv.
- :param skip: An iterable of canonicalized project names to ignore;
- defaults to ``stdlib_pkgs``.
- :param include_editables: If False, don't report editables.
- :param editables_only: If True, only report editables.
- :param user_only: If True, only report installations in the user
- site directory.
- """
- it = self.iter_all_distributions()
- if local_only:
- it = (d for d in it if d.local)
- if not include_editables:
- it = (d for d in it if not d.editable)
- if editables_only:
- it = (d for d in it if d.editable)
- if user_only:
- it = (d for d in it if d.in_usersite)
- return (d for d in it if d.canonical_name not in skip)
-
-
-class Wheel(Protocol):
- location: str
-
- def as_zipfile(self) -> zipfile.ZipFile:
- raise NotImplementedError()
-
-
-class FilesystemWheel(Wheel):
- def __init__(self, location: str) -> None:
- self.location = location
-
- def as_zipfile(self) -> zipfile.ZipFile:
- return zipfile.ZipFile(self.location, allowZip64=True)
-
-
-class MemoryWheel(Wheel):
- def __init__(self, location: str, stream: IO[bytes]) -> None:
- self.location = location
- self.stream = stream
-
- def as_zipfile(self) -> zipfile.ZipFile:
- return zipfile.ZipFile(self.stream, allowZip64=True)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf1632prober.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf1632prober.py
deleted file mode 100644
index 6bdec63d6867928bf73a7e513f60cee8f49ca050..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf1632prober.py
+++ /dev/null
@@ -1,225 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-#
-# Contributor(s):
-# Jason Zavaglia
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301 USA
-######################### END LICENSE BLOCK #########################
-from typing import List, Union
-
-from .charsetprober import CharSetProber
-from .enums import ProbingState
-
-
-class UTF1632Prober(CharSetProber):
- """
- This class simply looks for occurrences of zero bytes, and infers
- whether the file is UTF16 or UTF32 (little-endian or big-endian).
- For instance, files looking like ( \0 \0 \0 [nonzero] )+
- are very likely to be UTF32BE. Files looking like ( \0 [nonzero] )+
- may be guessed to be UTF16BE, and inversely for little-endian varieties.
- """
-
- # how many logical characters to scan before feeling confident of prediction
- MIN_CHARS_FOR_DETECTION = 20
- # a fixed constant ratio of expected zeros or non-zeros in modulo-position.
- EXPECTED_RATIO = 0.94
-
- def __init__(self) -> None:
- super().__init__()
- self.position = 0
- self.zeros_at_mod = [0] * 4
- self.nonzeros_at_mod = [0] * 4
- self._state = ProbingState.DETECTING
- self.quad = [0, 0, 0, 0]
- self.invalid_utf16be = False
- self.invalid_utf16le = False
- self.invalid_utf32be = False
- self.invalid_utf32le = False
- self.first_half_surrogate_pair_detected_16be = False
- self.first_half_surrogate_pair_detected_16le = False
- self.reset()
-
- def reset(self) -> None:
- super().reset()
- self.position = 0
- self.zeros_at_mod = [0] * 4
- self.nonzeros_at_mod = [0] * 4
- self._state = ProbingState.DETECTING
- self.invalid_utf16be = False
- self.invalid_utf16le = False
- self.invalid_utf32be = False
- self.invalid_utf32le = False
- self.first_half_surrogate_pair_detected_16be = False
- self.first_half_surrogate_pair_detected_16le = False
- self.quad = [0, 0, 0, 0]
-
- @property
- def charset_name(self) -> str:
- if self.is_likely_utf32be():
- return "utf-32be"
- if self.is_likely_utf32le():
- return "utf-32le"
- if self.is_likely_utf16be():
- return "utf-16be"
- if self.is_likely_utf16le():
- return "utf-16le"
- # default to something valid
- return "utf-16"
-
- @property
- def language(self) -> str:
- return ""
-
- def approx_32bit_chars(self) -> float:
- return max(1.0, self.position / 4.0)
-
- def approx_16bit_chars(self) -> float:
- return max(1.0, self.position / 2.0)
-
- def is_likely_utf32be(self) -> bool:
- approx_chars = self.approx_32bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
- and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
- and not self.invalid_utf32be
- )
-
- def is_likely_utf32le(self) -> bool:
- approx_chars = self.approx_32bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
- and not self.invalid_utf32le
- )
-
- def is_likely_utf16be(self) -> bool:
- approx_chars = self.approx_16bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- (self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
- > self.EXPECTED_RATIO
- and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
- > self.EXPECTED_RATIO
- and not self.invalid_utf16be
- )
-
- def is_likely_utf16le(self) -> bool:
- approx_chars = self.approx_16bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- (self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
- > self.EXPECTED_RATIO
- and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
- > self.EXPECTED_RATIO
- and not self.invalid_utf16le
- )
-
- def validate_utf32_characters(self, quad: List[int]) -> None:
- """
- Validate if the quad of bytes is valid UTF-32.
-
- UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
- excluding 0x0000D800 - 0x0000DFFF
-
- https://en.wikipedia.org/wiki/UTF-32
- """
- if (
- quad[0] != 0
- or quad[1] > 0x10
- or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
- ):
- self.invalid_utf32be = True
- if (
- quad[3] != 0
- or quad[2] > 0x10
- or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
- ):
- self.invalid_utf32le = True
-
- def validate_utf16_characters(self, pair: List[int]) -> None:
- """
- Validate if the pair of bytes is valid UTF-16.
-
- UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF,
- with an exception for surrogate pairs, which must be in the range
- 0xD800-0xDBFF followed by 0xDC00-0xDFFF
-
- https://en.wikipedia.org/wiki/UTF-16
- """
- if not self.first_half_surrogate_pair_detected_16be:
- if 0xD8 <= pair[0] <= 0xDB:
- self.first_half_surrogate_pair_detected_16be = True
- elif 0xDC <= pair[0] <= 0xDF:
- self.invalid_utf16be = True
- else:
- if 0xDC <= pair[0] <= 0xDF:
- self.first_half_surrogate_pair_detected_16be = False
- else:
- self.invalid_utf16be = True
-
- if not self.first_half_surrogate_pair_detected_16le:
- if 0xD8 <= pair[1] <= 0xDB:
- self.first_half_surrogate_pair_detected_16le = True
- elif 0xDC <= pair[1] <= 0xDF:
- self.invalid_utf16le = True
- else:
- if 0xDC <= pair[1] <= 0xDF:
- self.first_half_surrogate_pair_detected_16le = False
- else:
- self.invalid_utf16le = True
-
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
- for c in byte_str:
- mod4 = self.position % 4
- self.quad[mod4] = c
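- # Once a full 4-byte quad has been collected, validate it as one UTF-32 unit and as two UTF-16 units.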
- if mod4 == 3:
- self.validate_utf32_characters(self.quad)
- self.validate_utf16_characters(self.quad[0:2])
- self.validate_utf16_characters(self.quad[2:4])
- if c == 0:
- self.zeros_at_mod[mod4] += 1
- else:
- self.nonzeros_at_mod[mod4] += 1
- self.position += 1
- return self.state
-
- @property
- def state(self) -> ProbingState:
- if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
- # terminal, decided states
- return self._state
- if self.get_confidence() > 0.80:
- self._state = ProbingState.FOUND_IT
- elif self.position > 4 * 1024:
- # if we get to 4kb into the file, and we can't conclude it's UTF,
- # let's give up
- self._state = ProbingState.NOT_ME
- return self._state
-
- def get_confidence(self) -> float:
- return (
- 0.85
- if (
- self.is_likely_utf16le()
- or self.is_likely_utf16be()
- or self.is_likely_utf32le()
- or self.is_likely_utf32be()
- )
- else 0.00
- )
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/other.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/other.py
deleted file mode 100644
index 1e39cd42a8cc6ad2a4eceae5c2fb07a477a51dd6..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/other.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""
- pygments.formatters.other
- ~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Other formatters: NullFormatter, RawTokenFormatter.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.formatter import Formatter
-from pip._vendor.pygments.util import get_choice_opt
-from pip._vendor.pygments.token import Token
-from pip._vendor.pygments.console import colorize
-
-__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
-
-
-class NullFormatter(Formatter):
- """
- Output the text unchanged without any formatting.
- """
- name = 'Text only'
- aliases = ['text', 'null']
- filenames = ['*.txt']
-
- def format(self, tokensource, outfile):
- enc = self.encoding
- for ttype, value in tokensource:
- if enc:
- outfile.write(value.encode(enc))
- else:
- outfile.write(value)
-
-
-class RawTokenFormatter(Formatter):
- r"""
- Format tokens as a raw representation for storing token streams.
-
- The format is ``tokentyperepr(tokenstring)\n``. The output can later
- be converted to a token stream with the `RawTokenLexer`, described in the
- :doc:`lexer list `.
-
- Only two options are accepted:
-
- `compress`
- If set to ``'gz'`` or ``'bz2'``, compress the output with the given
- compression algorithm after encoding (default: ``''``).
- `error_color`
- If set to a color name, highlight error tokens using that color. If
- set but with no value, defaults to ``'red'``.
-
- .. versionadded:: 0.11
-
- """
- name = 'Raw tokens'
- aliases = ['raw', 'tokens']
- filenames = ['*.raw']
-
- unicodeoutput = False
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
- # We ignore self.encoding if it is set, since it gets set for lexer
- # and formatter if given with -Oencoding on the command line.
- # The RawTokenFormatter outputs only ASCII. Override here.
- self.encoding = 'ascii' # let pygments.format() do the right thing
- self.compress = get_choice_opt(options, 'compress',
- ['', 'none', 'gz', 'bz2'], '')
- self.error_color = options.get('error_color', None)
- if self.error_color is True:
- self.error_color = 'red'
- if self.error_color is not None:
- try:
- colorize(self.error_color, '')
- except KeyError:
- raise ValueError("Invalid color %r specified" %
- self.error_color)
-
- def format(self, tokensource, outfile):
- try:
- outfile.write(b'')
- except TypeError:
- raise TypeError('The raw tokens formatter needs a binary '
- 'output file')
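- # Pick write/flush callables according to the requested compression scheme.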
- if self.compress == 'gz':
- import gzip
- outfile = gzip.GzipFile('', 'wb', 9, outfile)
-
- write = outfile.write
- flush = outfile.close
- elif self.compress == 'bz2':
- import bz2
- compressor = bz2.BZ2Compressor(9)
-
- def write(text):
- outfile.write(compressor.compress(text))
-
- def flush():
- outfile.write(compressor.flush())
- outfile.flush()
- else:
- write = outfile.write
- flush = outfile.flush
-
- if self.error_color:
- for ttype, value in tokensource:
- line = b"%r\t%r\n" % (ttype, value)
- if ttype is Token.Error:
- write(colorize(self.error_color, line))
- else:
- write(line)
- else:
- for ttype, value in tokensource:
- write(b"%r\t%r\n" % (ttype, value))
- flush()
-
-
-TESTCASE_BEFORE = '''\
- def testNeedsName(lexer):
- fragment = %r
- tokens = [
-'''
-TESTCASE_AFTER = '''\
- ]
- assert list(lexer.get_tokens(fragment)) == tokens
-'''
-
-
-class TestcaseFormatter(Formatter):
- """
- Format tokens as appropriate for a new testcase.
-
- .. versionadded:: 2.0
- """
- name = 'Testcase'
- aliases = ['testcase']
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
- if self.encoding is not None and self.encoding != 'utf-8':
- raise ValueError("Only None and utf-8 are allowed encodings.")
-
- def format(self, tokensource, outfile):
- indentation = ' ' * 12
- rawbuf = []
- outbuf = []
- for ttype, value in tokensource:
- rawbuf.append(value)
- outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
-
- before = TESTCASE_BEFORE % (''.join(rawbuf),)
- during = ''.join(outbuf)
- after = TESTCASE_AFTER
- if self.encoding is None:
- outfile.write(before + during + after)
- else:
- outfile.write(before.encode('utf-8'))
- outfile.write(during.encode('utf-8'))
- outfile.write(after.encode('utf-8'))
- outfile.flush()
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/sphinxext.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/sphinxext.py
deleted file mode 100644
index 3537ecdb26f99b5a717d4c2a0e815636e1a9d58f..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/sphinxext.py
+++ /dev/null
@@ -1,217 +0,0 @@
-"""
- pygments.sphinxext
- ~~~~~~~~~~~~~~~~~~
-
- Sphinx extension to generate automatic documentation of lexers,
- formatters and filters.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-
-from docutils import nodes
-from docutils.statemachine import ViewList
-from docutils.parsers.rst import Directive
-from sphinx.util.nodes import nested_parse_with_titles
-
-
-MODULEDOC = '''
-.. module:: %s
-
-%s
-%s
-'''
-
-LEXERDOC = '''
-.. class:: %s
-
- :Short names: %s
- :Filenames: %s
- :MIME types: %s
-
- %s
-
-'''
-
-FMTERDOC = '''
-.. class:: %s
-
- :Short names: %s
- :Filenames: %s
-
- %s
-
-'''
-
-FILTERDOC = '''
-.. class:: %s
-
- :Name: %s
-
- %s
-
-'''
-
-
-class PygmentsDoc(Directive):
- """
- A directive to collect all lexers/formatters/filters and generate
- autoclass directives for them.
- """
- has_content = False
- required_arguments = 1
- optional_arguments = 0
- final_argument_whitespace = False
- option_spec = {}
-
- def run(self):
- self.filenames = set()
- if self.arguments[0] == 'lexers':
- out = self.document_lexers()
- elif self.arguments[0] == 'formatters':
- out = self.document_formatters()
- elif self.arguments[0] == 'filters':
- out = self.document_filters()
- elif self.arguments[0] == 'lexers_overview':
- out = self.document_lexers_overview()
- else:
- raise Exception('invalid argument for "pygmentsdoc" directive')
- node = nodes.compound()
- vl = ViewList(out.split('\n'), source='')
- nested_parse_with_titles(self.state, vl, node)
- for fn in self.filenames:
- self.state.document.settings.record_dependencies.add(fn)
- return node.children
-
- def document_lexers_overview(self):
- """Generate a tabular overview of all lexers.
-
- The columns are the lexer name, the extensions handled by this lexer
- (or "None"), the aliases and a link to the lexer class."""
- from pip._vendor.pygments.lexers._mapping import LEXERS
- from pip._vendor.pygments.lexers import find_lexer_class
- out = []
-
- table = []
-
- def format_link(name, url):
- if url:
- return f'`{name} <{url}>`_'
- return name
-
- for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
- lexer_cls = find_lexer_class(data[1])
- extensions = lexer_cls.filenames + lexer_cls.alias_filenames
-
- table.append({
- 'name': format_link(data[1], lexer_cls.url),
- 'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\_') or 'None',
- 'aliases': ', '.join(data[2]),
- 'class': f'{data[0]}.{classname}'
- })
-
- column_names = ['name', 'extensions', 'aliases', 'class']
- column_lengths = [max([len(row[column]) for row in table if row[column]])
- for column in column_names]
-
- def write_row(*columns):
- """Format a table row"""
- out = []
- for l, c in zip(column_lengths, columns):
- if c:
- out.append(c.ljust(l))
- else:
- out.append(' '*l)
-
- return ' '.join(out)
-
- def write_seperator():
- """Write a table separator row"""
- sep = ['='*c for c in column_lengths]
- return write_row(*sep)
-
- out.append(write_seperator())
- out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
- out.append(write_seperator())
- for row in table:
- out.append(write_row(
- row['name'],
- row['extensions'],
- row['aliases'],
- f':class:`~{row["class"]}`'))
- out.append(write_seperator())
-
- return '\n'.join(out)
-
- def document_lexers(self):
- from pip._vendor.pygments.lexers._mapping import LEXERS
- out = []
- modules = {}
- moduledocstrings = {}
- for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
- module = data[0]
- mod = __import__(module, None, None, [classname])
- self.filenames.add(mod.__file__)
- cls = getattr(mod, classname)
- if not cls.__doc__:
- print("Warning: %s does not have a docstring." % classname)
- docstring = cls.__doc__
- if isinstance(docstring, bytes):
- docstring = docstring.decode('utf8')
- modules.setdefault(module, []).append((
- classname,
- ', '.join(data[2]) or 'None',
- ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
- ', '.join(data[4]) or 'None',
- docstring))
- if module not in moduledocstrings:
- moddoc = mod.__doc__
- if isinstance(moddoc, bytes):
- moddoc = moddoc.decode('utf8')
- moduledocstrings[module] = moddoc
-
- for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
- if moduledocstrings[module] is None:
- raise Exception("Missing docstring for %s" % (module,))
- heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
- out.append(MODULEDOC % (module, heading, '-'*len(heading)))
- for data in lexers:
- out.append(LEXERDOC % data)
-
- return ''.join(out)
-
- def document_formatters(self):
- from pip._vendor.pygments.formatters import FORMATTERS
-
- out = []
- for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
- module = data[0]
- mod = __import__(module, None, None, [classname])
- self.filenames.add(mod.__file__)
- cls = getattr(mod, classname)
- docstring = cls.__doc__
- if isinstance(docstring, bytes):
- docstring = docstring.decode('utf8')
- heading = cls.__name__
- out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
- ', '.join(data[3]).replace('*', '\\*') or 'None',
- docstring))
- return ''.join(out)
-
- def document_filters(self):
- from pip._vendor.pygments.filters import FILTERS
-
- out = []
- for name, cls in FILTERS.items():
- self.filenames.add(sys.modules[cls.__module__].__file__)
- docstring = cls.__doc__
- if isinstance(docstring, bytes):
- docstring = docstring.decode('utf8')
- out.append(FILTERDOC % (cls.__name__, name, docstring))
- return ''.join(out)
-
-
-def setup(app):
- app.add_directive('pygmentsdoc', PygmentsDoc)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/testing.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/testing.py
deleted file mode 100644
index 84a0ef17078c99e5917db41e3dbaf035fe206d7c..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/testing.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# testing.py
-
-from contextlib import contextmanager
-import typing
-
-from .core import (
- ParserElement,
- ParseException,
- Keyword,
- __diag__,
- __compat__,
-)
-
-
-class pyparsing_test:
- """
- namespace class for classes useful in writing unit tests
- """
-
- class reset_pyparsing_context:
- """
- Context manager to be used when writing unit tests that modify pyparsing config values:
- - packrat parsing
- - bounded recursion parsing
- - default whitespace characters.
- - default keyword characters
- - literal string auto-conversion class
- - __diag__ settings
-
- Example::
-
- with reset_pyparsing_context():
- # test that literals used to construct a grammar are automatically suppressed
- ParserElement.inlineLiteralsUsing(Suppress)
-
- term = Word(alphas) | Word(nums)
- group = Group('(' + term[...] + ')')
-
- # assert that the '()' characters are not included in the parsed tokens
- self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
-
- # after exiting context manager, literals are converted to Literal expressions again
- """
-
- def __init__(self):
- self._save_context = {}
-
- def save(self):
- self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
- self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
-
- self._save_context[
- "literal_string_class"
- ] = ParserElement._literalStringClass
-
- self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
-
- self._save_context["packrat_enabled"] = ParserElement._packratEnabled
- if ParserElement._packratEnabled:
- self._save_context[
- "packrat_cache_size"
- ] = ParserElement.packrat_cache.size
- else:
- self._save_context["packrat_cache_size"] = None
- self._save_context["packrat_parse"] = ParserElement._parse
- self._save_context[
- "recursion_enabled"
- ] = ParserElement._left_recursion_enabled
-
- self._save_context["__diag__"] = {
- name: getattr(__diag__, name) for name in __diag__._all_names
- }
-
- self._save_context["__compat__"] = {
- "collect_all_And_tokens": __compat__.collect_all_And_tokens
- }
-
- return self
-
- def restore(self):
- # reset pyparsing global state
- if (
- ParserElement.DEFAULT_WHITE_CHARS
- != self._save_context["default_whitespace"]
- ):
- ParserElement.set_default_whitespace_chars(
- self._save_context["default_whitespace"]
- )
-
- ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
-
- Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
- ParserElement.inlineLiteralsUsing(
- self._save_context["literal_string_class"]
- )
-
- for name, value in self._save_context["__diag__"].items():
- (__diag__.enable if value else __diag__.disable)(name)
-
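- # Re-enable packrat with the saved cache size if it was on; otherwise restore the saved _parse method.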
- ParserElement._packratEnabled = False
- if self._save_context["packrat_enabled"]:
- ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
- else:
- ParserElement._parse = self._save_context["packrat_parse"]
- ParserElement._left_recursion_enabled = self._save_context[
- "recursion_enabled"
- ]
-
- __compat__.collect_all_And_tokens = self._save_context["__compat__"]
-
- return self
-
- def copy(self):
- ret = type(self)()
- ret._save_context.update(self._save_context)
- return ret
-
- def __enter__(self):
- return self.save()
-
- def __exit__(self, *args):
- self.restore()
-
- class TestParseResultsAsserts:
- """
- A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
- """
-
- def assertParseResultsEquals(
- self, result, expected_list=None, expected_dict=None, msg=None
- ):
- """
- Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
- and compare any defined results names with an optional ``expected_dict``.
- """
- if expected_list is not None:
- self.assertEqual(expected_list, result.as_list(), msg=msg)
- if expected_dict is not None:
- self.assertEqual(expected_dict, result.as_dict(), msg=msg)
-
- def assertParseAndCheckList(
- self, expr, test_string, expected_list, msg=None, verbose=True
- ):
- """
- Convenience wrapper assert to test a parser element and input string, and assert that
- the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
- """
- result = expr.parse_string(test_string, parse_all=True)
- if verbose:
- print(result.dump())
- else:
- print(result.as_list())
- self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
-
- def assertParseAndCheckDict(
- self, expr, test_string, expected_dict, msg=None, verbose=True
- ):
- """
- Convenience wrapper assert to test a parser element and input string, and assert that
- the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
- """
- result = expr.parse_string(test_string, parseAll=True)
- if verbose:
- print(result.dump())
- else:
- print(result.as_list())
- self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
-
- def assertRunTestResults(
- self, run_tests_report, expected_parse_results=None, msg=None
- ):
- """
- Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
- list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
- with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
- Finally, asserts that the overall ``runTests()`` success value is ``True``.
-
- :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
- :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
- """
- run_test_success, run_test_results = run_tests_report
-
- if expected_parse_results is not None:
- merged = [
- (*rpt, expected)
- for rpt, expected in zip(run_test_results, expected_parse_results)
- ]
- for test_string, result, expected in merged:
- # expected should be a tuple containing a list and/or a dict or an exception,
- # and optional failure message string
- # an empty tuple will skip any result validation
- fail_msg = next(
- (exp for exp in expected if isinstance(exp, str)), None
- )
- expected_exception = next(
- (
- exp
- for exp in expected
- if isinstance(exp, type) and issubclass(exp, Exception)
- ),
- None,
- )
- if expected_exception is not None:
- with self.assertRaises(
- expected_exception=expected_exception, msg=fail_msg or msg
- ):
- if isinstance(result, Exception):
- raise result
- else:
- expected_list = next(
- (exp for exp in expected if isinstance(exp, list)), None
- )
- expected_dict = next(
- (exp for exp in expected if isinstance(exp, dict)), None
- )
- if (expected_list, expected_dict) != (None, None):
- self.assertParseResultsEquals(
- result,
- expected_list=expected_list,
- expected_dict=expected_dict,
- msg=fail_msg or msg,
- )
- else:
- # warning here maybe?
- print("no validation for {!r}".format(test_string))
-
- # do this last, in case some specific test results can be reported instead
- self.assertTrue(
- run_test_success, msg=msg if msg is not None else "failed runTests"
- )
-
- @contextmanager
- def assertRaisesParseException(self, exc_type=ParseException, msg=None):
- with self.assertRaises(exc_type, msg=msg):
- yield
-
- @staticmethod
- def with_line_numbers(
- s: str,
- start_line: typing.Optional[int] = None,
- end_line: typing.Optional[int] = None,
- expand_tabs: bool = True,
- eol_mark: str = "|",
- mark_spaces: typing.Optional[str] = None,
- mark_control: typing.Optional[str] = None,
- ) -> str:
- """
- Helpful method for debugging a parser - prints a string with line and column numbers.
- (Line and column numbers are 1-based.)
-
- :param s: str - string to be printed with line and column numbers
- :param start_line: int - (optional) starting line number in s to print (default=1)
- :param end_line: int - (optional) ending line number in s to print (default=len(s))
- :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
- :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
- :param mark_spaces: str - (optional) special character to display in place of spaces
- :param mark_control: str - (optional) convert non-printing control characters to a placeholding
- character; valid values:
- - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
- - any single character string - replace control characters with given string
- - None (default) - string is displayed as-is
-
- :return: str - input string with leading line numbers and column number headers
- """
- if expand_tabs:
- s = s.expandtabs()
- if mark_control is not None:
- if mark_control == "unicode":
- tbl = str.maketrans(
- {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
- | {127: 0x2421}
- )
- eol_mark = ""
- else:
- tbl = str.maketrans(
- {c: mark_control for c in list(range(0, 32)) + [127]}
- )
- s = s.translate(tbl)
- if mark_spaces is not None and mark_spaces != " ":
- if mark_spaces == "unicode":
- tbl = str.maketrans({9: 0x2409, 32: 0x2423})
- s = s.translate(tbl)
- else:
- s = s.replace(" ", mark_spaces)
- if start_line is None:
- start_line = 1
- if end_line is None:
- end_line = len(s)
- end_line = min(end_line, len(s))
- start_line = min(max(1, start_line), end_line)
-
- if mark_control != "unicode":
- s_lines = s.splitlines()[start_line - 1 : end_line]
- else:
- s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
- if not s_lines:
- return ""
-
- lineno_width = len(str(end_line))
- max_line_len = max(len(line) for line in s_lines)
- lead = " " * (lineno_width + 1)
- if max_line_len >= 99:
- header0 = (
- lead
- + "".join(
- "{}{}".format(" " * 99, (i + 1) % 100)
- for i in range(max(max_line_len // 100, 1))
- )
- + "\n"
- )
- else:
- header0 = ""
- header1 = (
- header0
- + lead
- + "".join(
- " {}".format((i + 1) % 10)
- for i in range(-(-max_line_len // 10))
- )
- + "\n"
- )
- header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
- return (
- header1
- + header2
- + "\n".join(
- "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
- for i, line in enumerate(s_lines, start=start_line)
- )
- + "\n"
- )
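The assertions and `with_line_numbers` helper removed above belong to pyparsing's test-support namespace; a minimal sketch of how they are typically used, assuming pyparsing 3.x is installed and exports `pyparsing_test` (the parser and test string are placeholders):

```python
import unittest
import pyparsing as pp
from pyparsing import pyparsing_test as ppt  # assumed export in pyparsing 3.x

class NumberListTests(ppt.TestParseResultsAsserts, unittest.TestCase):
    def test_number_list(self):
        expr = pp.delimited_list(pp.Word(pp.nums))
        # checks both that parsing succeeds and that as_list() matches
        self.assertParseAndCheckList(expr, "1, 2, 3", ["1", "2", "3"])

# print a string with line and column headers, useful when a parse fails
print(ppt.with_line_numbers("first line\n\tsecond line  "))
```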
diff --git a/spaces/Blessin/drama-director/app.py b/spaces/Blessin/drama-director/app.py
deleted file mode 100644
index 1cdcc8cc3be67082cd96f5e593ca139f2be2318b..0000000000000000000000000000000000000000
--- a/spaces/Blessin/drama-director/app.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import gradio as gr
-import openai
-from gtts import gTTS
-import tempfile
-
-def generate_stage_directions(location, situation, api_key):
- prompt = (
- f"Write detailed stage directions for a scene with 5 characters, set in a {location}. "
- "You are reading these directions out loud to an audience, so keep the stage directions conversational. "
- "Do not break it down into different sections. Each character enters one by one, two of them enter as a couple. "
- "As they enter, tell us their name and describe their physical characteristics, their emotional state and their actions, "
- "gestures and movements in the scene. Write detailed stage direction on how they interact with the location they are in "
- "and with each other, with detailed description on their movements, actions and gestures in the scene. Make the overall "
- "scene highly dramatic, full of twists and turns, with lots of movement by the characters that keep changing positions "
- "and moving around. At some point, a {situation} happens in the scene. Show the characters interacting with elements of "
- "the location. Describe in vivid detail their emotion, facial expressions and emotions. You will also write dialogues for "
- "each character. Keep the dialogues short. Keep the scene mostly non-verbal, with only a few dialogues. Make the scene "
- "very dramatic, emotional, thrilling. Keep your response limited to 750 words."
- )
-
- openai.api_key = api_key # Set the API key from the user input
-
- try:
- response = openai.Completion.create(
- engine="text-davinci-003",
- prompt=prompt,
- max_tokens=750,
- temperature=0.7,
- )
- stage_directions = response.choices[0].text.strip()
- response_audio_path = text_to_audio(stage_directions)
- return response_audio_path
- except Exception as e:
- return str(e)
-
-def text_to_audio(text):
- tts = gTTS(text, lang='en')
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
- tts.save(temp_file.name)
- return temp_file.name
-
-# Create Gradio UI
-iface = gr.Interface(
- fn=generate_stage_directions,
- inputs=[
- gr.Textbox(label="Location"),
- gr.Textbox(label="Situation"),
- gr.Textbox(label="API Key")
- ],
- outputs=gr.Audio(type='filepath', label="Stage Directions"),
- live=True,
- title="DramaDirector",
- description="Input a location, situation, and your OpenAI API key to generate stage directions.",
-)
-
-iface.launch()
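For a quick local check without the Gradio UI, the deleted `generate_stage_directions` can be called directly; a hedged sketch in which the location, situation, and `sk-...` key are placeholders and the legacy `openai` 0.x client this file targets is assumed to be installed:

```python
# Returns the path of a temporary .mp3 with the narrated stage directions,
# or the error message string if the OpenAI call fails.
audio_path = generate_stage_directions(
    location="an abandoned lighthouse",   # placeholder input
    situation="sudden power outage",      # placeholder input
    api_key="sk-...",                     # placeholder key
)
print(audio_path)
```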
diff --git a/spaces/Boadiwaa/Recipes/openai/error.py b/spaces/Boadiwaa/Recipes/openai/error.py
deleted file mode 100644
index 47f9aab6bc18877b843d947ffd82432b8930cc9e..0000000000000000000000000000000000000000
--- a/spaces/Boadiwaa/Recipes/openai/error.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import openai
-
-
-class OpenAIError(Exception):
- def __init__(
- self,
- message=None,
- http_body=None,
- http_status=None,
- json_body=None,
- headers=None,
- code=None,
- ):
- super(OpenAIError, self).__init__(message)
-
- if http_body and hasattr(http_body, "decode"):
- try:
- http_body = http_body.decode("utf-8")
- except BaseException:
- http_body = (
- "<Could not decode body as utf-8. "
- "Please report to support@openai.com>"
- )
-
- self._message = message
- self.http_body = http_body
- self.http_status = http_status
- self.json_body = json_body
- self.headers = headers or {}
- self.code = code
- self.request_id = self.headers.get("request-id", None)
- self.error = self.construct_error_object()
- self.organization = self.headers.get("openai-organization", None)
-
- def __str__(self):
- msg = self._message or ""
- if self.request_id is not None:
- return "Request {0}: {1}".format(self.request_id, msg)
- else:
- return msg
-
- # Returns the underlying `Exception` (base class) message, which is usually
- # the raw message returned by OpenAI's API. This was previously available
- # in python2 via `error.message`. Unlike `str(error)`, it omits "Request
- # req_..." from the beginning of the string.
- @property
- def user_message(self):
- return self._message
-
- def __repr__(self):
- return "%s(message=%r, http_status=%r, request_id=%r)" % (
- self.__class__.__name__,
- self._message,
- self.http_status,
- self.request_id,
- )
-
- def construct_error_object(self):
- if (
- self.json_body is None
- or "error" not in self.json_body
- or not isinstance(self.json_body["error"], dict)
- ):
- return None
-
- return openai.api_resources.error_object.ErrorObject.construct_from(
- self.json_body["error"]
- )
-
-
-class APIError(OpenAIError):
- pass
-
-
-class TryAgain(OpenAIError):
- pass
-
-
-class APIConnectionError(OpenAIError):
- def __init__(
- self,
- message,
- http_body=None,
- http_status=None,
- json_body=None,
- headers=None,
- code=None,
- should_retry=False,
- ):
- super(APIConnectionError, self).__init__(
- message, http_body, http_status, json_body, headers, code
- )
- self.should_retry = should_retry
-
-
-class InvalidRequestError(OpenAIError):
- def __init__(
- self,
- message,
- param,
- code=None,
- http_body=None,
- http_status=None,
- json_body=None,
- headers=None,
- ):
- super(InvalidRequestError, self).__init__(
- message, http_body, http_status, json_body, headers, code
- )
- self.param = param
-
- def __repr__(self):
- return "%s(message=%r, param=%r, code=%r, http_status=%r, " "request_id=%r)" % (
- self.__class__.__name__,
- self._message,
- self.param,
- self.code,
- self.http_status,
- self.request_id,
- )
-
- def __reduce__(self):
- return type(self), (
- self._message,
- self.param,
- self.code,
- self.http_body,
- self.http_status,
- self.json_body,
- self.headers,
- )
-
-
-class AuthenticationError(OpenAIError):
- pass
-
-
-class PermissionError(OpenAIError):
- pass
-
-
-class RateLimitError(OpenAIError):
- pass
-
-
-class ServiceUnavailableError(OpenAIError):
- pass
-
-
-class InvalidAPIType(OpenAIError):
- pass
-
-
-class SignatureVerificationError(OpenAIError):
- def __init__(self, message, sig_header, http_body=None):
- super(SignatureVerificationError, self).__init__(message, http_body)
- self.sig_header = sig_header
-
- def __reduce__(self):
- return type(self), (
- self._message,
- self.sig_header,
- self.http_body,
- )
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/__init__.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/__init__.py
deleted file mode 100644
index b04d1dc275abef1a090a5f51c5d5a32d5541704d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from . import transforms # isort:skip
-
-from .build import (
- build_detection_test_loader,
- build_detection_train_loader,
- get_detection_dataset_dicts,
- load_proposals_into_dataset,
- print_instances_class_histogram,
-)
-from .catalog import DatasetCatalog, MetadataCatalog
-from .common import DatasetFromList, MapDataset
-from .dataset_mapper import DatasetMapper
-
-# ensure the builtin datasets are registered
-from . import datasets, samplers # isort:skip
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.py b/spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.py
deleted file mode 100644
index 5257e0cd3061707f0dd1b79de54a0c6cdae81cd1..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# -*- coding: utf-8 -*-
-import pytest
-
-import env # noqa: F401
-
-from pybind11_tests import kwargs_and_defaults as m
-
-
-def test_function_signatures(doc):
- assert doc(m.kw_func0) == "kw_func0(arg0: int, arg1: int) -> str"
- assert doc(m.kw_func1) == "kw_func1(x: int, y: int) -> str"
- assert doc(m.kw_func2) == "kw_func2(x: int = 100, y: int = 200) -> str"
- assert doc(m.kw_func3) == "kw_func3(data: str = 'Hello world!') -> None"
- assert doc(m.kw_func4) == "kw_func4(myList: List[int] = [13, 17]) -> str"
- assert doc(m.kw_func_udl) == "kw_func_udl(x: int, y: int = 300) -> str"
- assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str"
- assert doc(m.args_function) == "args_function(*args) -> tuple"
- assert doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
- assert doc(m.KWClass.foo0) == \
- "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
- assert doc(m.KWClass.foo1) == \
- "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
-
-
-def test_named_arguments(msg):
- assert m.kw_func0(5, 10) == "x=5, y=10"
-
- assert m.kw_func1(5, 10) == "x=5, y=10"
- assert m.kw_func1(5, y=10) == "x=5, y=10"
- assert m.kw_func1(y=10, x=5) == "x=5, y=10"
-
- assert m.kw_func2() == "x=100, y=200"
- assert m.kw_func2(5) == "x=5, y=200"
- assert m.kw_func2(x=5) == "x=5, y=200"
- assert m.kw_func2(y=10) == "x=100, y=10"
- assert m.kw_func2(5, 10) == "x=5, y=10"
- assert m.kw_func2(x=5, y=10) == "x=5, y=10"
-
- with pytest.raises(TypeError) as excinfo:
- # noinspection PyArgumentList
- m.kw_func2(x=5, y=10, z=12)
- assert excinfo.match(
- r'(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$')
-
- assert m.kw_func4() == "{13 17}"
- assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}"
-
- assert m.kw_func_udl(x=5, y=10) == "x=5, y=10"
- assert m.kw_func_udl_z(x=5) == "x=5, y=0"
-
-
-def test_arg_and_kwargs():
- args = 'arg1_value', 'arg2_value', 3
- assert m.args_function(*args) == args
-
- args = 'a1', 'a2'
- kwargs = dict(arg3='a3', arg4=4)
- assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs)
-
-
-def test_mixed_args_and_kwargs(msg):
- mpa = m.mixed_plus_args
- mpk = m.mixed_plus_kwargs
- mpak = m.mixed_plus_args_kwargs
- mpakd = m.mixed_plus_args_kwargs_defaults
-
- assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None))
- assert mpa(1, 2.5) == (1, 2.5, ())
- with pytest.raises(TypeError) as excinfo:
- assert mpa(1)
- assert msg(excinfo.value) == """
- mixed_plus_args(): incompatible function arguments. The following argument types are supported:
- 1. (arg0: int, arg1: float, *args) -> tuple
-
- Invoked with: 1
- """ # noqa: E501 line too long
- with pytest.raises(TypeError) as excinfo:
- assert mpa()
- assert msg(excinfo.value) == """
- mixed_plus_args(): incompatible function arguments. The following argument types are supported:
- 1. (arg0: int, arg1: float, *args) -> tuple
-
- Invoked with:
- """ # noqa: E501 line too long
-
- assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159})
- assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (
- 7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7})
- assert mpakd() == (1, 3.14159, (), {})
- assert mpakd(3) == (3, 3.14159, (), {})
- assert mpakd(j=2.71828) == (1, 2.71828, (), {})
- assert mpakd(k=42) == (1, 3.14159, (), {'k': 42})
- assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (
- 1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21})
- # Arguments specified both positionally and via kwargs should fail:
- with pytest.raises(TypeError) as excinfo:
- assert mpakd(1, i=1)
- assert msg(excinfo.value) == """
- mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
- 1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
-
- Invoked with: 1; kwargs: i=1
- """ # noqa: E501 line too long
- with pytest.raises(TypeError) as excinfo:
- assert mpakd(1, 2, j=1)
- assert msg(excinfo.value) == """
- mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
- 1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
-
- Invoked with: 1, 2; kwargs: j=1
- """ # noqa: E501 line too long
-
-
-def test_keyword_only_args(msg):
- assert m.kwonly_all(i=1, j=2) == (1, 2)
- assert m.kwonly_all(j=1, i=2) == (2, 1)
-
- with pytest.raises(TypeError) as excinfo:
- assert m.kwonly_all(i=1) == (1,)
- assert "incompatible function arguments" in str(excinfo.value)
-
- with pytest.raises(TypeError) as excinfo:
- assert m.kwonly_all(1, 2) == (1, 2)
- assert "incompatible function arguments" in str(excinfo.value)
-
- assert m.kwonly_some(1, k=3, j=2) == (1, 2, 3)
-
- assert m.kwonly_with_defaults(z=8) == (3, 4, 5, 8)
- assert m.kwonly_with_defaults(2, z=8) == (2, 4, 5, 8)
- assert m.kwonly_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9)
- assert m.kwonly_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9)
-
- assert m.kwonly_mixed(1, j=2) == (1, 2)
- assert m.kwonly_mixed(j=2, i=3) == (3, 2)
- assert m.kwonly_mixed(i=2, j=3) == (2, 3)
-
- assert m.kwonly_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {'extra': 7})
- assert m.kwonly_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {'extra': 6})
- assert m.kwonly_plus_more(2, k=3, extra=4) == (2, -1, 3, {'extra': 4})
-
- with pytest.raises(TypeError) as excinfo:
- assert m.kwonly_mixed(i=1) == (1,)
- assert "incompatible function arguments" in str(excinfo.value)
-
- with pytest.raises(RuntimeError) as excinfo:
- m.register_invalid_kwonly(m)
- assert msg(excinfo.value) == """
- arg(): cannot specify an unnamed argument after an kwonly() annotation
- """
-
-
-@pytest.mark.xfail("env.PYPY and env.PY2", reason="PyPy2 doesn't double count")
-def test_args_refcount():
- """Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular
- arguments"""
- refcount = m.arg_refcount_h
-
- myval = 54321
- expected = refcount(myval)
- assert m.arg_refcount_h(myval) == expected
- assert m.arg_refcount_o(myval) == expected + 1
- assert m.arg_refcount_h(myval) == expected
- assert refcount(myval) == expected
-
- assert m.mixed_plus_args(1, 2.0, "a", myval) == (1, 2.0, ("a", myval))
- assert refcount(myval) == expected
-
- assert m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {"a": 1, "b": myval})
- assert refcount(myval) == expected
-
- assert m.args_function(-1, myval) == (-1, myval)
- assert refcount(myval) == expected
-
- assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {"a": myval})
- assert refcount(myval) == expected
-
- assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \
- ((7, 8, myval), {"a": 1, "b": myval})
- assert refcount(myval) == expected
-
- exp3 = refcount(myval, myval, myval)
- assert m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3)
- assert refcount(myval) == expected
-
- # This function takes the first arg as a `py::object` and the rest as a `py::args`. Unlike the
- # previous case, when we have both positional and `py::args` we need to construct a new tuple
- # for the `py::args`; in the previous case, we could simply inc_ref and pass on Python's input
- # tuple without having to inc_ref the individual elements, but here we can't, hence the extra
- # refs.
- assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3)
-
- assert m.class_default_argument() == ""
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par.h
deleted file mode 100644
index d232a6cfacb03f9b8b5a420542c2d690723a5622..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the NVIDIA CORPORATION nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include
-
-#if THRUST_CPP_DIALECT >= 2011
-# include
-#endif
-
-
-namespace thrust
-{
-namespace cuda_cub {
-
-template <class Derived>
-struct execute_on_stream_base : execution_policy<Derived>
-{
-private:
- cudaStream_t stream;
-
-public:
- __host__ __device__
- execute_on_stream_base(cudaStream_t stream_ = default_stream())
- : stream(stream_) {}
-
- THRUST_RUNTIME_FUNCTION
- Derived
- on(cudaStream_t const &s) const
- {
- Derived result = derived_cast(*this);
- result.stream = s;
- return result;
- }
-
-private:
- friend __host__ __device__
- cudaStream_t
- get_stream(const execute_on_stream_base &exec)
- {
- return exec.stream;
- }
-};
-
-struct execute_on_stream : execute_on_stream_base<execute_on_stream>
-{
- typedef execute_on_stream_base<execute_on_stream> base_t;
-
- __host__ __device__
- execute_on_stream() : base_t(){};
- __host__ __device__
- execute_on_stream(cudaStream_t stream) : base_t(stream){};
-};
-
-
-struct par_t : execution_policy<par_t>,
- thrust::detail::allocator_aware_execution_policy<
- execute_on_stream_base>
-#if THRUST_CPP_DIALECT >= 2011
-, thrust::detail::dependencies_aware_execution_policy<
- execute_on_stream_base>
-#endif
-{
- typedef execution_policy<par_t> base_t;
-
- __host__ __device__
- THRUST_CONSTEXPR par_t() : base_t() {}
-
- typedef execute_on_stream stream_attachment_type;
-
- THRUST_RUNTIME_FUNCTION
- stream_attachment_type
- on(cudaStream_t const &stream) const
- {
- return execute_on_stream(stream);
- }
-};
-
-THRUST_INLINE_CONSTANT par_t par;
-} // namespace cuda_cub
-
-namespace system {
-namespace cuda {
- using thrust::cuda_cub::par;
- namespace detail {
- using thrust::cuda_cub::par_t;
- }
-} // namespace cuda
-} // namespace system
-
-namespace cuda {
-using thrust::cuda_cub::par;
-} // namespace cuda
-
-} // end namespace thrust
-
diff --git a/spaces/CVPR/regionclip-demo/detectron2/layers/roi_align_rotated.py b/spaces/CVPR/regionclip-demo/detectron2/layers/roi_align_rotated.py
deleted file mode 100644
index e3775e08fc9b9172f73c8ec7025a51ef2edd0a1d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/detectron2/layers/roi_align_rotated.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import torch
-from torch import nn
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.utils import _pair
-
-from detectron2 import _C
-
-
-class _ROIAlignRotated(Function):
- @staticmethod
- def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
- ctx.save_for_backward(roi)
- ctx.output_size = _pair(output_size)
- ctx.spatial_scale = spatial_scale
- ctx.sampling_ratio = sampling_ratio
- ctx.input_shape = input.size()
- output = _C.roi_align_rotated_forward(
- input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
- )
- return output
-
- @staticmethod
- @once_differentiable
- def backward(ctx, grad_output):
- (rois,) = ctx.saved_tensors
- output_size = ctx.output_size
- spatial_scale = ctx.spatial_scale
- sampling_ratio = ctx.sampling_ratio
- bs, ch, h, w = ctx.input_shape
- grad_input = _C.roi_align_rotated_backward(
- grad_output,
- rois,
- spatial_scale,
- output_size[0],
- output_size[1],
- bs,
- ch,
- h,
- w,
- sampling_ratio,
- )
- return grad_input, None, None, None, None, None
-
-
-roi_align_rotated = _ROIAlignRotated.apply
-
-
-class ROIAlignRotated(nn.Module):
- def __init__(self, output_size, spatial_scale, sampling_ratio):
- """
- Args:
- output_size (tuple): h, w
- spatial_scale (float): scale the input boxes by this number
- sampling_ratio (int): number of input samples to take for each output
- sample. 0 to take samples densely.
-
- Note:
- ROIAlignRotated supports continuous coordinate by default:
- Given a continuous coordinate c, its two neighboring pixel indices (in our
- pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
- c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
- from the underlying signal at continuous coordinates 0.5 and 1.5).
- """
- super(ROIAlignRotated, self).__init__()
- self.output_size = output_size
- self.spatial_scale = spatial_scale
- self.sampling_ratio = sampling_ratio
-
- def forward(self, input, rois):
- """
- Args:
- input: NCHW images
- rois: Bx6 boxes. First column is the index into N.
- The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
- """
- assert rois.dim() == 2 and rois.size(1) == 6
- orig_dtype = input.dtype
- if orig_dtype == torch.float16:
- input = input.float()
- rois = rois.float()
- return roi_align_rotated(
- input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
- ).to(dtype=orig_dtype)
-
- def __repr__(self):
- tmpstr = self.__class__.__name__ + "("
- tmpstr += "output_size=" + str(self.output_size)
- tmpstr += ", spatial_scale=" + str(self.spatial_scale)
- tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
- tmpstr += ")"
- return tmpstr
diff --git a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/common_layers.py b/spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/common_layers.py
deleted file mode 100644
index 3a3ee3a6d95c193ee7ceb0ec0568eeb366b5d6b0..0000000000000000000000000000000000000000
--- a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/common_layers.py
+++ /dev/null
@@ -1,675 +0,0 @@
-import math
-
-import torch
-import torch.nn.functional as F
-import torch.onnx.operators
-from torch import nn
-from torch.nn import Parameter
-
-import utils
-
-
-class Reshape(nn.Module):
- def __init__(self, *args):
- super(Reshape, self).__init__()
- self.shape = args
-
- def forward(self, x):
- return x.view(self.shape)
-
-
-class Permute(nn.Module):
- def __init__(self, *args):
- super(Permute, self).__init__()
- self.args = args
-
- def forward(self, x):
- return x.permute(self.args)
-
-
-class LinearNorm(torch.nn.Module):
- def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
- super(LinearNorm, self).__init__()
- self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
-
- torch.nn.init.xavier_uniform_(
- self.linear_layer.weight,
- gain=torch.nn.init.calculate_gain(w_init_gain))
-
- def forward(self, x):
- return self.linear_layer(x)
-
-
-class ConvNorm(torch.nn.Module):
- def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
- padding=None, dilation=1, bias=True, w_init_gain='linear'):
- super(ConvNorm, self).__init__()
- if padding is None:
- assert (kernel_size % 2 == 1)
- padding = int(dilation * (kernel_size - 1) / 2)
-
- self.conv = torch.nn.Conv1d(in_channels, out_channels,
- kernel_size=kernel_size, stride=stride,
- padding=padding, dilation=dilation,
- bias=bias)
-
- torch.nn.init.xavier_uniform_(
- self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
-
- def forward(self, signal):
- conv_signal = self.conv(signal)
- return conv_signal
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx=None):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
- if padding_idx is not None:
- nn.init.constant_(m.weight[padding_idx], 0)
- return m
-
-
-def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
- if not export and torch.cuda.is_available():
- try:
- from apex.normalization import FusedLayerNorm
- return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
- except ImportError:
- pass
- return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
-
-
-def Linear(in_features, out_features, bias=True):
- m = nn.Linear(in_features, out_features, bias)
- nn.init.xavier_uniform_(m.weight)
- if bias:
- nn.init.constant_(m.bias, 0.)
- return m
-
-
-class SinusoidalPositionalEmbedding(nn.Module):
- """This module produces sinusoidal positional embeddings of any length.
-
- Padding symbols are ignored.
- """
-
- def __init__(self, embedding_dim, padding_idx, init_size=1024):
- super().__init__()
- self.embedding_dim = embedding_dim
- self.padding_idx = padding_idx
- self.weights = SinusoidalPositionalEmbedding.get_embedding(
- init_size,
- embedding_dim,
- padding_idx,
- )
- self.register_buffer('_float_tensor', torch.FloatTensor(1))
-
- @staticmethod
- def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
- """Build sinusoidal embeddings.
-
- This matches the implementation in tensor2tensor, but differs slightly
- from the description in Section 3.5 of "Attention Is All You Need".
- """
- half_dim = embedding_dim // 2
- emb = math.log(10000) / (half_dim - 1)
- emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
- emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
- if embedding_dim % 2 == 1:
- # zero pad
- emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
- if padding_idx is not None:
- emb[padding_idx, :] = 0
- return emb
-
- def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
- """Input is expected to be of size [bsz x seqlen]."""
- bsz, seq_len = input.shape[:2]
- max_pos = self.padding_idx + 1 + seq_len
- if self.weights is None or max_pos > self.weights.size(0):
- # recompute/expand embeddings if needed
- self.weights = SinusoidalPositionalEmbedding.get_embedding(
- max_pos,
- self.embedding_dim,
- self.padding_idx,
- )
- self.weights = self.weights.to(self._float_tensor)
-
- if incremental_state is not None:
- # positions is the same for every token when decoding a single step
- pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
- return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
-
- positions = utils.make_positions(input, self.padding_idx) if positions is None else positions
- return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
-
- def max_positions(self):
- """Maximum number of supported positions."""
- return int(1e5) # an arbitrary large number
-
-
-class ConvTBC(nn.Module):
- def __init__(self, in_channels, out_channels, kernel_size, padding=0):
- super(ConvTBC, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.padding = padding
-
- self.weight = torch.nn.Parameter(torch.Tensor(
- self.kernel_size, in_channels, out_channels))
- self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
-
- def forward(self, input):
- return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding)
-
-
-class MultiheadAttention(nn.Module):
- def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
- add_bias_kv=False, add_zero_attn=False, self_attention=False,
- encoder_decoder_attention=False):
- super().__init__()
- self.embed_dim = embed_dim
- self.kdim = kdim if kdim is not None else embed_dim
- self.vdim = vdim if vdim is not None else embed_dim
- self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
-
- self.num_heads = num_heads
- self.dropout = dropout
- self.head_dim = embed_dim // num_heads
- assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
- self.scaling = self.head_dim ** -0.5
-
- self.self_attention = self_attention
- self.encoder_decoder_attention = encoder_decoder_attention
-
- assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
- 'value to be of the same size'
-
- if self.qkv_same_dim:
- self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
- else:
- self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
- self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
- self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
-
- if bias:
- self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
- else:
- self.register_parameter('in_proj_bias', None)
-
- self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
-
- if add_bias_kv:
- self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
- self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
- else:
- self.bias_k = self.bias_v = None
-
- self.add_zero_attn = add_zero_attn
-
- self.reset_parameters()
-
- self.enable_torch_version = False
- if hasattr(F, "multi_head_attention_forward"):
- self.enable_torch_version = True
- else:
- self.enable_torch_version = False
- self.last_attn_probs = None
-
- def reset_parameters(self):
- if self.qkv_same_dim:
- nn.init.xavier_uniform_(self.in_proj_weight)
- else:
- nn.init.xavier_uniform_(self.k_proj_weight)
- nn.init.xavier_uniform_(self.v_proj_weight)
- nn.init.xavier_uniform_(self.q_proj_weight)
-
- nn.init.xavier_uniform_(self.out_proj.weight)
- if self.in_proj_bias is not None:
- nn.init.constant_(self.in_proj_bias, 0.)
- nn.init.constant_(self.out_proj.bias, 0.)
- if self.bias_k is not None:
- nn.init.xavier_normal_(self.bias_k)
- if self.bias_v is not None:
- nn.init.xavier_normal_(self.bias_v)
-
- def forward(
- self,
- query, key, value,
- key_padding_mask=None,
- incremental_state=None,
- need_weights=True,
- static_kv=False,
- attn_mask=None,
- before_softmax=False,
- need_head_weights=False,
- enc_dec_attn_constraint_mask=None,
- reset_attn_weight=None
- ):
- """Input shape: Time x Batch x Channel
-
- Args:
- key_padding_mask (ByteTensor, optional): mask to exclude
- keys that are pads, of shape `(batch, src_len)`, where
- padding elements are indicated by 1s.
- need_weights (bool, optional): return the attention weights,
- averaged over heads (default: False).
- attn_mask (ByteTensor, optional): typically used to
- implement causal attention, where the mask prevents the
- attention from looking forward in time (default: None).
- before_softmax (bool, optional): return the raw attention
- weights and values before the attention softmax.
- need_head_weights (bool, optional): return the attention
- weights for each head. Implies *need_weights*. Default:
- return the average attention weights over all heads.
- """
- if need_head_weights:
- need_weights = True
-
- tgt_len, bsz, embed_dim = query.size()
- assert embed_dim == self.embed_dim
- assert list(query.size()) == [tgt_len, bsz, embed_dim]
-
- if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
- if self.qkv_same_dim:
- return F.multi_head_attention_forward(query, key, value,
- self.embed_dim, self.num_heads,
- self.in_proj_weight,
- self.in_proj_bias, self.bias_k, self.bias_v,
- self.add_zero_attn, self.dropout,
- self.out_proj.weight, self.out_proj.bias,
- self.training, key_padding_mask, need_weights,
- attn_mask)
- else:
- return F.multi_head_attention_forward(query, key, value,
- self.embed_dim, self.num_heads,
- torch.empty([0]),
- self.in_proj_bias, self.bias_k, self.bias_v,
- self.add_zero_attn, self.dropout,
- self.out_proj.weight, self.out_proj.bias,
- self.training, key_padding_mask, need_weights,
- attn_mask, use_separate_proj_weight=True,
- q_proj_weight=self.q_proj_weight,
- k_proj_weight=self.k_proj_weight,
- v_proj_weight=self.v_proj_weight)
-
- if incremental_state is not None:
- print('Not implemented error.')
- exit()
- else:
- saved_state = None
-
- if self.self_attention:
- # self-attention
- q, k, v = self.in_proj_qkv(query)
- elif self.encoder_decoder_attention:
- # encoder-decoder attention
- q = self.in_proj_q(query)
- if key is None:
- assert value is None
- k = v = None
- else:
- k = self.in_proj_k(key)
- v = self.in_proj_v(key)
-
- else:
- q = self.in_proj_q(query)
- k = self.in_proj_k(key)
- v = self.in_proj_v(value)
- q *= self.scaling
-
- if self.bias_k is not None:
- assert self.bias_v is not None
- k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
- v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
- if attn_mask is not None:
- attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
- if key_padding_mask is not None:
- key_padding_mask = torch.cat(
- [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
-
- q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
- if k is not None:
- k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
- if v is not None:
- v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
-
- if saved_state is not None:
- print('Not implemented error.')
- exit()
-
- src_len = k.size(1)
-
- # This is part of a workaround to get around fork/join parallelism
- # not supporting Optional types.
- if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
- key_padding_mask = None
-
- if key_padding_mask is not None:
- assert key_padding_mask.size(0) == bsz
- assert key_padding_mask.size(1) == src_len
-
- if self.add_zero_attn:
- src_len += 1
- k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
- v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
- if attn_mask is not None:
- attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
- if key_padding_mask is not None:
- key_padding_mask = torch.cat(
- [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
-
- attn_weights = torch.bmm(q, k.transpose(1, 2))
- attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
-
- assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
-
- if attn_mask is not None:
- if len(attn_mask.shape) == 2:
- attn_mask = attn_mask.unsqueeze(0)
- elif len(attn_mask.shape) == 3:
- attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
- bsz * self.num_heads, tgt_len, src_len)
- attn_weights = attn_weights + attn_mask
-
- if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv
- attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
- attn_weights = attn_weights.masked_fill(
- enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
- -1e9,
- )
- attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
-
- if key_padding_mask is not None:
- # don't attend to padding symbols
- attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
- attn_weights = attn_weights.masked_fill(
- key_padding_mask.unsqueeze(1).unsqueeze(2),
- -1e9,
- )
- attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
-
- attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
-
- if before_softmax:
- return attn_weights, v
-
- attn_weights_float = utils.softmax(attn_weights, dim=-1)
- attn_weights = attn_weights_float.type_as(attn_weights)
- attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
-
- if reset_attn_weight is not None:
- if reset_attn_weight:
- self.last_attn_probs = attn_probs.detach()
- else:
- assert self.last_attn_probs is not None
- attn_probs = self.last_attn_probs
- attn = torch.bmm(attn_probs, v)
- assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
- attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
- attn = self.out_proj(attn)
-
- if need_weights:
- attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
- if not need_head_weights:
- # average attention weights over heads
- attn_weights = attn_weights.mean(dim=0)
- else:
- attn_weights = None
-
- return attn, (attn_weights, attn_logits)
-
- def in_proj_qkv(self, query):
- return self._in_proj(query).chunk(3, dim=-1)
-
- def in_proj_q(self, query):
- if self.qkv_same_dim:
- return self._in_proj(query, end=self.embed_dim)
- else:
- bias = self.in_proj_bias
- if bias is not None:
- bias = bias[:self.embed_dim]
- return F.linear(query, self.q_proj_weight, bias)
-
- def in_proj_k(self, key):
- if self.qkv_same_dim:
- return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
- else:
- weight = self.k_proj_weight
- bias = self.in_proj_bias
- if bias is not None:
- bias = bias[self.embed_dim:2 * self.embed_dim]
- return F.linear(key, weight, bias)
-
- def in_proj_v(self, value):
- if self.qkv_same_dim:
- return self._in_proj(value, start=2 * self.embed_dim)
- else:
- weight = self.v_proj_weight
- bias = self.in_proj_bias
- if bias is not None:
- bias = bias[2 * self.embed_dim:]
- return F.linear(value, weight, bias)
-
- def _in_proj(self, input, start=0, end=None):
- weight = self.in_proj_weight
- bias = self.in_proj_bias
- weight = weight[start:end, :]
- if bias is not None:
- bias = bias[start:end]
- return F.linear(input, weight, bias)
-
- def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
- return attn_weights
-
-
-class Swish(torch.autograd.Function):
- @staticmethod
- def forward(ctx, i):
- result = i * torch.sigmoid(i)
- ctx.save_for_backward(i)
- return result
-
- @staticmethod
- def backward(ctx, grad_output):
- i = ctx.saved_variables[0]
- sigmoid_i = torch.sigmoid(i)
- return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
-
-
-class CustomSwish(nn.Module):
- def forward(self, input_tensor):
- return Swish.apply(input_tensor)
-
-
-class Mish(nn.Module):
- def forward(self, x):
- return x * torch.tanh(F.softplus(x))
-
-
-class TransformerFFNLayer(nn.Module):
- def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
- super().__init__()
- self.kernel_size = kernel_size
- self.dropout = dropout
- self.act = act
- if padding == 'SAME':
- self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
- elif padding == 'LEFT':
- self.ffn_1 = nn.Sequential(
- nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
- nn.Conv1d(hidden_size, filter_size, kernel_size)
- )
- self.ffn_2 = Linear(filter_size, hidden_size)
- if self.act == 'swish':
- self.swish_fn = CustomSwish()
-
- def forward(self, x, incremental_state=None):
- # x: T x B x C
- if incremental_state is not None:
- assert incremental_state is None, 'Nar-generation does not allow this.'
- exit(1)
-
- x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
- x = x * self.kernel_size ** -0.5
-
- if incremental_state is not None:
- x = x[-1:]
- if self.act == 'gelu':
- x = F.gelu(x)
- if self.act == 'relu':
- x = F.relu(x)
- if self.act == 'swish':
- x = self.swish_fn(x)
- x = F.dropout(x, self.dropout, training=self.training)
- x = self.ffn_2(x)
- return x
-
-
-class BatchNorm1dTBC(nn.Module):
- def __init__(self, c):
- super(BatchNorm1dTBC, self).__init__()
- self.bn = nn.BatchNorm1d(c)
-
- def forward(self, x):
- """
-
- :param x: [T, B, C]
- :return: [T, B, C]
- """
- x = x.permute(1, 2, 0) # [B, C, T]
- x = self.bn(x) # [B, C, T]
- x = x.permute(2, 0, 1) # [T, B, C]
- return x
-
-
-class EncSALayer(nn.Module):
- def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
- relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'):
- super().__init__()
- self.c = c
- self.dropout = dropout
- self.num_heads = num_heads
- if num_heads > 0:
- if norm == 'ln':
- self.layer_norm1 = LayerNorm(c)
- elif norm == 'bn':
- self.layer_norm1 = BatchNorm1dTBC(c)
- self.self_attn = MultiheadAttention(
- self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
- )
- if norm == 'ln':
- self.layer_norm2 = LayerNorm(c)
- elif norm == 'bn':
- self.layer_norm2 = BatchNorm1dTBC(c)
- self.ffn = TransformerFFNLayer(
- c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
-
- def forward(self, x, encoder_padding_mask=None, **kwargs):
- layer_norm_training = kwargs.get('layer_norm_training', None)
- if layer_norm_training is not None:
- self.layer_norm1.training = layer_norm_training
- self.layer_norm2.training = layer_norm_training
- if self.num_heads > 0:
- residual = x
- x = self.layer_norm1(x)
- x, _, = self.self_attn(
- query=x,
- key=x,
- value=x,
- key_padding_mask=encoder_padding_mask
- )
- x = F.dropout(x, self.dropout, training=self.training)
- x = residual + x
- x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
-
- residual = x
- x = self.layer_norm2(x)
- x = self.ffn(x)
- x = F.dropout(x, self.dropout, training=self.training)
- x = residual + x
- x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
- return x
-
-
-class DecSALayer(nn.Module):
- def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9, act='gelu'):
- super().__init__()
- self.c = c
- self.dropout = dropout
- self.layer_norm1 = LayerNorm(c)
- self.self_attn = MultiheadAttention(
- c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
- )
- self.layer_norm2 = LayerNorm(c)
- self.encoder_attn = MultiheadAttention(
- c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
- )
- self.layer_norm3 = LayerNorm(c)
- self.ffn = TransformerFFNLayer(
- c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
-
- def forward(
- self,
- x,
- encoder_out=None,
- encoder_padding_mask=None,
- incremental_state=None,
- self_attn_mask=None,
- self_attn_padding_mask=None,
- attn_out=None,
- reset_attn_weight=None,
- **kwargs,
- ):
- layer_norm_training = kwargs.get('layer_norm_training', None)
- if layer_norm_training is not None:
- self.layer_norm1.training = layer_norm_training
- self.layer_norm2.training = layer_norm_training
- self.layer_norm3.training = layer_norm_training
- residual = x
- x = self.layer_norm1(x)
- x, _ = self.self_attn(
- query=x,
- key=x,
- value=x,
- key_padding_mask=self_attn_padding_mask,
- incremental_state=incremental_state,
- attn_mask=self_attn_mask
- )
- x = F.dropout(x, self.dropout, training=self.training)
- x = residual + x
-
- residual = x
- x = self.layer_norm2(x)
- if encoder_out is not None:
- x, attn = self.encoder_attn(
- query=x,
- key=encoder_out,
- value=encoder_out,
- key_padding_mask=encoder_padding_mask,
- incremental_state=incremental_state,
- static_kv=True,
- enc_dec_attn_constraint_mask=None,
- # utils.get_incremental_state(self, incremental_state, 'enc_dec_attn_constraint_mask'),
- reset_attn_weight=reset_attn_weight
- )
- attn_logits = attn[1]
- else:
- assert attn_out is not None
- x = self.encoder_attn.in_proj_v(attn_out.transpose(0, 1))
- attn_logits = None
- x = F.dropout(x, self.dropout, training=self.training)
- x = residual + x
-
- residual = x
- x = self.layer_norm3(x)
- x = self.ffn(x, incremental_state=incremental_state)
- x = F.dropout(x, self.dropout, training=self.training)
- x = residual + x
- # if len(attn_logits.size()) > 3:
- # indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1)
- # attn_logits = attn_logits.gather(1,
- # indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1)
- return x, attn_logits
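The table built by `SinusoidalPositionalEmbedding.get_embedding` above can be reproduced standalone; a small sketch in plain PyTorch (no padding index, for clarity):

```python
import math
import torch

def sinusoid_table(num_positions: int, dim: int) -> torch.Tensor:
    # same construction as get_embedding: sin on the first half, cos on the second
    half = dim // 2
    freqs = torch.exp(torch.arange(half, dtype=torch.float) * -(math.log(10000) / (half - 1)))
    angles = torch.arange(num_positions, dtype=torch.float).unsqueeze(1) * freqs.unsqueeze(0)
    table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if dim % 2 == 1:  # zero-pad odd dimensions, as in the original
        table = torch.cat([table, torch.zeros(num_positions, 1)], dim=1)
    return table

print(sinusoid_table(8, 16).shape)  # torch.Size([8, 16])
```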
diff --git a/spaces/CikeyQI/Yunzai/Yunzai/lib/config/config.js b/spaces/CikeyQI/Yunzai/Yunzai/lib/config/config.js
deleted file mode 100644
index d3fcf31ded62ba5243075d8e7a331a1a3dfba253..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/Yunzai/Yunzai/lib/config/config.js
+++ /dev/null
@@ -1,174 +0,0 @@
-import YAML from "yaml"
-import fs from "node:fs"
-import chokidar from "chokidar"
-
-/** Configuration file */
-class Cfg {
- constructor () {
- this.config = {}
-
- /** Watched files */
- this.watcher = { config: {}, defSet: {} }
-
- this.initCfg()
- }
-
- /** Initialize configuration */
- initCfg () {
- let path = "config/config/"
- let pathDef = "config/default_config/"
- const files = fs.readdirSync(pathDef).filter(file => file.endsWith(".yaml"))
- for (let file of files)
- if (!fs.existsSync(`${path}${file}`))
- fs.copyFileSync(`${pathDef}${file}`, `${path}${file}`)
- if (!fs.existsSync("data")) fs.mkdirSync("data")
- if (!fs.existsSync("resources")) fs.mkdirSync("resources")
- }
-
- /** Bot configuration */
- get bot () {
- let bot = this.getConfig("bot")
- let defbot = this.getdefSet("bot")
- bot = { ...defbot, ...bot }
-
- return bot
- }
-
- get other () {
- return this.getConfig("other")
- }
-
- get redis () {
- return this.getConfig("redis")
- }
-
- get renderer() {
- return this.getConfig("renderer");
- }
-
- /** Master (owner) accounts */
- get masterQQ () {
- let masterQQ = this.getConfig("other").masterQQ || []
-
- if (!Array.isArray(masterQQ))
- masterQQ = [masterQQ]
-
- const masters = []
- for (const i of masterQQ)
- masters.push(Number(i) || String(i))
- return masters
- }
-
- /** Bot account: [master accounts] */
- get master () {
- let master = this.getConfig("other").master || []
-
- if (!Array.isArray(master))
- master = [master]
-
- const masters = {}
- for (let i of master) {
- i = i.split(":")
- if (Array.isArray(masters[i[0]]))
- masters[i[0]].push(i[1])
- else
- masters[i[0]] = [i[1]]
- }
- return masters
- }
-
- /** Bot accounts */
- get uin () {
- return Object.keys(this.master)
- }
- get qq () {
- return this.uin
- }
-
- /** package.json */
- get package () {
- if (this._package) return this._package
-
- this._package = JSON.parse(fs.readFileSync("package.json", "utf8"))
- return this._package
- }
-
- /** Group configuration */
- getGroup (bot_id = "", group_id = "") {
- const config = this.getConfig("group")
- const defCfg = this.getdefSet("group")
- return {
- ...defCfg.default,
- ...config.default,
- ...config[`${bot_id}:default`],
- ...config[group_id],
- ...config[`${bot_id}:${group_id}`],
- }
- }
-
- /** "other" configuration */
- getOther () {
- let def = this.getdefSet("other")
- let config = this.getConfig("other")
- return { ...def, ...config }
- }
-
- /**
- * @param app feature
- * @param name config file name
- */
- getdefSet (name) {
- return this.getYaml("default_config", name)
- }
-
- /** User configuration */
- getConfig (name) {
- return this.getYaml("config", name)
- }
-
- /**
- * Read a config YAML file
- * @param type default config - defSet, user config - config
- * @param name file name
- */
- getYaml (type, name) {
- let file = `config/${type}/${name}.yaml`
- let key = `${type}.${name}`
- if (this.config[key]) return this.config[key]
-
- this.config[key] = YAML.parse(
- fs.readFileSync(file, "utf8")
- )
-
- this.watch(file, name, type)
-
- return this.config[key]
- }
-
- /** Watch a config file for changes */
- watch (file, name, type = "default_config") {
- let key = `${type}.${name}`
-
- if (this.watcher[key]) return
-
- const watcher = chokidar.watch(file)
- watcher.on("change", path => {
- delete this.config[key]
- if (typeof Bot == "undefined") return
- logger.mark(`[修改配置文件][${type}][${name}]`)
- if (this[`change_${name}`]) {
- this[`change_${name}`]()
- }
- })
-
- this.watcher[key] = watcher
- }
-
- async change_bot () {
- /** Update the log level */
- let log = await import("./log.js")
- log.default()
- }
-}
-
-export default new Cfg()
\ No newline at end of file
diff --git a/spaces/CjangCjengh/Sanskrit-TTS/text/__init__.py b/spaces/CjangCjengh/Sanskrit-TTS/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/CjangCjengh/Sanskrit-TTS/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- symbols: list of valid symbols; a symbol's index in this list is its ID
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
diff --git a/spaces/Cong723/gpt-academic-public/crazy_functions/crazy_utils.py b/spaces/Cong723/gpt-academic-public/crazy_functions/crazy_utils.py
deleted file mode 100644
index e54136c441e7d713b0e8f5a66de9fb8bae1b1f4c..0000000000000000000000000000000000000000
--- a/spaces/Cong723/gpt-academic-public/crazy_functions/crazy_utils.py
+++ /dev/null
@@ -1,608 +0,0 @@
-from toolbox import update_ui, get_conf, trimmed_format_exc
-
-def input_clipping(inputs, history, max_token_limit):
- import numpy as np
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
-
- mode = 'input-and-history'
- # When the input takes up less than half of the total token budget, only clip the history
- input_token_num = get_token_num(inputs)
- if input_token_num < max_token_limit//2:
- mode = 'only-history'
- max_token_limit = max_token_limit - input_token_num
-
- everything = [inputs] if mode == 'input-and-history' else ['']
- everything.extend(history)
- n_token = get_token_num('\n'.join(everything))
- everything_token = [get_token_num(e) for e in everything]
- delta = max(everything_token) // 16 # granularity of each truncation step
-
- while n_token > max_token_limit:
- where = np.argmax(everything_token)
- encoded = enc.encode(everything[where], disallowed_special=())
- clipped_encoded = encoded[:len(encoded)-delta]
- everything[where] = enc.decode(clipped_encoded)[:-1] # drop the last char, which may be malformed after truncation
- everything_token[where] = get_token_num(everything[where])
- n_token = get_token_num('\n'.join(everything))
-
- if mode == 'input-and-history':
- inputs = everything[0]
- else:
- pass
- history = everything[1:]
- return inputs, history
-
-def request_gpt_model_in_new_thread_with_ui_alive(
- inputs, inputs_show_user, llm_kwargs,
- chatbot, history, sys_prompt, refresh_interval=0.2,
- handle_token_exceed=True,
- retry_times_at_unknown_error=2,
- ):
- """
- Request the GPT model while keeping the user interface responsive.
-
- Args (inputs whose names end in _array are lists; the list length is the number of sub-tasks, and at run time the list is split up and dispatched to the worker threads):
- inputs (string): List of inputs
- inputs_show_user (string): List of inputs to show user (what appears in the report; use it to hide verbose raw inputs from the summary and keep the report readable)
- top_p (float): Top p value for sampling from model distribution (GPT parameter, float)
- temperature (float): Temperature value for sampling from model distribution (GPT parameter, float)
- chatbot: chatbot inputs and outputs (handle of the UI chat window, used to visualize the data stream)
- history (list): List of chat history (the conversation history)
- sys_prompt (string): List of system prompts (system prompt fed to GPT, e.g. "you are a translator, ...")
- refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (keep it below 1 and never above 3; it only affects the visual refresh rate)
- handle_token_exceed: whether to handle token overflow automatically; if enabled, the text is truncated aggressively on overflow (enabled by default)
- retry_times_at_unknown_error: number of retries on failure
-
- Returns:
- future: the result returned by GPT
- """
- import time
- from concurrent.futures import ThreadPoolExecutor
- from request_llm.bridge_all import predict_no_ui_long_connection
- # user feedback
- chatbot.append([inputs_show_user, ""])
- yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
- executor = ThreadPoolExecutor(max_workers=16)
- mutable = ["", time.time(), ""]
- def _req_gpt(inputs, history, sys_prompt):
- retry_op = retry_times_at_unknown_error
- exceeded_cnt = 0
- while True:
- # watchdog error
- if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
- raise RuntimeError("检测到程序终止。")
- try:
- # [Case 1]: completed successfully
- result = predict_no_ui_long_connection(
- inputs=inputs, llm_kwargs=llm_kwargs,
- history=history, sys_prompt=sys_prompt, observe_window=mutable)
- return result
- except ConnectionAbortedError as token_exceeded_error:
- # [Case 2]: token overflow
- if handle_token_exceed:
- exceeded_cnt += 1
- # [Handle it] estimate a ratio and keep as much of the text as possible
- from toolbox import get_reduce_token_percent
- p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
- MAX_TOKEN = 4096
- EXCEED_ALLO = 512 + 512 * exceeded_cnt
- inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
- mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
- continue # go back and retry
- else:
- # [Give up]
- tb_str = '```\n' + trimmed_format_exc() + '```'
- mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
- return mutable[0] # 放弃
- except:
- # [Case 3]: other errors, retry a few times
- tb_str = '```\n' + trimmed_format_exc() + '```'
- print(tb_str)
- mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
- if retry_op > 0:
- retry_op -= 1
- mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
- if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
- time.sleep(30)
- time.sleep(5)
- continue # go back and retry
- else:
- time.sleep(5)
- return mutable[0] # give up
-
- # submit the task
- future = executor.submit(_req_gpt, inputs, history, sys_prompt)
- while True:
- # yield once to refresh the front-end page
- time.sleep(refresh_interval)
- # feed the watchdog
- mutable[1] = time.time()
- if future.done():
- break
- chatbot[-1] = [chatbot[-1][0], mutable[0]]
- yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
-
- final_result = future.result()
- chatbot[-1] = [chatbot[-1][0], final_result]
- yield from update_ui(chatbot=chatbot, history=[]) # on success, clear the earlier error message
- return final_result
-
-
-def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array, inputs_show_user_array, llm_kwargs,
- chatbot, history_array, sys_prompt_array,
- refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
- handle_token_exceed=True, show_user_at_complete=False,
- retry_times_at_unknown_error=2,
- ):
- """
- Request GPT model using multiple threads with UI and high efficiency
- Multi-threaded version of the GPT request helper.
- It provides the following features:
- streams the remote data back to the UI in real time
- uses a thread pool whose size can be tuned to avoid OpenAI rate-limit errors
- handles interruption part-way through
- when the network (or anything else) fails, the traceback and the data received so far are written to the output
-
- Args (inputs whose names end in _array are lists; the list length is the number of sub-tasks, and at run time the list is split up and dispatched to the worker threads):
- inputs_array (list): List of inputs (one input per sub-task)
- inputs_show_user_array (list): List of inputs to show user (what each sub-task shows in the report; use it to hide verbose raw inputs from the summary and keep the report readable)
- llm_kwargs: llm_kwargs parameters
- chatbot: chatbot (handle of the UI chat window, used to visualize the data stream)
- history_array (list): List of chat history (a nested list: the outer list is the sub-task split, the inner list is each conversation's history)
- sys_prompt_array (list): List of system prompts (system prompts fed to GPT, e.g. "you are a translator, ...")
- refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (keep it below 1 and never above 3; it only affects the visual refresh rate)
- max_workers (int, optional): Maximum number of threads (default: see config.py) (cap it when there are many sub-tasks, to avoid triggering OpenAI errors from high-frequency requests)
- scroller_max_len (int, optional): Maximum length for scroller (default: 30) (how many of the most recently received characters are displayed; only affects the visual effect)
- handle_token_exceed (bool, optional): (whether to shrink the text automatically when the input is too long)
- handle_token_exceed: whether to handle token overflow automatically; if enabled, the text is truncated aggressively on overflow (enabled by default)
- show_user_at_complete (bool, optional): (show the complete input/output result in the chat box when finished)
- retry_times_at_unknown_error: number of retries when a sub-task fails
-
- Returns:
- list: List of GPT model responses (the collected output of every sub-task; if a sub-task fails, its response carries the traceback to make debugging and locating the problem easier)
- """
- import time, random
- from concurrent.futures import ThreadPoolExecutor
- from request_llm.bridge_all import predict_no_ui_long_connection
- assert len(inputs_array) == len(history_array)
- assert len(inputs_array) == len(sys_prompt_array)
- if max_workers == -1: # 读取配置文件
- try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
- except: max_workers = 8
- if max_workers <= 0: max_workers = 3
- # Disable multi-threading for chatglm, it can cause severe stalls
- if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
- max_workers = 1
-
- executor = ThreadPoolExecutor(max_workers=max_workers)
- n_frag = len(inputs_array)
- # 用户反馈
- chatbot.append(["请开始多线程操作。", ""])
- yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
- # 跨线程传递
- mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
-
- # 子线程任务
- def _req_gpt(index, inputs, history, sys_prompt):
- gpt_say = ""
- retry_op = retry_times_at_unknown_error
- exceeded_cnt = 0
- mutable[index][2] = "执行中"
- while True:
- # watchdog error
- if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
- raise RuntimeError("检测到程序终止。")
- try:
- # 【第一种情况】:顺利完成
- # time.sleep(10); raise RuntimeError("测试")
- gpt_say = predict_no_ui_long_connection(
- inputs=inputs, llm_kwargs=llm_kwargs, history=history,
- sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
- )
- mutable[index][2] = "已成功"
- return gpt_say
- except ConnectionAbortedError as token_exceeded_error:
- # 【第二种情况】:Token溢出,
- if handle_token_exceed:
- exceeded_cnt += 1
- # 【选择处理】 尝试计算比例,尽可能多地保留文本
- from toolbox import get_reduce_token_percent
- p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
- MAX_TOKEN = 4096
- EXCEED_ALLO = 512 + 512 * exceeded_cnt
- inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
- gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
- mutable[index][2] = f"截断重试"
- continue # 返回重试
- else:
- # 【选择放弃】
- tb_str = '```\n' + trimmed_format_exc() + '```'
- gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
- if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
- mutable[index][2] = "输入过长已放弃"
- return gpt_say # 放弃
- except:
- # 【第三种情况】:其他错误
- tb_str = '```\n' + trimmed_format_exc() + '```'
- print(tb_str)
- gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
- if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
- if retry_op > 0:
- retry_op -= 1
- wait = random.randint(5, 20)
- if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
- wait = wait * 3
- fail_info = "OpenAI绑定信用卡可解除频率限制 "
- else:
- fail_info = ""
- # 也许等待十几秒后,情况会好转
- for i in range(wait):
- mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
- # 开始重试
- mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
- continue # 返回重试
- else:
- mutable[index][2] = "已失败"
- wait = 5
- time.sleep(5)
- return gpt_say # 放弃
-
- # 异步任务开始
- futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
- range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
- cnt = 0
- while True:
- # yield一次以刷新前端页面
- time.sleep(refresh_interval)
- cnt += 1
- worker_done = [h.done() for h in futures]
- if all(worker_done):
- executor.shutdown()
- break
- # 更好的UI视觉效果
- observe_win = []
- # 每个线程都要“喂狗”(看门狗)
- for thread_index, _ in enumerate(worker_done):
- mutable[thread_index][1] = time.time()
- # 在前端打印些好玩的东西
- for thread_index, _ in enumerate(worker_done):
- print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
- replace('\n', '').replace('```', '...').replace(
- ' ', '.').replace(' ', '.....').replace('$', '.')+"`... ]"
- observe_win.append(print_something_really_funny)
- # 在前端打印些好玩的东西
- stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
- if not done else f'`{mutable[thread_index][2]}`\n\n'
- for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
- # 在前端打印些好玩的东西
- chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
- yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
-
- # 异步任务结束
- gpt_response_collection = []
- for inputs_show_user, f in zip(inputs_show_user_array, futures):
- gpt_res = f.result()
- gpt_response_collection.extend([inputs_show_user, gpt_res])
-
- # 是否在结束时,在界面上显示结果
- if show_user_at_complete:
- for inputs_show_user, f in zip(inputs_show_user_array, futures):
- gpt_res = f.result()
- chatbot.append([inputs_show_user, gpt_res])
- yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
- time.sleep(0.3)
- return gpt_response_collection
-
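A companion sketch for the multi-threaded variant above: one sub-task per text fragment, with every `*_array` argument kept the same length. The module path, prompts and the framework-supplied `llm_kwargs`/`chatbot` objects are assumptions.

```python
# Hedged sketch only: module path and prompts are illustrative.
from crazy_functions.crazy_utils import (
    request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency,
)

def summarize_fragments(fragments, llm_kwargs, chatbot):
    collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=[f"Summarize:\n\n{frag}" for frag in fragments],
        inputs_show_user_array=[frag[:50] + "..." for frag in fragments],  # short report labels
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[] for _ in fragments],            # independent sub-tasks
        sys_prompt_array=["You are a concise summarizer."] * len(fragments),
        max_workers=8,                                    # stay under the provider's rate limit
    )
    # The result interleaves [shown_input_0, reply_0, shown_input_1, reply_1, ...]
    return collection
```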
-
-def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
- def cut(txt_tocut, must_break_at_empty_line): # 递归
- if get_token_fn(txt_tocut) <= limit:
- return [txt_tocut]
- else:
- lines = txt_tocut.split('\n')
- estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
- estimated_line_cut = int(estimated_line_cut)
- for cnt in reversed(range(estimated_line_cut)):
- if must_break_at_empty_line:
- if lines[cnt] != "":
- continue
- print(cnt)
- prev = "\n".join(lines[:cnt])
- post = "\n".join(lines[cnt:])
- if get_token_fn(prev) < limit:
- break
- if cnt == 0:
- raise RuntimeError("存在一行极长的文本!")
- # print(len(post))
- # 列表递归接龙
- result = [prev]
- result.extend(cut(post, must_break_at_empty_line))
- return result
- try:
- return cut(txt, must_break_at_empty_line=True)
- except RuntimeError:
- return cut(txt, must_break_at_empty_line=False)
-
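A quick self-contained illustration of the recursive splitter above, using word count as a stand-in for a real tokenizer (the sample text and limit are arbitrary).

```python
def _toy_token_count(s):
    # word count as a cheap stand-in for a tokenizer
    return len(s.split())

sample = ("First paragraph with a handful of words.\n\n"
          "Second paragraph that is also fairly short.\n\n"
          "Third paragraph to push the total over the limit.")

pieces = breakdown_txt_to_satisfy_token_limit(sample, get_token_fn=_toy_token_count, limit=12)
assert all(_toy_token_count(p) <= 12 for p in pieces)   # every piece respects the budget
```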
-
-def force_breakdown(txt, limit, get_token_fn):
- """
- When the text cannot be split on punctuation or blank lines, fall back to the most brutal option: a hard character-level cut.
- """
- for i in reversed(range(len(txt))):
- if get_token_fn(txt[:i]) < limit:
- return txt[:i], txt[i:]
- return "Tiktoken未知错误", "Tiktoken未知错误"
-
-def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
- # 递归
- def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
- if get_token_fn(txt_tocut) <= limit:
- return [txt_tocut]
- else:
- lines = txt_tocut.split('\n')
- estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
- estimated_line_cut = int(estimated_line_cut)
- cnt = 0
- for cnt in reversed(range(estimated_line_cut)):
- if must_break_at_empty_line:
- if lines[cnt] != "":
- continue
- prev = "\n".join(lines[:cnt])
- post = "\n".join(lines[cnt:])
- if get_token_fn(prev) < limit:
- break
- if cnt == 0:
- if break_anyway:
- prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
- else:
- raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
- # print(len(post))
- # 列表递归接龙
- result = [prev]
- result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
- return result
- try:
- # 1st attempt: split at empty lines (\n\n)
- return cut(txt, must_break_at_empty_line=True)
- except RuntimeError:
- try:
- # 2nd attempt: split at single newlines (\n)
- return cut(txt, must_break_at_empty_line=False)
- except RuntimeError:
- try:
- # 3rd attempt: split at English full stops (.)
- res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False) # the Chinese full stop here is deliberate, it only serves as a marker
- return [r.replace('。\n', '.') for r in res]
- except RuntimeError as e:
- try:
- # 4th attempt: split at Chinese full stops (。)
- res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
- return [r.replace('。。\n', '。') for r in res]
- except RuntimeError as e:
- # 5th attempt: nothing else works, just cut anywhere and make do
- return cut(txt, must_break_at_empty_line=False, break_anyway=True)
-
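The PDF-oriented variant layers five increasingly desperate passes (blank lines, single newlines, English full stops, Chinese full stops, then a forced character split), so even text extracted from a PDF as one newline-free block still gets chopped. A small sketch with a character count standing in for a tokenizer:

```python
def _chars(s):
    return len(s)

one_long_block = "Sentence one. Sentence two. Sentence three. " * 20   # no newlines at all

# The first two passes need newlines, so this text falls through to the
# sentence-based pass before it is finally split.
chunks = breakdown_txt_to_satisfy_token_limit_for_pdf(one_long_block, get_token_fn=_chars, limit=200)
assert all(_chars(c) <= 200 for c in chunks)
```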
-
-
-def read_and_clean_pdf_text(fp):
- """
- This function splits a PDF into text. It relies on a lot of tricks and the logic is messy, but the results are surprisingly good.
-
- **Input**
- - `fp`: path of the PDF file to read and clean
-
- **Output**
- - `meta_txt`: the cleaned text content as a single string
- - `page_one_meta`: list of cleaned text blocks from the first page
-
- **What it does**
- Reads the PDF and cleans up its text content. The cleanup rules are:
- - Extract the text of every block element and merge it into one string
- - Drop short blocks (fewer than 100 characters) and replace them with a newline
- - Remove redundant blank lines
- - Merge paragraph blocks that start with a lowercase letter, joining them with a space
- - Collapse duplicated line breaks
- - Replace every line break with two line breaks, so paragraphs are separated by a blank line
- """
- import fitz, copy
- import re
- import numpy as np
- from colorful import print亮黄, print亮绿
- fc = 0 # Index 0 文本
- fs = 1 # Index 1 字体
- fb = 2 # Index 2 框框
- REMOVE_FOOT_NOTE = True # whether to drop content that is not body text (smaller font than the body, e.g. references, footnotes, figure captions)
- REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # when a font is smaller than this fraction of the body size, treat it as non-body text (some papers vary the body font size by amounts invisible to the eye)
- def primary_ffsize(l):
- """
- Extract the dominant font size of a text line (the size covering the most characters)
- """
- fsize_statiscs = {}
- for wtf in l['spans']:
- if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
- fsize_statiscs[wtf['size']] += len(wtf['text'])
- return max(fsize_statiscs, key=fsize_statiscs.get)
-
- def ffsize_same(a,b):
- """
- Check whether two font sizes are approximately equal
- """
- return abs((a-b)/max(a,b)) < 0.02
-
- with fitz.open(fp) as doc:
- meta_txt = []
- meta_font = []
-
- meta_line = []
- meta_span = []
- ############################## <Step 1: collect initial information> ##################################
- for index, page in enumerate(doc):
- # file_content += page.get_text()
- text_areas = page.get_text("dict") # 获取页面上的文本信息
- for t in text_areas['blocks']:
- if 'lines' in t:
- pf = 998
- for l in t['lines']:
- txt_line = "".join([wtf['text'] for wtf in l['spans']])
- if len(txt_line) == 0: continue
- pf = primary_ffsize(l)
- meta_line.append([txt_line, pf, l['bbox'], l])
- for wtf in l['spans']: # for l in t['lines']:
- meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
- # meta_line.append(["NEW_BLOCK", pf])
- # 块元提取 for each word segment with in line for each line cross-line words for each block
- meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
- '- ', '') for t in text_areas['blocks'] if 'lines' in t])
- meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
- for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
- if index == 0:
- page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
- '- ', '') for t in text_areas['blocks'] if 'lines' in t]
-
- ############################## <Step 2: determine the main body font size> ##################################
- fsize_statiscs = {}
- for span in meta_span:
- if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
- fsize_statiscs[span[1]] += span[2]
- main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
- if REMOVE_FOOT_NOTE:
- give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
-
- ############################## <Step 3: split and reassemble> ##################################
- mega_sec = []
- sec = []
- for index, line in enumerate(meta_line):
- if index == 0:
- sec.append(line[fc])
- continue
- if REMOVE_FOOT_NOTE:
- if meta_line[index][fs] <= give_up_fize_threshold:
- continue
- if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
- # 尝试识别段落
- if meta_line[index][fc].endswith('.') and\
- (meta_line[index-1][fc] != 'NEW_BLOCK') and \
- (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
- sec[-1] += line[fc]
- sec[-1] += "\n\n"
- else:
- sec[-1] += " "
- sec[-1] += line[fc]
- else:
- if (index+1 < len(meta_line)) and \
- meta_line[index][fs] > main_fsize:
- # 单行 + 字体大
- mega_sec.append(copy.deepcopy(sec))
- sec = []
- sec.append("# " + line[fc])
- else:
- # 尝试识别section
- if meta_line[index-1][fs] > meta_line[index][fs]:
- sec.append("\n" + line[fc])
- else:
- sec.append(line[fc])
- mega_sec.append(copy.deepcopy(sec))
-
- finals = []
- for ms in mega_sec:
- final = " ".join(ms)
- final = final.replace('- ', ' ')
- finals.append(final)
- meta_txt = finals
-
- ############################## <Step 4: miscellaneous post-processing> ##################################
- def 把字符太少的块清除为回车(meta_txt):
- for index, block_txt in enumerate(meta_txt):
- if len(block_txt) < 100:
- meta_txt[index] = '\n'
- return meta_txt
- meta_txt = 把字符太少的块清除为回车(meta_txt)
-
- def 清理多余的空行(meta_txt):
- for index in reversed(range(1, len(meta_txt))):
- if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
- meta_txt.pop(index)
- return meta_txt
- meta_txt = 清理多余的空行(meta_txt)
-
- def 合并小写开头的段落块(meta_txt):
- def starts_with_lowercase_word(s):
- pattern = r"^[a-z]+"
- match = re.match(pattern, s)
- if match:
- return True
- else:
- return False
- for _ in range(100):
- for index, block_txt in enumerate(meta_txt):
- if starts_with_lowercase_word(block_txt):
- if meta_txt[index-1] != '\n':
- meta_txt[index-1] += ' '
- else:
- meta_txt[index-1] = ''
- meta_txt[index-1] += meta_txt[index]
- meta_txt[index] = '\n'
- return meta_txt
- meta_txt = 合并小写开头的段落块(meta_txt)
- meta_txt = 清理多余的空行(meta_txt)
-
- meta_txt = '\n'.join(meta_txt)
- # 清除重复的换行
- for _ in range(5):
- meta_txt = meta_txt.replace('\n\n', '\n')
-
- # 换行 -> 双换行
- meta_txt = meta_txt.replace('\n', '\n\n')
-
- ############################## <Step 5: show the splitting result> ##################################
- # for f in finals:
- # print亮黄(f)
- # print亮绿('***************************')
-
- return meta_txt, page_one_meta
-
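A hedged usage sketch of the PDF cleaner above; it needs PyMuPDF installed (imported as `fitz`) and a real PDF on disk, and the path here is illustrative.

```python
meta_txt, page_one_meta = read_and_clean_pdf_text("paper.pdf")   # illustrative path
# meta_txt is one cleaned string with a blank line between paragraphs;
# page_one_meta is the list of cleaned text blocks from the first page only.
for paragraph in meta_txt.split("\n\n")[:3]:
    print(paragraph[:80])
```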
-
-def get_files_from_everything(txt, type): # type='.md'
- """
- Collects every file of the given type (e.g. .md) under a directory; a file hosted on the web can be fetched as well.
- Parameters and return values:
- Args
- - txt: a path or URL: the file or folder to search, or a file on the web.
- - type: string, the file extension to search for, e.g. ".md".
- Returns
- - success: bool, whether the function ran successfully.
- - file_manifest: list of paths of every file whose name ends with the given type.
- - project_folder: string, the folder containing the files; for a web file this is a temporary folder.
- """
- import glob, os
-
- success = True
- if txt.startswith('http'):
- # a remote file on the web
- import requests
- from toolbox import get_conf
- proxies, = get_conf('proxies')
- r = requests.get(txt, proxies=proxies)
- with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content)
- project_folder = './gpt_log/'
- file_manifest = ['./gpt_log/temp'+type]
- elif txt.endswith(type):
- # a single file was given directly
- file_manifest = [txt]
- project_folder = os.path.dirname(txt)
- elif os.path.exists(txt):
- # a local path, search it recursively
- project_folder = txt
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
- if len(file_manifest) == 0:
- success = False
- else:
- project_folder = None
- file_manifest = []
- success = False
-
- return success, file_manifest, project_folder
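The helper accepts three input styles; a short sketch (paths and URL are illustrative):

```python
# 1) a folder: scanned recursively for files with the given extension
ok, files, folder = get_files_from_everything("./docs", type=".md")
# 2) a single file given directly
ok, files, folder = get_files_from_everything("./notes/readme.md", type=".md")
# 3) a remote file: downloaded to ./gpt_log/ via the configured proxy
ok, files, folder = get_files_from_everything("https://example.com/readme.md", type=".md")
if not ok:
    print("nothing matching was found")
```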
diff --git a/spaces/Cyril666/ContourNet-ABI/modules/model_alignment.py b/spaces/Cyril666/ContourNet-ABI/modules/model_alignment.py
deleted file mode 100644
index 0405c228b3339e5ba0835c33ba56844831c06057..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/modules/model_alignment.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import torch
-import torch.nn as nn
-from fastai.vision import *
-
-from modules.model import Model, _default_tfmer_cfg
-
-
-class BaseAlignment(Model):
- def __init__(self, config):
- super().__init__(config)
- d_model = ifnone(config.model_alignment_d_model, _default_tfmer_cfg['d_model'])
-
- self.loss_weight = ifnone(config.model_alignment_loss_weight, 1.0)
- self.max_length = config.dataset_max_length + 1 # additional stop token
- self.w_att = nn.Linear(2 * d_model, d_model)
- self.cls = nn.Linear(d_model, self.charset.num_classes)
-
- def forward(self, l_feature, v_feature):
- """
- Args:
- l_feature: (N, T, E) where T is length, N is batch size and E is the dimension of the model
- v_feature: (N, T, E) shape the same as l_feature
- l_lengths: (N,)
- v_lengths: (N,)
- """
- f = torch.cat((l_feature, v_feature), dim=2)
- f_att = torch.sigmoid(self.w_att(f))
- output = f_att * v_feature + (1 - f_att) * l_feature
-
- logits = self.cls(output) # (N, T, C)
- pt_lengths = self._get_length(logits)
-
- return {'logits': logits, 'pt_lengths': pt_lengths, 'loss_weight':self.loss_weight,
- 'name': 'alignment'}
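The heart of `BaseAlignment.forward` is a learned sigmoid gate that mixes the language and vision features dimension by dimension. A standalone PyTorch sketch of that fusion, with illustrative sizes (`d_model=512`, 37 character classes) and without the fastai `Model` base-class plumbing:

```python
import torch
import torch.nn as nn

class GatedFusion(nn.Module):
    """Standalone sketch of the gated fusion used by BaseAlignment."""
    def __init__(self, d_model=512, num_classes=37):
        super().__init__()
        self.w_att = nn.Linear(2 * d_model, d_model)
        self.cls = nn.Linear(d_model, num_classes)

    def forward(self, l_feature, v_feature):
        f = torch.cat((l_feature, v_feature), dim=2)    # (N, T, 2E)
        f_att = torch.sigmoid(self.w_att(f))            # per-dimension gate in [0, 1]
        fused = f_att * v_feature + (1 - f_att) * l_feature
        return self.cls(fused)                          # (N, T, C) character logits

logits = GatedFusion()(torch.randn(2, 26, 512), torch.randn(2, 26, 512))
print(logits.shape)   # torch.Size([2, 26, 37])
```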
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/linear-58a44b5e.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/linear-58a44b5e.js
deleted file mode 100644
index 5957ab4a575538fb9023ff2dbfffc2cab1f1743e..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/linear-58a44b5e.js
+++ /dev/null
@@ -1,2 +0,0 @@
-function W(n,t){return n==null||t==null?NaN:nt?1:n>=t?0:NaN}function En(n){let t=n,e=n,r=n;n.length!==2&&(t=(a,u)=>n(a)-u,e=W,r=(a,u)=>W(n(a),u));function i(a,u,s=0,c=a.length){if(s>>1;r(a[h],u)<0?s=h+1:c=h}while(s>>1;r(a[h],u)<=0?s=h+1:c=h}while(ss&&t(a[h-1],u)>-t(a[h],u)?h-1:h}return{left:i,center:o,right:f}}function Un(n){return n===null?NaN:+n}function*Qt(n,t){if(t===void 0)for(let e of n)e!=null&&(e=+e)>=e&&(yield e);else{let e=-1;for(let r of n)(r=t(r,++e,n))!=null&&(r=+r)>=r&&(yield r)}}const Pn=En(W),Yn=Pn.right,Ut=Pn.left;En(Un).center;const Jn=Yn;var nn=Math.sqrt(50),tn=Math.sqrt(10),en=Math.sqrt(2);function Kn(n,t,e){var r,i=-1,f,o,a;if(t=+t,n=+n,e=+e,n===t&&e>0)return[n];if((r=t0){let u=Math.round(n/a),s=Math.round(t/a);for(u*at&&--s,o=new Array(f=s-u+1);++it&&--s,o=new Array(f=s-u+1);++i=0?(f>=nn?10:f>=tn?5:f>=en?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(f>=nn?10:f>=tn?5:f>=en?2:1)}function Wn(n,t,e){var r=Math.abs(t-n)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),f=r/i;return f>=nn?i*=10:f>=tn?i*=5:f>=en&&(i*=2),t=1e21?n.toLocaleString("en").replace(/,/g,""):n.toString(10)}function G(n,t){if((e=(n=t?n.toExponential(t-1):n.toExponential()).indexOf("e"))<0)return null;var e,r=n.slice(0,e);return[r.length>1?r[0]+r.slice(2):r,+n.slice(e+1)]}function L(n){return n=G(Math.abs(n)),n?n[1]:NaN}function tt(n,t){return function(e,r){for(var i=e.length,f=[],o=0,a=n[0],u=0;i>0&&a>0&&(u+a+1>r&&(a=Math.max(1,r-u)),f.push(e.substring(i-=a,i+a)),!((u+=a+1)>r));)a=n[o=(o+1)%n.length];return f.reverse().join(t)}}function et(n){return function(t){return t.replace(/[0-9]/g,function(e){return n[+e]})}}var rt=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Z(n){if(!(t=rt.exec(n)))throw new Error("invalid format: "+n);var t;return new sn({fill:t[1],align:t[2],sign:t[3],symbol:t[4],zero:t[5],width:t[6],comma:t[7],precision:t[8]&&t[8].slice(1),trim:t[9],type:t[10]})}Z.prototype=sn.prototype;function sn(n){this.fill=n.fill===void 0?" 
":n.fill+"",this.align=n.align===void 0?">":n.align+"",this.sign=n.sign===void 0?"-":n.sign+"",this.symbol=n.symbol===void 0?"":n.symbol+"",this.zero=!!n.zero,this.width=n.width===void 0?void 0:+n.width,this.comma=!!n.comma,this.precision=n.precision===void 0?void 0:+n.precision,this.trim=!!n.trim,this.type=n.type===void 0?"":n.type+""}sn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(this.width===void 0?"":Math.max(1,this.width|0))+(this.comma?",":"")+(this.precision===void 0?"":"."+Math.max(0,this.precision|0))+(this.trim?"~":"")+this.type};function it(n){n:for(var t=n.length,e=1,r=-1,i;e0&&(r=0);break}return r>0?n.slice(0,r)+n.slice(i+1):n}var qn;function at(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1],f=i-(qn=Math.max(-8,Math.min(8,Math.floor(i/3)))*3)+1,o=r.length;return f===o?r:f>o?r+new Array(f-o+1).join("0"):f>0?r.slice(0,f)+"."+r.slice(f):"0."+new Array(1-f).join("0")+G(n,Math.max(0,t+f-1))[0]}function xn(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}const mn={"%":(n,t)=>(n*100).toFixed(t),b:n=>Math.round(n).toString(2),c:n=>n+"",d:nt,e:(n,t)=>n.toExponential(t),f:(n,t)=>n.toFixed(t),g:(n,t)=>n.toPrecision(t),o:n=>Math.round(n).toString(8),p:(n,t)=>xn(n*100,t),r:xn,s:at,X:n=>Math.round(n).toString(16).toUpperCase(),x:n=>Math.round(n).toString(16)};function bn(n){return n}var pn=Array.prototype.map,yn=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function ft(n){var t=n.grouping===void 0||n.thousands===void 0?bn:tt(pn.call(n.grouping,Number),n.thousands+""),e=n.currency===void 0?"":n.currency[0]+"",r=n.currency===void 0?"":n.currency[1]+"",i=n.decimal===void 0?".":n.decimal+"",f=n.numerals===void 0?bn:et(pn.call(n.numerals,String)),o=n.percent===void 0?"%":n.percent+"",a=n.minus===void 0?"−":n.minus+"",u=n.nan===void 0?"NaN":n.nan+"";function s(h){h=Z(h);var l=h.fill,p=h.align,g=h.sign,k=h.symbol,v=h.zero,N=h.width,R=h.comma,y=h.precision,H=h.trim,m=h.type;m==="n"?(R=!0,m="g"):mn[m]||(y===void 0&&(y=12),H=!0,m="g"),(v||l==="0"&&p==="=")&&(v=!0,l="0",p="=");var Vn=k==="$"?e:k==="#"&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",Xn=k==="$"?r:/[%p]/.test(m)?o:"",ln=mn[m],Qn=/[defgprs%]/.test(m);y=y===void 0?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y));function dn(d){var A=Vn,b=Xn,E,gn,F;if(m==="c")b=ln(d)+b,d="";else{d=+d;var $=d<0||1/d<0;if(d=isNaN(d)?u:ln(Math.abs(d),y),H&&(d=it(d)),$&&+d==0&&g!=="+"&&($=!1),A=($?g==="("?g:a:g==="-"||g==="("?"":g)+A,b=(m==="s"?yn[8+qn/3]:"")+b+($&&g==="("?")":""),Qn){for(E=-1,gn=d.length;++EF||F>57){b=(F===46?i+d.slice(E+1):d.slice(E))+b,d=d.slice(0,E);break}}}R&&!v&&(d=t(d,1/0));var B=A.length+d.length+b.length,_=B>1)+A+d+b+_.slice(B);break;default:d=_+A+d+b;break}return f(d)}return dn.toString=function(){return h+""},dn}function c(h,l){var p=s((h=Z(h),h.type="f",h)),g=Math.max(-8,Math.min(8,Math.floor(L(l)/3)))*3,k=Math.pow(10,-g),v=yn[8+g/3];return function(N){return p(k*N)+v}}return{format:s,formatPrefix:c}}var D,Ln,Hn;ot({thousands:",",grouping:[3],currency:["$",""]});function ot(n){return D=ft(n),Ln=D.format,Hn=D.formatPrefix,D}function ut(n){return Math.max(0,-L(Math.abs(n)))}function st(n,t){return Math.max(0,Math.max(-8,Math.min(8,Math.floor(L(t)/3)))*3-L(Math.abs(n)))}function ht(n,t){return n=Math.abs(n),t=Math.abs(t)-n,Math.max(0,L(t)-L(n))+1}const rn=Math.PI,an=2*rn,S=1e-6,ct=an-S;function 
fn(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function In(){return new fn}fn.prototype=In.prototype={constructor:fn,moveTo:function(n,t){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)},closePath:function(){this._x1!==null&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")},lineTo:function(n,t){this._+="L"+(this._x1=+n)+","+(this._y1=+t)},quadraticCurveTo:function(n,t,e,r){this._+="Q"+ +n+","+ +t+","+(this._x1=+e)+","+(this._y1=+r)},bezierCurveTo:function(n,t,e,r,i,f){this._+="C"+ +n+","+ +t+","+ +e+","+ +r+","+(this._x1=+i)+","+(this._y1=+f)},arcTo:function(n,t,e,r,i){n=+n,t=+t,e=+e,r=+r,i=+i;var f=this._x1,o=this._y1,a=e-n,u=r-t,s=f-n,c=o-t,h=s*s+c*c;if(i<0)throw new Error("negative radius: "+i);if(this._x1===null)this._+="M"+(this._x1=n)+","+(this._y1=t);else if(h>S)if(!(Math.abs(c*a-u*s)>S)||!i)this._+="L"+(this._x1=n)+","+(this._y1=t);else{var l=e-f,p=r-o,g=a*a+u*u,k=l*l+p*p,v=Math.sqrt(g),N=Math.sqrt(h),R=i*Math.tan((rn-Math.acos((g+h-k)/(2*v*N)))/2),y=R/N,H=R/v;Math.abs(y-1)>S&&(this._+="L"+(n+y*s)+","+(t+y*c)),this._+="A"+i+","+i+",0,0,"+ +(c*l>s*p)+","+(this._x1=n+H*a)+","+(this._y1=t+H*u)}},arc:function(n,t,e,r,i,f){n=+n,t=+t,e=+e,f=!!f;var o=e*Math.cos(r),a=e*Math.sin(r),u=n+o,s=t+a,c=1^f,h=f?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);this._x1===null?this._+="M"+u+","+s:(Math.abs(this._x1-u)>S||Math.abs(this._y1-s)>S)&&(this._+="L"+u+","+s),e&&(h<0&&(h=h%an+an),h>ct?this._+="A"+e+","+e+",0,1,"+c+","+(n-o)+","+(t-a)+"A"+e+","+e+",0,1,"+c+","+(this._x1=u)+","+(this._y1=s):h>S&&(this._+="A"+e+","+e+",0,"+ +(h>=rn)+","+c+","+(this._x1=n+e*Math.cos(i))+","+(this._y1=t+e*Math.sin(i))))},rect:function(n,t,e,r){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};function P(n){return function(){return n}}function lt(n){return typeof n=="object"&&"length"in n?n:Array.from(n)}function Tn(n){this._context=n}Tn.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||this._line!==0&&this._point===1)&&this._context.closePath(),this._line=1-this._line},point:function(n,t){switch(n=+n,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(n,t):this._context.moveTo(n,t);break;case 1:this._point=2;default:this._context.lineTo(n,t);break}}};function dt(n){return new Tn(n)}function gt(n){return n[0]}function xt(n){return n[1]}function Yt(n,t){var e=P(!0),r=null,i=dt,f=null;n=typeof n=="function"?n:n===void 0?gt:P(n),t=typeof t=="function"?t:t===void 0?xt:P(t);function o(a){var u,s=(a=lt(a)).length,c,h=!1,l;for(r==null&&(f=i(l=In())),u=0;u<=s;++u)!(u>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):e===8?O(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):e===4?O(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=pt.exec(n))?new x(t[1],t[2],t[3],1):(t=yt.exec(n))?new x(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=wt.exec(n))?O(t[1],t[2],t[3],t[4]):(t=Mt.exec(n))?O(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=vt.exec(n))?An(t[1],t[2]/100,t[3]/100,1):(t=_t.exec(n))?An(t[1],t[2]/100,t[3]/100,t[4]):wn.hasOwnProperty(n)?_n(wn[n]):n==="transparent"?new x(NaN,NaN,NaN,0):null}function _n(n){return new x(n>>16&255,n>>8&255,n&255,1)}function O(n,t,e,r){return r<=0&&(n=t=e=NaN),new x(n,t,e,r)}function kt(n){return n instanceof C||(n=z(n)),n?(n=n.rgb(),new x(n.r,n.g,n.b,n.opacity)):new x}function X(n,t,e,r){return arguments.length===1?kt(n):new x(n,t,e,r??1)}function 
x(n,t,e,r){this.r=+n,this.g=+t,this.b=+e,this.opacity=+r}hn(x,X,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Nn,formatHex:Nn,formatRgb:kn,toString:kn}));function Nn(){return"#"+Y(this.r)+Y(this.g)+Y(this.b)}function kn(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(n===1?")":", "+n+")")}function Y(n){return n=Math.max(0,Math.min(255,Math.round(n)||0)),(n<16?"0":"")+n.toString(16)}function An(n,t,e,r){return r<=0?n=t=e=NaN:e<=0||e>=1?n=t=NaN:t<=0&&(n=NaN),new w(n,t,e,r)}function Cn(n){if(n instanceof w)return new w(n.h,n.s,n.l,n.opacity);if(n instanceof C||(n=z(n)),!n)return new w;if(n instanceof w)return n;n=n.rgb();var t=n.r/255,e=n.g/255,r=n.b/255,i=Math.min(t,e,r),f=Math.max(t,e,r),o=NaN,a=f-i,u=(f+i)/2;return a?(t===f?o=(e-r)/a+(e0&&u<1?0:o,new w(o,a,u,n.opacity)}function At(n,t,e,r){return arguments.length===1?Cn(n):new w(n,t,e,r??1)}function w(n,t,e,r){this.h=+n,this.s=+t,this.l=+e,this.opacity=+r}hn(w,At,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new w(this.h,this.s,this.l*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new w(this.h,this.s,this.l*n,this.opacity)},rgb:function(){var n=this.h%360+(this.h<0)*360,t=isNaN(n)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*t,i=2*e-r;return new x(J(n>=240?n-240:n+120,i,r),J(n,i,r),J(n<120?n+240:n-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"hsl(":"hsla(")+(this.h||0)+", "+(this.s||0)*100+"%, "+(this.l||0)*100+"%"+(n===1?")":", "+n+")")}}));function J(n,t,e){return(n<60?t+(e-t)*n/60:n<180?e:n<240?t+(e-t)*(240-n)/60:t)*255}function Fn(n,t,e,r,i){var f=n*n,o=f*n;return((1-3*n+3*f-o)*t+(4-6*f+3*o)*e+(1+3*n+3*f-3*o)*r+o*i)/6}function St(n){var t=n.length-1;return function(e){var r=e<=0?e=0:e>=1?(e=1,t-1):Math.floor(e*t),i=n[r],f=n[r+1],o=r>0?n[r-1]:2*i-f,a=r()=>n;function $n(n,t){return function(e){return n+e*t}}function Et(n,t,e){return n=Math.pow(n,e),t=Math.pow(t,e)-n,e=1/e,function(r){return Math.pow(n+r*t,e)}}function Kt(n,t){var e=t-n;return e?$n(n,e>180||e<-180?e-360*Math.round(e/360):e):U(isNaN(n)?t:n)}function Pt(n){return(n=+n)==1?Bn:function(t,e){return e-t?Et(t,e,n):U(isNaN(t)?e:t)}}function Bn(n,t){var e=t-n;return e?$n(n,e):U(isNaN(n)?t:n)}const Sn=function n(t){var e=Pt(t);function r(i,f){var o=e((i=X(i)).r,(f=X(f)).r),a=e(i.g,f.g),u=e(i.b,f.b),s=Bn(i.opacity,f.opacity);return function(c){return i.r=o(c),i.g=a(c),i.b=u(c),i.opacity=s(c),i+""}}return r.gamma=n,r}(1);function Dn(n){return function(t){var e=t.length,r=new Array(e),i=new Array(e),f=new Array(e),o,a;for(o=0;oe&&(f=t.slice(e,f),a[o]?a[o]+=f:a[++o]=f),(r=r[0])===(i=i[0])?a[o]?a[o]+=i:a[++o]=i:(a[++o]=null,u.push({i:o,x:Q(r,i)})),e=K.lastIndex;return et&&(e=n,n=t,t=e),function(r){return Math.max(n,Math.min(t,r))}}function $t(n,t,e){var r=n[0],i=n[1],f=t[0],o=t[1];return i2?Bt:$t,u=s=null,h}function h(l){return 
l==null||isNaN(l=+l)?f:(u||(u=a(n.map(r),t,e)))(r(o(l)))}return h.invert=function(l){return o(i((s||(s=a(t,n.map(r),Q)))(l)))},h.domain=function(l){return arguments.length?(n=Array.from(l,Ct),c()):n.slice()},h.range=function(l){return arguments.length?(t=Array.from(l),c()):t.slice()},h.rangeRound=function(l){return t=Array.from(l),e=Tt,c()},h.clamp=function(l){return arguments.length?(o=l?!0:j,c()):o!==j},h.interpolate=function(l){return arguments.length?(e=l,c()):e},h.unknown=function(l){return arguments.length?(f=l,h):f},function(l,p){return r=l,i=p,c()}}function Gt(){return Ot()(j,j)}function Zt(n,t,e,r){var i=Wn(n,t,e),f;switch(r=Z(r??",f"),r.type){case"s":{var o=Math.max(Math.abs(n),Math.abs(t));return r.precision==null&&!isNaN(f=st(i,o))&&(r.precision=f),Hn(r,o)}case"":case"e":case"g":case"p":case"r":{r.precision==null&&!isNaN(f=ht(i,Math.max(Math.abs(n),Math.abs(t))))&&(r.precision=f-(r.type==="e"));break}case"f":case"%":{r.precision==null&&!isNaN(f=ut(i))&&(r.precision=f-(r.type==="%")*2);break}}return Ln(r)}function Vt(n){var t=n.domain;return n.ticks=function(e){var r=t();return Kn(r[0],r[r.length-1],e??10)},n.tickFormat=function(e,r){var i=t();return Zt(i[0],i[i.length-1],e??10,r)},n.nice=function(e){e==null&&(e=10);var r=t(),i=0,f=r.length-1,o=r[i],a=r[f],u,s,c=10;for(a0;){if(s=jn(o,a,e),s===u)return r[i]=o,r[f]=a,t(r);if(s>0)o=Math.floor(o/s)*s,a=Math.ceil(a/s)*s;else if(s<0)o=Math.ceil(o*s)/s,a=Math.floor(a*s)/s;else break;u=s}return n},n}function Xt(){var n=Gt();return n.copy=function(){return Dt(n,Xt())},mt.apply(n,arguments),Vt(n)}export{Yn as $,At as A,Bn as B,C,cn as D,te as E,St as F,Rt as G,jt as H,On as I,qt as J,Sn as K,Wt as L,ne as M,Tt as N,It as O,Ct as P,Vt as Q,x as R,Ot as S,Dt as T,Kn as U,j as V,Jn as W,Gt as X,Jt as Y,Xt as Z,Yt as _,W as a,Zt as a0,X as a1,Ut as a2,Un as b,En as c,ht as d,st as e,Z as f,Ln as g,Hn as h,ft as i,P as j,In as k,dt as l,lt as m,Qt as n,mt as o,ut as p,hn as q,kt as r,zn as s,Wn as t,V as u,I as v,Kt as w,gt as x,xt as y,Q as z};
-//# sourceMappingURL=linear-58a44b5e.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/UploadText-28892309.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/UploadText-28892309.js
deleted file mode 100644
index 994d97156cc4cae59f29e0b21b4432b39c2a5b9d..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/UploadText-28892309.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as h,e as S,s as T,N as g,P as c,O as y,K as U,p as q,M as l,R as v,n as b,A as w,a4 as A}from"./index-3370be2a.js";import{X as C}from"./Blocks-f0129fcd.js";function K(t){let e,o=t[1](t[2][t[0]])+"",i,r,s,n,_=t[1]("or")+"",d,m,k,f=t[1]("interface.click_to_upload")+"",u;return{c(){e=g("div"),i=c(o),r=y(),s=g("span"),n=c("- "),d=c(_),m=c(" -"),k=y(),u=c(f),U(s,"class","or svelte-1ck5uk8"),U(e,"class","wrap svelte-1ck5uk8")},m(a,p){q(a,e,p),l(e,i),l(e,r),l(e,s),l(s,n),l(s,d),l(s,m),l(e,k),l(e,u)},p(a,[p]){p&3&&o!==(o=a[1](a[2][a[0]])+"")&&v(i,o),p&2&&_!==(_=a[1]("or")+"")&&v(d,_),p&2&&f!==(f=a[1]("interface.click_to_upload")+"")&&v(u,f)},i:b,o:b,d(a){a&&w(e)}}}function M(t,e,o){let i;A(t,C,n=>o(1,i=n));let{type:r="file"}=e;const s={image:"interface.drop_image",video:"interface.drop_video",audio:"interface.drop_audio",file:"interface.drop_file",csv:"interface.drop_csv"};return t.$$set=n=>{"type"in n&&o(0,r=n.type)},[r,i,s]}class P extends h{constructor(e){super(),S(this,e,M,K,T,{type:0})}}export{P as U};
-//# sourceMappingURL=UploadText-28892309.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/utils.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/utils.py
deleted file mode 100644
index 850e6f8882bd3295a01c9285b136dc54c3daa7d3..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/utils.py
+++ /dev/null
@@ -1,575 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import base64
-import json
-import mimetypes
-import os
-import pkgutil
-import secrets
-import shutil
-import tempfile
-import warnings
-from concurrent.futures import CancelledError
-from dataclasses import dataclass, field
-from datetime import datetime
-from enum import Enum
-from pathlib import Path
-from threading import Lock
-from typing import Any, Callable, Optional
-
-import fsspec.asyn
-import httpx
-import huggingface_hub
-import requests
-from huggingface_hub import SpaceStage
-from websockets.legacy.protocol import WebSocketCommonProtocol
-
-API_URL = "api/predict/"
-WS_URL = "queue/join"
-UPLOAD_URL = "upload"
-CONFIG_URL = "config"
-API_INFO_URL = "info"
-RAW_API_INFO_URL = "info?serialize=False"
-SPACE_FETCHER_URL = "https://gradio-space-api-fetcher-v2.hf.space/api"
-RESET_URL = "reset"
-SPACE_URL = "https://hf.space/{}"
-
-SKIP_COMPONENTS = {
- "state",
- "row",
- "column",
- "tabs",
- "tab",
- "tabitem",
- "box",
- "form",
- "accordion",
- "group",
- "interpretation",
- "dataset",
-}
-STATE_COMPONENT = "state"
-INVALID_RUNTIME = [
- SpaceStage.NO_APP_FILE,
- SpaceStage.CONFIG_ERROR,
- SpaceStage.BUILD_ERROR,
- SpaceStage.RUNTIME_ERROR,
- SpaceStage.PAUSED,
-]
-
-__version__ = (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
-
-
-class TooManyRequestsError(Exception):
- """Raised when the API returns a 429 status code."""
-
- pass
-
-
-class QueueError(Exception):
- """Raised when the queue is full or there is an issue adding a job to the queue."""
-
- pass
-
-
-class InvalidAPIEndpointError(Exception):
- """Raised when the API endpoint is invalid."""
-
- pass
-
-
-class SpaceDuplicationError(Exception):
- """Raised when something goes wrong with a Space Duplication."""
-
- pass
-
-
-class Status(Enum):
- """Status codes presented to client users."""
-
- STARTING = "STARTING"
- JOINING_QUEUE = "JOINING_QUEUE"
- QUEUE_FULL = "QUEUE_FULL"
- IN_QUEUE = "IN_QUEUE"
- SENDING_DATA = "SENDING_DATA"
- PROCESSING = "PROCESSING"
- ITERATING = "ITERATING"
- PROGRESS = "PROGRESS"
- FINISHED = "FINISHED"
- CANCELLED = "CANCELLED"
-
- @staticmethod
- def ordering(status: Status) -> int:
- """Order of messages. Helpful for testing."""
- order = [
- Status.STARTING,
- Status.JOINING_QUEUE,
- Status.QUEUE_FULL,
- Status.IN_QUEUE,
- Status.SENDING_DATA,
- Status.PROCESSING,
- Status.PROGRESS,
- Status.ITERATING,
- Status.FINISHED,
- Status.CANCELLED,
- ]
- return order.index(status)
-
- def __lt__(self, other: Status):
- return self.ordering(self) < self.ordering(other)
-
- @staticmethod
- def msg_to_status(msg: str) -> Status:
- """Map the raw message from the backend to the status code presented to users."""
- return {
- "send_hash": Status.JOINING_QUEUE,
- "queue_full": Status.QUEUE_FULL,
- "estimation": Status.IN_QUEUE,
- "send_data": Status.SENDING_DATA,
- "process_starts": Status.PROCESSING,
- "process_generating": Status.ITERATING,
- "process_completed": Status.FINISHED,
- "progress": Status.PROGRESS,
- }[msg]
-
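For illustration, the raw queue messages map onto the client-facing codes like this, and `ordering` makes the codes comparable:

```python
assert Status.msg_to_status("estimation") is Status.IN_QUEUE
assert Status.msg_to_status("process_completed") is Status.FINISHED
assert Status.IN_QUEUE < Status.PROCESSING < Status.FINISHED   # __lt__ delegates to Status.ordering
```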
-
-@dataclass
-class ProgressUnit:
- index: Optional[int]
- length: Optional[int]
- unit: Optional[str]
- progress: Optional[float]
- desc: Optional[str]
-
- @classmethod
- def from_ws_msg(cls, data: list[dict]) -> list[ProgressUnit]:
- return [
- cls(
- index=d.get("index"),
- length=d.get("length"),
- unit=d.get("unit"),
- progress=d.get("progress"),
- desc=d.get("desc"),
- )
- for d in data
- ]
-
-
-@dataclass
-class StatusUpdate:
- """Update message sent from the worker thread to the Job on the main thread."""
-
- code: Status
- rank: int | None
- queue_size: int | None
- eta: float | None
- success: bool | None
- time: datetime | None
- progress_data: list[ProgressUnit] | None
-
-
-def create_initial_status_update():
- return StatusUpdate(
- code=Status.STARTING,
- rank=None,
- queue_size=None,
- eta=None,
- success=None,
- time=datetime.now(),
- progress_data=None,
- )
-
-
-@dataclass
-class JobStatus:
- """The job status.
-
- Keeps track of the latest status update and intermediate outputs (not yet implemented).
- """
-
- latest_status: StatusUpdate = field(default_factory=create_initial_status_update)
- outputs: list[Any] = field(default_factory=list)
-
-
-@dataclass
-class Communicator:
- """Helper class to help communicate between the worker thread and main thread."""
-
- lock: Lock
- job: JobStatus
- prediction_processor: Callable[..., tuple]
- reset_url: str
- should_cancel: bool = False
-
-
-########################
-# Network utils
-########################
-
-
-def is_http_url_like(possible_url: str) -> bool:
- """
- Check if the given string looks like an HTTP(S) URL.
- """
- return possible_url.startswith(("http://", "https://"))
-
-
-def probe_url(possible_url: str) -> bool:
- """
- Probe the given URL to see if it responds with a 200 status code (to HEAD, then to GET).
- """
- headers = {"User-Agent": "gradio (https://gradio.app/; team@gradio.app)"}
- try:
- with requests.session() as sess:
- head_request = sess.head(possible_url, headers=headers)
- if head_request.status_code == 405:
- return sess.get(possible_url, headers=headers).ok
- return head_request.ok
- except Exception:
- return False
-
-
-def is_valid_url(possible_url: str) -> bool:
- """
- Check if the given string is a valid URL.
- """
- warnings.warn(
- "is_valid_url should not be used. "
- "Use is_http_url_like() and probe_url(), as suitable, instead.",
- )
- return is_http_url_like(possible_url) and probe_url(possible_url)
-
-
-async def get_pred_from_ws(
- websocket: WebSocketCommonProtocol,
- data: str,
- hash_data: str,
- helper: Communicator | None = None,
-) -> dict[str, Any]:
- completed = False
- resp = {}
- while not completed:
- # Receive message in the background so that we can
- # cancel even while running a long pred
- task = asyncio.create_task(websocket.recv())
- while not task.done():
- if helper:
- with helper.lock:
- if helper.should_cancel:
- # Need to reset the iterator state since the client
- # will not reset the session
- async with httpx.AsyncClient() as http:
- reset = http.post(
- helper.reset_url, json=json.loads(hash_data)
- )
- # Retrieve cancel exception from task
- # otherwise will get nasty warning in console
- task.cancel()
- await asyncio.gather(task, reset, return_exceptions=True)
- raise CancelledError()
- # Need to suspend this coroutine so that task actually runs
- await asyncio.sleep(0.01)
- msg = task.result()
- resp = json.loads(msg)
- if helper:
- with helper.lock:
- has_progress = "progress_data" in resp
- status_update = StatusUpdate(
- code=Status.msg_to_status(resp["msg"]),
- queue_size=resp.get("queue_size"),
- rank=resp.get("rank", None),
- success=resp.get("success"),
- time=datetime.now(),
- eta=resp.get("rank_eta"),
- progress_data=ProgressUnit.from_ws_msg(resp["progress_data"])
- if has_progress
- else None,
- )
- output = resp.get("output", {}).get("data", [])
- if output and status_update.code != Status.FINISHED:
- try:
- result = helper.prediction_processor(*output)
- except Exception as e:
- result = [e]
- helper.job.outputs.append(result)
- helper.job.latest_status = status_update
- if resp["msg"] == "queue_full":
- raise QueueError("Queue is full! Please try again.")
- if resp["msg"] == "send_hash":
- await websocket.send(hash_data)
- elif resp["msg"] == "send_data":
- await websocket.send(data)
- completed = resp["msg"] == "process_completed"
- return resp["output"]
-
-
-########################
-# Data processing utils
-########################
-
-
-def download_tmp_copy_of_file(
- url_path: str, hf_token: str | None = None, dir: str | None = None
-) -> str:
- if dir is not None:
- os.makedirs(dir, exist_ok=True)
- headers = {"Authorization": "Bearer " + hf_token} if hf_token else {}
- directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
- directory.mkdir(exist_ok=True, parents=True)
- file_path = directory / Path(url_path).name
-
- with requests.get(url_path, headers=headers, stream=True) as r:
- r.raise_for_status()
- with open(file_path, "wb") as f:
- shutil.copyfileobj(r.raw, f)
- return str(file_path.resolve())
-
-
-def create_tmp_copy_of_file(file_path: str, dir: str | None = None) -> str:
- directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
- directory.mkdir(exist_ok=True, parents=True)
- dest = directory / Path(file_path).name
- shutil.copy2(file_path, dest)
- return str(dest.resolve())
-
-
-def get_mimetype(filename: str) -> str | None:
- if filename.endswith(".vtt"):
- return "text/vtt"
- mimetype = mimetypes.guess_type(filename)[0]
- if mimetype is not None:
- mimetype = mimetype.replace("x-wav", "wav").replace("x-flac", "flac")
- return mimetype
-
-
-def get_extension(encoding: str) -> str | None:
- encoding = encoding.replace("audio/wav", "audio/x-wav")
- type = mimetypes.guess_type(encoding)[0]
- if type == "audio/flac": # flac is not supported by mimetypes
- return "flac"
- elif type is None:
- return None
- extension = mimetypes.guess_extension(type)
- if extension is not None and extension.startswith("."):
- extension = extension[1:]
- return extension
-
-
-def encode_file_to_base64(f: str | Path):
- with open(f, "rb") as file:
- encoded_string = base64.b64encode(file.read())
- base64_str = str(encoded_string, "utf-8")
- mimetype = get_mimetype(str(f))
- return (
- "data:"
- + (mimetype if mimetype is not None else "")
- + ";base64,"
- + base64_str
- )
-
-
-def encode_url_to_base64(url: str):
- resp = requests.get(url)
- resp.raise_for_status()
- encoded_string = base64.b64encode(resp.content)
- base64_str = str(encoded_string, "utf-8")
- mimetype = get_mimetype(url)
- return (
- "data:" + (mimetype if mimetype is not None else "") + ";base64," + base64_str
- )
-
-
-def encode_url_or_file_to_base64(path: str | Path):
- path = str(path)
- if is_http_url_like(path):
- return encode_url_to_base64(path)
- return encode_file_to_base64(path)
-
-
-def decode_base64_to_binary(encoding: str) -> tuple[bytes, str | None]:
- extension = get_extension(encoding)
- data = encoding.rsplit(",", 1)[-1]
- return base64.b64decode(data), extension
-
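A hedged round-trip sketch of the base64 helpers above; the file path is illustrative, and the extension comes back from the standard `mimetypes` guess on the data URI.

```python
data_uri = encode_url_or_file_to_base64("example.png")   # -> "data:image/png;base64,...."
raw_bytes, ext = decode_base64_to_binary(data_uri)
print(ext)   # e.g. "png"
```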
-
-def strip_invalid_filename_characters(filename: str, max_bytes: int = 200) -> str:
- """Strips invalid characters from a filename and ensures that the file_length is less than `max_bytes` bytes."""
- filename = "".join([char for char in filename if char.isalnum() or char in "._- "])
- filename_len = len(filename.encode())
- if filename_len > max_bytes:
- while filename_len > max_bytes:
- if len(filename) == 0:
- break
- filename = filename[:-1]
- filename_len = len(filename.encode())
- return filename
-
-
-def sanitize_parameter_names(original_name: str) -> str:
- """Cleans up a Python parameter name to make the API info more readable."""
- return (
- "".join([char for char in original_name if char.isalnum() or char in " _"])
- .replace(" ", "_")
- .lower()
- )
-
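A quick illustration of the two name-cleaning helpers: disallowed characters are dropped and the result is trimmed to a byte budget, while parameter names are snake_cased.

```python
assert strip_invalid_filename_characters("my:file?.wav", max_bytes=200) == "myfile.wav"
assert strip_invalid_filename_characters("aaaa", max_bytes=2) == "aa"
assert sanitize_parameter_names("Top P") == "top_p"
```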
-
-def decode_base64_to_file(
- encoding: str,
- file_path: str | None = None,
- dir: str | Path | None = None,
- prefix: str | None = None,
-):
- directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
- directory.mkdir(exist_ok=True, parents=True)
- data, extension = decode_base64_to_binary(encoding)
- if file_path is not None and prefix is None:
- filename = Path(file_path).name
- prefix = filename
- if "." in filename:
- prefix = filename[0 : filename.index(".")]
- extension = filename[filename.index(".") + 1 :]
-
- if prefix is not None:
- prefix = strip_invalid_filename_characters(prefix)
-
- if extension is None:
- file_obj = tempfile.NamedTemporaryFile(
- delete=False, prefix=prefix, dir=directory
- )
- else:
- file_obj = tempfile.NamedTemporaryFile(
- delete=False,
- prefix=prefix,
- suffix="." + extension,
- dir=directory,
- )
- file_obj.write(data)
- file_obj.flush()
- return file_obj
-
-
-def dict_or_str_to_json_file(jsn: str | dict | list, dir: str | Path | None = None):
- if dir is not None:
- os.makedirs(dir, exist_ok=True)
-
- file_obj = tempfile.NamedTemporaryFile(
- delete=False, suffix=".json", dir=dir, mode="w+"
- )
- if isinstance(jsn, str):
- jsn = json.loads(jsn)
- json.dump(jsn, file_obj)
- file_obj.flush()
- return file_obj
-
-
-def file_to_json(file_path: str | Path) -> dict | list:
- with open(file_path) as f:
- return json.load(f)
-
-
-###########################
-# HuggingFace Hub API Utils
-###########################
-def set_space_timeout(
- space_id: str,
- hf_token: str | None = None,
- timeout_in_seconds: int = 300,
-):
- headers = huggingface_hub.utils.build_hf_headers(
- token=hf_token,
- library_name="gradio_client",
- library_version=__version__,
- )
- req = requests.post(
- f"https://huggingface.co/api/spaces/{space_id}/sleeptime",
- json={"seconds": timeout_in_seconds},
- headers=headers,
- )
- try:
- huggingface_hub.utils.hf_raise_for_status(req)
- except huggingface_hub.utils.HfHubHTTPError as err:
- raise SpaceDuplicationError(
- f"Could not set sleep timeout on duplicated Space. Please visit {SPACE_URL.format(space_id)} "
- "to set a timeout manually to reduce billing charges."
- ) from err
-
-
-########################
-# Misc utils
-########################
-
-
-def synchronize_async(func: Callable, *args, **kwargs) -> Any:
- """
- Runs async functions in sync scopes. Can be used in any scope.
-
- Example:
- if inspect.iscoroutinefunction(block_fn.fn):
- predictions = utils.synchronize_async(block_fn.fn, *processed_input)
-
- Args:
- func:
- *args:
- **kwargs:
- """
- return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs) # type: ignore
-
-
-class APIInfoParseError(ValueError):
- pass
-
-
-def get_type(schema: dict):
- if "type" in schema:
- return schema["type"]
- elif schema.get("oneOf"):
- return "oneOf"
- elif schema.get("anyOf"):
- return "anyOf"
- else:
- raise APIInfoParseError(f"Cannot parse type for {schema}")
-
-
-def json_schema_to_python_type(schema: Any) -> str:
- """Convert the json schema into a python type hint"""
- type_ = get_type(schema)
- if type_ == {}:
- if "json" in schema["description"]:
- return "Dict[Any, Any]"
- else:
- return "Any"
- elif type_ == "null":
- return "None"
- elif type_ == "integer":
- return "int"
- elif type_ == "string":
- return "str"
- elif type_ == "boolean":
- return "bool"
- elif type_ == "number":
- return "int | float"
- elif type_ == "array":
- items = schema.get("items")
- if "prefixItems" in items:
- elements = ", ".join(
- [json_schema_to_python_type(i) for i in items["prefixItems"]]
- )
- return f"Tuple[{elements}]"
- else:
- elements = json_schema_to_python_type(items)
- return f"List[{elements}]"
- elif type_ == "object":
- des = ", ".join(
- [
- f"{n}: {json_schema_to_python_type(v)} ({v.get('description')})"
- for n, v in schema["properties"].items()
- ]
- )
- return f"Dict({des})"
- elif type_ in ["oneOf", "anyOf"]:
- desc = " | ".join([json_schema_to_python_type(i) for i in schema[type_]])
- return desc
- else:
- raise APIInfoParseError(f"Cannot parse schema {schema}")
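A small illustration of the schema-to-type-hint conversion above (the schema itself is made up for the example):

```python
schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string", "description": "display name"},
        "scores": {"type": "array", "items": {"type": "number"}},
    },
}
print(json_schema_to_python_type(schema))
# -> Dict(name: str (display name), scores: List[int | float] (None))
```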
diff --git a/spaces/Deci/DeciLM-6b-instruct/USE_POLICY.md b/spaces/Deci/DeciLM-6b-instruct/USE_POLICY.md
deleted file mode 100644
index abbcc199b2d1e4feb5d7e40c0bd67e1b0ce29e97..0000000000000000000000000000000000000000
--- a/spaces/Deci/DeciLM-6b-instruct/USE_POLICY.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Llama 2 Acceptable Use Policy
-
-Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
-
-## Prohibited Uses
-We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
-
-1. Violate the law or others’ rights, including to:
- 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
- 1. Violence or terrorism
- 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
- 3. Human trafficking, exploitation, and sexual violence
- 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
- 5. Sexual solicitation
- 6. Any other criminal activity
- 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
- 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
- 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
- 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
- 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
- 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
-
-
-
-2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
- 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
- 2. Guns and illegal weapons (including weapon development)
- 3. Illegal drugs and regulated/controlled substances
- 4. Operation of critical infrastructure, transportation technologies, or heavy machinery
- 5. Self-harm or harm to others, including suicide, cutting, and eating disorders
- 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
-
-
-
-3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
- 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
- 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
- 3. Generating, promoting, or further distributing spam
- 4. Impersonating another individual without consent, authorization, or legal right
- 5. Representing that the use of Llama 2 or outputs are human-generated
- 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
-4. Fail to appropriately disclose to end users any known dangers of your AI system
-
-Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
-
-* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
-* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
-* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
-* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [LlamaUseReport@meta.com](mailto:LlamaUseReport@meta.com)
-
diff --git a/spaces/Detomo/ai-comic-generation/src/app/queries/predictWithHuggingFace.ts b/spaces/Detomo/ai-comic-generation/src/app/queries/predictWithHuggingFace.ts
deleted file mode 100644
index 06bd16c1536da31ebc17ec37f073773f8091a560..0000000000000000000000000000000000000000
--- a/spaces/Detomo/ai-comic-generation/src/app/queries/predictWithHuggingFace.ts
+++ /dev/null
@@ -1,90 +0,0 @@
-"use server"
-
-import { HfInference, HfInferenceEndpoint } from "@huggingface/inference"
-import { LLMEngine } from "@/types"
-
-const hf = new HfInference(process.env.AUTH_HF_API_TOKEN)
-
-const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
-const inferenceEndpoint = `${process.env.LLM_HF_INFERENCE_ENDPOINT_URL || ""}`
-const inferenceModel = `${process.env.LLM_HF_INFERENCE_API_MODEL || ""}`
-
-let hfie: HfInferenceEndpoint = hf
-
-switch (llmEngine) {
- case "INFERENCE_ENDPOINT":
- if (inferenceEndpoint) {
- console.log("Using a custom HF Inference Endpoint")
- hfie = hf.endpoint(inferenceEndpoint)
- } else {
- const error = "No Inference Endpoint URL defined"
- console.error(error)
- throw new Error(error)
- }
- break;
-
- case "INFERENCE_API":
- if (inferenceModel) {
- console.log("Using an HF Inference API Model")
- } else {
- const error = "No Inference API model defined"
- console.error(error)
- throw new Error(error)
- }
- break;
-
- default:
- const error = "Please check your Hugging Face Inference API or Inference Endpoint settings"
- console.error(error)
- throw new Error(error)
-}
-
-const api = llmEngine === "INFERENCE_ENDPOINT" ? hfie : hf
-
-export async function predictWithHuggingFace(inputs: string) {
- let instructions = ""
- try {
- for await (const output of api.textGenerationStream({
- model: llmEngine === "INFERENCE_ENDPOINT" ? undefined : (inferenceModel || undefined),
- inputs,
- parameters: {
- do_sample: true,
- // we don't need many tokens for our task
- // but to be safe, let's count ~110 tokens per panel
- max_new_tokens: 450, // 1150,
- return_full_text: false,
- }
- })) {
- instructions += output.token.text
- process.stdout.write(output.token.text)
- if (
- instructions.includes("</s>") ||
- instructions.includes("<s>") ||
- instructions.includes("[INST]") ||
- instructions.includes("[/INST]") ||
- instructions.includes("<SYS>") ||
- instructions.includes("</SYS>") ||
- instructions.includes("<|end|>") ||
- instructions.includes("<|assistant|>")
- ) {
- break
- }
- }
- } catch (err) {
- console.error(`error during generation: ${err}`)
- }
-
- // need to do some cleanup of the garbage the LLM might have given us
- return (
- instructions
- .replaceAll("<|end|>", "")
- .replaceAll("<s>", "")
- .replaceAll("</s>", "")
- .replaceAll("[INST]", "")
- .replaceAll("[/INST]", "")
- .replaceAll("<SYS>", "")
- .replaceAll("</SYS>", "")
- .replaceAll("<|assistant|>", "")
- .replaceAll('""', '"')
- )
-}
diff --git a/spaces/Djacon/emotion_detection/inference.py b/spaces/Djacon/emotion_detection/inference.py
deleted file mode 100644
index d15c886a90c8239aab4bf14dd0655bf6a9098eee..0000000000000000000000000000000000000000
--- a/spaces/Djacon/emotion_detection/inference.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import torch
-from transformers import BertForSequenceClassification, AutoTokenizer
-from transformers import PegasusForConditionalGeneration, PegasusTokenizer
-
-# path_emo = 'Djacon/rubert-tiny2-russian-emotion-detection'
-path_emo = './models/emotion_detection/'
-model_emo = BertForSequenceClassification.from_pretrained(path_emo)
-tokenizer_emo = AutoTokenizer.from_pretrained(path_emo)
-
-LABELS = ['Joy', 'Interest', 'Surprise', 'Sadness', 'Anger',
- 'Disgust', 'Fear', 'Guilt', 'Neutral']
-
-
-# Probabilistic prediction of emotion in a text
-@torch.no_grad()
-def predict_emotions(text: str) -> str:
- inputs = tokenizer_emo(text, max_length=512, truncation=True,
- return_tensors='pt')
- inputs = inputs.to(model_emo.device)
-
- outputs = model_emo(**inputs)
-
- pred = torch.nn.functional.softmax(outputs.logits, dim=1)
-
- emotions_list = {}
- for i in range(len(pred[0].tolist())):
- emotions_list[LABELS[i]] = round(100 * pred[0].tolist()[i], 3)
- return '\n'.join(f'{k}: {v}' for k, v in sorted(emotions_list.items(),
- key=lambda x: -x[1]))
-
-
-path_sum = './models/summarizer/'
-model_sum = PegasusForConditionalGeneration.from_pretrained(path_sum)
-tokenizer_sum = PegasusTokenizer.from_pretrained(path_sum)
-
-
-def predict_summarization(text: str) -> str:
- batch = tokenizer_sum([text], truncation=True, padding="longest",
- return_tensors="pt")
- translated = model_sum.generate(**batch)
- return tokenizer_sum.batch_decode(translated, skip_special_tokens=True)[0]
-
-
-def test():
- predict_emotions('I am so happy now!')
- print('\n>>> Emotion Detection successfully initialized! <<<\n')
-
- predict_summarization('I am so happy now!')
- print('\n>>> Pegasus successfully initialized! <<<\n')
-
-
-test()
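The commented-out `path_emo` above points at a Hub checkpoint, so the emotion model can be exercised without the local `./models/` directories. A minimal sketch, assuming the `Djacon/rubert-tiny2-russian-emotion-detection` checkpoint is downloadable (the summarizer is omitted because no Hub id is given for it):

```python
import torch
from transformers import AutoTokenizer, BertForSequenceClassification

HUB_ID = 'Djacon/rubert-tiny2-russian-emotion-detection'  # from the commented-out path above
model = BertForSequenceClassification.from_pretrained(HUB_ID)
tokenizer = AutoTokenizer.from_pretrained(HUB_ID)

LABELS = ['Joy', 'Interest', 'Surprise', 'Sadness', 'Anger',
          'Disgust', 'Fear', 'Guilt', 'Neutral']


@torch.no_grad()
def top_emotion(text: str):
    """Return only the most likely label and its probability (in percent)."""
    inputs = tokenizer(text, max_length=512, truncation=True, return_tensors='pt')
    probs = torch.softmax(model(**inputs).logits, dim=-1)[0]
    idx = int(probs.argmax())
    return LABELS[idx], round(100 * float(probs[idx]), 3)


print(top_emotion('I am so happy now!'))
```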
diff --git a/spaces/Docfile/open_llm_leaderboard/src/assets/text_content.py b/spaces/Docfile/open_llm_leaderboard/src/assets/text_content.py
deleted file mode 100644
index 71d642419341ea8f2245df56c4d087e189617b1f..0000000000000000000000000000000000000000
--- a/spaces/Docfile/open_llm_leaderboard/src/assets/text_content.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from src.display_models.model_metadata_type import ModelType
-
-TITLE = """
- 🤗 Open LLM Leaderboard
-"""
-
-INTRODUCTION_TEXT = """
-📐 The 🤗 Open LLM Leaderboard aims to track, rank and evaluate open LLMs and chatbots.
-
-🤗 Submit a model for automated evaluation on the 🤗 GPU cluster on the "Submit" page!
-The leaderboard's backend runs the great [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) - read more details in the "About" page!
-"""
-
-LLM_BENCHMARKS_TEXT = f"""
-# Context
-With the plethora of large language models (LLMs) and chatbots being released week upon week, often with grandiose claims of their performance, it can be hard to filter out the genuine progress that is being made by the open-source community and which model is the current state of the art.
-
-## Icons
-{ModelType.PT.to_str(" : ")} model
-{ModelType.FT.to_str(" : ")} model
-{ModelType.IFT.to_str(" : ")} model
-{ModelType.RL.to_str(" : ")} model
-If there is no icon, we have not uploaded the information on the model yet, feel free to open an issue with the model information!
-
-🏴☠️ indicates that this model has been flagged by the community, and should probably be ignored! Clicking the icon will redirect you to the discussion about the model.
-(For example, the model was trained on the evaluation data, and is therefore cheating on the leaderboard.)
-
-## How it works
-
-📈 We evaluate models on 4 key benchmarks using the Eleuther AI Language Model Evaluation Harness, a unified framework to test generative language models on a large number of different evaluation tasks.
-
-- AI2 Reasoning Challenge (25-shot) - a set of grade-school science questions.
-- HellaSwag (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models.
-- MMLU (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
-- TruthfulQA (0-shot) - a test to measure a model’s propensity to reproduce falsehoods commonly found online. Note: TruthfulQA in the Harness is actually a minimum 6-shot task, as 6 examples are systematically prepended, even when it is launched with 0 for the number of few-shot examples.
-
-For all these evaluations, a higher score is a better score.
-We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings.
-
-## Details and logs
-You can find:
-- detailed numerical results in the `results` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/results
-- details on the input/outputs for the models in the `details` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/details
-- community queries and running status in the `requests` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/requests
-
-## Reproducibility
-To reproduce our results, here are the commands you can run, using [this version](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness:
-`python main.py --model=hf-causal --model_args="pretrained=<your_model>,use_accelerate=True,revision=<your_model_revision>"`
-` --tasks=<task_list> --num_fewshot=<n_few_shot> --batch_size=2 --output_path=<output_path>`
-
-The total batch size we get for models which fit on one A100 node is 16 (8 GPUs * 2). If you don't use parallelism, adapt your batch size to fit.
-*You can expect results to vary slightly for different batch sizes because of padding.*
-
-The tasks and few shots parameters are:
-- ARC: 25-shot, *arc-challenge* (`acc_norm`)
-- HellaSwag: 10-shot, *hellaswag* (`acc_norm`)
-- TruthfulQA: 0-shot, *truthfulqa-mc* (`mc2`)
-- MMLU: 5-shot, *hendrycksTest-abstract_algebra,hendrycksTest-anatomy,hendrycksTest-astronomy,hendrycksTest-business_ethics,hendrycksTest-clinical_knowledge,hendrycksTest-college_biology,hendrycksTest-college_chemistry,hendrycksTest-college_computer_science,hendrycksTest-college_mathematics,hendrycksTest-college_medicine,hendrycksTest-college_physics,hendrycksTest-computer_security,hendrycksTest-conceptual_physics,hendrycksTest-econometrics,hendrycksTest-electrical_engineering,hendrycksTest-elementary_mathematics,hendrycksTest-formal_logic,hendrycksTest-global_facts,hendrycksTest-high_school_biology,hendrycksTest-high_school_chemistry,hendrycksTest-high_school_computer_science,hendrycksTest-high_school_european_history,hendrycksTest-high_school_geography,hendrycksTest-high_school_government_and_politics,hendrycksTest-high_school_macroeconomics,hendrycksTest-high_school_mathematics,hendrycksTest-high_school_microeconomics,hendrycksTest-high_school_physics,hendrycksTest-high_school_psychology,hendrycksTest-high_school_statistics,hendrycksTest-high_school_us_history,hendrycksTest-high_school_world_history,hendrycksTest-human_aging,hendrycksTest-human_sexuality,hendrycksTest-international_law,hendrycksTest-jurisprudence,hendrycksTest-logical_fallacies,hendrycksTest-machine_learning,hendrycksTest-management,hendrycksTest-marketing,hendrycksTest-medical_genetics,hendrycksTest-miscellaneous,hendrycksTest-moral_disputes,hendrycksTest-moral_scenarios,hendrycksTest-nutrition,hendrycksTest-philosophy,hendrycksTest-prehistory,hendrycksTest-professional_accounting,hendrycksTest-professional_law,hendrycksTest-professional_medicine,hendrycksTest-professional_psychology,hendrycksTest-public_relations,hendrycksTest-security_studies,hendrycksTest-sociology,hendrycksTest-us_foreign_policy,hendrycksTest-virology,hendrycksTest-world_religions* (average of all the results `acc`)
-
-## Quantization
-To get more information about quantization, see:
-- 8 bits: [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), [paper](https://arxiv.org/abs/2208.07339)
-- 4 bits: [blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes), [paper](https://arxiv.org/abs/2305.14314)
-
-## More resources
-If you still have questions, you can check our FAQ [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/179)!
-We also gather cool resources from the community, other teams, and other labs [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/174)!
-"""
-
-EVALUATION_QUEUE_TEXT = """
-# Evaluation Queue for the 🤗 Open LLM Leaderboard
-
-Models added here will be automatically evaluated on the 🤗 cluster.
-
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it - stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""
-@misc{open-llm-leaderboard,
-  author = {Edward Beeching and Clémentine Fourrier and Nathan Habib and Sheon Han and Nathan Lambert and Nazneen Rajani and Omar Sanseviero and Lewis Tunstall and Thomas Wolf},
- title = {Open LLM Leaderboard},
- year = {2023},
- publisher = {Hugging Face},
- howpublished = "\url{https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard}"
-}
-@software{eval-harness,
- author = {Gao, Leo and
- Tow, Jonathan and
- Biderman, Stella and
- Black, Sid and
- DiPofi, Anthony and
- Foster, Charles and
- Golding, Laurence and
- Hsu, Jeffrey and
- McDonell, Kyle and
- Muennighoff, Niklas and
- Phang, Jason and
- Reynolds, Laria and
- Tang, Eric and
- Thite, Anish and
- Wang, Ben and
- Wang, Kevin and
- Zou, Andy},
- title = {A framework for few-shot language model evaluation},
- month = sep,
- year = 2021,
- publisher = {Zenodo},
- version = {v0.0.1},
- doi = {10.5281/zenodo.5371628},
- url = {https://doi.org/10.5281/zenodo.5371628}
-}
-@misc{clark2018think,
- title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
- author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
- year={2018},
- eprint={1803.05457},
- archivePrefix={arXiv},
- primaryClass={cs.AI}
-}
-@misc{zellers2019hellaswag,
- title={HellaSwag: Can a Machine Really Finish Your Sentence?},
- author={Rowan Zellers and Ari Holtzman and Yonatan Bisk and Ali Farhadi and Yejin Choi},
- year={2019},
- eprint={1905.07830},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}
-@misc{hendrycks2021measuring,
- title={Measuring Massive Multitask Language Understanding},
- author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
- year={2021},
- eprint={2009.03300},
- archivePrefix={arXiv},
- primaryClass={cs.CY}
-}
-@misc{lin2022truthfulqa,
- title={TruthfulQA: Measuring How Models Mimic Human Falsehoods},
- author={Stephanie Lin and Jacob Hilton and Owain Evans},
- year={2022},
- eprint={2109.07958},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}"""
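Good practice 2) in `EVALUATION_QUEUE_TEXT` above asks submitters to convert their weights to safetensors. A minimal conversion sketch, not part of the leaderboard code itself, assuming a causal LM and placeholder names:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

name, revision = "your-model-name", "main"  # placeholders

model = AutoModelForCausalLM.from_pretrained(name, revision=revision)
tokenizer = AutoTokenizer.from_pretrained(name, revision=revision)

# safe_serialization=True writes model.safetensors instead of pytorch_model.bin.
model.save_pretrained("./export", safe_serialization=True)
tokenizer.save_pretrained("./export")

# Then upload the contents of ./export to the Hub (e.g. with model.push_to_hub)
# before submitting the model for evaluation.
```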
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/legacy.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/legacy.py
deleted file mode 100644
index 1f8b1a87fbf9a2c6b10227b9516a6851f6fabf12..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/stylegan_human/legacy.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-#
-import pickle
-import dnnlib
-import re
-from typing import List, Optional
-import torch
-import copy
-import numpy as np
-from torch_utils import misc
-
-
-# ----------------------------------------------------------------------------
-# loading torch pkl
-def load_network_pkl(f, force_fp16=False, G_only=False):
- data = _LegacyUnpickler(f).load()
- if G_only:
- f = open('ori_model_Gonly.txt', 'a+')
- else:
- f = open('ori_model.txt', 'a+')
- for key in data.keys():
- f.write(str(data[key]))
- f.close()
-
-    # This block is commented out; to convert a legacy TensorFlow pickle, use the original script from StyleGAN2-ada-pytorch.
- # # Legacy TensorFlow pickle => convert.
- # if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
- # tf_G, tf_D, tf_Gs = data
- # G = convert_tf_generator(tf_G)
- # D = convert_tf_discriminator(tf_D)
- # G_ema = convert_tf_generator(tf_Gs)
- # data = dict(G=G, D=D, G_ema=G_ema)
-
- # Add missing fields.
- if 'training_set_kwargs' not in data:
- data['training_set_kwargs'] = None
- if 'augment_pipe' not in data:
- data['augment_pipe'] = None
-
- # Validate contents.
- assert isinstance(data['G_ema'], torch.nn.Module)
- if not G_only:
- assert isinstance(data['D'], torch.nn.Module)
- assert isinstance(data['G'], torch.nn.Module)
- assert isinstance(data['training_set_kwargs'], (dict, type(None)))
- assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
-
- # Force FP16.
- if force_fp16:
- if G_only:
- convert_list = ['G_ema'] # 'G'
- else:
- convert_list = ['G', 'D', 'G_ema']
- for key in convert_list:
- old = data[key]
- kwargs = copy.deepcopy(old.init_kwargs)
- if key.startswith('G'):
- kwargs.synthesis_kwargs = dnnlib.EasyDict(
- kwargs.get('synthesis_kwargs', {}))
- kwargs.synthesis_kwargs.num_fp16_res = 4
- kwargs.synthesis_kwargs.conv_clamp = 256
- if key.startswith('D'):
- kwargs.num_fp16_res = 4
- kwargs.conv_clamp = 256
- if kwargs != old.init_kwargs:
- new = type(old)(**kwargs).eval().requires_grad_(False)
- misc.copy_params_and_buffers(old, new, require_all=True)
- data[key] = new
- return data
-
-
-class _TFNetworkStub(dnnlib.EasyDict):
- pass
-
-
-class _LegacyUnpickler(pickle.Unpickler):
- def find_class(self, module, name):
- if module == 'dnnlib.tflib.network' and name == 'Network':
- return _TFNetworkStub
- return super().find_class(module, name)
-
-# ----------------------------------------------------------------------------
-
-
-def num_range(s: str) -> List[int]:
- '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
-
- range_re = re.compile(r'^(\d+)-(\d+)$')
- m = range_re.match(s)
- if m:
- return list(range(int(m.group(1)), int(m.group(2))+1))
- vals = s.split(',')
- return [int(x) for x in vals]
-
-
-# ----------------------------------------------------------------------------
-# loading tf pkl
-def load_pkl(file_or_url):
- with open(file_or_url, 'rb') as file:
- return pickle.load(file, encoding='latin1')
-
-# ----------------------------------------------------------------------------
-
-# For editing
-
-
-def visual(output, out_path):
- import torch
- import cv2
- import numpy as np
- output = (output + 1)/2
- output = torch.clamp(output, 0, 1)
- if output.shape[1] == 1:
- output = torch.cat([output, output, output], 1)
- output = output[0].detach().cpu().permute(1, 2, 0).numpy()
- output = (output*255).astype(np.uint8)
- output = output[:, :, ::-1]
- cv2.imwrite(out_path, output)
-
-
-def save_obj(obj, path):
- with open(path, 'wb+') as f:
- pickle.dump(obj, f, protocol=4)
-
-# ----------------------------------------------------------------------------
-
-# Converting pkl to pth, change dict info inside pickle
-
-
-def convert_to_rgb(state_ros, state_nv, ros_name, nv_name):
- state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.torgb.weight"].unsqueeze(
- 0)
- state_ros[f"{ros_name}.bias"] = state_nv[f"{nv_name}.torgb.bias"].unsqueeze(
- 0).unsqueeze(-1).unsqueeze(-1)
- state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.torgb.affine.weight"]
- state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.torgb.affine.bias"]
-
-
-def convert_conv(state_ros, state_nv, ros_name, nv_name):
- state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.weight"].unsqueeze(
- 0)
- state_ros[f"{ros_name}.activate.bias"] = state_nv[f"{nv_name}.bias"]
- state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.affine.weight"]
- state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.affine.bias"]
- state_ros[f"{ros_name}.noise.weight"] = state_nv[f"{nv_name}.noise_strength"].unsqueeze(
- 0)
-
-
-def convert_blur_kernel(state_ros, state_nv, level):
- """Not quite sure why there is a factor of 4 here"""
- # They are all the same
- state_ros[f"convs.{2*level}.conv.blur.kernel"] = 4 * \
- state_nv["synthesis.b4.resample_filter"]
- state_ros[f"to_rgbs.{level}.upsample.kernel"] = 4 * \
- state_nv["synthesis.b4.resample_filter"]
-
-
-def determine_config(state_nv):
- mapping_names = [name for name in state_nv.keys() if "mapping.fc" in name]
-    synthesis_names = [
-        name for name in state_nv.keys() if "synthesis.b" in name]
-
-    n_mapping = max([int(re.findall(r"(\d+)", n)[0])
-                     for n in mapping_names]) + 1
-    resolution = max([int(re.findall(r"(\d+)", n)[0]) for n in synthesis_names])
- n_layers = np.log(resolution/2)/np.log(2)
-
- return n_mapping, n_layers
-
-
-def convert(network_pkl, output_file, G_only=False):
- with dnnlib.util.open_url(network_pkl) as f:
- G_nvidia = load_network_pkl(f, G_only=G_only)['G_ema']
-
- state_nv = G_nvidia.state_dict()
- n_mapping, n_layers = determine_config(state_nv)
-
- state_ros = {}
-
- for i in range(n_mapping):
- state_ros[f"style.{i+1}.weight"] = state_nv[f"mapping.fc{i}.weight"]
- state_ros[f"style.{i+1}.bias"] = state_nv[f"mapping.fc{i}.bias"]
-
- for i in range(int(n_layers)):
- if i > 0:
- for conv_level in range(2):
- convert_conv(
- state_ros, state_nv, f"convs.{2*i-2+conv_level}", f"synthesis.b{4*(2**i)}.conv{conv_level}")
- state_ros[f"noises.noise_{2*i-1+conv_level}"] = state_nv[f"synthesis.b{4*(2**i)}.conv{conv_level}.noise_const"].unsqueeze(
- 0).unsqueeze(0)
-
- convert_to_rgb(state_ros, state_nv,
- f"to_rgbs.{i-1}", f"synthesis.b{4*(2**i)}")
- convert_blur_kernel(state_ros, state_nv, i-1)
-
- else:
- state_ros[f"input.input"] = state_nv[f"synthesis.b{4*(2**i)}.const"].unsqueeze(
- 0)
- convert_conv(state_ros, state_nv, "conv1",
- f"synthesis.b{4*(2**i)}.conv1")
- state_ros[f"noises.noise_{2*i}"] = state_nv[f"synthesis.b{4*(2**i)}.conv1.noise_const"].unsqueeze(
- 0).unsqueeze(0)
- convert_to_rgb(state_ros, state_nv, "to_rgb1",
- f"synthesis.b{4*(2**i)}")
-
- # https://github.com/yuval-alaluf/restyle-encoder/issues/1#issuecomment-828354736
- latent_avg = state_nv['mapping.w_avg']
- state_dict = {"g_ema": state_ros, "latent_avg": latent_avg}
- # if G_only:
- # f = open('converted_model_Gonly.txt','a+')
- # else:
- # f = open('converted_model.txt','a+')
- # for key in state_dict['g_ema'].keys():
- # f.write(str(key)+': '+str(state_dict['g_ema'][key].shape)+'\n')
- # f.close()
- torch.save(state_dict, output_file)
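A short usage sketch for the helpers above, assuming the module is importable as `legacy` and that a StyleGAN-Human pickle is available locally (the file names are placeholders):

```python
from legacy import convert, num_range

# num_range() accepts either a comma-separated list or an inclusive range.
assert num_range("0-4") == [0, 1, 2, 3, 4]
assert num_range("1,3,7") == [1, 3, 7]

# Convert an NVIDIA-format .pkl into a rosinality-style .pth, keeping only the
# EMA generator (G_only=True also skips the discriminator checks above).
convert("stylegan_human.pkl", "stylegan_human.pth", G_only=True)
```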
diff --git a/spaces/ECCV2022/bytetrack/yolox/data/datasets/datasets_wrapper.py b/spaces/ECCV2022/bytetrack/yolox/data/datasets/datasets_wrapper.py
deleted file mode 100644
index a262e6a6755e7fa69132d3f59fbe20b1bb17a6cf..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/data/datasets/datasets_wrapper.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-from torch.utils.data.dataset import ConcatDataset as torchConcatDataset
-from torch.utils.data.dataset import Dataset as torchDataset
-
-import bisect
-from functools import wraps
-
-
-class ConcatDataset(torchConcatDataset):
- def __init__(self, datasets):
- super(ConcatDataset, self).__init__(datasets)
- if hasattr(self.datasets[0], "input_dim"):
- self._input_dim = self.datasets[0].input_dim
- self.input_dim = self.datasets[0].input_dim
-
- def pull_item(self, idx):
- if idx < 0:
- if -idx > len(self):
- raise ValueError(
- "absolute value of index should not exceed dataset length"
- )
- idx = len(self) + idx
- dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
- if dataset_idx == 0:
- sample_idx = idx
- else:
- sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
- return self.datasets[dataset_idx].pull_item(sample_idx)
-
-
-class MixConcatDataset(torchConcatDataset):
- def __init__(self, datasets):
- super(MixConcatDataset, self).__init__(datasets)
- if hasattr(self.datasets[0], "input_dim"):
- self._input_dim = self.datasets[0].input_dim
- self.input_dim = self.datasets[0].input_dim
-
- def __getitem__(self, index):
-
- if not isinstance(index, int):
- idx = index[1]
- if idx < 0:
- if -idx > len(self):
- raise ValueError(
- "absolute value of index should not exceed dataset length"
- )
- idx = len(self) + idx
- dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
- if dataset_idx == 0:
- sample_idx = idx
- else:
- sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
- if not isinstance(index, int):
- index = (index[0], sample_idx, index[2])
-
- return self.datasets[dataset_idx][index]
-
-
-class Dataset(torchDataset):
- """ This class is a subclass of the base :class:`torch.utils.data.Dataset`,
- that enables on the fly resizing of the ``input_dim``.
-
- Args:
- input_dimension (tuple): (width,height) tuple with default dimensions of the network
- """
-
- def __init__(self, input_dimension, mosaic=True):
- super().__init__()
- self.__input_dim = input_dimension[:2]
- self.enable_mosaic = mosaic
-
- @property
- def input_dim(self):
- """
- Dimension that can be used by transforms to set the correct image size, etc.
- This allows transforms to have a single source of truth
- for the input dimension of the network.
-
- Return:
- list: Tuple containing the current width,height
- """
- if hasattr(self, "_input_dim"):
- return self._input_dim
- return self.__input_dim
-
- @staticmethod
- def resize_getitem(getitem_fn):
- """
- Decorator method that needs to be used around the ``__getitem__`` method. |br|
- This decorator enables the on the fly resizing of
- the ``input_dim`` with our :class:`~lightnet.data.DataLoader` class.
-
- Example:
- >>> class CustomSet(ln.data.Dataset):
- ... def __len__(self):
- ... return 10
- ... @ln.data.Dataset.resize_getitem
- ... def __getitem__(self, index):
- ... # Should return (image, anno) but here we return input_dim
- ... return self.input_dim
- >>> data = CustomSet((200,200))
- >>> data[0]
- (200, 200)
- >>> data[(480,320), 0]
- (480, 320)
- """
-
- @wraps(getitem_fn)
- def wrapper(self, index):
- if not isinstance(index, int):
- has_dim = True
- self._input_dim = index[0]
- self.enable_mosaic = index[2]
- index = index[1]
- else:
- has_dim = False
-
- ret_val = getitem_fn(self, index)
-
- if has_dim:
- del self._input_dim
-
- return ret_val
-
- return wrapper
diff --git a/spaces/ECCV2022/bytetrack/yolox/layers/csrc/cocoeval/cocoeval.h b/spaces/ECCV2022/bytetrack/yolox/layers/csrc/cocoeval/cocoeval.h
deleted file mode 100644
index f9def4151102d1c493dc88186384342565798d05..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/layers/csrc/cocoeval/cocoeval.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-#pragma once
-
-#include <pybind11/numpy.h>
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <pybind11/stl_bind.h>
-#include <vector>
-
-namespace py = pybind11;
-
-namespace COCOeval {
-
-// Annotation data for a single object instance in an image
-struct InstanceAnnotation {
- InstanceAnnotation(
- uint64_t id,
- double score,
- double area,
- bool is_crowd,
- bool ignore)
- : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {}
- uint64_t id;
- double score = 0.;
- double area = 0.;
- bool is_crowd = false;
- bool ignore = false;
-};
-
-// Stores intermediate results for evaluating detection results for a single
-// image that has D detected instances and G ground truth instances. This stores
-// matches between detected and ground truth instances
-struct ImageEvaluation {
- // For each of the D detected instances, the id of the matched ground truth
- // instance, or 0 if unmatched
-  std::vector<uint64_t> detection_matches;
-
- // The detection score of each of the D detected instances
-  std::vector<double> detection_scores;
-
- // Marks whether or not each of G instances was ignored from evaluation (e.g.,
- // because it's outside area_range)
-  std::vector<bool> ground_truth_ignores;
-
- // Marks whether or not each of D instances was ignored from evaluation (e.g.,
- // because it's outside aRng)
-  std::vector<bool> detection_ignores;
-};
-
-template <typename T>
-using ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>;
-
-// C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each
-// combination of image, category, area range settings, and IOU thresholds to
-// evaluate, it matches detected instances to ground truth instances and stores
-// the results into a vector of ImageEvaluation results, which will be
-// interpreted by the COCOeval::Accumulate() function to produce precision-recall
-// curves. The parameters of nested vectors have the following semantics:
-// image_category_ious[i][c][d][g] is the intersection over union of the d'th
-// detected instance and g'th ground truth instance of
-// category category_ids[c] in image image_ids[i]
-// image_category_ground_truth_instances[i][c] is a vector of ground truth
-// instances in image image_ids[i] of category category_ids[c]
-// image_category_detection_instances[i][c] is a vector of detected
-// instances in image image_ids[i] of category category_ids[c]
-std::vector<ImageEvaluation> EvaluateImages(
-    const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples
-    int max_detections,
-    const std::vector<double>& iou_thresholds,
-    const ImageCategoryInstances<std::vector<double>>& image_category_ious,
-    const ImageCategoryInstances<InstanceAnnotation>&
-        image_category_ground_truth_instances,
-    const ImageCategoryInstances<InstanceAnnotation>&
-        image_category_detection_instances);
-
-// C++ implementation of COCOeval.accumulate(), which generates precision
-// recall curves for each set of category, IOU threshold, detection area range,
-// and max number of detections parameters. It is assumed that the parameter
-// evaluations is the return value of the function COCOeval::EvaluateImages(),
-// which was called with the same parameter settings params
-py::dict Accumulate(
- const py::object& params,
-    const std::vector<ImageEvaluation>& evalutations);
-
-} // namespace COCOeval
diff --git a/spaces/ECCV2022/storydalle/app.py b/spaces/ECCV2022/storydalle/app.py
deleted file mode 100644
index 5a1e6a485d4c3c1d60cc7e5b46193e8dcb6f5b13..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/storydalle/app.py
+++ /dev/null
@@ -1,457 +0,0 @@
-import os, sys, torch
-import gradio as gr
-import torchvision.utils as vutils
-import torchvision.transforms as transforms
-from dalle.models import StoryDalle
-import argparse
-from PIL import Image
-from torchvision.utils import save_image
-import tensorflow as tf
-import tensorflow_hub as hub
-import gdown
-from allennlp.predictors.predictor import Predictor
-import random
-
-torch.set_grad_enabled(False)
-tf.config.set_visible_devices([], 'GPU')  # setting TensorFlow's GPU visibility to none to constrain the embedding model to the CPU
-
-source_frame_paths = {
- 'Pororo': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_2/Pororo_ENGLISH1_2_ep6/12.png',
- 'Loopy': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/26.png',
- 'Crong': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/10.png',
- 'Poby': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep9/34.png',
- 'Eddy': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/46.png',
- 'Petty': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH2_1/Pororo_ENGLISH2_1_ep1/34.png',
- 'Tongtong': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep7/8.png',
- 'Rody': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep6/66.png',
- 'Harry': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep7/39.png',
-}
-
-
-def get_span_words(span, document):
- return ' '.join(document[span[0]:span[1]+1])
-
-
-def print_clusters(prediction):
- document, clusters = prediction['document'], prediction['clusters']
- for cluster in clusters:
- print(get_span_words(cluster[0], document) + ': ', end='')
- print(f"[{'; '.join([get_span_words(span, document) for span in cluster])}]")
-
-
-def resolve_coref(captions, captions_mask, coref_predictor):
- sent_counts = []
- doc = ''
- for cap, mask in zip(captions, captions_mask):
- if mask == 0:
- sent_counts.append(0)
- else:
- print(cap)
- count = len([c.strip() for c in cap.split('.') if c.strip()])
- sent_counts.append(count)
- doc += cap + ' '
-
- # print(doc)
-
- doc = doc.strip()
- resolved_doc = coref_predictor.coref_resolved(doc)
- # print(resolved_doc)
- # print(sent_counts)
-
- sents = resolved_doc.split('. ')
- resolved_captions = []
- for i, (count, mask) in enumerate(zip(sent_counts, captions_mask)):
- if mask == 0:
- resolved_captions.append('')
- else:
- new_cap = '. '.join(sents[sum(sent_counts[:i]):sum(sent_counts[:i]) + count])
- new_cap = new_cap.strip()
- if new_cap[-1] not in ['!', '?', '.']:
- new_cap += '.'
- resolved_captions.append(new_cap)
-
- return resolved_captions
-
-
-def inverse_normalize(tensor, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
- mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
- std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
- if mean.ndim == 1:
- mean = mean.view(-1, 1, 1)
- if std.ndim == 1:
- std = std.view(-1, 1, 1)
- tensor.mul_(std).add_(mean)
- return tensor
-
-
-def save_story_results(images, video_len=4, n_candidates=1, mask=None):
- # print("Generated Images shape: ", images.shape)
-
- if mask is None:
-        mask = [1 for _ in range(video_len)]
-
- all_images = []
- for i in range(len(images)): # batch size = 1
- for j in range(n_candidates):
- story = []
- for k, m in enumerate(mask):
- if m == 1:
- story.append(images[i][j][k])
- all_images.append(vutils.make_grid(story, sum(mask), padding=0))
- all_images = vutils.make_grid(all_images, 1, padding=20)
- print(all_images)
-
- pad_len = video_len - sum(mask)
-
- if pad_len > 0:
- pad_height = 256 * n_candidates + 20 * (n_candidates + 1)
- pad_width = 256 * pad_len + 20 * (pad_len)
- pad_image = torch.ones(3, pad_height, pad_width)
-
- print(all_images.shape, pad_image.shape)
- all_images = torch.cat([all_images[:, :, :-15], pad_image], dim=-1)
-
- print(all_images.shape)
- return all_images[:, 15:-15, 15:-15]
-
-
-def main(args):
-
- #device = 'cuda:0'
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- # device = torch.device('cpu')
-
- model_url = 'https://drive.google.com/u/1/uc?id=1KAXVtE8lEE2Yc83VY7w6ycOOMkdWbmJo&export=sharing'
-
- #model_url = 'https://drive.google.com/u/1/uc?id=1lJ6zMZ6qTvFu6H35-VEdFlN13MMslivJ&export=download'
- png_url = 'https://drive.google.com/u/1/uc?id=1C33A1IzSHDPoQ4QBsgFWbF61QWaAxRo_&export=download'
-
- #if not os.path.exists("./ckpt/25.pth"):
- gdown.download(model_url, quiet=False, use_cookies=False, output="./ckpt/25.pth")
- # print("Downloaded checkpoint")
- #assert os.path.exists("./ckpt/25.pth")
- gdown.download(png_url, quiet=True, use_cookies=False, output="demo_pororo_good.png")
-
- coref_model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
- coref_predictor = Predictor.from_path(coref_model_url)
-
- if args.debug:
- model = None
- embed = None
- else:
- model, config = StoryDalle.from_pretrained(args)
- model.tokenizer.add_tokens(['pororo', 'loopy', 'eddy', 'harry', 'poby', 'tongtong', 'crong', 'rody', 'petty'])
- model.eval()
- # split_model into CPU and GPU
- if args.split_memory:
- model.stage2.to(device=device)
- model.story_linear.to(device=device)
- model.story_block.to(device=device)
- else:
- model.to(device=device)
- if model.config.story.condition:
- for i in range(len(model.cross_attention_layers)):
- model.cross_attention_layers[i].to(device)
- print("Cross-attention layers are in cuda:", next(model.cross_attention_layers[0].parameters()).is_cuda)
-
- embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-large/5")
-
-
- valid_transform = transforms.Compose(
- [transforms.Resize(config.dataset.image_resolution),
- transforms.CenterCrop(config.dataset.image_resolution),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
- )
-
- print("Model is in ", model.device)
-
- #torch.save(model, './ckpt/checkpoint.pt')
- #sys.exit()
-
- def predict(caption_1, caption_2, caption_3, caption_4, source='Pororo', top_k=32, top_p=0.2, n_candidates=4,
- supercondition=False):
-
- if not args.debug:
-
- suffix = random.randint(0, 1000)
- img_file_path = "./demo/images/gradio_demo_pororo_%s.png" % suffix
- txt_file_path = "./demo/texts/gradio_demo_pororo_%s.txt" % suffix
-
- captions = [caption_1.strip(), caption_2.strip(), caption_3.strip(), caption_4.strip()]
- for i in range(len(captions)):
- if captions[i][-1] not in ['!', '?', '.']:
- captions[i] = captions[i] + '.'
- mask = [1 if caption != '' else 0 for caption in captions]
-
- with open(txt_file_path, 'w') as f:
- f.write('\n'.join(captions))
-
- print(captions, mask, source, n_candidates)
- captions = resolve_coref(captions, mask, coref_predictor)
- print(captions)
-
- for i, caption in enumerate(captions):
- if caption == "":
- captions[i] = "Pororo is reading a book." # filler for shorter captions
-
- tokens = [model.tokenizer.encode(caption) for caption in captions]
- texts = torch.stack([torch.LongTensor(token.ids) for token in tokens]).unsqueeze(0)
- sent_embeds = torch.tensor(embed(captions).numpy())
- src_image = valid_transform(Image.open('./demo/%s.png' % source).convert('RGB'))
-
- stories = []
- with torch.no_grad():
- for i in range(texts.shape[0]):
- candidates = []
- # for _ in range(n_candidates):
- # if args.split_memory: # if splitting model into CPU/GPU, send src_image from CPU memory
- # pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0),
- # sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p,
- # prompt=None, n_candidates=1, device=device).cpu()
- # else:
- # pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0).to(device),
- # sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p,
- # prompt=None, n_candidates=1).cpu()
- # print(pixels.shape)
- # candidates.append(pixels.squeeze())
- # stories.append(torch.stack(candidates))
- #with torch.cuda.amp.autocast():
-
- pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0).to(device),
- sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p,
- prompt=None, n_candidates=n_candidates).cpu()
- stories.append(pixels)
- img = save_story_results(stories, video_len=4, n_candidates=n_candidates, mask=mask)
- save_image(img, img_file_path, normalize=True)
-
- else:
- img_file_path = "gradio_demo_pororo.png"
-
- return img_file_path
-
- with gr.Blocks(css='#output {width:750px; height:750px; float:left;}') as demo:
- gr.Markdown('''
-
-    StoryDALL-E: Adapting Pretrained Text-to-Image Transformers for Story Continuation. Adyasha Maharana, Darryl Hannan and Mohit Bansal (UNC Chapel Hill). Published at ECCV 2022.
-
- StoryDALL-E \[1\] is a model trained for the task of Story Visualization \[2\].
- The model receives a sequence of captions as input and generates a corresponding sequence of images which form a visual story depicting the narrative in the captions.
- We modify this task to enable the model to receive an initial scene as input, which can be used as a cue for the setting of the story and also for generating unseen or low-resource visual elements. We refer to this task as Story Continuation \[1\].
- StoryDALL-E is based on the [dalle](https://github.com/kakaobrain/minDALL-E) model.
- **This model has been developed for academic purposes only.**
-
- \[[Paper](http://arxiv.org/abs/2209.06192)\] \[[Code](https://github.com/adymaharana/storydalle)\] \[[Model Card](https://github.com/adymaharana/storydalle/blob/main/MODEL_CARD.MD)\]
-
- ### Dataset
- This model has been trained using the Pororo story visualization dataset \[1\].
- The data was adapted from the popular cartoon series *Pororo the Little Penguin* and originally released by \[2\].
- The Pororo dataset contains 9 recurring characters, as shown below, in the decreasing order of their frequency in the training data.
-
-
-
- The training dataset contains nearly 10,000 samples in the training set. Most of the scenes occur in a snowy village, surrounded by hills, trees and houses. A few episodes are located in gardens or water bodies. All the captions are in the English language and predominantly contain verbs in the present tense. Additionally, the training of this model starts from the pretrained checkpoint of mega-dalle, which is trained on the Conceptual Captions dataset.
-
- ### Intended Use
- This model is intended for generating visual stories containing the 9 characters in the Pororo dataset. This version of the StoryDALL-E model is reasonable at the following scenarios:
- * Frames containing a single character.
- * Overtly visual actions such as *making cookies*, *walking*, *reading a book*, *sitting*.
- * Scenes taking place in snowy settings, indoors and gardens.
-    * Visual stories containing 1-3 characters across all frames.
- * Scene transitions e.g. from day to night.
- * Moderately capable of generating semantic concepts that do not appear in the story continuation dataset, such as *doughnut* and *lion*.
-
- Here are some examples of generated visual stories for the above-mentioned settings.
-
-
-
-
-
- Due to the small training dataset size for story visualization, the model has poor generalization to some unseen settings. The model struggles to generate coherent images in the following scenarios.
- * Multiple characters in a frame.
- * Non-visual actions such as *compliment*.
- * Characters that are infrequent in the training dataset e.g. Rody, Harry.
- * Background locations that are not found in the cartoon e.g. a busy city.
-    * Color-based descriptions for objects.
- * Completely new characters based on textual descriptions.
-
-    In the following demo, four or fewer captions can be entered in the `caption` text fields for the visual story.
- Select a `source` frame based on the character that is predominant in your visual story.
- `top_k` refers to the number of highest probability vocabulary tokens to keep for top-k-filtering.
- Only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
- Set `supercondition` to True to enable generation using a null hypothesis.
- Select between 1-4 `n_candidates` to generate a diverse set of stories for the given captions.
-
- Feel free to send feedback to adyasha@cs.unc.edu.
- ''')
-
- with gr.Row():
- with gr.Column():
- caption_1 = gr.Textbox(label="Caption 1", value='Pororo is reading a book.')
- caption_2 = gr.Textbox(label="Caption 2", value='Pororo is sleeping on the couch.')
- caption_3 = gr.Textbox(label="Caption 3", value='Pororo wakes up in the middle of the night in his bed.')
- caption_4 = gr.Textbox(label="Caption 4", value='Pororo is in his bedroom and looks terrified.')
- source = gr.Radio(["Pororo", "Loopy", "Crong", "Poby", "Eddy", "Petty", "Tongtong", "Rody", "Harry"],
- label="Source", value="Pororo")
- top_k = gr.Slider(16, 128, label="top_k", value=32)
- top_p = gr.Slider(0.01, 1.0, label="top_p", value=0.2)
- supercondition = gr.Checkbox(value=False, label='supercondition')
- n_candidates = gr.Dropdown([1, 2, 3, 4], value=4, label='n_candidates')
-
- with gr.Row():
- # clear_btn = gr.Button("Clear")
- submit_btn = gr.Button("Submit")
-
- with gr.Column():
- with gr.Row():
- frame_1_label = gr.Button("Frame 1")
- frame_2_label = gr.Button("Frame 2")
- frame_3_label = gr.Button("Frame 3")
- frame_4_label = gr.Button("Frame 4")
- # frame_1_label = gr.Label("Frame 1")
- # frame_2_label = gr.Label("Frame 2")
- # frame_3_label = gr.Label("Frame 3")
- # frame_4_label = gr.Label("Frame 4")
- output = gr.Image(label="", elem_id='output')
-
- submit_btn.click(fn=predict,
- inputs=[caption_1, caption_2, caption_3, caption_4, source, top_k, top_p, n_candidates,
- supercondition], outputs=output)
-
- gr.Markdown('''
- ### References
-
- \[1\] Maharana, Adyasha, et al. "StoryDALL-E: Adapting Pretrained Text-to-Image Transformers for Story Continuation." ECCV. 2022.
-
- \[2\] Li, Yitong, et al. "Storygan: A sequential conditional gan for story visualization." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019.
-
- \[3\] Kim, Kyung-Min, et al. "DeepStory: video story QA by deep embedded memory networks." Proceedings of the 26th International Joint Conference on Artificial Intelligence. 2017.
-
- \[4\] Sharma, Piyush, et al. "Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning." Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 2018.
- ''')
-
- demo.launch(share=False)
-
-
-if __name__ == "__main__":
-
- args_list = ['--model_name_or_path', './ckpt/25.pth',
- '--prefix_model_name_or_path', './1.3B/',
- '--dataset_name', 'pororo',
- '--tuning_mode', 'story',
- '--preseqlen', '32',
- '--condition',
- '--story_len', '4',
- '--sent_embed', '512',
- '--prefix_dropout', '0.2',
- '--data_dir', '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/',
- '--dataloader_num_workers', '1',
- '--do_eval',
- '--per_gpu_eval_batch_size', '16',
- '--mode', 'story']
-
- parser = argparse.ArgumentParser(description='arguments for training/evaluating prefix-tuning DALLE')
-
- # Model Arguments
- parser.add_argument('--model_name_or_path', type=str, default=None,
- help='The model checkpoint for weights initialization.')
- parser.add_argument('--prefix_model_name_or_path', type=str, default=None,
- help='The prefix model checkpoint for weights initialization.')
- parser.add_argument('--prefix_mode', type=str, default='activation', help='activation or embedding')
- parser.add_argument('--preseqlen', type=int, default=0, help='how many tokens of prefix should we include.')
- parser.add_argument('--optim_prefix', action="store_true",
- help='set to True if optimizing prefix directly; no if through amortized function')
- parser.add_argument('--tuning_mode', type=str, default='prefixtune', help='prefixtune or finetune')
- parser.add_argument('--top_k_layers', type=int, default=2,
- help='In finetuning setting, if we only tune the top k layers.')
- parser.add_argument('--parameterize_mode', type=str, default='mlp',
- help="mlp or emb to parametrize when we optimize for the embeddings.")
- parser.add_argument('--prefix_dropout', type=float, default=0.0, help='dropout rate for the prefix tuning model.')
- parser.add_argument('--teacher_dropout', type=float, default=0.0, help='dropout rate for the teacher model.')
- parser.add_argument('--init_random', action="store_true", help="set True if initializing random embeddings")
- parser.add_argument('--init_shallow', action="store_true", help="set True if not using reparameterization")
- parser.add_argument('--init_shallow_word', type=bool, default=False,
- help="set True if init_shallow and specify words")
- parser.add_argument('--replay_buffer', action="store_true", help="set True if using replay buffer in training")
- parser.add_argument('--gumbel', action="store_true", help="set True if using the gumbel softmax in training")
- parser.add_argument('--hidden_dim_prefix', type=float, default=512, help="hidden dim of MLP for generating prefix?")
-
- # Data Arguments
- parser.add_argument('--dataset_name', type=str, default='pororo', help="dataset name")
- parser.add_argument('--data_dir', type=str, default=None, help="Path to data directory")
- parser.add_argument('--lowdata_token', type=str, default='story',
- help="The token to be prepended at initialization time.")
- parser.add_argument('--use_lowdata_token', type=bool, default=True,
- help="Whether we should use the lowdata token for prefix-tuning")
- parser.add_argument('--train_embeddings', action="store_true", help="Whether to train word embeddings")
- parser.add_argument('--train_max_target_length', type=int, default=100,
- help='the max target length for training data.')
- parser.add_argument('--val_max_target_length', type=int, default=100, help='the max target length for dev data.')
- parser.add_argument('--dataloader_num_workers', type=int, default=8, help='number of workers when loading data')
-
- # new arguments for story
- parser.add_argument('--prompt', action="store_true", help="set True if using prompts in StoryDALLE")
-    parser.add_argument('--story_len', type=int, default=4, help='number of frames in each visual story.')
-    parser.add_argument('--sent_embed', type=int, default=384, help='dimensionality of the caption sentence embeddings.')
- parser.add_argument('--condition', action="store_true", help="set True if using prompts in StoryDALLE")
- parser.add_argument('--clip_embed', action="store_true", help="set True if using prompts in StoryDALLE")
-
- # Training Arguments
- parser.add_argument('--output_dir', type=str, default=None, help="Path to data directory")
- parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
- parser.add_argument("--do_eval", action="store_true", help="Whether to run evaluation.")
- parser.add_argument("--do_test", action="store_true", help="Whether to run test.")
- parser.add_argument('--seed', type=int, default=42, help='seed for reproducibility')
- parser.add_argument("--overwrite_output_dir", action="store_true", help="Whether to overwrite output dir.")
- parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
- parser.add_argument(
- "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
-
-    parser.add_argument('--mode', type=str, default='val', help="val or test.")
-
- parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
-    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
- parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument(
- "--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform."
- )
- parser.add_argument(
- "--max_steps",
- default=-1,
- type=int,
- help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
- )
- parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
- parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
- parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
- parser.add_argument(
- "--eval_all_checkpoints",
- action="store_true",
-        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
- )
- parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
- parser.add_argument(
- "--fp16",
- action="store_true",
- help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
- )
-
- parser.add_argument("--debug", action="store_true", help="Whether to debug the demo.")
- parser.add_argument("--split_memory", action="store_true", help="Whether to split the model into GPU & CPU in the demo.")
-
- args = parser.parse_args(args_list)
-
- main(args)
-
-
-
-
-
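The demo text above explains the `top_k` and `top_p` sliders. The snippet below is not the StoryDALL-E sampling code; it is a generic, minimal sketch of how top-k and nucleus (top-p) filtering are commonly applied to a 1-D logits vector before multinomial sampling.

```python
import torch
import torch.nn.functional as F

def filter_logits(logits: torch.Tensor, top_k: int = 32, top_p: float = 0.2) -> torch.Tensor:
    """Mask all but the top_k tokens, then all tokens outside the top_p nucleus."""
    logits = logits.clone()
    if top_k > 0:
        kth_best = torch.topk(logits, top_k).values[-1]
        logits[logits < kth_best] = float("-inf")
    if top_p < 1.0:
        sorted_logits, sorted_idx = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        remove = cum_probs > top_p
        remove[1:] = remove[:-1].clone()  # shift right so the token crossing top_p is kept
        remove[0] = False                 # always keep the single most likely token
        logits[sorted_idx[remove]] = float("-inf")
    return logits

# One sampling step over a placeholder vocabulary of 16384 image tokens.
logits = torch.randn(16384)
next_token = torch.multinomial(F.softmax(filter_logits(logits), dim=-1), num_samples=1)
print(int(next_token))
```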
diff --git a/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/archs/__init__.py b/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/archs/__init__.py
deleted file mode 100644
index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000
--- a/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/archs/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-from basicsr.utils import scandir
-from os import path as osp
-
-# automatically scan and import arch modules for registry
-# scan all the files that end with '_arch.py' under the archs folder
-arch_folder = osp.dirname(osp.abspath(__file__))
-arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
-# import all the arch modules
-_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames]
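The loop above only imports every `*_arch.py` file so that its classes can self-register. For context, a hypothetical minimal arch module could look like the sketch below; the file name and class are invented, and the only assumption about basicsr is its `ARCH_REGISTRY.register()` decorator.

```python
# realesrgan/archs/identity_arch.py  (hypothetical example module)
import torch.nn as nn
from basicsr.utils.registry import ARCH_REGISTRY


@ARCH_REGISTRY.register()
class IdentityArch(nn.Module):
    """A do-nothing network; once imported it can be built from a config via type: IdentityArch."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
```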
diff --git a/spaces/Eddycrack864/Applio-Inference/lib/infer_pack/models_onnx.py b/spaces/Eddycrack864/Applio-Inference/lib/infer_pack/models_onnx.py
deleted file mode 100644
index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/lib/infer_pack/models_onnx.py
+++ /dev/null
@@ -1,819 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from lib.infer_pack import modules
-from lib.infer_pack import attentions
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from lib.infer_pack.commons import init_weights
-import numpy as np
-from lib.infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
-        if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
-        if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
-    sine_amp: amplitude of sine waveform (default 0.1)
-    noise_std: std of Gaussian noise (default 0.003)
-    voiced_threshold: F0 threshold for U/V classification (default 0)
-    flag_for_pulse: whether this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  # % 1 means the n_har products cannot be optimized in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  (taking % 1 here would mean the following cumsum can no longer be optimized)
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
-            ).transpose(2, 1)
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
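For orientation, a minimal smoke test of the sine source above (shapes only; the sampling rate, F0 value and upsampling factor `upp` are illustrative, not taken from any training config):

```python
# Hypothetical SineGen smoke test; all values are made up for illustration.
import torch

sine_gen = SineGen(samp_rate=40000, harmonic_num=0)
f0 = torch.full((1, 100), 220.0)   # (batch, frames) F0 contour in Hz; 0 marks unvoiced frames
upp = 400                          # assumed frame-to-sample upsampling factor
sine_waves, uv, noise = sine_gen(f0, upp)
print(sine_waves.shape, uv.shape)  # torch.Size([1, 40000, 1]) torch.Size([1, 40000, 1])
```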
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
-        note that the amplitude of noise in unvoiced segments is determined
-        by sine_amp
-    voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
-    noise_source (batchsize, length, 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMsNSFsidM(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- version,
- **kwargs
- ):
- super().__init__()
-        if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- if version == "v1":
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- else:
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- self.speaker_map = None
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def construct_spkmixmap(self, n_speaker):
- self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
- for i in range(n_speaker):
- self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
- self.speaker_map = self.speaker_map.unsqueeze(0)
-
- def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
- if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
- g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
- g = g * self.speaker_map # [N, S, B, 1, H]
- g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
- g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
- else:
- g = g.unsqueeze(0)
- g = self.emb_g(g).transpose(1, 2)
-
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
-        y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
-        y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
diff --git a/spaces/Ekimetrics/Biomap/biomap/model.py b/spaces/Ekimetrics/Biomap/biomap/model.py
deleted file mode 100644
index 2aad5b97311be96a44117f365a8af7e3d3add619..0000000000000000000000000000000000000000
--- a/spaces/Ekimetrics/Biomap/biomap/model.py
+++ /dev/null
@@ -1,454 +0,0 @@
-from utils import *
-from modules import *
-from data import *
-import torch.nn.functional as F
-import pytorch_lightning as pl
-import torch.multiprocessing
-import seaborn as sns
-import unet
-
-class LitUnsupervisedSegmenter(pl.LightningModule):
- def __init__(self, n_classes, cfg):
- super().__init__()
- self.name = "LitUnsupervisedSegmenter"
- self.cfg = cfg
- self.n_classes = n_classes
-
- if not cfg.continuous:
- dim = n_classes
- else:
- dim = cfg.dim
-
- data_dir = join(cfg.output_root, "data")
- if cfg.arch == "feature-pyramid":
- cut_model = load_model(cfg.model_type, data_dir).cuda()
- self.net = FeaturePyramidNet(
- cfg.granularity, cut_model, dim, cfg.continuous
- )
- elif cfg.arch == "dino":
- self.net = DinoFeaturizer(dim, cfg)
- else:
- raise ValueError("Unknown arch {}".format(cfg.arch))
-
- self.train_cluster_probe = ClusterLookup(dim, n_classes)
-
- self.cluster_probe = ClusterLookup(dim, n_classes + cfg.extra_clusters)
- # self.linear_probe = nn.Conv2d(dim, n_classes, (1, 1))
- # self.linear_probe = nn.Sequential(OrderedDict([
- # ('conv1', nn.Conv2d(dim, 2*n_classes, (7, 7), padding='same')),
- # ('relu1', nn.ReLU()),
- # ('conv2', nn.Conv2d(2*n_classes, n_classes, (3, 3), padding='same'))
- # ]))
- self.linear_probe = unet.AuxUNet(
- enc_chs=(3, 32, 64, 128, 256),
- dec_chs=(256, 128, 64, 32),
- aux_ch=70,
- num_class=n_classes,
- )
-
- self.decoder = nn.Conv2d(dim, self.net.n_feats, (1, 1))
-
- self.cluster_metrics = UnsupervisedMetrics(
- "test/cluster/", n_classes, cfg.extra_clusters, True
- )
- self.linear_metrics = UnsupervisedMetrics("test/linear/", n_classes, 0, False)
-
- self.test_cluster_metrics = UnsupervisedMetrics(
- "final/cluster/", n_classes, cfg.extra_clusters, True
- )
- self.test_linear_metrics = UnsupervisedMetrics(
- "final/linear/", n_classes, 0, False
- )
-
- self.linear_probe_loss_fn = torch.nn.CrossEntropyLoss()
- self.crf_loss_fn = ContrastiveCRFLoss(
- cfg.crf_samples, cfg.alpha, cfg.beta, cfg.gamma, cfg.w1, cfg.w2, cfg.shift
- )
-
- self.contrastive_corr_loss_fn = ContrastiveCorrelationLoss(cfg)
- for p in self.contrastive_corr_loss_fn.parameters():
- p.requires_grad = False
-
- self.automatic_optimization = False
-
- if self.cfg.dataset_name.startswith("cityscapes"):
- self.label_cmap = create_cityscapes_colormap()
- else:
- self.label_cmap = create_pascal_label_colormap()
-
- self.val_steps = 0
- self.save_hyperparameters()
-
- def forward(self, x):
- # in lightning, forward defines the prediction/inference actions
- return self.net(x)[1]
-
- def training_step(self, batch, batch_idx):
-        # training_step defines the train loop.
-        # It is independent of forward.
- net_optim, linear_probe_optim, cluster_probe_optim = self.optimizers()
-
- net_optim.zero_grad()
- linear_probe_optim.zero_grad()
- cluster_probe_optim.zero_grad()
-
- with torch.no_grad():
- ind = batch["ind"]
- img = batch["img"]
- img_aug = batch["img_aug"]
- coord_aug = batch["coord_aug"]
- img_pos = batch["img_pos"]
- label = batch["label"]
- label_pos = batch["label_pos"]
-
- feats, code = self.net(img)
- if self.cfg.correspondence_weight > 0:
- feats_pos, code_pos = self.net(img_pos)
- log_args = dict(sync_dist=False, rank_zero_only=True)
-
- if self.cfg.use_true_labels:
- signal = one_hot_feats(label + 1, self.n_classes + 1)
- signal_pos = one_hot_feats(label_pos + 1, self.n_classes + 1)
- else:
- signal = feats
- signal_pos = feats_pos
-
- loss = 0
-
- should_log_hist = (
- (self.cfg.hist_freq is not None)
- and (self.global_step % self.cfg.hist_freq == 0)
- and (self.global_step > 0)
- )
- if self.cfg.use_salience:
- salience = batch["mask"].to(torch.float32).squeeze(1)
- salience_pos = batch["mask_pos"].to(torch.float32).squeeze(1)
- else:
- salience = None
- salience_pos = None
-
- if self.cfg.correspondence_weight > 0:
- (
- pos_intra_loss,
- pos_intra_cd,
- pos_inter_loss,
- pos_inter_cd,
- neg_inter_loss,
- neg_inter_cd,
- ) = self.contrastive_corr_loss_fn(
- signal,
- signal_pos,
- salience,
- salience_pos,
- code,
- code_pos,
- )
-
- if should_log_hist:
- self.logger.experiment.add_histogram(
- "intra_cd", pos_intra_cd, self.global_step
- )
- self.logger.experiment.add_histogram(
- "inter_cd", pos_inter_cd, self.global_step
- )
- self.logger.experiment.add_histogram(
- "neg_cd", neg_inter_cd, self.global_step
- )
- neg_inter_loss = neg_inter_loss.mean()
- pos_intra_loss = pos_intra_loss.mean()
- pos_inter_loss = pos_inter_loss.mean()
- self.log("loss/pos_intra", pos_intra_loss, **log_args)
- self.log("loss/pos_inter", pos_inter_loss, **log_args)
- self.log("loss/neg_inter", neg_inter_loss, **log_args)
- self.log("cd/pos_intra", pos_intra_cd.mean(), **log_args)
- self.log("cd/pos_inter", pos_inter_cd.mean(), **log_args)
- self.log("cd/neg_inter", neg_inter_cd.mean(), **log_args)
-
- loss += (
- self.cfg.pos_inter_weight * pos_inter_loss
- + self.cfg.pos_intra_weight * pos_intra_loss
- + self.cfg.neg_inter_weight * neg_inter_loss
- ) * self.cfg.correspondence_weight
-
- if self.cfg.rec_weight > 0:
- rec_feats = self.decoder(code)
- rec_loss = -(norm(rec_feats) * norm(feats)).sum(1).mean()
- self.log("loss/rec", rec_loss, **log_args)
- loss += self.cfg.rec_weight * rec_loss
-
- if self.cfg.aug_alignment_weight > 0:
- orig_feats_aug, orig_code_aug = self.net(img_aug)
- downsampled_coord_aug = resize(
- coord_aug.permute(0, 3, 1, 2), orig_code_aug.shape[2]
- ).permute(0, 2, 3, 1)
- aug_alignment = -torch.einsum(
- "bkhw,bkhw->bhw",
- norm(sample(code, downsampled_coord_aug)),
- norm(orig_code_aug),
- ).mean()
- self.log("loss/aug_alignment", aug_alignment, **log_args)
- loss += self.cfg.aug_alignment_weight * aug_alignment
-
- if self.cfg.crf_weight > 0:
- crf = self.crf_loss_fn(resize(img, 56), norm(resize(code, 56))).mean()
- self.log("loss/crf", crf, **log_args)
- loss += self.cfg.crf_weight * crf
-
- flat_label = label.reshape(-1)
- mask = (flat_label >= 0) & (flat_label < self.n_classes)
-
- detached_code = torch.clone(code.detach())
-
- # pdb.set_trace()
-
- linear_logits = self.linear_probe(img, detached_code)
- linear_logits = F.interpolate(
- linear_logits, label.shape[-2:], mode="bilinear", align_corners=False
- )
- linear_logits = linear_logits.permute(0, 2, 3, 1).reshape(-1, self.n_classes)
- linear_loss = self.linear_probe_loss_fn(
- linear_logits[mask], flat_label[mask]
- ).mean()
- loss += linear_loss
- self.log("loss/linear", linear_loss, **log_args)
-
- cluster_loss, cluster_probs = self.cluster_probe(detached_code, None)
- loss += cluster_loss
- self.log("loss/cluster", cluster_loss, **log_args)
- self.log("loss/total", loss, **log_args)
-
- self.manual_backward(loss)
- net_optim.step()
- cluster_probe_optim.step()
- linear_probe_optim.step()
-
- if (
- self.cfg.reset_probe_steps is not None
- and self.global_step == self.cfg.reset_probe_steps
- ):
- print("RESETTING PROBES")
- self.linear_probe.reset_parameters()
- self.cluster_probe.reset_parameters()
- self.trainer.optimizers[1] = torch.optim.Adam(
- list(self.linear_probe.parameters()), lr=5e-3
- )
- self.trainer.optimizers[2] = torch.optim.Adam(
- list(self.cluster_probe.parameters()), lr=5e-3
- )
-
- if self.global_step % 2000 == 0 and self.global_step > 0:
- print("RESETTING TFEVENT FILE")
- # Make a new tfevent file
- self.logger.experiment.close()
- self.logger.experiment._get_file_writer()
-
- return loss
-
- def on_train_start(self):
- tb_metrics = {**self.linear_metrics.compute(), **self.cluster_metrics.compute()}
- self.logger.log_hyperparams(self.cfg, tb_metrics)
-
- def validation_step(self, batch, batch_idx):
- img = batch["img"]
- label = batch["label"]
- self.net.eval()
-
- with torch.no_grad():
- feats, code = self.net(img)
-
- # code = F.interpolate(code, label.shape[-2:], mode='bilinear', align_corners=False)
- # linear_preds = self.linear_probe(code)
- linear_preds = self.linear_probe(img, code)
- linear_preds = linear_preds.argmax(1)
- self.linear_metrics.update(linear_preds, label)
-
- code = F.interpolate(
- code, label.shape[-2:], mode="bilinear", align_corners=False
- )
- cluster_loss, cluster_preds = self.cluster_probe(code, None)
- cluster_preds = cluster_preds.argmax(1)
- self.cluster_metrics.update(cluster_preds, label)
-
- return {
- "img": img[: self.cfg.n_images].detach().cpu(),
- "linear_preds": linear_preds[: self.cfg.n_images].detach().cpu(),
- "cluster_preds": cluster_preds[: self.cfg.n_images].detach().cpu(),
- "label": label[: self.cfg.n_images].detach().cpu(),
- }
-
- def validation_epoch_end(self, outputs) -> None:
- super().validation_epoch_end(outputs)
- with torch.no_grad():
- tb_metrics = {
- **self.linear_metrics.compute(),
- **self.cluster_metrics.compute(),
- }
-
- if self.trainer.is_global_zero and not self.cfg.submitting_to_aml:
- # output_num = 0
- output_num = random.randint(0, len(outputs) - 1)
- output = {k: v.detach().cpu() for k, v in outputs[output_num].items()}
-
- # pdb.set_trace()
- alpha = 0.4
- n_rows = 6
- fig, ax = plt.subplots(
- n_rows,
- self.cfg.n_images,
- figsize=(self.cfg.n_images * 3, n_rows * 3),
- )
- for i in range(self.cfg.n_images):
- try:
- rbg_img = prep_for_plot(output["img"][i])
- true_label = output["label"].squeeze()[i]
- true_label[true_label == -1] = 7
- except:
- continue
- # ax[0, i].imshow(prep_for_plot(output["img"][i]))
- # ax[1, i].imshow(self.label_cmap[output["label"].squeeze()[i]])
- # ax[2, i].imshow(self.label_cmap[output["linear_preds"][i]])
- # ax[3, i].imshow(self.label_cmap[self.cluster_metrics.map_clusters(output["cluster_preds"][i])])
- ax[0, i].imshow(rbg_img)
-
- ax[1, i].imshow(rbg_img)
- ax[1, i].imshow(true_label, alpha=alpha, cmap=cmap, norm=norm)
-
- ax[2, i].imshow(rbg_img)
- pred_label = output["linear_preds"][i]
- ax[2, i].imshow(pred_label, alpha=alpha, cmap=cmap, norm=norm)
-
- ax[3, i].imshow(rbg_img)
- retouched_label = retouch_label(pred_label.numpy(), true_label)
- ax[3, i].imshow(retouched_label, alpha=alpha, cmap=cmap, norm=norm)
-
- ax[4, i].imshow(rbg_img)
- pred_label = self.cluster_metrics.map_clusters(
- output["cluster_preds"][i]
- )
- ax[4, i].imshow(pred_label, alpha=alpha, cmap=cmap, norm=norm)
- # ax[3, i].imshow(map_clusters_with_label(true_label, pred_label), alpha=0.5, cmap=cmap, norm=norm)
-
- ax[5, i].imshow(rbg_img)
- retouched_label = retouch_label(pred_label.numpy(), true_label)
- ax[5, i].imshow(retouched_label, alpha=alpha, cmap=cmap, norm=norm)
-
- ax[0, 0].set_ylabel("Image", fontsize=16)
- ax[1, 0].set_ylabel("Label", fontsize=16)
- ax[2, 0].set_ylabel("UNet Probe", fontsize=16)
- ax[3, 0].set_ylabel("Retouched UNet Probe", fontsize=16)
- ax[4, 0].set_ylabel("Cluster Probe", fontsize=16)
- ax[5, 0].set_ylabel("Retouched cluster Probe", fontsize=16)
- remove_axes(ax)
- plt.tight_layout()
- add_plot(self.logger.experiment, "plot_labels", self.global_step)
-
- if self.cfg.has_labels:
- fig = plt.figure(figsize=(13, 10))
- ax = fig.gca()
- hist = (
- self.cluster_metrics.histogram.detach().cpu().to(torch.float32)
- )
- hist /= torch.clamp_min(hist.sum(dim=0, keepdim=True), 1)
- sns.heatmap(hist.t(), annot=False, fmt="g", ax=ax, cmap="Blues")
- ax.set_xlabel("Predicted labels")
- ax.set_ylabel("True labels")
- names = get_class_labels(self.cfg.dataset_name)
- if self.cfg.extra_clusters:
- names = names + ["Extra"]
- ax.set_xticks(np.arange(0, len(names)) + 0.5)
- ax.set_yticks(np.arange(0, len(names)) + 0.5)
- ax.xaxis.tick_top()
- ax.xaxis.set_ticklabels(names, fontsize=14)
- ax.yaxis.set_ticklabels(names, fontsize=14)
- colors = [self.label_cmap[i] / 255.0 for i in range(len(names))]
- [
- t.set_color(colors[i])
- for i, t in enumerate(ax.xaxis.get_ticklabels())
- ]
- [
- t.set_color(colors[i])
- for i, t in enumerate(ax.yaxis.get_ticklabels())
- ]
- # ax.yaxis.get_ticklabels()[-1].set_color(self.label_cmap[0] / 255.0)
- # ax.xaxis.get_ticklabels()[-1].set_color(self.label_cmap[0] / 255.0)
- plt.xticks(rotation=90)
- plt.yticks(rotation=0)
- ax.vlines(
- np.arange(0, len(names) + 1),
- color=[0.5, 0.5, 0.5],
- *ax.get_xlim()
- )
- ax.hlines(
- np.arange(0, len(names) + 1),
- color=[0.5, 0.5, 0.5],
- *ax.get_ylim()
- )
- plt.tight_layout()
- add_plot(self.logger.experiment, "conf_matrix", self.global_step)
-
- all_bars = torch.cat(
- [
- self.cluster_metrics.histogram.sum(0).cpu(),
- self.cluster_metrics.histogram.sum(1).cpu(),
- ],
- axis=0,
- )
- ymin = max(all_bars.min() * 0.8, 1)
- ymax = all_bars.max() * 1.2
-
- fig, ax = plt.subplots(1, 2, figsize=(2 * 5, 1 * 4))
- ax[0].bar(
- range(self.n_classes + self.cfg.extra_clusters),
- self.cluster_metrics.histogram.sum(0).cpu(),
- tick_label=names,
- color=colors,
- )
- ax[0].set_ylim(ymin, ymax)
- ax[0].set_title("Label Frequency")
- ax[0].set_yscale("log")
- ax[0].tick_params(axis="x", labelrotation=90)
-
- ax[1].bar(
- range(self.n_classes + self.cfg.extra_clusters),
- self.cluster_metrics.histogram.sum(1).cpu(),
- tick_label=names,
- color=colors,
- )
- ax[1].set_ylim(ymin, ymax)
- ax[1].set_title("Cluster Frequency")
- ax[1].set_yscale("log")
- ax[1].tick_params(axis="x", labelrotation=90)
-
- plt.tight_layout()
- add_plot(
- self.logger.experiment, "label frequency", self.global_step
- )
-
- if self.global_step > 2:
- self.log_dict(tb_metrics)
-
- if self.trainer.is_global_zero and self.cfg.azureml_logging:
- from azureml.core.run import Run
-
- run_logger = Run.get_context()
- for metric, value in tb_metrics.items():
- run_logger.log(metric, value)
-
- self.linear_metrics.reset()
- self.cluster_metrics.reset()
-
- def configure_optimizers(self):
- main_params = list(self.net.parameters())
-
- if self.cfg.rec_weight > 0:
- main_params.extend(self.decoder.parameters())
-
- net_optim = torch.optim.Adam(main_params, lr=self.cfg.lr)
- linear_probe_optim = torch.optim.Adam(
- list(self.linear_probe.parameters()), lr=5e-3
- )
- cluster_probe_optim = torch.optim.Adam(
- list(self.cluster_probe.parameters()), lr=5e-3
- )
-
- return net_optim, linear_probe_optim, cluster_probe_optim
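Because `automatic_optimization` is turned off in the constructor above, the three optimizers returned here are stepped by hand inside `training_step`. A stripped-down sketch of that Lightning pattern, with placeholder layers rather than the segmenter itself:

```python
# Minimal sketch of manual optimization with multiple optimizers in PyTorch Lightning.
# The tiny linear layers are placeholders, not the model defined above.
import torch
import pytorch_lightning as pl


class TwoOptimizerModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.automatic_optimization = False
        self.net = torch.nn.Linear(8, 4)
        self.probe = torch.nn.Linear(4, 2)

    def training_step(self, batch, batch_idx):
        opt_net, opt_probe = self.optimizers()
        opt_net.zero_grad()
        opt_probe.zero_grad()
        x, y = batch
        loss = torch.nn.functional.cross_entropy(self.probe(self.net(x)), y)
        self.manual_backward(loss)  # replaces loss.backward() under manual optimization
        opt_net.step()
        opt_probe.step()
        return loss

    def configure_optimizers(self):
        return (
            torch.optim.Adam(self.net.parameters(), lr=1e-3),
            torch.optim.Adam(self.probe.parameters(), lr=5e-3),
        )
```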
diff --git a/spaces/Elegbede/Time_Series_Prediction/README.md b/spaces/Elegbede/Time_Series_Prediction/README.md
deleted file mode 100644
index e1216a4ea960241ed0ea0847f4ff9dc86e66f2bb..0000000000000000000000000000000000000000
--- a/spaces/Elegbede/Time_Series_Prediction/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Time Series Prediction
-emoji: 📈
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/EnigmaOfTheWorld/ChanakyaNeeti/README.md b/spaces/EnigmaOfTheWorld/ChanakyaNeeti/README.md
deleted file mode 100644
index 75b0f3e6f350cc4daf6e2629c210e5efcd983c6b..0000000000000000000000000000000000000000
--- a/spaces/EnigmaOfTheWorld/ChanakyaNeeti/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ChanakyaNeeti
-emoji: 🦀
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Fr33d0m21/google-flan-t5-xxl/README.md b/spaces/Fr33d0m21/google-flan-t5-xxl/README.md
deleted file mode 100644
index b91d3e33712f15c062b04616d20f4b727acfd6b1..0000000000000000000000000000000000000000
--- a/spaces/Fr33d0m21/google-flan-t5-xxl/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Google Flan T5 Xxl
-emoji: 💻
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/GastonMazzei/escher-inpaint-project/app.py b/spaces/GastonMazzei/escher-inpaint-project/app.py
deleted file mode 100644
index 86e3aa55505af3943201a8e262c32ba0e511dac5..0000000000000000000000000000000000000000
--- a/spaces/GastonMazzei/escher-inpaint-project/app.py
+++ /dev/null
@@ -1,292 +0,0 @@
-import subprocess, os, time
-subprocess.run('pip install -e .', shell=True)
-
-print("Installed the repo!")
-
-# GLIDE imports
-from typing import Tuple
-
-from IPython.display import display
-from PIL import Image
-import numpy as np
-import torch as th
-import torch.nn.functional as F
-
-from glide_text2im.download import load_checkpoint
-from glide_text2im.model_creation import (
- create_model_and_diffusion,
- model_and_diffusion_defaults,
- model_and_diffusion_defaults_upsampler
-)
-
-# gradio app imports
-import gradio as gr
-
-# Load images!
-IMAGE_0_ADDR = 'https://github.com/GastonMazzei/escher-project-website/blob/main/0.png?raw=true'
-IMAGE_0_MASK_ADDR = 'https://github.com/GastonMazzei/escher-project-website/blob/main/0-mask.png?raw=true'
-Oimg = gr.processing_utils.encode_url_or_file_to_base64(IMAGE_0_ADDR)
-Omask = gr.processing_utils.encode_url_or_file_to_base64(IMAGE_0_MASK_ADDR)
-Oimg = Image.open('0.png')
-Omask = Image.open('0-mask.png')
-
-
-from torchvision.transforms import ToTensor, ToPILImage
-image_to_tensor = ToTensor()
-tensor_to_image = ToPILImage()
-
-# This notebook supports both CPU and GPU.
-# On CPU, generating one sample may take on the order of 20 minutes.
-# On a GPU, it should be under a minute.
-
-has_cuda = th.cuda.is_available()
-device = th.device('cpu' if not has_cuda else 'cuda')
-
-# Implement a counter
-COUNTER = 0
-
-# Create base model.
-options = model_and_diffusion_defaults()
-options['inpaint'] = True
-options['use_fp16'] = has_cuda
-options['timestep_respacing'] = '100' # use 100 diffusion steps for fast sampling
-model, diffusion = create_model_and_diffusion(**options)
-model.eval()
-if has_cuda:
- model.convert_to_fp16()
-model.to(device)
-model.load_state_dict(load_checkpoint('base-inpaint', device))
-print('total base parameters', sum(x.numel() for x in model.parameters()))
-
-# Create upsampler model.
-options_up = model_and_diffusion_defaults_upsampler()
-options_up['inpaint'] = True
-options_up['use_fp16'] = has_cuda
-options_up['timestep_respacing'] = 'fast27' # use 27 diffusion steps for very fast sampling
-model_up, diffusion_up = create_model_and_diffusion(**options_up)
-model_up.eval()
-if has_cuda:
- model_up.convert_to_fp16()
-model_up.to(device)
-model_up.load_state_dict(load_checkpoint('upsample-inpaint', device))
-print('total upsampler parameters', sum(x.numel() for x in model_up.parameters()))
-
-# Sampling parameters
-batch_size = 1
-guidance_scale = 5.0
-
-# Tune this parameter to control the sharpness of 256x256 images.
-# A value of 1.0 is sharper, but sometimes results in grainy artifacts.
-upsample_temp = 0.997
-
-# Create a classifier-free guidance sampling function
-def model_fn(x_t, ts, **kwargs):
- half = x_t[: len(x_t) // 2]
- combined = th.cat([half, half], dim=0)
- model_out = model(combined, ts, **kwargs)
- eps, rest = model_out[:, :3], model_out[:, 3:]
- cond_eps, uncond_eps = th.split(eps, len(eps) // 2, dim=0)
- half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
- eps = th.cat([half_eps, half_eps], dim=0)
- return th.cat([eps, rest], dim=1)
-
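The formula in `model_fn` is standard classifier-free guidance: the unconditional noise estimate is pushed along the conditional direction by `guidance_scale`. A one-line numeric illustration with invented values:

```python
# Invented numbers, just to show what the guidance line computes.
import torch as th

guidance_scale = 5.0
cond_eps = th.tensor([0.2])
uncond_eps = th.tensor([0.1])
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
print(float(half_eps))  # 0.6: the conditional correction (0.1) is amplified five-fold
```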
-def denoised_fn(x_start):
- # Force the model to have the exact right x_start predictions
- # for the part of the image which is known.
- return (
- x_start * (1 - model_kwargs['inpaint_mask'])
- + model_kwargs['inpaint_image'] * model_kwargs['inpaint_mask']
- )
-
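`denoised_fn` enforces the inpainting constraint: wherever `inpaint_mask` is 1 the model's x_start prediction is overwritten with the known pixels, so only the masked-out (black) region is actually generated. A toy illustration of that blending rule with made-up tensors:

```python
# Toy tensors standing in for the real model outputs; shapes mimic (batch, channels, H, W).
import torch as th

known_image = th.zeros(1, 3, 4, 4)   # pretend source image in [-1, 1]
keep_mask = th.ones(1, 1, 4, 4)      # 1 = keep original pixel, 0 = inpaint
keep_mask[:, :, 1:3, 1:3] = 0        # the black square to be filled in
x_start_pred = th.randn(1, 3, 4, 4)  # pretend prediction at some diffusion step

blended = x_start_pred * (1 - keep_mask) + known_image * keep_mask
assert th.equal(blended[:, :, 0, 0], known_image[:, :, 0, 0])  # known pixels restored exactly
```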
-def show_images(batch: th.Tensor):
- """ Display a batch of images inline. """
- scaled = ((batch + 1)*127.5).round().clamp(0,255).to(th.uint8).cpu()
- reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3])
- return Image.fromarray(reshaped.numpy())
-
-def read_image(path: str, size: int = 256) -> Tuple[th.Tensor, th.Tensor]:
- pil_img = Image.open(path).convert('RGB')
- pil_img = pil_img.resize((size, size), resample=Image.BICUBIC)
- img = np.array(pil_img)
- return th.from_numpy(img)[None].permute(0, 3, 1, 2).float() / 127.5 - 1
-
-def pil_to_numpy(pil_img: Image) -> Tuple[th.Tensor, th.Tensor]:
- img = np.array(pil_img)
- return th.from_numpy(img)[None].permute(0, 3, 1, 2).float() / 127.5 - 1
-
-model_kwargs = dict()
-
-def inpaint(prompt, filename, mode):
-
-
- # A little test to see if we can record in memory :-)
- global Oimg, Omask
- if (int(mode) == 0):
- print(f'[I]: mode 0')
- content = "Nonexistent file"
- try:
- with open(f'{filename}.txt','r') as file:
- content = file.read()
- except: pass
- return content, Oimg
-
- elif (int(mode) == 1):
- time.sleep(80)
- global COUNTER
- COUNTER += 1
- print(f'[I]: mode 1')
- try:
- with open(f'{filename}.txt','w') as file:
- file.write(prompt)
- return f'Success', Oimg
- except: pass
- return f'Failure to write', Oimg
- # End of the little test
-
- # Set up the images
- Oimg, Omask = input_img, input_img_with_mask
-
- print(prompt)
-
- # Save as png for later mask detection :)
- input_img_256 = input_img.convert('RGB').resize((256, 256), resample=Image.BICUBIC)
- input_img_64 = input_img.convert('RGB').resize((64, 64), resample=Image.BICUBIC)
-
- # Source image we are inpainting
- source_image_256 = pil_to_numpy(input_img_256)
- source_image_64 = pil_to_numpy(input_img_64)
-
-    # Since gradio doesn't supply which pixels were drawn, we need to find them ourselves!
- # Assuming that all black pixels are meant for inpainting.
- input_img_with_mask_64 = input_img_with_mask.convert('L').resize((64, 64), resample=Image.BICUBIC)
- gray_scale_source_image = image_to_tensor(input_img_with_mask_64)
- source_mask_64 = (gray_scale_source_image!=0).float()
- source_mask_64_img = tensor_to_image(source_mask_64)
-
- # The mask should always be a boolean 64x64 mask, and then we
- # can upsample it for the second stage.
- source_mask_64 = source_mask_64.unsqueeze(0)
- source_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest')
-
-
- ##############################
- # Sample from the base model #
- ##############################
-
- # Create the text tokens to feed to the model.
- tokens = model.tokenizer.encode(prompt)
- tokens, mask = model.tokenizer.padded_tokens_and_mask(
- tokens, options['text_ctx']
- )
-
- # Create the classifier-free guidance tokens (empty)
- full_batch_size = batch_size * 2
- uncond_tokens, uncond_mask = model.tokenizer.padded_tokens_and_mask(
- [], options['text_ctx']
- )
-
- # Pack the tokens together into model kwargs.
- global model_kwargs
- model_kwargs = dict(
- tokens=th.tensor(
- [tokens] * batch_size + [uncond_tokens] * batch_size, device=device
- ),
- mask=th.tensor(
- [mask] * batch_size + [uncond_mask] * batch_size,
- dtype=th.bool,
- device=device,
- ),
-
- # Masked inpainting image
- inpaint_image=(source_image_64 * source_mask_64).repeat(full_batch_size, 1, 1, 1).to(device),
- inpaint_mask=source_mask_64.repeat(full_batch_size, 1, 1, 1).to(device),
- )
-
- # Sample from the base model.
- model.del_cache()
- samples = diffusion.p_sample_loop(
- model_fn,
- (full_batch_size, 3, options["image_size"], options["image_size"]),
- device=device,
- clip_denoised=True,
- progress=True,
- model_kwargs=model_kwargs,
- cond_fn=None,
- denoised_fn=denoised_fn,
- )[:batch_size]
- model.del_cache()
-
- ##############################
- # Upsample the 64x64 samples #
- ##############################
-
- tokens = model_up.tokenizer.encode(prompt)
- tokens, mask = model_up.tokenizer.padded_tokens_and_mask(
- tokens, options_up['text_ctx']
- )
-
- # Create the model conditioning dict.
- model_kwargs = dict(
- # Low-res image to upsample.
- low_res=((samples+1)*127.5).round()/127.5 - 1,
-
- # Text tokens
- tokens=th.tensor(
- [tokens] * batch_size, device=device
- ),
- mask=th.tensor(
- [mask] * batch_size,
- dtype=th.bool,
- device=device,
- ),
-
- # Masked inpainting image.
- inpaint_image=(source_image_256 * source_mask_256).repeat(batch_size, 1, 1, 1).to(device),
- inpaint_mask=source_mask_256.repeat(batch_size, 1, 1, 1).to(device),
- )
-
- # Sample from the base model.
- model_up.del_cache()
- up_shape = (batch_size, 3, options_up["image_size"], options_up["image_size"])
- up_samples = diffusion_up.p_sample_loop(
- model_up,
- up_shape,
- noise=th.randn(up_shape, device=device) * upsample_temp,
- device=device,
- clip_denoised=True,
- progress=True,
- model_kwargs=model_kwargs,
- cond_fn=None,
- denoised_fn=denoised_fn,
- )[:batch_size]
- model_up.del_cache()
-
- return source_mask_64_img, show_images(up_samples)
-
-gradio_inputs = [
- #gr.inputs.Image(type='pil',
- # label="Input Image"),
- # gr.inputs.Image(type='pil',
- # label="Input Image With Mask"),
- #Oimg,Omask,
- gr.inputs.Textbox(label='Conditional Text to Inpaint'),
- gr.inputs.Textbox(label='Filename'),
- gr.inputs.Textbox(label='Mode (0 to read, 1 to write)')]
-
-# gradio_outputs = [gr.outputs.Image(label='Auto-Detected Mask (From drawn black pixels)')]
-
-gradio_outputs = [gr.outputs.Textbox(label='Little test'), gr.outputs.Image(label='Inpainted Image')]
-
-
-examples = [['grass.png', 'grass_with_mask.png', 'a corgi in a field']]
-
-title = "GLIDE Inpaint"
-description = "[WARNING: Queue times may take 4-6 minutes per person if there's no GPU! If there is a GPU, it'll take around 60 seconds] Using GLIDE to inpaint black regions of an input image! Instructions: 1) For the 'Input Image', upload an image. 2) For the 'Input Image with Mask', draw a black-colored mask (either manually with something like Paint, or by using gradio's built-in image editor & add a black-colored shape) IT MUST BE BLACK COLOR, but doesn't have to be rectangular! This is because it auto-detects the mask based on 0 (black) pixel values! 3) For the Conditional Text, type something you'd like to see the black region get filled in with :)"
-article = "
-
- """)
-
- with gr.Tab("Description"):
-        gr.HTML("""
-            As many Text-to-Image Models as I can fit here
-            Suggest more up in the "Community" button
-        """)
-
- with gr.Tab("Tools"):
- with gr.Tab("View"):
- with gr.Row():
- with gr.Column(style="width=50%, height=70%"):
- gr.Pil(label="Crop")
- with gr.Column(style="width=50%, height=70%"):
- gr.Pil(label="Crop")
-
-
- with gr.Tab("Draw"):
- with gr.Column(style="width=50%, height=70%"):
- gr.Pil(label="Crop")
- with gr.Column(style="width=50%, height=70%"):
- gr.Pil(label="Draw")
-
-
- gr.ImagePaint(label="Draw")
-
- with gr.Tab("Text"):
- with gr.Row():
-
- with gr.Column(scale=50):
- gr.Textbox(label="", lines=8, interactive=True)
-
-
- with gr.Column(scale=50):
- gr.Textbox(label="", lines=8, interactive=True)
-
- with gr.Tab("Color Picker"):
- with gr.Row():
-
- with gr.Column(scale=50):
- gr.ColorPicker(label="Color", interactive=True)
-
-
- with gr.Column(scale=50):
- gr.ImagePaint(label="Draw", interactive=True)
- with gr.Row():
- with gr.Column(scale=100):
- magic1=gr.Textbox(lines=4)
- gr.HTML("""""")
- run=gr.Button("Generate Image")
- with gr.Row():
- with gr.Column(scale=100):
- #Model selection dropdown
- model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
- with gr.Row():
- with gr.Column(style="width=800px"):
- output1=gr.Image(label=(f"{current_model}"))
-
-
- with gr.Row():
- with gr.Column(scale=50):
- input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
- use_short=gr.Button("Use Short Prompt")
- see_prompts=gr.Button("Extend Idea")
-
-
- def short_prompt(inputs):
- return(inputs)
-
- model_name1.change(set_model,inputs=model_name1,outputs=[output1])
-
- run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
-
- use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
-
- see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
-
-myface.queue(concurrency_count=200)
-myface.launch(inline=True, show_api=False, max_threads=400)
\ No newline at end of file
diff --git a/spaces/Youssef-Okeil/ArchitectureClassifier/app.py b/spaces/Youssef-Okeil/ArchitectureClassifier/app.py
deleted file mode 100644
index 76bfc2db88cc1fec77d1f3c52549a28093ec9531..0000000000000000000000000000000000000000
--- a/spaces/Youssef-Okeil/ArchitectureClassifier/app.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from fastai.vision.all import *
-import gradio as gr
-
-
-model_inf=load_learner('export.pkl')
-
-categories=('Art Deco','Byzantine','Gothic','Neoclassical')
-
-def image_classifier(img):
- pred,pred_idx,probs = model_inf.predict(img)
- return dict(zip(categories,map(float,probs)))
-examples_arch=['ArtDeco.jpg','Byzantine.jpg','Gothic.jpg','Neoclassical.jpg']
-intrf=gr.Interface(fn=image_classifier, inputs=gr.inputs.Image(shape=(192, 192)), outputs=gr.outputs.Label(num_top_classes=4),examples=examples_arch)
-intrf.launch(inline=False)
\ No newline at end of file
diff --git a/spaces/Yuliang/ICON/apps/Normal.py b/spaces/Yuliang/ICON/apps/Normal.py
deleted file mode 100644
index 9eed9ab6d7d24d9efadc563c0f921a101fe1c37c..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ICON/apps/Normal.py
+++ /dev/null
@@ -1,213 +0,0 @@
-from lib.net import NormalNet
-from lib.common.train_util import *
-import logging
-import torch
-import numpy as np
-from torch import nn
-from skimage.transform import resize
-import pytorch_lightning as pl
-
-torch.backends.cudnn.benchmark = True
-
-logging.getLogger("lightning").setLevel(logging.ERROR)
-
-
-class Normal(pl.LightningModule):
- def __init__(self, cfg):
- super(Normal, self).__init__()
- self.cfg = cfg
- self.batch_size = self.cfg.batch_size
- self.lr_N = self.cfg.lr_N
-
- self.schedulers = []
-
- self.netG = NormalNet(self.cfg, error_term=nn.SmoothL1Loss())
-
- self.in_nml = [item[0] for item in cfg.net.in_nml]
-
- def get_progress_bar_dict(self):
- tqdm_dict = super().get_progress_bar_dict()
- if "v_num" in tqdm_dict:
- del tqdm_dict["v_num"]
- return tqdm_dict
-
- # Training related
- def configure_optimizers(self):
-
- # set optimizer
- weight_decay = self.cfg.weight_decay
- momentum = self.cfg.momentum
-
- optim_params_N_F = [
- {"params": self.netG.netF.parameters(), "lr": self.lr_N}]
- optim_params_N_B = [
- {"params": self.netG.netB.parameters(), "lr": self.lr_N}]
-
- optimizer_N_F = torch.optim.Adam(
- optim_params_N_F, lr=self.lr_N, weight_decay=weight_decay
- )
-
- optimizer_N_B = torch.optim.Adam(
- optim_params_N_B, lr=self.lr_N, weight_decay=weight_decay
- )
-
- scheduler_N_F = torch.optim.lr_scheduler.MultiStepLR(
- optimizer_N_F, milestones=self.cfg.schedule, gamma=self.cfg.gamma
- )
-
- scheduler_N_B = torch.optim.lr_scheduler.MultiStepLR(
- optimizer_N_B, milestones=self.cfg.schedule, gamma=self.cfg.gamma
- )
-
- self.schedulers = [scheduler_N_F, scheduler_N_B]
- optims = [optimizer_N_F, optimizer_N_B]
-
- return optims, self.schedulers
-
- def render_func(self, render_tensor):
-
- height = render_tensor["image"].shape[2]
- result_list = []
-
- for name in render_tensor.keys():
- result_list.append(
- resize(
- ((render_tensor[name].cpu().numpy()[0] + 1.0) / 2.0).transpose(
- 1, 2, 0
- ),
- (height, height),
- anti_aliasing=True,
- )
- )
- result_array = np.concatenate(result_list, axis=1)
-
- return result_array
-
- def training_step(self, batch, batch_idx, optimizer_idx):
-
- export_cfg(self.logger, self.cfg)
-
- # retrieve the data
- in_tensor = {}
- for name in self.in_nml:
- in_tensor[name] = batch[name]
-
- FB_tensor = {"normal_F": batch["normal_F"],
- "normal_B": batch["normal_B"]}
-
- self.netG.train()
-
- preds_F, preds_B = self.netG(in_tensor)
- error_NF, error_NB = self.netG.get_norm_error(
- preds_F, preds_B, FB_tensor)
-
- (opt_nf, opt_nb) = self.optimizers()
-
- opt_nf.zero_grad()
- opt_nb.zero_grad()
-
- self.manual_backward(error_NF, opt_nf)
- self.manual_backward(error_NB, opt_nb)
-
- opt_nf.step()
- opt_nb.step()
-
- if batch_idx > 0 and batch_idx % int(self.cfg.freq_show_train) == 0:
-
- self.netG.eval()
- with torch.no_grad():
- nmlF, nmlB = self.netG(in_tensor)
- in_tensor.update({"nmlF": nmlF, "nmlB": nmlB})
- result_array = self.render_func(in_tensor)
-
- self.logger.experiment.add_image(
- tag=f"Normal-train/{self.global_step}",
- img_tensor=result_array.transpose(2, 0, 1),
- global_step=self.global_step,
- )
-
- # metrics processing
- metrics_log = {
- "train_loss-NF": error_NF.item(),
- "train_loss-NB": error_NB.item(),
- }
-
- tf_log = tf_log_convert(metrics_log)
- bar_log = bar_log_convert(metrics_log)
-
- return {
- "loss": error_NF + error_NB,
- "loss-NF": error_NF,
- "loss-NB": error_NB,
- "log": tf_log,
- "progress_bar": bar_log,
- }
-
- def training_epoch_end(self, outputs):
-
- if [] in outputs:
- outputs = outputs[0]
-
- # metrics processing
- metrics_log = {
- "train_avgloss": batch_mean(outputs, "loss"),
- "train_avgloss-NF": batch_mean(outputs, "loss-NF"),
- "train_avgloss-NB": batch_mean(outputs, "loss-NB"),
- }
-
- tf_log = tf_log_convert(metrics_log)
-
- tf_log["lr-NF"] = self.schedulers[0].get_last_lr()[0]
- tf_log["lr-NB"] = self.schedulers[1].get_last_lr()[0]
-
- return {"log": tf_log}
-
- def validation_step(self, batch, batch_idx):
-
- # retrieve the data
- in_tensor = {}
- for name in self.in_nml:
- in_tensor[name] = batch[name]
-
- FB_tensor = {"normal_F": batch["normal_F"],
- "normal_B": batch["normal_B"]}
-
- self.netG.train()
-
- preds_F, preds_B = self.netG(in_tensor)
- error_NF, error_NB = self.netG.get_norm_error(
- preds_F, preds_B, FB_tensor)
-
- if (batch_idx > 0 and batch_idx % int(self.cfg.freq_show_train) == 0) or (
- batch_idx == 0
- ):
-
- with torch.no_grad():
- nmlF, nmlB = self.netG(in_tensor)
- in_tensor.update({"nmlF": nmlF, "nmlB": nmlB})
- result_array = self.render_func(in_tensor)
-
- self.logger.experiment.add_image(
- tag=f"Normal-val/{self.global_step}",
- img_tensor=result_array.transpose(2, 0, 1),
- global_step=self.global_step,
- )
-
- return {
- "val_loss": error_NF + error_NB,
- "val_loss-NF": error_NF,
- "val_loss-NB": error_NB,
- }
-
- def validation_epoch_end(self, outputs):
-
- # metrics processing
- metrics_log = {
- "val_avgloss": batch_mean(outputs, "val_loss"),
- "val_avgloss-NF": batch_mean(outputs, "val_loss-NF"),
- "val_avgloss-NB": batch_mean(outputs, "val_loss-NB"),
- }
-
- tf_log = tf_log_convert(metrics_log)
-
- return {"log": tf_log}
diff --git a/spaces/Zengyf-CVer/Streamlit_YOLOv5_Model2x/utils/metrics.py b/spaces/Zengyf-CVer/Streamlit_YOLOv5_Model2x/utils/metrics.py
deleted file mode 100644
index ee7d33982cfcd88a14d118823e57058335b46f1a..0000000000000000000000000000000000000000
--- a/spaces/Zengyf-CVer/Streamlit_YOLOv5_Model2x/utils/metrics.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Model validation metrics
-"""
-
-import math
-import warnings
-from pathlib import Path
-
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-
-from utils import TryExcept, threaded
-
-
-def fitness(x):
- # Model fitness as a weighted combination of metrics
- w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
- return (x[:, :4] * w).sum(1)
-
-
-def smooth(y, f=0.05):
- # Box filter of fraction f
- nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd)
- p = np.ones(nf // 2) # ones padding
- yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded
- return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed
-
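A quick, illustrative check of the box filter above: the smoothed curve keeps its length while high-frequency noise is averaged out.

```python
# Illustrative only; the input curve is synthetic.
import numpy as np

y = np.sin(np.linspace(0, 3, 1000)) + 0.1 * np.random.randn(1000)
ys = smooth(y, f=0.05)
print(y.shape, ys.shape)    # (1000,) (1000,)
print(ys.std() <= y.std())  # typically True: the 51-tap box filter damps the noise
```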
-
-def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16):
- """ Compute the average precision, given the recall and precision curves.
- Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
- # Arguments
- tp: True positives (nparray, nx1 or nx10).
- conf: Objectness value from 0-1 (nparray).
- pred_cls: Predicted object classes (nparray).
- target_cls: True object classes (nparray).
- plot: Plot precision-recall curve at mAP@0.5
- save_dir: Plot save directory
- # Returns
- The average precision as computed in py-faster-rcnn.
- """
-
- # Sort by objectness
- i = np.argsort(-conf)
- tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
-
- # Find unique classes
- unique_classes, nt = np.unique(target_cls, return_counts=True)
- nc = unique_classes.shape[0] # number of classes, number of detections
-
- # Create Precision-Recall curve and compute AP for each class
- px, py = np.linspace(0, 1, 1000), [] # for plotting
- ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
- for ci, c in enumerate(unique_classes):
- i = pred_cls == c
- n_l = nt[ci] # number of labels
- n_p = i.sum() # number of predictions
- if n_p == 0 or n_l == 0:
- continue
-
- # Accumulate FPs and TPs
- fpc = (1 - tp[i]).cumsum(0)
- tpc = tp[i].cumsum(0)
-
- # Recall
- recall = tpc / (n_l + eps) # recall curve
- r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
-
- # Precision
- precision = tpc / (tpc + fpc) # precision curve
- p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
-
- # AP from recall-precision curve
- for j in range(tp.shape[1]):
- ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
- if plot and j == 0:
- py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
-
- # Compute F1 (harmonic mean of precision and recall)
- f1 = 2 * p * r / (p + r + eps)
- names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data
- names = dict(enumerate(names)) # to dict
- if plot:
- plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
- plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
- plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
- plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
-
- i = smooth(f1.mean(0), 0.1).argmax() # max F1 index
- p, r, f1 = p[:, i], r[:, i], f1[:, i]
- tp = (r * nt).round() # true positives
- fp = (tp / (p + eps) - tp).round() # false positives
- return tp, fp, p, r, f1, ap, unique_classes.astype(int)
-
-
-def compute_ap(recall, precision):
- """ Compute the average precision, given the recall and precision curves
- # Arguments
- recall: The recall curve (list)
- precision: The precision curve (list)
- # Returns
- Average precision, precision curve, recall curve
- """
-
- # Append sentinel values to beginning and end
- mrec = np.concatenate(([0.0], recall, [1.0]))
- mpre = np.concatenate(([1.0], precision, [0.0]))
-
- # Compute the precision envelope
- mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
-
- # Integrate area under curve
- method = 'interp' # methods: 'continuous', 'interp'
- if method == 'interp':
- x = np.linspace(0, 1, 101) # 101-point interp (COCO)
- ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
- else: # 'continuous'
- i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
-
- return ap, mpre, mrec
-
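A small worked call against `compute_ap` with invented values: perfect precision up to recall 0.8, after which the appended sentinel (recall 1, precision 0) closes the curve.

```python
# Invented recall/precision pair for illustration.
import numpy as np

recall = np.array([0.0, 0.5, 0.8])
precision = np.array([1.0, 1.0, 1.0])
ap, mpre, mrec = compute_ap(recall, precision)
print(round(float(ap), 3))  # 0.9: 0.8 from the flat envelope plus 0.1 from the linear drop to the sentinel
```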
-
-class ConfusionMatrix:
- # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
- def __init__(self, nc, conf=0.25, iou_thres=0.45):
- self.matrix = np.zeros((nc + 1, nc + 1))
- self.nc = nc # number of classes
- self.conf = conf
- self.iou_thres = iou_thres
-
- def process_batch(self, detections, labels):
- """
- Return intersection-over-union (Jaccard index) of boxes.
- Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
- Arguments:
- detections (Array[N, 6]), x1, y1, x2, y2, conf, class
- labels (Array[M, 5]), class, x1, y1, x2, y2
- Returns:
- None, updates confusion matrix accordingly
- """
- if detections is None:
- gt_classes = labels.int()
- for gc in gt_classes:
- self.matrix[self.nc, gc] += 1 # background FN
- return
-
- detections = detections[detections[:, 4] > self.conf]
- gt_classes = labels[:, 0].int()
- detection_classes = detections[:, 5].int()
- iou = box_iou(labels[:, 1:], detections[:, :4])
-
- x = torch.where(iou > self.iou_thres)
- if x[0].shape[0]:
- matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
- if x[0].shape[0] > 1:
- matches = matches[matches[:, 2].argsort()[::-1]]
- matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
- matches = matches[matches[:, 2].argsort()[::-1]]
- matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
- else:
- matches = np.zeros((0, 3))
-
- n = matches.shape[0] > 0
- m0, m1, _ = matches.transpose().astype(int)
- for i, gc in enumerate(gt_classes):
- j = m0 == i
- if n and sum(j) == 1:
- self.matrix[detection_classes[m1[j]], gc] += 1 # correct
- else:
- self.matrix[self.nc, gc] += 1 # background FP
-
- if n:
- for i, dc in enumerate(detection_classes):
- if not any(m1 == i):
- self.matrix[dc, self.nc] += 1 # background FN
-
- def matrix(self):
- return self.matrix
-
- def tp_fp(self):
- tp = self.matrix.diagonal() # true positives
- fp = self.matrix.sum(1) - tp # false positives
- # fn = self.matrix.sum(0) - tp # false negatives (missed detections)
- return tp[:-1], fp[:-1] # remove background class
-
- @TryExcept('WARNING: ConfusionMatrix plot failure: ')
- def plot(self, normalize=True, save_dir='', names=()):
- import seaborn as sn
-
- array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns
- array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
-
- fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
- nc, nn = self.nc, len(names) # number of classes, names
- sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size
- labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels
- with warnings.catch_warnings():
- warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered
- sn.heatmap(array,
- ax=ax,
- annot=nc < 30,
- annot_kws={
- "size": 8},
- cmap='Blues',
- fmt='.2f',
- square=True,
- vmin=0.0,
- xticklabels=names + ['background FP'] if labels else "auto",
- yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
- ax.set_xlabel('True')
- ax.set_ylabel('Predicted')
- ax.set_title('Confusion Matrix')
- fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
- plt.close(fig)
-
- def print(self):
- for i in range(self.nc + 1):
- print(' '.join(map(str, self.matrix[i])))
-
-
-def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
- # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)
-
- # Get the coordinates of bounding boxes
- if xywh: # transform from xywh to xyxy
- (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1)
- w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
- b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
- b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
- else: # x1, y1, x2, y2 = box1
- b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1)
- b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1)
- w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
- w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
-
- # Intersection area
- inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
- (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
-
- # Union Area
- union = w1 * h1 + w2 * h2 - inter + eps
-
- # IoU
- iou = inter / union
- if CIoU or DIoU or GIoU:
- cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
- ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
- if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
- c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
- rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2
- if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
- v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2)
- with torch.no_grad():
- alpha = v / (v - iou + (1 + eps))
- return iou - (rho2 / c2 + v * alpha) # CIoU
- return iou - rho2 / c2 # DIoU
- c_area = cw * ch + eps # convex area
- return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf
- return iou # IoU
-
-
-def box_area(box):
- # box = xyxy(4,n)
- return (box[2] - box[0]) * (box[3] - box[1])
-
-
-def box_iou(box1, box2, eps=1e-7):
- # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
- """
- Return intersection-over-union (Jaccard index) of boxes.
- Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
- Arguments:
- box1 (Tensor[N, 4])
- box2 (Tensor[M, 4])
- Returns:
- iou (Tensor[N, M]): the NxM matrix containing the pairwise
- IoU values for every element in boxes1 and boxes2
- """
-
- # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
- (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1)
- inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
-
- # IoU = inter / (area1 + area2 - inter)
- return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps)
-
-
-def bbox_ioa(box1, box2, eps=1e-7):
- """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
- box1: np.array of shape(4)
- box2: np.array of shape(nx4)
- returns: np.array of shape(n)
- """
-
- # Get the coordinates of bounding boxes
- b1_x1, b1_y1, b1_x2, b1_y2 = box1
- b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
-
- # Intersection area
- inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
- (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
-
- # box2 area
- box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
-
- # Intersection over box2 area
- return inter_area / box2_area
-
-
-def wh_iou(wh1, wh2, eps=1e-7):
- # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
- wh1 = wh1[:, None] # [N,1,2]
- wh2 = wh2[None] # [1,M,2]
- inter = torch.min(wh1, wh2).prod(2) # [N,M]
- return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter)
-
-
-# Plots ----------------------------------------------------------------------------------------------------------------
-
-
-@threaded
-def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
- # Precision-recall curve
- fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
- py = np.stack(py, axis=1)
-
- if 0 < len(names) < 21: # display per-class legend if < 21 classes
- for i, y in enumerate(py.T):
- ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision)
- else:
- ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
-
- ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
- ax.set_xlabel('Recall')
- ax.set_ylabel('Precision')
- ax.set_xlim(0, 1)
- ax.set_ylim(0, 1)
- ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
- ax.set_title('Precision-Recall Curve')
- fig.savefig(save_dir, dpi=250)
- plt.close(fig)
-
-
-@threaded
-def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
- # Metric-confidence curve
- fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
-
- if 0 < len(names) < 21: # display per-class legend if < 21 classes
- for i, y in enumerate(py):
- ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric)
- else:
- ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric)
-
- y = smooth(py.mean(0), 0.05)
- ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
- ax.set_xlabel(xlabel)
- ax.set_ylabel(ylabel)
- ax.set_xlim(0, 1)
- ax.set_ylim(0, 1)
- ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
- ax.set_title(f'{ylabel}-Confidence Curve')
- fig.savefig(save_dir, dpi=250)
- plt.close(fig)
diff --git a/spaces/aadit2697/movie_recommender/README.md b/spaces/aadit2697/movie_recommender/README.md
deleted file mode 100644
index 7bfd4e7ecb8dddf1e7f7ef1e811f5d806301c413..0000000000000000000000000000000000000000
--- a/spaces/aadit2697/movie_recommender/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Movie Recommender
-emoji: 🐠
-colorFrom: yellow
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abdalrahmanshahrour/ArabicQuestionAnswering/app.py b/spaces/abdalrahmanshahrour/ArabicQuestionAnswering/app.py
deleted file mode 100644
index cb56df4edc05fc554106f02c5f4de11ea576af92..0000000000000000000000000000000000000000
--- a/spaces/abdalrahmanshahrour/ArabicQuestionAnswering/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/abdalrahmanshahrour/ArabicQA").launch()
diff --git a/spaces/abhishek/first-order-motion-model/augmentation.py b/spaces/abhishek/first-order-motion-model/augmentation.py
deleted file mode 100644
index 50d03203aaec2a59fb2671bdeccfae1d214f607c..0000000000000000000000000000000000000000
--- a/spaces/abhishek/first-order-motion-model/augmentation.py
+++ /dev/null
@@ -1,345 +0,0 @@
-"""
-Code from https://github.com/hassony2/torch_videovision
-"""
-
-import numbers
-
-import random
-import numpy as np
-import PIL
-
-from skimage.transform import resize, rotate
-from skimage.util import pad
-import torchvision
-
-import warnings
-
-from skimage import img_as_ubyte, img_as_float
-
-
-def crop_clip(clip, min_h, min_w, h, w):
- if isinstance(clip[0], np.ndarray):
- cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
-
- elif isinstance(clip[0], PIL.Image.Image):
- cropped = [
- img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
- ]
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
- 'but got list of {0}'.format(type(clip[0])))
- return cropped
-
-
-def pad_clip(clip, h, w):
- im_h, im_w = clip[0].shape[:2]
- pad_h = (0, 0) if h < im_h else ((h - im_h) // 2, (h - im_h + 1) // 2)
- pad_w = (0, 0) if w < im_w else ((w - im_w) // 2, (w - im_w + 1) // 2)
-
- return pad(clip, ((0, 0), pad_h, pad_w, (0, 0)), mode='edge')
-
-
-def resize_clip(clip, size, interpolation='bilinear'):
- if isinstance(clip[0], np.ndarray):
- if isinstance(size, numbers.Number):
- im_h, im_w, im_c = clip[0].shape
- # Min spatial dim already matches minimal size
- if (im_w <= im_h and im_w == size) or (im_h <= im_w
- and im_h == size):
- return clip
- new_h, new_w = get_resize_sizes(im_h, im_w, size)
- size = (new_w, new_h)
- else:
- size = size[1], size[0]
-
- scaled = [
- resize(img, size, order=1 if interpolation == 'bilinear' else 0, preserve_range=True,
- mode='constant', anti_aliasing=True) for img in clip
- ]
- elif isinstance(clip[0], PIL.Image.Image):
- if isinstance(size, numbers.Number):
- im_w, im_h = clip[0].size
- # Min spatial dim already matches minimal size
- if (im_w <= im_h and im_w == size) or (im_h <= im_w
- and im_h == size):
- return clip
- new_h, new_w = get_resize_sizes(im_h, im_w, size)
- size = (new_w, new_h)
- else:
- size = size[1], size[0]
- if interpolation == 'bilinear':
- pil_inter = PIL.Image.BILINEAR
- else:
- pil_inter = PIL.Image.NEAREST
- scaled = [img.resize(size, pil_inter) for img in clip]
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
- 'but got list of {0}'.format(type(clip[0])))
- return scaled
-
-
-def get_resize_sizes(im_h, im_w, size):
- if im_w < im_h:
- ow = size
- oh = int(size * im_h / im_w)
- else:
- oh = size
- ow = int(size * im_w / im_h)
- return oh, ow
-
-
-class RandomFlip(object):
- def __init__(self, time_flip=False, horizontal_flip=False):
- self.time_flip = time_flip
- self.horizontal_flip = horizontal_flip
-
- def __call__(self, clip):
- if random.random() < 0.5 and self.time_flip:
- return clip[::-1]
- if random.random() < 0.5 and self.horizontal_flip:
- return [np.fliplr(img) for img in clip]
-
- return clip
-
-
-class RandomResize(object):
- """Resizes a list of (H x W x C) numpy.ndarray to the final size
- The larger the original image is, the longer it takes to
- interpolate
- Args:
- interpolation (str): Can be one of 'nearest', 'bilinear'
- defaults to nearest
- size (tuple): (width, height)
- """
-
- def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
- self.ratio = ratio
- self.interpolation = interpolation
-
- def __call__(self, clip):
- scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
-
- if isinstance(clip[0], np.ndarray):
- im_h, im_w, im_c = clip[0].shape
- elif isinstance(clip[0], PIL.Image.Image):
- im_w, im_h = clip[0].size
-
- new_w = int(im_w * scaling_factor)
- new_h = int(im_h * scaling_factor)
- new_size = (new_w, new_h)
- resized = resize_clip(
- clip, new_size, interpolation=self.interpolation)
-
- return resized
-
-
-class RandomCrop(object):
- """Extract random crop at the same location for a list of videos
- Args:
- size (sequence or int): Desired output size for the
- crop in format (h, w)
- """
-
- def __init__(self, size):
- if isinstance(size, numbers.Number):
- size = (size, size)
-
- self.size = size
-
- def __call__(self, clip):
- """
- Args:
- img (PIL.Image or numpy.ndarray): List of videos to be cropped
- in format (h, w, c) in numpy.ndarray
- Returns:
- PIL.Image or numpy.ndarray: Cropped list of videos
- """
- h, w = self.size
- if isinstance(clip[0], np.ndarray):
- im_h, im_w, im_c = clip[0].shape
- elif isinstance(clip[0], PIL.Image.Image):
- im_w, im_h = clip[0].size
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
- 'but got list of {0}'.format(type(clip[0])))
-
- clip = pad_clip(clip, h, w)
- im_h, im_w = clip.shape[1:3]
- x1 = 0 if w == im_w else random.randint(0, im_w - w)
- y1 = 0 if h == im_h else random.randint(0, im_h - h)
- cropped = crop_clip(clip, y1, x1, h, w)
-
- return cropped
-
-
-class RandomRotation(object):
- """Rotate entire clip randomly by a random angle within
- given bounds
- Args:
- degrees (sequence or int): Range of degrees to select from
- If degrees is a number instead of sequence like (min, max),
- the range of degrees, will be (-degrees, +degrees).
- """
-
- def __init__(self, degrees):
- if isinstance(degrees, numbers.Number):
- if degrees < 0:
- raise ValueError('If degrees is a single number,'
- 'must be positive')
- degrees = (-degrees, degrees)
- else:
- if len(degrees) != 2:
- raise ValueError('If degrees is a sequence,'
- 'it must be of len 2.')
-
- self.degrees = degrees
-
- def __call__(self, clip):
- """
- Args:
- img (PIL.Image or numpy.ndarray): List of videos to be cropped
- in format (h, w, c) in numpy.ndarray
- Returns:
- PIL.Image or numpy.ndarray: Cropped list of videos
- """
- angle = random.uniform(self.degrees[0], self.degrees[1])
- if isinstance(clip[0], np.ndarray):
- rotated = [rotate(image=img, angle=angle, preserve_range=True) for img in clip]
- elif isinstance(clip[0], PIL.Image.Image):
- rotated = [img.rotate(angle) for img in clip]
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
- 'but got list of {0}'.format(type(clip[0])))
-
- return rotated
-
-
-class ColorJitter(object):
- """Randomly change the brightness, contrast and saturation and hue of the clip
- Args:
- brightness (float): How much to jitter brightness. brightness_factor
- is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
- contrast (float): How much to jitter contrast. contrast_factor
- is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
- saturation (float): How much to jitter saturation. saturation_factor
- is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
- hue(float): How much to jitter hue. hue_factor is chosen uniformly from
- [-hue, hue]. Should be >=0 and <= 0.5.
- """
-
- def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
- self.brightness = brightness
- self.contrast = contrast
- self.saturation = saturation
- self.hue = hue
-
- def get_params(self, brightness, contrast, saturation, hue):
- if brightness > 0:
- brightness_factor = random.uniform(
- max(0, 1 - brightness), 1 + brightness)
- else:
- brightness_factor = None
-
- if contrast > 0:
- contrast_factor = random.uniform(
- max(0, 1 - contrast), 1 + contrast)
- else:
- contrast_factor = None
-
- if saturation > 0:
- saturation_factor = random.uniform(
- max(0, 1 - saturation), 1 + saturation)
- else:
- saturation_factor = None
-
- if hue > 0:
- hue_factor = random.uniform(-hue, hue)
- else:
- hue_factor = None
- return brightness_factor, contrast_factor, saturation_factor, hue_factor
-
- def __call__(self, clip):
- """
- Args:
- clip (list): list of PIL.Image
- Returns:
- list PIL.Image : list of transformed PIL.Image
- """
- if isinstance(clip[0], np.ndarray):
- brightness, contrast, saturation, hue = self.get_params(
- self.brightness, self.contrast, self.saturation, self.hue)
-
- # Create img transform function sequence
- img_transforms = []
- if brightness is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
- if saturation is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
- if hue is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
- if contrast is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
- random.shuffle(img_transforms)
- img_transforms = [img_as_ubyte, torchvision.transforms.ToPILImage()] + img_transforms + [np.array,
- img_as_float]
-
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- jittered_clip = []
- for img in clip:
- jittered_img = img
- for func in img_transforms:
- jittered_img = func(jittered_img)
- jittered_clip.append(jittered_img.astype('float32'))
- elif isinstance(clip[0], PIL.Image.Image):
- brightness, contrast, saturation, hue = self.get_params(
- self.brightness, self.contrast, self.saturation, self.hue)
-
- # Create img transform function sequence
- img_transforms = []
- if brightness is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
- if saturation is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
- if hue is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
- if contrast is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
- random.shuffle(img_transforms)
-
- # Apply to all videos
- jittered_clip = []
- for img in clip:
- jittered_img = img
- for func in img_transforms:
- jittered_img = func(jittered_img)
- jittered_clip.append(jittered_img)
-
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
- 'but got list of {0}'.format(type(clip[0])))
- return jittered_clip
-
-
-class AllAugmentationTransform:
- def __init__(self, resize_param=None, rotation_param=None, flip_param=None, crop_param=None, jitter_param=None):
- self.transforms = []
-
- if flip_param is not None:
- self.transforms.append(RandomFlip(**flip_param))
-
- if rotation_param is not None:
- self.transforms.append(RandomRotation(**rotation_param))
-
- if resize_param is not None:
- self.transforms.append(RandomResize(**resize_param))
-
- if crop_param is not None:
- self.transforms.append(RandomCrop(**crop_param))
-
- if jitter_param is not None:
- self.transforms.append(ColorJitter(**jitter_param))
-
- def __call__(self, clip):
- for t in self.transforms:
- clip = t(clip)
- return clip
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/evaluation/class_names.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/evaluation/class_names.py
deleted file mode 100644
index e69d602ee3a6e197e21a806d5aab4b020be2fe6c..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/evaluation/class_names.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import annotator.uniformer.mmcv as mmcv
-
-
-def wider_face_classes():
- return ['face']
-
-
-def voc_classes():
- return [
- 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
- 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
- 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
- ]
-
-
-def imagenet_det_classes():
- return [
- 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
- 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
- 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
- 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
- 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
- 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
- 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
- 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
- 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
- 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
- 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
- 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
- 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
- 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
- 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
- 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
- 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
- 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
- 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
- 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
- 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
- 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
- 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
- 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
- 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
- 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
- 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
- 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
- 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
- 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
- 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
- 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
- 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
- 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
- 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
- 'whale', 'wine_bottle', 'zebra'
- ]
-
-
-def imagenet_vid_classes():
- return [
- 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
- 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
- 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
- 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
- 'watercraft', 'whale', 'zebra'
- ]
-
-
-def coco_classes():
- return [
- 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
- 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
- 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
- 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
- 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
- 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
- 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
- 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
- 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
- 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
- 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
- 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
- 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
- ]
-
-
-def cityscapes_classes():
- return [
- 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
- 'bicycle'
- ]
-
-
-dataset_aliases = {
- 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
- 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
- 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
- 'coco': ['coco', 'mscoco', 'ms_coco'],
- 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
- 'cityscapes': ['cityscapes']
-}
-
-
-def get_classes(dataset):
- """Get class names of a dataset."""
- alias2name = {}
- for name, aliases in dataset_aliases.items():
- for alias in aliases:
- alias2name[alias] = name
-
- if mmcv.is_str(dataset):
- if dataset in alias2name:
- labels = eval(alias2name[dataset] + '_classes()')
- else:
- raise ValueError(f'Unrecognized dataset: {dataset}')
- else:
- raise TypeError(f'dataset must be a str, but got {type(dataset)}')
- return labels
-
-def get_palette(dataset):
- """Get class palette (RGB) of a dataset."""
- alias2name = {}
- for name, aliases in dataset_aliases.items():
- for alias in aliases:
- alias2name[alias] = name
-
- if mmcv.is_str(dataset):
- if dataset in alias2name:
- labels = eval(alias2name[dataset] + '_palette()')
- else:
- raise ValueError(f'Unrecognized dataset: {dataset}')
- else:
- raise TypeError(f'dataset must be a str, but got {type(dataset)}')
- return labels
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/fileio/handlers/pickle_handler.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/fileio/handlers/pickle_handler.py
deleted file mode 100644
index b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/fileio/handlers/pickle_handler.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import pickle
-
-from .base import BaseFileHandler
-
-
-class PickleHandler(BaseFileHandler):
-
- str_like = False
-
- def load_from_fileobj(self, file, **kwargs):
- return pickle.load(file, **kwargs)
-
- def load_from_path(self, filepath, **kwargs):
- return super(PickleHandler, self).load_from_path(
- filepath, mode='rb', **kwargs)
-
- def dump_to_str(self, obj, **kwargs):
- kwargs.setdefault('protocol', 2)
- return pickle.dumps(obj, **kwargs)
-
- def dump_to_fileobj(self, obj, file, **kwargs):
- kwargs.setdefault('protocol', 2)
- pickle.dump(obj, file, **kwargs)
-
- def dump_to_path(self, obj, filepath, **kwargs):
- super(PickleHandler, self).dump_to_path(
- obj, filepath, mode='wb', **kwargs)
diff --git a/spaces/adpro/dpt-depth04/app.py b/spaces/adpro/dpt-depth04/app.py
deleted file mode 100644
index d53cd25e9a32ed9f2b8c670cb4e9b6f00b05ec82..0000000000000000000000000000000000000000
--- a/spaces/adpro/dpt-depth04/app.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import gradio as gr
-from transformers import DPTFeatureExtractor, DPTForDepthEstimation
-import torch
-import numpy as np
-from PIL import Image
-
-#torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
-
-feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
-model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
-
-def process_image(image):
- # prepare image for the model
- encoding = feature_extractor(image, return_tensors="pt")
-
- # forward pass
- with torch.no_grad():
- outputs = model(**encoding)
- predicted_depth = outputs.predicted_depth
-
- # interpolate to original size
- prediction = torch.nn.functional.interpolate(
- predicted_depth.unsqueeze(1),
- size=image.size[::-1],
- mode="bicubic",
- align_corners=False,
- ).squeeze()
- output = prediction.cpu().numpy()
- formatted = (output * 255 / np.max(output)).astype('uint8')
- img = Image.fromarray(formatted)
- return img
-
-
-title = "Demo: zero-shot depth estimation with DPT"
-description = "Demo for Intel's DPT, a Dense Prediction Transformer for state-of-the-art dense prediction tasks such as semantic segmentation and depth estimation."
-
-
-iface = gr.Interface(fn=process_image,
- inputs=gr.inputs.Image(type="pil"),
- outputs=gr.outputs.Image(type="pil", label="predicted depth"),
- title=title,
- description=description,
- enable_queue=True)
-iface.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/akhaliq/Mask2Former/train_net_video.py b/spaces/akhaliq/Mask2Former/train_net_video.py
deleted file mode 100644
index 2d22345ed9659acfe70ed8d536b9775c748623f0..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Mask2Former/train_net_video.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-MaskFormer Training Script.
-
-This script is a simplified version of the training script in detectron2/tools.
-"""
-try:
- # ignore ShapelyDeprecationWarning from fvcore
- from shapely.errors import ShapelyDeprecationWarning
- import warnings
- warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
-except:
- pass
-
-import copy
-import itertools
-import logging
-import os
-
-from collections import OrderedDict
-from typing import Any, Dict, List, Set
-
-import torch
-
-import detectron2.utils.comm as comm
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import get_cfg
-from detectron2.data import MetadataCatalog
-from detectron2.engine import (
- DefaultTrainer,
- default_argument_parser,
- default_setup,
- launch,
-)
-from detectron2.evaluation import (
- DatasetEvaluator,
- inference_on_dataset,
- print_csv_format,
- verify_results,
-)
-from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
-from detectron2.solver.build import maybe_add_gradient_clipping
-from detectron2.utils.logger import setup_logger
-
-# MaskFormer
-from mask2former import add_maskformer2_config
-from mask2former_video import (
- YTVISDatasetMapper,
- YTVISEvaluator,
- add_maskformer2_video_config,
- build_detection_train_loader,
- build_detection_test_loader,
- get_detection_dataset_dicts,
-)
-
-
-class Trainer(DefaultTrainer):
- """
- Extension of the Trainer class adapted to MaskFormer.
- """
-
- @classmethod
- def build_evaluator(cls, cfg, dataset_name, output_folder=None):
- """
- Create evaluator(s) for a given dataset.
- This uses the special metadata "evaluator_type" associated with each builtin dataset.
- For your own dataset, you can simply create an evaluator manually in your
- script and do not have to worry about the hacky if-else logic here.
- """
- if output_folder is None:
- output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
- os.makedirs(output_folder, exist_ok=True)
-
- return YTVISEvaluator(dataset_name, cfg, True, output_folder)
-
- @classmethod
- def build_train_loader(cls, cfg):
- dataset_name = cfg.DATASETS.TRAIN[0]
- mapper = YTVISDatasetMapper(cfg, is_train=True)
-
- dataset_dict = get_detection_dataset_dicts(
- dataset_name,
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
- )
-
- return build_detection_train_loader(cfg, mapper=mapper, dataset=dataset_dict)
-
- @classmethod
- def build_test_loader(cls, cfg, dataset_name):
- dataset_name = cfg.DATASETS.TEST[0]
- mapper = YTVISDatasetMapper(cfg, is_train=False)
- return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
-
- @classmethod
- def build_lr_scheduler(cls, cfg, optimizer):
- """
- It now calls :func:`detectron2.solver.build_lr_scheduler`.
- Overwrite it if you'd like a different scheduler.
- """
- return build_lr_scheduler(cfg, optimizer)
-
- @classmethod
- def build_optimizer(cls, cfg, model):
- weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM
- weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED
-
- defaults = {}
- defaults["lr"] = cfg.SOLVER.BASE_LR
- defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY
-
- norm_module_types = (
- torch.nn.BatchNorm1d,
- torch.nn.BatchNorm2d,
- torch.nn.BatchNorm3d,
- torch.nn.SyncBatchNorm,
- # NaiveSyncBatchNorm inherits from BatchNorm2d
- torch.nn.GroupNorm,
- torch.nn.InstanceNorm1d,
- torch.nn.InstanceNorm2d,
- torch.nn.InstanceNorm3d,
- torch.nn.LayerNorm,
- torch.nn.LocalResponseNorm,
- )
-
- params: List[Dict[str, Any]] = []
- memo: Set[torch.nn.parameter.Parameter] = set()
- for module_name, module in model.named_modules():
- for module_param_name, value in module.named_parameters(recurse=False):
- if not value.requires_grad:
- continue
- # Avoid duplicating parameters
- if value in memo:
- continue
- memo.add(value)
-
- hyperparams = copy.copy(defaults)
- if "backbone" in module_name:
- hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
- if (
- "relative_position_bias_table" in module_param_name
- or "absolute_pos_embed" in module_param_name
- ):
- print(module_param_name)
- hyperparams["weight_decay"] = 0.0
- if isinstance(module, norm_module_types):
- hyperparams["weight_decay"] = weight_decay_norm
- if isinstance(module, torch.nn.Embedding):
- hyperparams["weight_decay"] = weight_decay_embed
- params.append({"params": [value], **hyperparams})
-
- def maybe_add_full_model_gradient_clipping(optim):
- # detectron2 doesn't have full model gradient clipping now
- clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
- enable = (
- cfg.SOLVER.CLIP_GRADIENTS.ENABLED
- and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
- and clip_norm_val > 0.0
- )
-
- class FullModelGradientClippingOptimizer(optim):
- def step(self, closure=None):
- all_params = itertools.chain(*[x["params"] for x in self.param_groups])
- torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
- super().step(closure=closure)
-
- return FullModelGradientClippingOptimizer if enable else optim
-
- optimizer_type = cfg.SOLVER.OPTIMIZER
- if optimizer_type == "SGD":
- optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
- params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
- )
- elif optimizer_type == "ADAMW":
- optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
- params, cfg.SOLVER.BASE_LR
- )
- else:
- raise NotImplementedError(f"no optimizer type {optimizer_type}")
- if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
- optimizer = maybe_add_gradient_clipping(cfg, optimizer)
- return optimizer
-
- @classmethod
- def test(cls, cfg, model, evaluators=None):
- """
- Evaluate the given model. The given model is expected to already contain
- weights to evaluate.
- Args:
- cfg (CfgNode):
- model (nn.Module):
- evaluators (list[DatasetEvaluator] or None): if None, will call
- :meth:`build_evaluator`. Otherwise, must have the same length as
- ``cfg.DATASETS.TEST``.
- Returns:
- dict: a dict of result metrics
- """
- from torch.cuda.amp import autocast
- logger = logging.getLogger(__name__)
- if isinstance(evaluators, DatasetEvaluator):
- evaluators = [evaluators]
- if evaluators is not None:
- assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
- len(cfg.DATASETS.TEST), len(evaluators)
- )
-
- results = OrderedDict()
- for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
- data_loader = cls.build_test_loader(cfg, dataset_name)
- # When evaluators are passed in as arguments,
- # implicitly assume that evaluators can be created before data_loader.
- if evaluators is not None:
- evaluator = evaluators[idx]
- else:
- try:
- evaluator = cls.build_evaluator(cfg, dataset_name)
- except NotImplementedError:
- logger.warn(
- "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
- "or implement its `build_evaluator` method."
- )
- results[dataset_name] = {}
- continue
- with autocast():
- results_i = inference_on_dataset(model, data_loader, evaluator)
- results[dataset_name] = results_i
- if comm.is_main_process():
- assert isinstance(
- results_i, dict
- ), "Evaluator must return a dict on the main process. Got {} instead.".format(
- results_i
- )
- logger.info("Evaluation results for {} in csv format:".format(dataset_name))
- print_csv_format(results_i)
-
- if len(results) == 1:
- results = list(results.values())[0]
- return results
-
-
-def setup(args):
- """
- Create configs and perform basic setups.
- """
- cfg = get_cfg()
- # for poly lr schedule
- add_deeplab_config(cfg)
- add_maskformer2_config(cfg)
- add_maskformer2_video_config(cfg)
- cfg.merge_from_file(args.config_file)
- cfg.merge_from_list(args.opts)
- cfg.freeze()
- default_setup(cfg, args)
- # Setup logger for "mask_former" module
- setup_logger(name="mask2former")
- setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="mask2former_video")
- return cfg
-
-
-def main(args):
- cfg = setup(args)
-
- if args.eval_only:
- model = Trainer.build_model(cfg)
- DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
- cfg.MODEL.WEIGHTS, resume=args.resume
- )
- res = Trainer.test(cfg, model)
- if cfg.TEST.AUG.ENABLED:
- raise NotImplementedError
- if comm.is_main_process():
- verify_results(cfg, res)
- return res
-
- trainer = Trainer(cfg)
- trainer.resume_or_load(resume=args.resume)
- return trainer.train()
-
-
-if __name__ == "__main__":
- args = default_argument_parser().parse_args()
- print("Command Line Args:", args)
- launch(
- main,
- args.num_gpus,
- num_machines=args.num_machines,
- machine_rank=args.machine_rank,
- dist_url=args.dist_url,
- args=(args,),
- )
diff --git a/spaces/akhaliq/deeplab2/g3doc/setup/cityscapes_test_server_evaluation.md b/spaces/akhaliq/deeplab2/g3doc/setup/cityscapes_test_server_evaluation.md
deleted file mode 100644
index 07a5d3f2cac9cc642310ffeb881be77d74d522b2..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/deeplab2/g3doc/setup/cityscapes_test_server_evaluation.md
+++ /dev/null
@@ -1,109 +0,0 @@
-# Test Server Evaluation on Cityscapes dataset
-
-This page walks through the steps required to convert DeepLab2 predictions for
-test server evaluation on [Cityscapes](https://www.cityscapes-dataset.com/).
-
-A high-level overview of the whole process:
-
-1. Save raw panoptic prediction in the two-channel format.
-
-2. Create images json file.
-
-3. Convert predictions in the two-channel format to the panoptic COCO format.
-
-4. Run local validation set evaluation or prepare test set evaluation.
-
-We also define some environmental variables for simplicity and convenience:
-
-`BASE_MODEL_DIRECTORY`: a variable set in the textproto file that defines where all
-checkpoints and results are saved.
-
-`DATA_ROOT`: where the original Cityscapes dataset is located.
-
-`PATH_TO_SAVE`: where the converted results should be saved.
-
-`IMAGES_SPLIT`: *val* or *test* depending on the target split.
-
-## Save Raw Panoptic Prediction
-
-Save the raw panoptic predictions in the
-[two-channel panoptic format](https://arxiv.org/pdf/1801.00868.pdf) by ensuring
-the following fields are set properly in the textproto config file.
-
-```
-eval_dataset_options.decode_groundtruth_label = false
-evaluator_options.save_predictions = true
-evaluator_options.save_raw_predictions = true
-evaluator_options.convert_raw_to_eval_ids = true
-```
-
-Then run the model in evaluation mode (with `--mode=eval`); the results will be
-saved at
-
-*semantic segmentation*: ${BASE_MODEL_DIRECTORY}/vis/raw_semantic/\*.png
-
-*panoptic segmentation*: ${BASE_MODEL_DIRECTORY}/vis/raw_panoptic/\*.png
-
-## Create Images JSON
-
-Create images json file by running the following commands.
-
-```bash
-python deeplab2/utils/create_images_json_for_cityscapes.py \
- --image_dir=${DATA_ROOT}/leftImg8bit/${IMAGES_SPLIT} \
- --output_json_path=${PATH_TO_SAVE}/${IMAGES_SPLIT}_images.json \
- --only_basename \
- --include_image_type_suffix=false
-```
-
-## Convert the Prediction Format
-
-Convert prediction results saved in the
-[two-channel panoptic format](https://arxiv.org/pdf/1801.00868.pdf) to the
-panoptic COCO format.
-
-```bash
-python panopticapi/converters/2channels2panoptic_coco_format.py \
- --source_folder=${BASE_MODEL_DIRECTORY}/vis/raw_panoptic \
- --images_json_file=${PATH_TO_SAVE}/${IMAGES_SPLIT}_images.json\
- --categories_json_file=deeplab2/utils/panoptic_cityscapes_categories.json \
- --segmentations_folder=${PATH_TO_SAVE}/panoptic_cocoformat \
- --predictions_json_file=${PATH_TO_SAVE}/panoptic_cocoformat.json
-```
-
-## Run Local Evaluation Scripts (for *validation* set)
-
-Run the [official scripts](https://github.com/mcordts/cityscapesScripts) to
-evaluate validation set results.
-
-For *semantic segmentation*:
-
-```bash
-CITYSCAPES_RESULTS=${BASE_MODEL_DIRECTORY}/vis/raw_semantic/ \
-CITYSCAPES_DATASET=${DATA_ROOT} \
-CITYSCAPES_EXPORT_DIR=${PATH_TO_SAVE} \
-python cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py
-```
-
-For *panoptic segmentation*:
-
-```bash
-python cityscapesscripts/evaluation/evalPanopticSemanticLabeling.py \
- --prediction-json-file=${PATH_TO_SAVE}/panoptic_cocoformat.json \
- --prediction-folder=${PATH_TO_SAVE}/panoptic_cocoformat \
- --gt-json-file=${DATA_ROOT}/gtFine/cityscapes_panoptic_val.json \
- --gt-folder=${DATA_ROOT}/gtFine/cityscapes_panoptic_val
-```
-
-Please note that our prediction format does not support instance segmentation yet.
-
-## Prepare Submission Files (for *test* set)
-
-Run the following command to prepare a submission file for test server
-evaluation.
-
-```bash
-zip -r cityscapes_test_submission_semantic.zip ${BASE_MODEL_DIRECTORY}/vis/raw_semantic
-zip -r cityscapes_test_submission_panoptic.zip ${PATH_TO_SAVE}/panoptic_cocoformat ${PATH_TO_SAVE}/panoptic_cocoformat.json
-```
diff --git a/spaces/alamin655/websurfx/SECURITY.md b/spaces/alamin655/websurfx/SECURITY.md
deleted file mode 100644
index 3c2f80d1c5c5777165e1ba3919b9ab489427f0be..0000000000000000000000000000000000000000
--- a/spaces/alamin655/websurfx/SECURITY.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Security Policy
-
-We love responsible reports of (potential) security issues in Websurfx.
-
-Be sure to provide as much information as possible and, where possible, reproduction steps for the identified vulnerability. Also include in your report the specific URL of the project as well as the code in which you found the issue.
diff --git a/spaces/alexeikud/identidog/README.md b/spaces/alexeikud/identidog/README.md
deleted file mode 100644
index 108efae38fad88709bec55eccc405ee2ff662d36..0000000000000000000000000000000000000000
--- a/spaces/alexeikud/identidog/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Identidog
-emoji: 🐶
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.4.1
-python_version: 3.9.13
-app_file: app.py
-tags: ['dog identification', 'breed classifier', 'fastai', 'pytorch', 'opencv']
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ali-ghamdan/deoldify/deoldify/_device.py b/spaces/ali-ghamdan/deoldify/deoldify/_device.py
deleted file mode 100644
index ed40ce131e3375a937c862fafa44e432f825f93b..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/deoldify/deoldify/_device.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import os
-from enum import Enum
-from .device_id import DeviceId
-
-#NOTE: This must be called first before any torch imports in order to work properly!
-
-class DeviceException(Exception):
- pass
-
-class _Device:
- def __init__(self):
- self.set(DeviceId.CPU)
-
- def is_gpu(self):
- ''' Returns `True` if the current device is GPU, `False` otherwise. '''
- return self.current() is not DeviceId.CPU
-
- def current(self):
- return self._current_device
-
- def set(self, device:DeviceId):
- if device == DeviceId.CPU:
- os.environ['CUDA_VISIBLE_DEVICES']=''
- else:
- os.environ['CUDA_VISIBLE_DEVICES']=str(device.value)
- import torch
- torch.backends.cudnn.benchmark=False
-
- self._current_device = device
- return device
\ No newline at end of file
diff --git a/spaces/allinaigc/internet_GPT_venice/app.py b/spaces/allinaigc/internet_GPT_venice/app.py
deleted file mode 100644
index d4bd799174fa305b9db43d61277d4e9b47c36c11..0000000000000000000000000000000000000000
--- a/spaces/allinaigc/internet_GPT_venice/app.py
+++ /dev/null
@@ -1,160 +0,0 @@
-'''
-1. Adds an internet-enabled mode that strengthens the conversation capability.
-2. The online and offline modes are unified in one function and selected via `choice`.
-'''
-
-import openai
-import gradio as gr
-# from gradio import Radio
-import os
-import openai
-import requests
-from rich import print
-import pandas as pd
-import gc
-
-## The currently usable API key is stored in this Hugging Face Space's secrets.
-openai.api_key = os.environ['my_api_key']
-os.environ["OPENAI_API_KEY"] = os.environ['my_api_key']
-
-### Bing Search
-bing_search_api_key = os.environ['bing_api_key']
-bing_search_endpoint = 'https://api.bing.microsoft.com/v7.0/search'
-
-def clear_conversation():
- return gr.update(value=None, visible=True), None, "",
-
-def search(query):
- # Construct a request
- # mkt = 'en-EN'
- mkt = 'zh-CN'
- params = {'q': query, 'mkt': mkt}
- headers = {'Ocp-Apim-Subscription-Key': bing_search_api_key}
-
- # Call the API
- try:
- response = requests.get(bing_search_endpoint, headers=headers, params=params)
- response.raise_for_status()
- json = response.json()
- return json["webPages"]["value"]
- # print("\nJSON Response:\n")
- # pprint(response.json())
-
- except Exception as e:
- raise e
-
-messages = [
- # {"role": "system", "content": "You are a helpful and kind AI Assistant."},
- {"role": "system", "content": "你是一个专业和友好的AI助手。"},]
-
-
-def chatbot(input, choice):
- global messages #! Keeping messages global lets the non-augmented (default) mode remember the conversation history.
- history = []
- try:
- if input and choice!='联网增强模式':
- print('start the NO internet version of ChatGPT')
-
- # messages = [
- # # {"role": "system", "content": "You are a helpful and kind AI Assistant."},
- # {"role": "system", "content": "你是一个专业和友好的AI助手。"},] ## 这里可以开关memory,history功能。
-
- messages.append({"role": "user", "content": input})
- for resp in openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=messages, stream=True, max_tokens=8048,temperature=0.9):
- answer = str(resp['choices'][0]['delta'].get('content'))
- if answer != "None":
- history.append(answer)
- result = "".join(history).strip() #* working!
- yield result
-
- elif input and choice=='联网增强模式':
- print('start the internet version of ChatGPT')
-
- #NOTE: Reset messages, which discards all of the previous conversation history.
- messages = [
- # {"role": "system", "content": "You are a helpful and kind AI Assistant."},
- {"role": "system", "content": "你是一个专业和友好的AI助手。"},
- ]
-
- internet_search_result = search(input)
- search_prompt = [f"Source:\nTitle: {result['name']}\nURL: {result['url']}\nContent: {result['snippet']}" for result in internet_search_result]
- print('content:\n', search_prompt[0])
- # prompt = "Use these sources to answer the question:\n\n" + "\n\n".join(search_prompt) + "\n\nQuestion: " + input + "\n\nAnswer:"
- prompt = "Use these sources to answer the question:\n\n" + "\n\n".join(search_prompt[0:3]) + "\n\nQuestion: " + input + "\n\nAnswer:(注意:回答问题时请提示'以下答案基于互联网公开信息。')" ## 限制了只有3个搜索结果。
- messages.append({"role": "user", "content": prompt})
-
- ## no streaming version.
- # messages.append({"role": "user", "content": input})
- # print(input)
- # chat = openai.ChatCompletion.create(
- # model="gpt-3.5-turbo", messages=messages
- # )
- # reply = chat.choices[0].message.content
- # messages.append({"role": "assistant", "content": reply})
-
- ## streaming version. typewriter effect, word by word output.
- for resp in openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=messages, stream=True, max_tokens=8048, temperature=0.9):
-
- # sys.stdout.write(str(resp['choices'][0]['delta'].get('content'))) ## This approach only works in a terminal.
- # sys.stdout.flush()
-
- #* The following works in Gradio.
- answer = str(resp['choices'][0]['delta'].get('content'))
- if answer != "None":
- history.append(answer)
- result = "".join(history).strip() #* working!
- yield result
-
- messages = [{"role": "system", "content": "你是一个专业和友好的AI助手。"},]
- messages.append({"role": "user", "content": ""})
-
-
- except Exception as e:
- print(e)
- messages = [{"role": "system", "content": "你是一个专业和友好的AI助手。"},]
- messages.append({"role": "user", "content": ""})
- yield e ## Show the error in the outputs textbox.
-
-
- # return None
-
-css = "textarea {-webkit-text-fill-color:black; -webkit-opacity: 1;}"
-
-
-# auth_list = (
-
-# ('1234','1234'),
-# ('yao190476','0476'),
-# ('bluedongting','ting'),
-# ('mio','mio'),
-# ('ainachen','chen'),
-# ('wenshan','shan'),
-# )
-
-# user_csv = pd.read_csv('auth_list.csv')
-user_csv = pd.read_excel('auth_list.xlsx',dtype=str)
-auth_list = [(x, y) for (x, y) in user_csv[['username', 'password']].values]
-
-## To polish the display, see: https://gradio.app/theming-guide/
-try:
- inputs = [gr.inputs.Textbox(lines=5, label="请输入你的问题/任务").style(show_copy_button=True), gr.Radio(['默认ChatGPT模式', '联网增强模式'], value='默认ChatGPT模式', label="ChatGPT运行模式")] #! working. If there are multiple inputs, put them in a list and pass the list to inputs. Note: if the option text is changed here, the matching text in chatbot() must be updated as well.
- outputs = gr.Textbox(lines=11, label="ChatGPT的回答").style(show_copy_button=True)
- interface = gr.Interface(
- # fn=chatbot,
- fn=chatbot,
- inputs=inputs,
- outputs=outputs,
- title="极速版ChatGPT",
- description="重要通知: 为了提供更为强大的人工智能服务,本产品已经正式完成内部测试与站点升级。欢迎大家通过手机或者电脑体验新站点(用户名与密码均不变):https://allinaigcnlp.com/ 新站点功能包括:对话更加流畅、增加多个贴近中文用户使用习惯的优质提示词、历史对话记录、一键开启单轮或多轮对话、完整对话记录截屏、自定义预设角色提示词等。",
-
- theme=gr.themes.Soft(),
- css=css,
- )
- interface.queue(concurrency_count=100)
- interface.launch(height=500,auth=auth_list,auth_message="欢迎使用ChatGPT")
-
-except Exception as e:
- print(e)
- messages = [{"role": "system", "content": "你是一个专业和友好的AI助手。"},]
- messages.append({"role": "user", "content": ""})
-
diff --git a/spaces/amazon/README/README.md b/spaces/amazon/README/README.md
deleted file mode 100644
index f257f75cfad5e975c14e48165f29fb07d03088c3..0000000000000000000000000000000000000000
--- a/spaces/amazon/README/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: README
-emoji: 🐠
-colorFrom: pink
-colorTo: purple
-sdk: static
-pinned: false
----
-
-
-
- Hugging Face is working with Amazon Web Services to make it easier than
- ever for startups and enterprises to train and deploy Hugging Face models in Amazon SageMaker.
-
- To train Hugging Face models in Amazon SageMaker, you can use the
- Hugging Face Deep Learning Containers (DLCs) and the Hugging Face
- support in the SageMaker Python SDK.
-
-
- The DLCs are fully integrated with the SageMaker distributed training
- libraries to train models more quickly using the latest generation of
- accelerated computing instances available on Amazon EC2. With the
- SageMaker Python SDK, you can start training with just a single line of
- code, enabling your teams to move from idea to production more quickly.
-
-
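-
- As a rough sketch of that training workflow (the entry-point script, IAM role, instance
- type, version pins and hyperparameters below are illustrative placeholders, not official
- recommendations), launching a managed training job with the Hugging Face estimator in the
- SageMaker Python SDK can look like this:
-
- ```python
- from sagemaker.huggingface import HuggingFace  # Hugging Face estimator in the SageMaker Python SDK
-
- # All names and values below are placeholders for your own setup.
- huggingface_estimator = HuggingFace(
-     entry_point="train.py",                 # your fine-tuning script
-     source_dir="./scripts",                 # directory that contains train.py
-     instance_type="ml.p3.2xlarge",          # accelerated EC2 instance
-     instance_count=1,
-     role="arn:aws:iam::123456789012:role/SageMakerRole",  # placeholder IAM role
-     transformers_version="4.26",            # illustrative version pins
-     pytorch_version="1.13",
-     py_version="py39",
-     hyperparameters={"epochs": 1, "model_name_or_path": "distilbert-base-uncased"},
- )
-
- # one call starts the managed training job (the S3 path is a placeholder)
- huggingface_estimator.fit({"train": "s3://my-bucket/train"})
- ```
-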
- To deploy Hugging Face models in Amazon SageMaker, you can use the
- Hugging Face Deep Learning Containers with the new Hugging Face
- Inference Toolkit.
-
-
- With the new Hugging Face Inference DLCs, deploy your trained models for
- inference with just one more line of code, or select any of the 10,000+
- models publicly available on the 🤗 Hub, and deploy them with Amazon
- SageMaker, to easily create production-ready endpoints that scale
- seamlessly, with built-in monitoring and enterprise-level security.
-
-It took me a very long time to find the right career path for me, jumping around between jobs and career industries. Once I found data science, I had absolutely no doubt that it was the right path for me. Making the choice to pursue this transition, however, was not easy. I struggled a lot to understand what the requirements were, what the field looked like, and how I could use my experience in healthcare and my degree in biology to do data science. Now that I have been through my journey (zigzagging my way to a career as a data scientist in biotech), I would love to use that experience to help other data scientists coming from a non-data-science background find their way. Additionally, I not only feel that the day-to-day work suits my strengths, but it has also been such a captivating field of work for me. The largest reason for this is that I have so many academic interests (likely driven by my career jumping), and I love that data science allows one to work in any field that interests them, wherever there is data or a space to create it. That said, the burden of choice can also be difficult for some people to navigate. I would love to draw on my experience and past choices to help guide younger/aspiring data scientists to find the path that's right for them.
-Interview
-How did you hear about SM?
-Just googling. Has done some mentorship in the past through my alma mater
-always thinking about mentorship
-Mentorship experience?
-through my alma mater
-annually
-new program
-folks sign up and they do some behind-the-scenes matching
-matched with someone doing a similar transition to him (healthcare academia to DS)
-some years of tutoring
-plan, organize, etc.
-What are beginners lacking?
-The industry is booming and competition is fierce
-find ways to stand out, and draw on their past experiences/skills (e.g. from academia)
-Requirements can be ridiculous
-build the confidence to apply anyway
-help folks understand the nuance of these things
-Confidence
-And how can you add value as a mentor?
-Wide variety of experience (in tech and outside)
-transitioned from niche academia
-music industry (startup)
-current role (DexCon) — experienced mid-level growth and growing pains
-Has seen a lot of career experiences. Wore a lot of hats
-done a lot of DE, backend development
-lots of perspective
-
- Questions about SM?
-Is there any structure?
-What is the average mentee?
-Where are they in their career journey?
-Can I see the results of the mentee's assessment? like what their technical limitations are?
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/hed_grounding_net.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/hed_grounding_net.py
deleted file mode 100644
index e566bb35c914abd19e51c8661d54a8702c3d55df..0000000000000000000000000000000000000000
--- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/hed_grounding_net.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-from ldm.modules.attention import BasicTransformerBlock
-from ldm.modules.diffusionmodules.util import checkpoint, FourierEmbedder
-import torch.nn.functional as F
-from ..attention import SelfAttention, FeedForward
-from .convnext import convnext_tiny
-
-
-
-
-class PositionNet(nn.Module):
- def __init__(self, resize_input=448, out_dim=768):
- super().__init__()
- self.resize_input = resize_input
- self.down_factor = 32 # determined by the convnext-tiny backbone
- self.out_dim = out_dim
- assert self.resize_input % self.down_factor == 0
-
- self.convnext_tiny_backbone = convnext_tiny(pretrained=True)
-
- self.num_tokens = (self.resize_input // self.down_factor) ** 2
-
- convnext_feature_dim = 768
- self.pos_embedding = nn.Parameter(torch.empty(1, self.num_tokens, convnext_feature_dim).normal_(std=0.02)) # from BERT
-
- self.linears = nn.Sequential(
- nn.Linear( convnext_feature_dim, 512),
- nn.SiLU(),
- nn.Linear( 512, 512),
- nn.SiLU(),
- nn.Linear(512, out_dim),
- )
-
- self.null_feature = torch.nn.Parameter(torch.zeros([convnext_feature_dim]))
-
-
- def forward(self, hed_edge, mask):
- B = hed_edge.shape[0]
-
- # token from edge map
- hed_edge = torch.nn.functional.interpolate(hed_edge, self.resize_input)
- hed_edge_feature = self.convnext_tiny_backbone(hed_edge)
- objs = hed_edge_feature.reshape(B, -1, self.num_tokens)
- objs = objs.permute(0, 2, 1) # N*Num_tokens*dim
-
- # expand null token
- null_objs = self.null_feature.view(1,1,-1)
- null_objs = null_objs.repeat(B,self.num_tokens,1)
-
- # mask replacing
- mask = mask.view(-1,1,1)
- objs = objs*mask + null_objs*(1-mask)
-
- # add pos
- objs = objs + self.pos_embedding
-
- # fuse them
- objs = self.linears(objs)
-
- assert objs.shape == torch.Size([B,self.num_tokens,self.out_dim])
- return objs
-
-
-
diff --git a/spaces/autonomousvision/projected_gan/app.py b/spaces/autonomousvision/projected_gan/app.py
deleted file mode 100644
index 9a118a233d2097395f1cd6f47daaec7fd82038fe..0000000000000000000000000000000000000000
--- a/spaces/autonomousvision/projected_gan/app.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import sys
-import os
-import gradio as gr
-from PIL import Image
-
-os.system("git clone https://github.com/autonomousvision/projected_gan.git")
-
-sys.path.append("projected_gan")
-
-
-"""Generate images using pretrained network pickle."""
-
-import re
-from typing import List, Optional, Tuple, Union
-
-import click
-import dnnlib
-import numpy as np
-import PIL.Image
-import torch
-
-import legacy
-
-from huggingface_hub import hf_hub_url
-
-#----------------------------------------------------------------------------
-
-def parse_range(s: Union[str, List]) -> List[int]:
- '''Parse a comma separated list of numbers or ranges and return a list of ints.
- Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
- '''
- if isinstance(s, list): return s
- ranges = []
- range_re = re.compile(r'^(\d+)-(\d+)$')
- for p in s.split(','):
- m = range_re.match(p)
- if m:
- ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
- else:
- ranges.append(int(p))
- return ranges
-
-#----------------------------------------------------------------------------
-
-def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
- '''Parse a floating point 2-vector of syntax 'a,b'.
- Example:
- '0,1' returns (0,1)
- '''
- if isinstance(s, tuple): return s
- parts = s.split(',')
- if len(parts) == 2:
- return (float(parts[0]), float(parts[1]))
- raise ValueError(f'cannot parse 2-vector {s}')
-
-#----------------------------------------------------------------------------
-
-def make_transform(translate: Tuple[float,float], angle: float):
- m = np.eye(3)
- s = np.sin(angle/360.0*np.pi*2)
- c = np.cos(angle/360.0*np.pi*2)
- m[0][0] = c
- m[0][1] = s
- m[0][2] = translate[0]
- m[1][0] = -s
- m[1][1] = c
- m[1][2] = translate[1]
- return m
-
-#----------------------------------------------------------------------------
-
-device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
-
-config_file_url = hf_hub_url("autonomousvision/Projected_GAN_Pokemon", filename="pokemon.pkl")
-with dnnlib.util.open_url(config_file_url) as f:
- G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
-
-def generate_images(seeds):
- """Generate images using pretrained network pickle.
- Examples:
- \b
- # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left).
- python gen_images.py --outdir=out --trunc=1 --seeds=2 \\
- --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
- \b
- # Generate uncurated images with truncation using the MetFaces-U dataset
- python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\
- --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl
- """
-
-
-
-
- # Labels.
- label = torch.zeros([1, G.c_dim], device=device)
-
-
- # Generate images.
- for seed_idx, seed in enumerate(seeds):
- print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
- z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device).float()
-
- # Construct an inverse rotation/translation matrix and pass to the generator. The
- # generator expects this matrix as an inverse to avoid potentially failing numerical
- # operations in the network.
- if hasattr(G.synthesis, 'input'):
- m = make_transform('0,0', 0)
- m = np.linalg.inv(m)
- G.synthesis.input.transform.copy_(torch.from_numpy(m))
-
- img = G(z, label, truncation_psi=1, noise_mode='const')
- img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
- pilimg = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
- return pilimg
-
-
-def inference(seedin):
- listseed = [int(seedin)]
- output = generate_images(listseed)
- return output
-
-title = "Projected GAN"
-description = "Gradio demo for Projected GANs Converge Faster, Pokemon. To use it, add seed, or click one of the examples to load them. Read more at the links below."
-
-article = "
"
-
-gr.Interface(inference,gr.inputs.Slider(label="Seed",minimum=0, maximum=5000, step=1, default=0),"pil",title=title,description=description,article=article, examples=[
- [0],[1],[10],[20],[30],[42],[50],[60],[77],[102]
- ]).launch(enable_queue=True,cache_examples=True)
\ No newline at end of file
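As a side note on the helper above, here is a self-contained sketch of make_transform and why the app passes its inverse to the generator's input layer; the values are illustrative and numpy is the only dependency.

```python
import numpy as np

def make_transform(translate, angle_degrees):
    """Rotation + translation in homogeneous 2D coordinates, mirroring the app above."""
    m = np.eye(3)
    s = np.sin(np.deg2rad(angle_degrees))
    c = np.cos(np.deg2rad(angle_degrees))
    m[0, 0], m[0, 1], m[0, 2] = c, s, translate[0]
    m[1, 0], m[1, 1], m[1, 2] = -s, c, translate[1]
    return m

# The StyleGAN3-style synthesis input consumes the *inverse* transform; for the
# app's fixed '0,0' translation and 0 rotation, that inverse is just the identity.
m_inv = np.linalg.inv(make_transform((0.0, 0.0), 0.0))
print(np.allclose(m_inv, np.eye(3)))  # True
```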
diff --git a/spaces/awacke1/Engineering-or-Magic-Q-A-IO/app.py b/spaces/awacke1/Engineering-or-Magic-Q-A-IO/app.py
deleted file mode 100644
index d8766d62191a54cde00658dcb2f31c3d2a93a242..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Engineering-or-Magic-Q-A-IO/app.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import streamlit as st
-import random
-import csv
-import os
-
-# Define the player card attributes
-player_cards = {
- "Player 1": {
- "sketch": "👩",
- "character": "Nurse",
- "player_board": "🏥",
- "action_dice": "🎲",
- "health_tokens": "❤️",
- "coin": "💰",
- "battle_tokens": "⚔️",
- "score": 0,
- "trophy": ""
- },
- "Player 2": {
- "sketch": "👨",
- "character": "Doctor",
- "player_board": "🏥",
- "action_dice": "🎲",
- "health_tokens": "❤️",
- "coin": "💰",
- "battle_tokens": "⚔️",
- "score": 0,
- "trophy": ""
- }
-}
-
-# Define the health problems
-health_problems = ["Flu", "COVID-19", "Diabetes", "Heart Disease", "Cancer"]
-
-# Define the game rules
-attack_range = (1, 20)
-defense_range = (1, 10)
-
-# Define the score, health tokens, and coin emoji sets
-score_emojis = ["🔥", "💥", "⚡️", "👊", "💪", "🏋️", "👑", "🎉", "🎊", "🎖️", "🏅", "🥇", "🥈", "🥉"]
-health_token_emojis = ["❤️", "💖", "💘", "💝", "💞", "💓", "💗", "💕", "💟", "❣️", "🩸", "🧡", "💛", "💚", "💙", "💜"]
-coin_emojis = ["💰", "💸", "💳", "🤑", "💎", "💷", "💵", "💲", "🏦", "💹", "📈", "📉", "💹", "🤑", "💰", "💵"]
-
-# Create a function to play a single round of the game
-def play_round(player_card, health_problem):
- st.write(f"{player_card['sketch']} {player_card['character']} attacks {health_problem} with {player_card['action_dice']}...")
- attack_score = random.randint(*attack_range)
- defense_score = random.randint(*defense_range)
- health_ferocity = random.randint(*attack_range)
- health_resistance = random.randint(*defense_range)
- if attack_score > health_resistance:
- player_card["score"] += 1
- score_emoji = random.choice(score_emojis)
- player_card["score_emoji"] = player_card.get("score_emoji", "") + score_emoji
- st.write(f"{player_card['sketch']} {player_card['character']} deals {attack_score - health_resistance} damage to {health_problem}! {score_emoji}")
- player_card["health_tokens"] += 1
- health_token_emoji = random.choice(health_token_emojis)
- player_card["health_token_emoji"] = player_card.get("health_token_emoji", "") + health_token_emoji
- #player_card["coin"] += 10
-
- #player_card["coin"] += 10
- coin_emoji = random.choice(coin_emojis)
- player_card["coin_emojis"] = player_card.get("coin_emojis", "") + coin_emoji
-
- coin_emoji = random.choice(coin_emojis)
- player_card["coin_emoji"] = player_card.get("coin_emoji", "") + coin_emoji
- else:
- st.write(f"{player_card['sketch']} {player_card['character']} misses the attack!")
- if health_ferocity > defense_score:
- player_card["health_tokens"] -= 1
- health_token_emoji = random.choice(health_token_emojis)
- player_card["health_token_emoji"] = player_card.get("health_token_emoji", "") + health_token_emoji
- st.write(f"{health_problem} deals {health_ferocity - defense_score} damage to {player_card['sketch']} {player_card['character']}! {health_token_emoji}")
- else:
- st.write(f"{health_problem} fails to attack!")
-
-
-# Create a function to play multiple rounds of the game
-def play_game(num_games):
- # Initialize the game state
- for player in player_cards:
- player_cards[player]["health_tokens"] = 20
- health_problem_scores = {problem: 0 for problem in health_problems}
- for i in range(num_games):
- # Randomly select a player and health problem
- player = random.choice(list(player_cards.keys()))
- health_problem = random.choice(health_problems)
- # Play the round
- play_round(player_cards[player], health_problem)
- # Update the scores
- health_problem_scores[health_problem] += 1
- # Check for a player win
- for player, attributes in player_cards.items():
- if attributes["health_tokens"] <= 0:
- st.write(f"{attributes['sketch']} {attributes['character']} has lost the game!")
- else:
- if attributes["score"] >= num_games / 2:
- st.write(f"{attributes['sketch']} {attributes['character']} has won the game!")
- # Add a trophy emoji to the player card on the sidebar
- if attributes["trophy"] == "":
- attributes["trophy"] = "🏆"
- if st.session_state.get(player + "_win", False):
- if attributes["trophy"] == "🏆":
- attributes["trophy"] = random.choice(["🥇", "🥈", "🥉"])
- st.sidebar.write(f"{attributes['sketch']} {attributes['character']} {attributes['trophy']}")
- # Save the game state to a CSV file
- with open("game_state.csv", "a", newline="") as f:
- writer = csv.writer(f)
- if os.stat("game_state.csv").st_size == 0:
- writer.writerow(["Player", "Sketch", "Character", "Player Board", "Action Dice", "Health Tokens", "Coin", "Battle Tokens", "Score", "Trophy"])
- for player, attributes in player_cards.items():
- row = [player, attributes["sketch"], attributes["character"], attributes["player_board"], attributes["action_dice"], attributes["health_tokens"], attributes["coin"], attributes["battle_tokens"], attributes["score"], attributes["trophy"]]
- writer.writerow(row)
- for problem in health_problems:
- row = [problem, health_problem_scores[problem]]
- writer.writerow(row)
- # Display the game results
- st.write("# Game Results")
- for player, attributes in player_cards.items():
- st.write(f"{attributes['sketch']} {attributes['character']}: {attributes['score']} successful attacks, {attributes['health_tokens']} health tokens, {attributes['coin']} coins")
- for problem, score in health_problem_scores.items():
- st.write(f"{problem}: {score} defeats")
- # Display a button to download the game state CSV file
- if os.path.exists("game_state.csv"):
- st.write("# Download Game State")
- files = [f for f in os.listdir(".") if os.path.isfile(f) and f.endswith(".csv")]
- if "game_state.csv" in files:
- files.remove("game_state.csv")
- if len(files) > 0:
- file_to_delete = st.selectbox("Select a file to delete", files)
- if st.button("Delete File"):
- os.remove(file_to_delete)
- if st.button("Download Game State"):
- with open("game_state.csv", "r") as f:
- csv_data = f.read()
- st.download_button("game_state.csv", csv_data, file_name="game_state.csv", mime="text/csv")
- st.write("*Note: Downloaded files are saved in your browser's default download location*")
-
-# Define the Streamlit app
-def app():
- st.set_page_config(page_title="Health Care Game", page_icon="🏥", layout="wide")
- st.title("Health Care Game")
- st.sidebar.write("# Game Settings")
- num_games = st.sidebar.slider("Number of games to play", 1, 100, 10)
- st.sidebar.write("# Player Cards")
- for player, attributes in player_cards.items():
- st.sidebar.write(f"## {player}")
- st.sidebar.write(f"Sketch: {attributes['sketch']}")
- st.sidebar.write(f"Character: {attributes['character']}")
- st.sidebar.write(f"Player Board: {attributes['player_board']}")
- st.sidebar.write(f"Action Dice: {attributes['action_dice']}")
- st.sidebar.write(f"Health Tokens: {attributes['health_tokens']}")
- st.sidebar.write(f"Coin: {attributes['coin']}")
- st.sidebar.write(f"Battle Tokens: {attributes['battle_tokens']}")
- st.sidebar.write(f"Score: {attributes['score']}")
- # Display a button to start the game
- if st.sidebar.button("Start Game"):
- # Play the game
- play_game(num_games)
-
-
-def showPressRelease():
- st.markdown("""
-
-title: 🤖🧠AI-RPG-Self-Play-RLML-Health-Battler-Game🏆🎁🎮
-emoji: 🏋️♀️💪🏥
-# AI RPG Self-Play RL ML Health Battler Game Press Release
-## Introduction
-🎉🎮🤖 Attention all gamers and health enthusiasts! The ultimate weapon to battle health problems has arrived - the AI RPG Self-Play RL ML Health Battler Game! 🤖🎮🎉
-## Gamified Health Battles
-- 🏋️♀️💪🏥 Sick of boring workouts and mundane health routines? Get ready to take on health problems like never before with our gamified approach. 🎉🕹️
-## Advanced AI Technology
-- 🤖🧠🔥 The AI technology behind our game is so advanced, you'll think you're battling a real-life disease! Let the personalized gameplay experience adapt to your style and keep you engaged for hours on end. 💻👨🔬
-## Healthy Competition
-- 🏆🎁🎮 Ready for some healthy competition? Compete against friends and other players around the world, earning rewards and achievements with our self-play reinforcement learning algorithms. 🌎🏆
-## Availability
-- 👨💻📲 The AI RPG Self-Play RL ML Health Battler Game is now available for public open source use on all platforms, including iOS and Android devices, via the world's largest ML platform Huggingface! Download now and start fighting for your health. 📲💥
-## Conclusion
-- Don't let health problems get the best of you - join the fight with our AI RPG Self-Play RL ML Health Battler Game! 🎮💪🩺
-
-Links to More About Health Games!
-1. Health Game Terminology: https://en.wikipedia.org/wiki/Health_(game_terminology)
-2. Games for Health: https://en.wikipedia.org/wiki/Games_for_Health
-3. Wii Fit: https://en.wikipedia.org/wiki/Wii_Fit#Development
-4. Cross Fit Games: https://en.wikipedia.org/wiki/CrossFit_Games
-5. Digital Media and Mental Health: https://en.wikipedia.org/wiki/Digital_media_use_and_mental_health
-6. Use of Technology for Mental Health: https://en.wikipedia.org/wiki/Use_of_technology_in_treatment_of_mental_disorders
-
- """)
-
-# Define the Streamlit app
-def app():
- st.set_page_config(page_title="Health Care Game", page_icon="🏥", layout="wide")
- st.title("Health Care Game")
- st.sidebar.write("# Game Settings")
- num_games = st.sidebar.slider("Number of games to play", 1, 100, 10)
- st.sidebar.write("# Player Cards")
- for player, attributes in player_cards.items():
- st.sidebar.write(f"## {player}")
- st.sidebar.write(f"Sketch: {attributes['sketch']}")
- st.sidebar.write(f"Character: {attributes['character']}")
- st.sidebar.write(f"Player Board: {attributes['player_board']}")
- st.sidebar.write(f"Action Dice: {attributes['action_dice']}")
- st.sidebar.write(f"Health Tokens: {attributes['health_tokens']}")
- st.sidebar.write(f"Coin: {attributes['coin']}")
- st.sidebar.write(f"Battle Tokens: {attributes['battle_tokens']}")
- st.sidebar.write("# Health Problems")
- for problem in health_problems:
- st.sidebar.write(f"- {problem}")
- # Start the game when the user clicks the "Play Game" button
- if st.button("Play Game"):
- play_game(num_games)
- showPressRelease()
-
-
-
-
-if __name__ == "__main__":
- app()
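The combat rule buried in play_round() above is easier to see without the Streamlit and emoji bookkeeping. Below is a stripped-down sketch using the same dice bounds as attack_range and defense_range; the function name and return convention are illustrative, not part of the app.

```python
import random

ATTACK_RANGE = (1, 20)   # same bounds as attack_range above
DEFENSE_RANGE = (1, 10)  # same bounds as defense_range above

def resolve_round():
    """Return (score_delta, health_token_delta) for one round, mirroring play_round()."""
    attack = random.randint(*ATTACK_RANGE)
    defense = random.randint(*DEFENSE_RANGE)
    problem_ferocity = random.randint(*ATTACK_RANGE)
    problem_resistance = random.randint(*DEFENSE_RANGE)

    if attack > problem_resistance:
        return 1, 1            # hit: +1 score and +1 health token, as in the app
    if problem_ferocity > defense:
        return 0, -1           # miss, and the health problem strikes back
    return 0, 0                # both sides miss

print(resolve_round())
```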
diff --git a/spaces/awacke1/GPU-RTX-Nvidia-Nsight-Starter-AI-Kit/app.py b/spaces/awacke1/GPU-RTX-Nvidia-Nsight-Starter-AI-Kit/app.py
deleted file mode 100644
index a473811f2461922982025bdab351918d13097320..0000000000000000000000000000000000000000
--- a/spaces/awacke1/GPU-RTX-Nvidia-Nsight-Starter-AI-Kit/app.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import streamlit as st
-
-st.write("# URLs for GPU RTX Nvidia 3070 Nsight pages")
-
-urls = {
- "GPUs Ampere architecture" : "https://en.wikipedia.org/wiki/GeForce_30_series",
- "Ray Tracing Interactive": "https://en.wikipedia.org/wiki/Ray_tracing_(graphics)#Interactive_ray_tracing",
- "GeForce RTX 30 Series": "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/",
-}
-
-for name, url in urls.items():
- st.write(f"- [{name}]({url})")
-
-
-import streamlit as st
-import tensorflow as tf
-from tensorflow.keras.preprocessing import image
-import numpy as np
-import pycuda.autoinit
-import pycuda.driver as cuda
-import tensorrt as trt
-import nvtabular as nvt
-import nvidia.dali as dali
-import nvidia.dali.ops as ops
-import nvidia.dali.types as types
-import deepstream as ds
-
-# Set up the Streamlit app
-st.set_page_config(page_title="Deep Learning Libraries Demo")
-
-# NVIDIA cuDNN
-st.header("NVIDIA cuDNN")
-st.write("cuDNN is a GPU-accelerated library of primitives for deep neural networks.")
-
-# NVIDIA TensorRT
-st.header("NVIDIA TensorRT")
-st.write("TensorRT is a high-performance deep learning inference optimizer and runtime for production deployment.")
-
-# NVIDIA Riva
-st.header("NVIDIA Riva")
-st.write("Riva is a platform for developing engaging and contextual AI-powered conversation apps.")
-
-# NVIDIA DeepStream SDK
-st.header("NVIDIA DeepStream SDK")
-st.write("DeepStream is a real-time streaming analytics toolkit for AI-based video understanding and multi-sensor processing.")
-
-# NVIDIA DALI
-st.header("NVIDIA DALI")
-st.write("DALI is a portable, open-source library for decoding and augmenting images and videos to accelerate deep learning applications.")
-
-# Load an image and run it through a pre-trained model
-st.header("Example: Image Classification with TensorFlow")
-model = tf.keras.applications.MobileNetV2()
-img_path = "example.jpg"
-img = image.load_img(img_path, target_size=(224, 224))
-x = image.img_to_array(img)
-x = np.expand_dims(x, axis=0)
-x = tf.keras.applications.mobilenet_v2.preprocess_input(x)
-preds = model.predict(x)
-st.write(f"Predicted class: {tf.keras.applications.mobilenet_v2.decode_predictions(preds, top=1)[0][0][1]}")
-
-# Clean up
-del model, img, x, preds
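Several of the NVIDIA imports at the top of this app (pycuda, tensorrt, nvtabular, nvidia.dali, deepstream) are never called and are frequently unavailable on CPU-only hardware, so the whole page fails at import time. A possible guard, sketched here as an assumption rather than as part of the original app:

```python
import importlib
import streamlit as st

# Optional NVIDIA stack: report availability instead of failing at import time.
OPTIONAL_MODULES = ["pycuda", "tensorrt", "nvtabular", "nvidia.dali", "deepstream"]

for module_name in OPTIONAL_MODULES:
    try:
        importlib.import_module(module_name)
        st.write(f"✅ {module_name} is available")
    except ImportError:
        st.write(f"⚠️ {module_name} is not installed; skipping the related section")
```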
diff --git a/spaces/awacke1/QuoteBotForQuotesMeditation/app.py b/spaces/awacke1/QuoteBotForQuotesMeditation/app.py
deleted file mode 100644
index 76b5cd19bdd3ed892d4e1d6ac36189835940f42c..0000000000000000000000000000000000000000
--- a/spaces/awacke1/QuoteBotForQuotesMeditation/app.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import streamlit as st
-import time
-
-# Your list of quotes as a dictionary
-quotes = [
- {"Number": 1, "Quote Topic": "Stages of Life 🌱", "Quote": "Every age unfolds a new lesson. Life's chapters evolve, each teaching us anew."},
- {"Number": 2, "Quote Topic": "Stages of Life 🌱", "Quote": "From infancy to twilight, our journey is painted in growth. Every stage shines with its own wisdom."},
- {"Number": 3, "Quote Topic": "Identity 🎭", "Quote": "We piece together our identity with experiences. In the vast cosmos, our ever-changing signature is our identity."},
- {"Number": 4, "Quote Topic": "Identity 🎭", "Quote": "We aren't born with a defined self. Our identity is an art, crafted through introspection and encounters."},
- {"Number": 5, "Quote Topic": "Enlightenment 💡", "Quote": "Every step towards enlightenment is a dive within. It's an ongoing journey of self and universe."},
- {"Number": 6, "Quote Topic": "Enlightenment 💡", "Quote": "True insight isn't just about knowledge. It's the harmony of mind, heart, and soul."},
- {"Number": 7, "Quote Topic": "Adaptive Resonance Theory 🧠", "Quote": "Our cognition is like a river, ever-flowing and adapting. Every experience shapes its course, forming new resonances."},
- {"Number": 8, "Quote Topic": "Adaptive Resonance Theory 🧠", "Quote": "The brain's capacity to learn is profound. It finds its rhythm in adaptive resonances."},
- {"Number": 9, "Quote Topic": "Panpsychism 🌌", "Quote": "The universe might hum with consciousness in every atom. Every speck could be part of this grand song."},
- {"Number": 10, "Quote Topic": "Panpsychism 🌌", "Quote": "What if consciousness isn't exclusive to us? The universe's shared melody might be sung by all."},
- {"Number": 11, "Quote Topic": "How to Think 🤔", "Quote": "Thinking isn't about arriving, it's about exploring. Dive deep, question, and embrace the universe within."},
- {"Number": 12, "Quote Topic": "How to Think 🤔", "Quote": "To think profoundly is to touch the cosmos. It's about connecting with the vastness and intricacies it holds."},
- {"Number": 13, "Quote Topic": "Plants Communication 🌿", "Quote": "In every leaf and root, plants tell tales of life. Their silent growth is a language we're just beginning to decipher."},
- {"Number": 14, "Quote Topic": "Plants Communication 🌿", "Quote": "Nature's every rustle is a whispered secret. It's a testament to the intricate web of communication."},
- {"Number": 15, "Quote Topic": "Fame 🌟", "Quote": "True impact outlives fleeting fame. What endures is the legacy we craft, not the applause we receive."},
- {"Number": 16, "Quote Topic": "Fame 🌟", "Quote": "Fame might shine bright, but it's transient. Genuine influence is the silent footprint we leave in time."},
- {"Number": 17, "Quote Topic": "Happiness 😊", "Quote": "True happiness is an inner light that shines brightest in shared moments."},
- {"Number": 18, "Quote Topic": "Happiness 😊", "Quote": "Life's riches aren't material but the joyous moments and heartfelt memories we gather."},
- {"Number": 19, "Quote Topic": "Exercise 🏃", "Quote": "Exercise is the symphony of the body, resonating with health and vitality."},
- {"Number": 20, "Quote Topic": "Exercise 🏃", "Quote": "In movement, we find balance, strength, and a celebration of life's potential."},
- {"Number": 21, "Quote Topic": "Good Habits 🔄", "Quote": "Good habits are the bricks that build the mansion of success."},
- {"Number": 22, "Quote Topic": "Good Habits 🔄", "Quote": "Consistency in habits crafts our destiny, one action at a time."},
- {"Number": 23, "Quote Topic": "Discipline 🕰️", "Quote": "Discipline is the bridge between dreams and their realization."},
- {"Number": 24, "Quote Topic": "Discipline 🕰️", "Quote": "Through discipline, chaos transforms into order, and aspirations into achievements."},
- {"Number": 25, "Quote Topic": "Stamina 🚀", "Quote": "Stamina isn't just enduring but thriving amidst challenges."},
- {"Number": 26, "Quote Topic": "Stamina 🚀", "Quote": "It's stamina that turns obstacles into stepping stones, fueling our journey forward."},
- {"Number": 27, "Quote Topic": "Artificial General Intelligence 🤯", "Quote": "AGI is not just about mimicking humans but understanding the core of intelligence itself."},
- {"Number": 28, "Quote Topic": "Artificial General Intelligence 🤯", "Quote": "The pursuit of AGI is a testament to humanity's quest to transcend its own boundaries."},
- {"Number": 29, "Quote Topic": "AI Pipelines 🛠️", "Quote": "AI pipelines are the arteries of intelligent systems, directing the flow of knowledge."},
- {"Number": 30, "Quote Topic": "AI Pipelines 🛠️", "Quote": "In well-crafted pipelines, AI finds its rhythm, efficiency, and transformative power."},
- {"Number": 31, "Quote Topic": "Genius 🌟", "Quote": "Genius isn't just raw talent; it's the alchemy of persistence, passion, and perspective."},
- {"Number": 32, "Quote Topic": "Genius 🌟", "Quote": "Every spark of genius has been nurtured by curiosity and an insatiable thirst for knowledge."},
- {"Number": 33, "Quote Topic": "Our Brains 🧠", "Quote": "Our brain is the universe's masterpiece, a nexus of thoughts, dreams, and memories."},
- {"Number": 34, "Quote Topic": "Our Brains 🧠", "Quote": "In every neuron, our brain holds the potential of countless possibilities and imaginations."},
- {"Number": 35, "Quote Topic": "Our Brains 🧠", "Quote": "The intricacies of our brain reflect the cosmos: vast, complex, and beautifully mysterious."},
- {"Number": 36, "Quote Topic": "Our Brains 🧠", "Quote": "Understanding our brain is the key to unlocking the enigmas of consciousness, behavior, and potential."},
- {"Number": 37, "Quote Topic": "Mindfulness 🌼", "Quote": "Mindfulness is the anchor that grounds us in the present, amidst life's tumultuous seas."},
- {"Number": 38, "Quote Topic": "Mindfulness 🌼", "Quote": "In the act of being mindful, we embrace life's symphony, note by note, moment by moment."},
- {"Number": 39, "Quote Topic": "Resilience 💪", "Quote": "Resilience is the art of bouncing back, turning wounds into wisdom and setbacks into comebacks."},
- {"Number": 40, "Quote Topic": "Resilience 💪", "Quote": "Life will test us, but with resilience, we rise, stronger and more enlightened."},
- {"Number": 41, "Quote Topic": "Innovation 💡", "Quote": "Innovation is the heartbeat of progress, pushing boundaries and redefining possibilities."},
- {"Number": 42, "Quote Topic": "Innovation 💡", "Quote": "Every breakthrough, every invention, is a testament to humanity's relentless spirit of innovation."},
- {"Number": 43, "Quote Topic": "Empathy ❤️", "Quote": "Empathy is the bridge between souls, transcending words and touching hearts."},
- {"Number": 44, "Quote Topic": "Empathy ❤️", "Quote": "Through empathy, we see the world through another's eyes, fostering understanding and unity."},
- {"Number": 45, "Quote Topic": "Happiness 😊", "Quote": "True happiness is an inner light that shines brightest in shared moments."},
- {"Number": 46, "Quote Topic": "Happiness 😊", "Quote": "Life's riches aren't material but the joyous moments and heartfelt memories we gather."},
- {"Number": 47, "Quote Topic": "Exercise 🏃", "Quote": "Exercise is the symphony of the body, resonating with health and vitality."},
- {"Number": 48, "Quote Topic": "Exercise 🏃", "Quote": "In movement, we find balance, strength, and a celebration of life's potential."},
- {"Number": 49, "Quote Topic": "Good Habits 🔄", "Quote": "Good habits are the bricks that build the mansion of success."},
- {"Number": 50, "Quote Topic": "Good Habits 🔄", "Quote": "Consistency in habits crafts our destiny, one action at a time."},
- {"Number": 51, "Quote Topic": "Discipline 🕰️", "Quote": "Discipline is the bridge between dreams and their realization."},
- {"Number": 52, "Quote Topic": "Discipline 🕰️", "Quote": "Through discipline, chaos transforms into order, and aspirations into achievements."},
- {"Number": 53, "Quote Topic": "Stamina 🚀", "Quote": "Stamina isn't just enduring but thriving amidst challenges."},
- {"Number": 54, "Quote Topic": "Stamina 🚀", "Quote": "It's stamina that turns obstacles into stepping stones, fueling our journey forward."},
- {"Number": 55, "Quote Topic": "Artificial General Intelligence 🤯", "Quote": "AGI is not just about mimicking humans but understanding the core of intelligence itself."},
- {"Number": 56, "Quote Topic": "Artificial General Intelligence 🤯", "Quote": "The pursuit of AGI is a testament to humanity's quest to transcend its own boundaries."},
- {"Number": 57, "Quote Topic": "AI Pipelines 🛠️", "Quote": "AI pipelines are the arteries of intelligent systems, directing the flow of knowledge."},
- {"Number": 58, "Quote Topic": "AI Pipelines 🛠️", "Quote": "In well-crafted pipelines, AI finds its rhythm, efficiency, and transformative power."},
- {"Number": 59, "Quote Topic": "Genius 🌟", "Quote": "Genius isn't just raw talent; it's the alchemy of persistence, passion, and perspective."},
- {"Number": 60, "Quote Topic": "Genius 🌟", "Quote": "Every spark of genius has been nurtured by curiosity and an insatiable thirst for knowledge."},
- {"Number": 61, "Quote Topic": "Our Brains 🧠", "Quote": "Our brain is the universe's masterpiece, a nexus of thoughts, dreams, and memories."},
- {"Number": 62, "Quote Topic": "Our Brains 🧠", "Quote": "In every neuron, our brain holds the potential of countless possibilities and imaginations."},
- {"Number": 63, "Quote Topic": "Our Brains 🧠", "Quote": "The intricacies of our brain reflect the cosmos: vast, complex, and beautifully mysterious."},
- {"Number": 64, "Quote Topic": "Our Brains 🧠", "Quote": "Understanding our brain is the key to unlocking the enigmas of consciousness, behavior, and potential."},
- {"Number": 65, "Quote Topic": "Mindfulness 🌼", "Quote": "Mindfulness is the anchor that grounds us in the present, amidst life's tumultuous seas."},
- {"Number": 66, "Quote Topic": "Mindfulness 🌼", "Quote": "In the act of being mindful, we embrace life's symphony, note by note, moment by moment."},
- {"Number": 67, "Quote Topic": "Resilience 💪", "Quote": "Resilience is the art of bouncing back, turning wounds into wisdom and setbacks into comebacks."},
- {"Number": 68, "Quote Topic": "Resilience 💪", "Quote": "Life will test us, but with resilience, we rise, stronger and more enlightened."},
- {"Number": 69, "Quote Topic": "Innovation 💡", "Quote": "Innovation is the heartbeat of progress, pushing boundaries and redefining possibilities."},
- {"Number": 70, "Quote Topic": "Innovation 💡", "Quote": "Every breakthrough, every invention, is a testament to humanity's relentless spirit of innovation."},
- {"Number": 71, "Quote Topic": "Empathy ❤️", "Quote": "Empathy is the bridge between souls, transcending words and touching hearts."},
- {"Number": 72, "Quote Topic": "Empathy ❤️", "Quote": "Through empathy, we see the world through another's eyes, fostering understanding and unity."},
- {"Number": 73, "Quote Topic": "Inspiration 🌈", "Quote": "Inspiration is the spark that ignites the soul, propelling us to chase our dreams."},
- {"Number": 74, "Quote Topic": "Inspiration 🌈", "Quote": "Every moment of inspiration is a call to action, pushing us beyond our boundaries."},
- {"Number": 75, "Quote Topic": "Learning 📚", "Quote": "Learning is the gateway to growth, opening doors to endless possibilities."},
- {"Number": 76, "Quote Topic": "Learning 📚", "Quote": "Every lesson learned is a step towards enlightenment, broadening our horizons."},
- {"Number": 77, "Quote Topic": "Collaboration 🤝", "Quote": "In collaboration, we find strength. Together, we achieve more than we could alone."},
- {"Number": 78, "Quote Topic": "Collaboration 🤝", "Quote": "Unity in purpose paves the way for monumental achievements, showcasing the power of collective effort."},
- {"Number": 79, "Quote Topic": "Dreams 🌌", "Quote": "Dreams are the architects of our future. They sketch the blueprint of our aspirations."},
- {"Number": 80, "Quote Topic": "Dreams 🌌", "Quote": "In dreams, we find hope, and with hope, we transform the fabric of reality."},
- {"Number": 81, "Quote Topic": "Courage 🦁", "Quote": "Courage is the fire that lights our path, even in the face of overwhelming odds."},
- {"Number": 82, "Quote Topic": "Courage 🦁", "Quote": "With courage in our hearts, we defy limitations and embrace the vastness of potential."},
- {"Number": 83, "Quote Topic": "Change 🌀", "Quote": "Change is life's only constant. It shapes, molds, and propels us forward."},
- {"Number": 84, "Quote Topic": "Change 🌀", "Quote": "Embracing change is embracing growth, an acknowledgment of life's ever-evolving nature."},
- {"Number": 85, "Quote Topic": "Adventure 🌍", "Quote": "Life is an adventure, filled with twists, turns, and unexpected discoveries."},
- {"Number": 86, "Quote Topic": "Adventure 🌍", "Quote": "Every adventure, big or small, adds a chapter to our story, enriching our experience."},
- {"Number": 87, "Quote Topic": "Creativity 🎨", "Quote": "Creativity is the dance of the soul, expressing itself in countless forms."},
- {"Number": 88, "Quote Topic": "Creativity 🎨", "Quote": "Through creativity, we paint the world in vibrant colors, showcasing our unique perspectives."},
- {"Number": 89, "Quote Topic": "Passion ❤️", "Quote": "Passion is the fuel for our journey, driving us to chase after our dreams."},
- {"Number": 90, "Quote Topic": "Passion ❤️", "Quote": "With passion, every task becomes a labor of love, and every challenge, a thrilling endeavor."},
- {"Number": 91, "Quote Topic": "Hope 🌟", "Quote": "Hope is the beacon that guides us through stormy nights, reminding us of the dawn that awaits."},
- {"Number": 92, "Quote Topic": "Hope 🌟", "Quote": "In hope, we find solace, and in its embrace, we find the strength to persevere."},
- {"Number": 93, "Quote Topic": "Intuition 🧭", "Quote": "Intuition is the silent whisper of the soul, guiding us with its subtle wisdom."},
- {"Number": 94, "Quote Topic": "Intuition 🧭", "Quote": "By tuning into our intuition, we align with our inner compass, navigating life with clarity."},
- {"Number": 95, "Quote Topic": "Joy 😃", "Quote": "Joy is the melody of the heart, a song of gratitude and love."},
- {"Number": 96, "Quote Topic": "Joy 😃", "Quote": "In moments of joy, we connect with the essence of life, celebrating its beauty."},
- {"Number": 97, "Quote Topic": "Wisdom 🦉", "Quote": "Wisdom is the culmination of experience, a treasure trove of insights and reflections."},
- {"Number": 98, "Quote Topic": "Wisdom 🦉", "Quote": "With wisdom, we navigate life's complexities, drawing from the lessons of the past."},
- {"Number": 99, "Quote Topic": "Love ❤️", "Quote": "Love is the universal language, transcending boundaries and touching souls."},
- {"Number": 100, "Quote Topic": "Love ❤️", "Quote": "Through love, we find connection, unity, and the essence of existence."}
-]
-
-
-
-def display_quote(index):
- '''Function to display the quote using st.markdown()'''
- number = quotes[index]['Number']
- topic = quotes[index]['Quote Topic']
- quote = quotes[index]['Quote']
- st.markdown(f"### {number}. {topic}")
- st.markdown(quote)
-
-# Streamlit app
-def main():
- st.title("Quote Timer")
-
- # Select a random quote to start
- import random
- index = random.randint(0, len(quotes)-1)
-
- display_quote(index)
-
- # Timer logic: st.experimental_rerun() must come after the countdown,
- # otherwise the first iteration restarts the script immediately
- for i in range(15, 0, -1):
- st.write(f"Time left: {i} seconds")
- time.sleep(1)
-
- # Display a new quote when the timer finishes, then rerun for the next cycle
- index = (index + 1) % len(quotes)
- display_quote(index)
- st.experimental_rerun()
-
-if __name__ == "__main__":
- main()
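The countdown above re-randomizes the quote on every rerun. Here is a sketch of how st.session_state could keep the rotation deterministic across reruns; the 15-second interval, the placeholder widget, and the shortened quote list are assumptions for illustration.

```python
import time
import streamlit as st

QUOTES = ["Quote A", "Quote B", "Quote C"]  # stand-in for the full quote list above

def main():
    st.title("Quote Timer")

    # Persist the rotation index across reruns instead of picking a random quote each time.
    if "quote_index" not in st.session_state:
        st.session_state.quote_index = 0

    st.markdown(QUOTES[st.session_state.quote_index])

    # Count down inside a single placeholder, then advance and rerun.
    placeholder = st.empty()
    for i in range(15, 0, -1):
        placeholder.write(f"Time left: {i} seconds")
        time.sleep(1)

    st.session_state.quote_index = (st.session_state.quote_index + 1) % len(QUOTES)
    st.experimental_rerun()

if __name__ == "__main__":
    main()
```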
diff --git a/spaces/awacke1/Speech2Text-FastSpeech2/README.md b/spaces/awacke1/Speech2Text-FastSpeech2/README.md
deleted file mode 100644
index bcd3bffd33bfae1e4278a8231a0744bded7fb0ec..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Speech2Text-FastSpeech2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Speech2Text FastSpeech2
-emoji: 👁
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.38.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awaiss/vits-models/mel_processing.py b/spaces/awaiss/vits-models/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/awaiss/vits-models/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
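A usage sketch for mel_spectrogram_torch() above. The STFT and mel parameters are typical VITS-style values chosen for illustration, and the random waveform stands in for real audio; note also that the module passes positional arguments to librosa's mel filter, which assumes an older librosa (before 0.10 made them keyword-only).

```python
import torch

# Assumes mel_processing.py above is on the import path.
from mel_processing import mel_spectrogram_torch

sampling_rate = 22050
wav = torch.rand(1, sampling_rate) * 2 - 1   # one second of fake audio in [-1, 1]

mel = mel_spectrogram_torch(
    wav,
    n_fft=1024,
    num_mels=80,
    sampling_rate=sampling_rate,
    hop_size=256,
    win_size=1024,
    fmin=0.0,
    fmax=None,
    center=False,
)
print(mel.shape)  # (1, 80, number_of_frames)
```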
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/SphereGeometry.js b/spaces/banana-projects/web3d/node_modules/three/src/geometries/SphereGeometry.js
deleted file mode 100644
index f59ffc2eaccc6363531cb630381692f043af1bac..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/SphereGeometry.js
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- * @author benaadams / https://twitter.com/ben_a_adams
- * @author Mugen87 / https://github.com/Mugen87
- */
-
-import { Geometry } from '../core/Geometry.js';
-import { BufferGeometry } from '../core/BufferGeometry.js';
-import { Float32BufferAttribute } from '../core/BufferAttribute.js';
-import { Vector3 } from '../math/Vector3.js';
-
-// SphereGeometry
-
-function SphereGeometry( radius, widthSegments, heightSegments, phiStart, phiLength, thetaStart, thetaLength ) {
-
- Geometry.call( this );
-
- this.type = 'SphereGeometry';
-
- this.parameters = {
- radius: radius,
- widthSegments: widthSegments,
- heightSegments: heightSegments,
- phiStart: phiStart,
- phiLength: phiLength,
- thetaStart: thetaStart,
- thetaLength: thetaLength
- };
-
- this.fromBufferGeometry( new SphereBufferGeometry( radius, widthSegments, heightSegments, phiStart, phiLength, thetaStart, thetaLength ) );
- this.mergeVertices();
-
-}
-
-SphereGeometry.prototype = Object.create( Geometry.prototype );
-SphereGeometry.prototype.constructor = SphereGeometry;
-
-// SphereBufferGeometry
-
-function SphereBufferGeometry( radius, widthSegments, heightSegments, phiStart, phiLength, thetaStart, thetaLength ) {
-
- BufferGeometry.call( this );
-
- this.type = 'SphereBufferGeometry';
-
- this.parameters = {
- radius: radius,
- widthSegments: widthSegments,
- heightSegments: heightSegments,
- phiStart: phiStart,
- phiLength: phiLength,
- thetaStart: thetaStart,
- thetaLength: thetaLength
- };
-
- radius = radius || 1;
-
- widthSegments = Math.max( 3, Math.floor( widthSegments ) || 8 );
- heightSegments = Math.max( 2, Math.floor( heightSegments ) || 6 );
-
- phiStart = phiStart !== undefined ? phiStart : 0;
- phiLength = phiLength !== undefined ? phiLength : Math.PI * 2;
-
- thetaStart = thetaStart !== undefined ? thetaStart : 0;
- thetaLength = thetaLength !== undefined ? thetaLength : Math.PI;
-
- var thetaEnd = thetaStart + thetaLength;
-
- var ix, iy;
-
- var index = 0;
- var grid = [];
-
- var vertex = new Vector3();
- var normal = new Vector3();
-
- // buffers
-
- var indices = [];
- var vertices = [];
- var normals = [];
- var uvs = [];
-
- // generate vertices, normals and uvs
-
- for ( iy = 0; iy <= heightSegments; iy ++ ) {
-
- var verticesRow = [];
-
- var v = iy / heightSegments;
-
- // special case for the poles
-
- var uOffset = ( iy == 0 ) ? 0.5 / widthSegments : ( ( iy == heightSegments ) ? - 0.5 / widthSegments : 0 );
-
- for ( ix = 0; ix <= widthSegments; ix ++ ) {
-
- var u = ix / widthSegments;
-
- // vertex
-
- vertex.x = - radius * Math.cos( phiStart + u * phiLength ) * Math.sin( thetaStart + v * thetaLength );
- vertex.y = radius * Math.cos( thetaStart + v * thetaLength );
- vertex.z = radius * Math.sin( phiStart + u * phiLength ) * Math.sin( thetaStart + v * thetaLength );
-
- vertices.push( vertex.x, vertex.y, vertex.z );
-
- // normal
-
- normal.copy( vertex ).normalize();
- normals.push( normal.x, normal.y, normal.z );
-
- // uv
-
- uvs.push( u + uOffset, 1 - v );
-
- verticesRow.push( index ++ );
-
- }
-
- grid.push( verticesRow );
-
- }
-
- // indices
-
- for ( iy = 0; iy < heightSegments; iy ++ ) {
-
- for ( ix = 0; ix < widthSegments; ix ++ ) {
-
- var a = grid[ iy ][ ix + 1 ];
- var b = grid[ iy ][ ix ];
- var c = grid[ iy + 1 ][ ix ];
- var d = grid[ iy + 1 ][ ix + 1 ];
-
- if ( iy !== 0 || thetaStart > 0 ) indices.push( a, b, d );
- if ( iy !== heightSegments - 1 || thetaEnd < Math.PI ) indices.push( b, c, d );
-
- }
-
- }
-
- // build geometry
-
- this.setIndex( indices );
- this.addAttribute( 'position', new Float32BufferAttribute( vertices, 3 ) );
- this.addAttribute( 'normal', new Float32BufferAttribute( normals, 3 ) );
- this.addAttribute( 'uv', new Float32BufferAttribute( uvs, 2 ) );
-
-}
-
-SphereBufferGeometry.prototype = Object.create( BufferGeometry.prototype );
-SphereBufferGeometry.prototype.constructor = SphereBufferGeometry;
-
-
-export { SphereGeometry, SphereBufferGeometry };
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lightmap_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lightmap_fragment.glsl.js
deleted file mode 100644
index 7cf7cbd00f24bd176763821331a984e96387ad26..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lightmap_fragment.glsl.js
+++ /dev/null
@@ -1,7 +0,0 @@
-export default /* glsl */`
-#ifdef USE_LIGHTMAP
-
- reflectedLight.indirectDiffuse += PI * texture2D( lightMap, vUv2 ).xyz * lightMapIntensity; // factor of PI should not be present; included here to prevent breakage
-
-#endif
-`;
diff --git a/spaces/barnga/DL/app.py b/spaces/barnga/DL/app.py
deleted file mode 100644
index b8e324b9c29780cc194b84219d4782bd519931d7..0000000000000000000000000000000000000000
--- a/spaces/barnga/DL/app.py
+++ /dev/null
@@ -1,172 +0,0 @@
-### ----------------------------- ###
-### libraries ###
-### ----------------------------- ###
-
-import gradio as gr
-import pandas as pd
-import numpy as np
-from sklearn.model_selection import train_test_split
-from sklearn.linear_model import LogisticRegression
-from sklearn import metrics
-
-
-### ------------------------------ ###
-### data transformation ###
-### ------------------------------ ###
-
-# load dataset
-uncleaned_data = pd.read_csv('data.csv')
-
-# remove timestamp from dataset (always first column)
-uncleaned_data = uncleaned_data.iloc[: , 1:]
-data = pd.DataFrame()
-
-# keep track of which columns are categorical and what
-# those columns' value mappings are
-# structure: {colname1: {...}, colname2: {...} }
-cat_value_dicts = {}
-final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1]
-
-# for each column...
-for (colname, colval) in uncleaned_data.iteritems():
-
- # check if col is already a number; if so, add col directly
- # to new dataframe and skip to next column
- if isinstance(colval.values[0], (np.integer, float)):
- data[colname] = uncleaned_data[colname].copy()
- continue
-
- # structure: {0: "lilac", 1: "blue", ...}
- new_dict = {}
- val = 0 # first index per column
- transformed_col_vals = [] # new numeric datapoints
-
- # if not, for each item in that column...
- for (row, item) in enumerate(colval.values):
-
- # if item is not in this col's dict...
- if item not in new_dict:
- new_dict[item] = val
- val += 1
-
- # then add numerical value to transformed dataframe
- transformed_col_vals.append(new_dict[item])
-
- # reverse dictionary only for final col (0, 1) => (vals)
- if colname == final_colname:
- new_dict = {value : key for (key, value) in new_dict.items()}
-
- cat_value_dicts[colname] = new_dict
- data[colname] = transformed_col_vals
-
-
-### -------------------------------- ###
-### model training ###
-### -------------------------------- ###
-
-# select features and predicton; automatically selects last column as prediction
-cols = len(data.columns)
-num_features = cols - 1
-x = data.iloc[: , :num_features]
-y = data.iloc[: , num_features:]
-
-# split data into training and testing sets
-x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
-
-# instantiate the model (using default parameters)
-model = LogisticRegression()
-model.fit(x_train, y_train.values.ravel())
-y_pred = model.predict(x_test)
-
-
-### -------------------------------- ###
-### article generation ###
-### -------------------------------- ###
-# borrow file reading function from reader.py
-
-def get_feat():
- feats = [abs(x) for x in model.coef_[0]]
- max_val = max(feats)
- idx = feats.index(max_val)
- return data.columns[idx]
-
-acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%"
-most_imp_feat = get_feat()
-# info = get_article(acc, most_imp_feat)
-
-
-
-### ------------------------------- ###
-### interface creation ###
-### ------------------------------- ###
-
-
-# predictor for generic number of features
-def general_predictor(*args):
- features = []
-
- # transform categorical input
- for colname, arg in zip(data.columns, args):
- if (colname in cat_value_dicts):
- features.append(cat_value_dicts[colname][arg])
- else:
- features.append(arg)
-
- # predict single datapoint
- new_input = [features]
- result = model.predict(new_input)
- return cat_value_dicts[final_colname][result[0]]
-
-# add data labels to replace those lost via star-args
-
-
-block = gr.Blocks()
-
-with open('info.md') as f:
- with block:
- gr.Markdown(f.readline())
- gr.Markdown('Take the quiz to get a personalized recommendation using AI.')
-
- with gr.Row():
- with gr.Box():
- inputls = []
- for colname in data.columns:
- # skip last column
- if colname == final_colname:
- continue
-
- # access categories dict if data is categorical
- # otherwise, just use a number input
- if colname in cat_value_dicts:
- radio_options = list(cat_value_dicts[colname].keys())
- inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname))
- else:
- # add numerical input
- inputls.append(gr.inputs.Number(label=colname))
- gr.Markdown(" ")
-
- submit = gr.Button("Click to see your personalized result!", variant="primary")
- gr.Markdown(" ")
- output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here")
-
- submit.click(fn=general_predictor, inputs=inputls, outputs=output)
- gr.Markdown(" ")
-
- with gr.Row():
- with gr.Box():
- gr.Markdown(f"
Accuracy:
{acc}")
- with gr.Box():
- gr.Markdown(f"
Most important feature:
{most_imp_feat}")
-
- gr.Markdown(" ")
-
- with gr.Box():
- gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for that dataset. Model accuracy and most important feature can be helpful for understanding how the model works, but should not be considered absolute facts about the real world.''')
-
- with gr.Box():
- with open('info.md') as f:
- f.readline()
- gr.Markdown(f.read())
-
-# show the interface
-block.launch()
\ No newline at end of file
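The hand-rolled categorical encoding in the data-transformation block above can be summarized with pandas.factorize, which yields the same code/value mapping idea, including the reverse map the app keeps for the final label column. The column names below are hypothetical.

```python
import pandas as pd

df = pd.DataFrame({
    "color": ["lilac", "blue", "lilac"],  # hypothetical categorical feature
    "score": [3, 1, 2],                   # already numeric, copied through unchanged
    "label": ["yes", "no", "yes"],        # prediction target (last column)
})

encoded = pd.DataFrame()
value_maps = {}
for col in df.columns:
    if pd.api.types.is_numeric_dtype(df[col]):
        encoded[col] = df[col]
        continue
    codes, uniques = pd.factorize(df[col])
    encoded[col] = codes
    # Reverse map (code -> original value); the app only reverses the final column
    # so that general_predictor() can translate predictions back into labels.
    value_maps[col] = dict(enumerate(uniques))

print(encoded)
print(value_maps["label"])  # {0: 'yes', 1: 'no'}
```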
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/ops/__init__.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/ops/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/bibekyess/bgpt/spell_check.py b/spaces/bibekyess/bgpt/spell_check.py
deleted file mode 100644
index aedfa0869d6312c0359e3ac227f62894ccc5e4d4..0000000000000000000000000000000000000000
--- a/spaces/bibekyess/bgpt/spell_check.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import spellchecker
-
-
-def correct_typos(sentence):
- # Initialize the spell checker object
- spell = spellchecker.SpellChecker(language="en")
- # Adds Bibek to its frequency dictionary to make it a known word
- spell.word_frequency.load_words(
- [
- "Bibek",
- "Bibek's",
- "skillsets",
- "skillset",
- "CV",
- "RIRO",
- "Bisonai",
- "IC",
- "BMC",
- "KAIST",
- ]
- )
- sentence_split = sentence.split()
- # Find the typos in the input sentence
- typos = spell.unknown(sentence_split)
- # Correct only the words flagged as typos; keep the original word when
- # the spell checker has no suggestion (spell.correction() may return None)
- corrected_sentence = [
- (spell.correction(word) or word) if word in typos else word
- for word in sentence_split
- ]
- # Return the corrected sentence as a string
- return " ".join(corrected_sentence)
diff --git a/spaces/bigPear/digitalWDF/src/utils/config.py b/spaces/bigPear/digitalWDF/src/utils/config.py
deleted file mode 100644
index 849e0b57eb8f843e5eef26fc9a126f20211bf75c..0000000000000000000000000000000000000000
--- a/spaces/bigPear/digitalWDF/src/utils/config.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import os
-import json
-from typing import Optional
-from dataclasses import dataclass, field
-
-
-CHATGLM_REPO_NAME = "THUDM/chatglm-6b"
-CHATGLM_LASTEST_HASH = "a8ede826cf1b62bd3c78bdfb3625c7c5d2048fbd"
-
-
-@dataclass
-class DatasetAttr:
-
- load_from: str
- dataset_name: Optional[str] = None
- file_name: Optional[str] = None
- file_sha1: Optional[str] = None
-
- def __post_init__(self):
- self.prompt_column = "instruction"
- self.query_column = "input"
- self.response_column = "output"
- self.history_column = None
-
-
-@dataclass
-class ModelArguments:
- """
- Arguments pertaining to which model/config/tokenizer we are going to fine-tune.
- """
- model_name_or_path: Optional[str] = field(
- default=CHATGLM_REPO_NAME,
- metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
- )
- config_name: Optional[str] = field(
- default=None,
- metadata={"help": "Pretrained config name or path if not the same as model_name."}
- )
- tokenizer_name: Optional[str] = field(
- default=None,
- metadata={"help": "Pretrained tokenizer name or path if not the same as model_name."}
- )
- cache_dir: Optional[str] = field(
- default=None,
- metadata={"help": "Where to store the pretrained models downloaded from huggingface.co."}
- )
- use_fast_tokenizer: Optional[bool] = field(
- default=True,
- metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
- )
- model_revision: Optional[str] = field(
- default=CHATGLM_LASTEST_HASH,
- metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
- )
- use_auth_token: Optional[bool] = field(
- default=False,
- metadata={"help": "Will use the token generated when running `huggingface-cli login`."}
- )
- quantization_bit: Optional[int] = field(
- default=None,
- metadata={"help": "The number of bits to quantize the model."}
- )
- checkpoint_dir: Optional[str] = field(
- default=None,
- metadata={"help": "Path to the directory containing the model checkpoints as well as the configurations."}
- )
- reward_model: Optional[str] = field(
- default=None,
- metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
- )
-
- def __post_init__(self):
- if self.checkpoint_dir is not None: # support merging lora weights
- self.checkpoint_dir = [cd.strip() for cd in self.checkpoint_dir.split(",")]
-
-
-@dataclass
-class DataTrainingArguments:
- """
- Arguments pertaining to what data we are going to input our model for training and evaluation.
- """
- dataset: Optional[str] = field(
- default="alpaca_zh",
- metadata={"help": "The name of provided dataset(s) to use. Use comma to separate multiple datasets."}
- )
- dataset_dir: Optional[str] = field(
- default="data",
- metadata={"help": "The name of the folder containing datasets."}
- )
- split: Optional[str] = field(
- default="train",
- metadata={"help": "Which dataset split to use for training and evaluation."}
- )
- overwrite_cache: Optional[bool] = field(
- default=False,
- metadata={"help": "Overwrite the cached training and evaluation sets."}
- )
- preprocessing_num_workers: Optional[int] = field(
- default=None,
- metadata={"help": "The number of processes to use for the preprocessing."}
- )
- max_source_length: Optional[int] = field(
- default=512,
- metadata={"help": "The maximum total input sequence length after tokenization."}
- )
- max_target_length: Optional[int] = field(
- default=512,
- metadata={"help": "The maximum total output sequence length after tokenization."}
- )
- max_samples: Optional[int] = field(
- default=None,
- metadata={"help": "For debugging purposes, truncate the number of examples for each dataset."}
- )
- num_beams: Optional[int] = field(
- default=None,
- metadata={"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`"}
- )
- ignore_pad_token_for_loss: Optional[bool] = field(
- default=True,
- metadata={"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."}
- )
- source_prefix: Optional[str] = field(
- default=None,
- metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
- )
-
- def __post_init__(self): # support mixing multiple datasets
- dataset_names = [ds.strip() for ds in self.dataset.split(",")]
- dataset_info = json.load(open(os.path.join(self.dataset_dir, "dataset_info.json"), "r"))
-
- self.dataset_list = []
- for name in dataset_names:
- if name not in dataset_info:
- raise ValueError("Undefined dataset {} in dataset_info.json.".format(name))
-
- if "hf_hub_url" in dataset_info[name]:
- dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"])
- elif "script_url" in dataset_info[name]:
- dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"])
- else:
- dataset_attr = DatasetAttr(
- "file",
- file_name=dataset_info[name]["file_name"],
- file_sha1=dataset_info[name]["file_sha1"] if "file_sha1" in dataset_info[name] else None
- )
-
- if "columns" in dataset_info[name]:
- dataset_attr.prompt_column = dataset_info[name]["columns"].get("prompt", None)
- dataset_attr.query_column = dataset_info[name]["columns"].get("query", None)
- dataset_attr.response_column = dataset_info[name]["columns"].get("response", None)
- dataset_attr.history_column = dataset_info[name]["columns"].get("history", None)
-
- self.dataset_list.append(dataset_attr)
-
-
-@dataclass
-class FinetuningArguments:
- """
- Arguments pertaining to which techniques we are going to fine-tuning with.
- """
- finetuning_type: Optional[str] = field(
- default="lora",
- metadata={"help": "Which fine-tuning method to use."}
- )
- num_layer_trainable: Optional[int] = field(
- default=3,
- metadata={"help": "Number of trainable layers for Freeze fine-tuning."}
- )
- name_module_trainable: Optional[str] = field(
- default="mlp",
- metadata={"help": "Name of trainable modules for Freeze fine-tuning."}
- )
- pre_seq_len: Optional[int] = field(
- default=16,
- metadata={"help": "Number of prefix tokens to use for P-tuning V2."}
- )
- prefix_projection: Optional[bool] = field(
- default=False,
- metadata={"help": "Whether to add a project layer for the prefix in P-tuning V2 or not."}
- )
- lora_rank: Optional[int] = field(
- default=8,
- metadata={"help": "The intrinsic dimension for LoRA fine-tuning."}
- )
- lora_alpha: Optional[float] = field(
- default=32.0,
- metadata={"help": "The scale factor for LoRA fine-tuning. (similar with the learning rate)"}
- )
- lora_dropout: Optional[float] = field(
- default=0.1,
- metadata={"help": "Dropout rate for the LoRA fine-tuning."}
- )
- lora_target: Optional[str] = field(
- default="query_key_value",
- metadata={"help": "Name(s) of target modules to apply LoRA. Use comma to separate multiple modules."}
- )
- resume_lora_training: Optional[bool] = field(
- default=True,
- metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
- )
- plot_loss: Optional[bool] = field(
- default=False,
- metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
- )
-
- def __post_init__(self):
- self.lora_target = [target.strip() for target in self.lora_target.split(",")] # support custom target modules of LoRA
-
- if self.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
- trainable_layer_ids = [27-k for k in range(self.num_layer_trainable)]
- else: # fine-tuning the first n layers if num_layer_trainable < 0
- trainable_layer_ids = [k for k in range(-self.num_layer_trainable)]
- if self.name_module_trainable == "mlp":
- self.trainable_layers = ["layers.{:d}.mlp".format(idx) for idx in trainable_layer_ids]
- elif self.name_module_trainable == "qkv":
- self.trainable_layers = ["layers.{:d}.attention.query_key_value".format(idx) for idx in trainable_layer_ids]
-
- if self.finetuning_type not in ["none", "freeze", "p_tuning", "lora", "full"]:
- raise NotImplementedError("Invalid fine-tuning method.")
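DataTrainingArguments.__post_init__ above reads data/dataset_info.json and routes each entry to a hub, script, or local-file DatasetAttr. Below is a sketch of the JSON shape it expects, written out from Python; the dataset names, Hub URL, and file name are hypothetical.

```python
import json

# Hypothetical data/dataset_info.json with one Hub dataset and one local file.
dataset_info = {
    "alpaca_zh": {
        "hf_hub_url": "some-user/alpaca-zh"      # parsed as DatasetAttr("hf_hub", ...)
    },
    "my_local_set": {
        "file_name": "my_local_set.json",        # parsed as DatasetAttr("file", ...)
        "columns": {                             # optional column remapping
            "prompt": "instruction",
            "query": "input",
            "response": "output",
            "history": "history"
        }
    }
}

with open("dataset_info.json", "w", encoding="utf-8") as f:
    json.dump(dataset_info, f, indent=2, ensure_ascii=False)
```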
diff --git a/spaces/bigPear/digitalWDF/src/utils/ppo.py b/spaces/bigPear/digitalWDF/src/utils/ppo.py
deleted file mode 100644
index 7fe76f51bbe5196284fb94bbb0ad8a70a5fa4df9..0000000000000000000000000000000000000000
--- a/spaces/bigPear/digitalWDF/src/utils/ppo.py
+++ /dev/null
@@ -1,368 +0,0 @@
-import os
-import json
-import math
-import torch
-from tqdm import tqdm
-from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple
-
-from transformers import DataCollatorWithPadding, Seq2SeqTrainingArguments
-from transformers.trainer import TRAINING_ARGS_NAME, TRAINER_STATE_NAME
-from transformers.tokenization_utils import PreTrainedTokenizer
-
-from trl import PPOTrainer, AutoModelForCausalLMWithValueHead
-from trl.core import LengthSampler
-from trl.trainer.ppo_trainer import PPODecorators, logprobs_from_logits
-
-from .config import FinetuningArguments
-
-from .other import (
- AverageMeter,
- get_logger,
- save_trainable_params,
- save_valuehead_params,
- get_logits_processor,
- FINETUNING_ARGS_NAME
-)
-
-
-logger = get_logger(__name__)
-
-
-def replace_model(model: AutoModelForCausalLMWithValueHead, target: Literal["default", "reward"]) -> None:
- if target == "reward": # save original head temporarily
- valuehead_state_dict = model.v_head.state_dict()
-
- setattr(model, "origin_head_weight", valuehead_state_dict["summary.weight"])
- setattr(model, "origin_head_bias", valuehead_state_dict["summary.bias"])
-
- model.pretrained_model.set_adapter(target) # set the LoRA adapter to be active
- model.v_head.load_state_dict({
- "summary.weight": getattr(model, "{}_head_weight".format(target)),
- "summary.bias": getattr(model, "{}_head_bias".format(target))
- })
-
-
-@torch.no_grad()
-def compute_rewards(
- input_ids: torch.Tensor, # (batch size x seq len) with format `X [gMASK] [BOS] Y [EOS] [PAD] ... [PAD]`
- model: AutoModelForCausalLMWithValueHead,
- tokenizer: PreTrainedTokenizer
-) -> torch.Tensor:
-
- replace_model(model, target="reward")
-
- _, _, values = model(input_ids=input_ids)
- values = values.transpose(0, 1)
-
- rewards = []
- for i in range(input_ids.size(0)):
- eos_idx = (input_ids[i] == tokenizer.eos_token_id).nonzero() # Note: checking with [EOS] token is unsafe
- if len(eos_idx):
- eos_idx = eos_idx[0].item()
- else:
- eos_idx = input_ids.size(1) - 1
- rewards.append(values[i][eos_idx])
- rewards = torch.stack(rewards, dim=0)
-
- replace_model(model, target="default")
-
- return rewards
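To make the indexing above concrete, here is a small self-contained sketch (illustrative only, with made-up token ids and values): the reward is the value-head output at the first `[EOS]` position, falling back to the last position when no `[EOS]` is found.

```python
import torch

eos_token_id = 2
input_ids = torch.tensor([[11, 12, 13, 2, 0, 0]])          # one sequence; [EOS] (id 2) at index 3
values = torch.arange(6, dtype=torch.float).unsqueeze(0)   # stand-in per-token value-head outputs

eos_idx = (input_ids[0] == eos_token_id).nonzero()
eos_idx = eos_idx[0].item() if len(eos_idx) else input_ids.size(1) - 1
reward = values[0][eos_idx]
print(eos_idx, reward.item())  # 3 3.0
```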
-
-
-def cast_layernorm_dtype(
- model: AutoModelForCausalLMWithValueHead,
- layer_norm_names: List[str] = ["layernorm"], # for chatglm setting
- layer_norm_params: Optional[Dict[str, torch.Tensor]] = None
-) -> Tuple[AutoModelForCausalLMWithValueHead, Dict[str, torch.Tensor]]:
-
- layer_norm_state_dict = {}
-
- for name, param in model.named_parameters():
- if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names):
- if layer_norm_params is not None:
- param.data = layer_norm_params[name] # restore float32 weights
- else:
- layer_norm_state_dict[name] = param.data.detach().clone() # store float32 weights for stability
- param.data = param.data.to(torch.float16)
-
- return model, layer_norm_state_dict
-
-
-class PPODataCollatorForChatGLM(DataCollatorWithPadding):
- r"""
- Data collator for ChatGLM. It dynamically pads batched data.
- """
- def __init__(
- self,
- tokenizer: PreTrainedTokenizer,
- min_input_length: int,
- max_input_length: int,
- inference_mode: bool = False,
- ):
- super().__init__(tokenizer, padding=True)
- self.inference_mode = inference_mode
-
- if min_input_length < max_input_length:
- self.input_size = LengthSampler(min_input_length, max_input_length)
- else:
- self.input_size = lambda: max_input_length # always use max_input_length
-
- def __call__(self, features: Sequence[Dict[str, Sequence]]) -> Dict[str, torch.Tensor]:
- r"""
- Pads batched data to the longest sequence in the batch. We adopt left-padding for PPO data.
-
- A length sampler is used to truncate the inputs to variable lengths.
-
- ChatGLM generates attention masks and position ids by itself.
- """
- if self.inference_mode:
- raise NotImplementedError
-
- input_ids = [torch.tensor(feature["input_ids"][:self.input_size()]).flip(0) for feature in features]
- input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
- features = {"input_ids": input_ids.flip(-1)}
- return features
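The left-padding above relies on a flip → pad → flip trick, because `pad_sequence` only supports right-padding. A minimal sketch with toy tensors (illustrative only, pad id 0):

```python
import torch

pad_id = 0
seqs = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
flipped = [s.flip(0) for s in seqs]                                  # reverse each sequence
padded = torch.nn.utils.rnn.pad_sequence(flipped, batch_first=True,
                                         padding_value=pad_id)      # right-pad the reversed sequences
left_padded = padded.flip(-1)                                        # reverse back: padding ends up on the left
print(left_padded)
# tensor([[5, 6, 7],
#         [0, 8, 9]])
```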
-
-
-class PPOTrainerForChatGLM(PPOTrainer):
- r"""
- Inherits PPOTrainer.
- """
-
- def __init__(self, training_args: Seq2SeqTrainingArguments, finetuning_args: FinetuningArguments, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.state = {"log_history": []}
- self.training_args = training_args
- self.finetuning_args = finetuning_args
-
- @torch.no_grad()
- def generate(
- self,
- query_tensor: torch.Tensor, # (batch size x seq len)
- length_sampler: Callable = None,
- return_prompt: bool = True,
- **generation_kwargs,
- ) -> torch.Tensor:
- r"""
- Generate response with the model given the query tensor.
-
- Inspired by: https://github.com/lvwerra/trl/blob/08f550674c553c36c51d1027613c29f14f3676a5/trl/trainer/ppo_trainer.py#L387
- """
-
- self.model, layer_norm_params = cast_layernorm_dtype(self.model)
-
- if length_sampler is not None:
- generation_kwargs["max_new_tokens"] = length_sampler()
-
- unwrapped_model = self.accelerator.unwrap_model(self.model)
-
- response = unwrapped_model.generate(
- input_ids=query_tensor, **generation_kwargs
- )
-
- # Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop
- # Inspired by: https://github.com/huggingface/transformers/blob/v4.28.1/src/transformers/trainer_seq2seq.py#L273
- if unwrapped_model.pretrained_model.generation_config._from_model_config:
- unwrapped_model.pretrained_model.generation_config._from_model_config = False
-
- self.model, _ = cast_layernorm_dtype(self.model, layer_norm_params)
-
- if not return_prompt and not self.is_encoder_decoder:
- return response[:, query_tensor.size(1):]
- return response
-
- def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor):
- input_ids = []
- for query, response in zip(queries, responses): # query is left-padded, response is right-padded
- start = (query != self.tokenizer.pad_token_id).nonzero()[0].item()
- input_ids.append(torch.cat((query[start:], response, query[:start]))) # change to right-padding
-
- model_inputs = {"input_ids": torch.stack(input_ids, dim=0).to(self.current_device)} # already padded to equal length
- model_inputs["attention_mask"] = torch.ones_like(model_inputs["input_ids"]) # unused indeed, avoid distributed error
- return model_inputs
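For clarity, a tiny worked example (illustrative only, with a hypothetical pad id of 0) of the re-padding performed above: the query's left padding is moved to the end, so query, response and padding form one right-padded sequence.

```python
import torch

pad_id = 0
query = torch.tensor([0, 0, 11, 12])       # left-padded query
response = torch.tensor([21, 22, 0])       # right-padded response

start = (query != pad_id).nonzero()[0].item()                 # first non-pad position (2)
merged = torch.cat((query[start:], response, query[:start]))  # move the query padding to the end
print(merged)  # tensor([11, 12, 21, 22,  0,  0,  0])
```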
-
- @PPODecorators.empty_cuda_cache()
- def batched_forward_pass(
- self,
- model: AutoModelForCausalLMWithValueHead,
- queries: torch.Tensor,
- responses: torch.Tensor,
- model_inputs: dict,
- ):
- r"""
- Calculate model outputs in multiple batches.
-
- Override to inject custom behavior.
- """
- bs = len(queries)
- fbs = self.config.mini_batch_size
- all_logprobs = []
- all_logits = []
- all_masks = []
- all_values = []
-
- for i in range(int(bs / fbs)):
- input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
-
- input_ids = input_kwargs["input_ids"]
- logits, _, values = model(input_ids=input_ids) # chatglm only needs input_ids
- logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
-
- values = values.transpose(0, 1)
- masks = torch.zeros_like(input_ids)
-
- for j in range(fbs):
- start = (input_ids[j] == self.tokenizer.bos_token_id).nonzero()[0].item() # always contain a [BOS] token
- end = (input_ids[j] == self.tokenizer.eos_token_id).nonzero() # Note: checking with [EOS] token is unsafe
- if len(end):
- end = end[0].item()
- else:
- end = masks.size(1)
- masks[j][start:end] = 1
- if end - start < 2:
- raise ValueError("Responses are too short. Make sure they are at least 4 tokens long.")
-
- all_logits.append(logits)
- all_values.append(values)
- all_logprobs.append(logprobs)
- all_masks.append(masks)
-
- return (
- torch.cat(all_logprobs),
- torch.cat(all_logits)[:, :-1],
- torch.cat(all_values)[:, :-1],
- torch.cat(all_masks)[:, :-1],
- )
-
- def ppo_train(self, max_target_length: int) -> None:
-
- total_train_batch_size = self.config.batch_size * self.config.gradient_accumulation_steps * self.training_args.world_size
- len_dataloader = len(self.dataloader)
- num_steps_per_epoch = max(len_dataloader // self.config.gradient_accumulation_steps, 1)
- num_examples = len(self.dataset)
- num_train_epochs = self.training_args.num_train_epochs
- max_steps = math.ceil(num_train_epochs * num_steps_per_epoch)
-
- if self.is_world_process_zero():
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {num_examples}")
- logger.info(f" Num Epochs = {num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {self.config.batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
- logger.info(f" Gradient Accumulation steps = {self.config.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {max_steps}")
- logger.info(f" Number of trainable parameters = {sum(p.numel() for p in self.model.parameters() if p.requires_grad)}")
-
- # Keyword arguments for `model.generate`
- gen_kwargs = {
- "top_k": 0.0,
- "top_p": 1.0,
- "do_sample": True,
- "pad_token_id": self.tokenizer.pad_token_id,
- "eos_token_id": self.tokenizer.eos_token_id,
- "logits_processor": get_logits_processor()
- }
- output_length_sampler = LengthSampler(max_target_length // 2, max_target_length)
- unwrapped_model = self.accelerator.unwrap_model(self.model)
-
- dataiter = iter(self.dataloader)
- steps_trained = 0
- loss_meter = AverageMeter()
- reward_meter = AverageMeter()
-
- for step in tqdm(range(max_steps)):
-
- for _ in range(self.config.gradient_accumulation_steps):
-
- batch = next(dataiter)
- steps_trained += 1
- queries = batch["input_ids"] # left-padded sequences
-
- unwrapped_model.gradient_checkpointing_disable()
- unwrapped_model.config.use_cache = True
-
- # Get response from ChatGLM
- responses_with_queries = self.generate(queries, length_sampler=output_length_sampler, **gen_kwargs)
- responses = responses_with_queries[:, queries.size(1):].clone().detach() # right-padded sequences (remember to clone!!!)
- # batch["response"] = tokenizer.batch_decode(responses, skip_special_tokens=True) # comment to avoid decode error
-
- for i in range(responses_with_queries.size(0)): # change to right-padding
- start = (responses_with_queries[i] != self.tokenizer.pad_token_id).nonzero()[0].item()
- responses_with_queries[i] = torch.cat((responses_with_queries[i][start:], responses_with_queries[i][:start]))
-
- # Compute rewards
- rewards = compute_rewards(responses_with_queries, unwrapped_model, self.tokenizer)
-
- # Run PPO step
- unwrapped_model.gradient_checkpointing_enable()
- unwrapped_model.config.use_cache = False
-
- split_into_list = lambda x: [x[i] for i in range(x.size(0))]
- stats = self.step(*map(split_into_list, [queries, responses, rewards]))
-
- loss_meter.update(stats["ppo/loss/total"])
- reward_meter.update(rewards.sum().item(), n=rewards.size(0))
-
- if steps_trained == len_dataloader:
- dataiter = iter(self.dataloader)
- steps_trained = 0
-
- if self.is_world_process_zero() and (step+1) % self.training_args.logging_steps == 0:
- logs = {
- "loss": round(loss_meter.avg, 4),
- "reward": round(reward_meter.avg, 4),
- "learning_rate": stats["ppo/learning_rate"],
- "epoch": round(step / num_steps_per_epoch, 2)
- }
- print(logs)
- logs["step"] = step
- self.state["log_history"].append(logs)
- loss_meter.reset()
- reward_meter.reset()
-
- if (step+1) % self.training_args.save_steps == 0: # save checkpoint
- self.save_model(os.path.join(self.training_args.output_dir, f"checkpoint-{step+1}"))
-
- def is_world_process_zero(self) -> bool:
- r"""
- Whether or not this process is the global main process (when training in a distributed fashion on several
- machines, this is only going to be `True` for one process).
- """
- return self.training_args.process_index == 0
-
- def save_state(self, output_dir: Optional[str] = None) -> None:
- r"""
- Saves trainer state.
- """
- if not self.is_world_process_zero():
- return
-
- output_dir = output_dir if output_dir is not None else self.training_args.output_dir
- os.makedirs(output_dir, exist_ok=True)
- json.dump(self.state, open(os.path.join(output_dir, TRAINER_STATE_NAME), "w", encoding="utf-8", newline="\n"), indent=2)
-
- def save_model(self, output_dir: Optional[str] = None) -> None:
- r"""
- Saves trainable parameters as model checkpoints. We use `self.model.pretrained_model` to refer to the backbone model.
-
- Override to inject custom behavior.
- """
- if not self.is_world_process_zero():
- return
-
- output_dir = output_dir if output_dir is not None else self.training_args.output_dir
- os.makedirs(output_dir, exist_ok=True)
- logger.info(f"Saving model checkpoint to {output_dir}")
-
- unwrapped_model = self.accelerator.unwrap_model(self.model)
-
- if hasattr(unwrapped_model.pretrained_model, "peft_config"): # peft methods
- unwrapped_model.pretrained_model.save_pretrained(output_dir) # save lora weights
- else: # non-peft methods
- save_trainable_params(output_dir, unwrapped_model.pretrained_model)
-
- if hasattr(unwrapped_model, "v_head"):
- save_valuehead_params(output_dir, unwrapped_model.v_head) # save valuehead weights
-
- torch.save(self.training_args, os.path.join(output_dir, TRAINING_ARGS_NAME))
- torch.save(self.finetuning_args, os.path.join(output_dir, FINETUNING_ARGS_NAME))
diff --git a/spaces/bigjoker/stable-diffusion-webui/javascript/imageParams.js b/spaces/bigjoker/stable-diffusion-webui/javascript/imageParams.js
deleted file mode 100644
index 67404a89ba6084a065ab5ac188e01ed29952113b..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/javascript/imageParams.js
+++ /dev/null
@@ -1,19 +0,0 @@
-window.onload = (function(){
- window.addEventListener('drop', e => {
- const target = e.composedPath()[0];
- const idx = selected_gallery_index();
- if (!target.placeholder || target.placeholder.indexOf("Prompt") == -1) return;
-
- let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
-
- e.stopPropagation();
- e.preventDefault();
- const imgParent = gradioApp().getElementById(prompt_target);
- const files = e.dataTransfer.files;
- const fileInput = imgParent.querySelector('input[type="file"]');
- if ( fileInput ) {
- fileInput.files = files;
- fileInput.dispatchEvent(new Event('change'));
- }
- });
-});
diff --git a/spaces/bioriAsaeru/text-to-voice/Angry Birds Pocket Pc 2021 Download.md b/spaces/bioriAsaeru/text-to-voice/Angry Birds Pocket Pc 2021 Download.md
deleted file mode 100644
index 19d1e640f1d463c47995f7047bee6b4a23283301..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Angry Birds Pocket Pc 2021 Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-The initial iOS version of the game included a single episode entitled "Poached Eggs", which contained three themed chapters, each with 21 levels. From time to time, Rovio has released free upgrades that include additional content, such as new levels, new in-game objects and even new birds. As updates have been released, they have been incorporated into the game's full version offered for download from each platform's application store.[29]
-
-The birds really aren't happy. In fact, they're so angry that they present themselves to the player as kamikaze fighters with a red-hot vendetta. In total there are 120 levels of play - plenty of time to fire the Angry Birds by slingshot at the green pigs.
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Download Windows 7 Extreme Edition R1 32bit .iso No Components Removed Fully Activated and Ready to Use.md b/spaces/bioriAsaeru/text-to-voice/Download Windows 7 Extreme Edition R1 32bit .iso No Components Removed Fully Activated and Ready to Use.md
deleted file mode 100644
index 35fefb362b38636c471abf0289294ef28908a981..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Download Windows 7 Extreme Edition R1 32bit .iso No Components Removed Fully Activated and Ready to Use.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-We hope that you are satisfied with the information we have delivered to you. It is really important to realize that Windows 7 Black Edition ISO brings a lot of opportunities for users running all their applications on the PC. It is also extremely handy and easy to use. Install Windows 7 Black Edition ISO on your operating system, as it is completely secure and error-free. If you have any queries regarding the download of Windows 7 Black Edition ISO, you can drop your concern in the comment section and we will get back to you with a solution in minimum time.
-
-YOU CAN GET THE WINDOWS 8 KEY IN THE FOLLOWING WAYS: When you purchase Windows 8, you get the key in the box of the CD/DVD. If you are purchasing Windows online, you will get the key in your email. You can also download and install a Windows key finder.
-
\ No newline at end of file
diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/models/experimental.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/models/experimental.py
deleted file mode 100644
index db8e5b8e1dfd6389b6b1cefa05862d9cdd1150c5..0000000000000000000000000000000000000000
--- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/models/experimental.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Experimental modules
-"""
-import math
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-from models.common import Conv
-from utils.downloads import attempt_download
-
-
-class Sum(nn.Module):
- # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
- def __init__(self, n, weight=False): # n: number of inputs
- super().__init__()
- self.weight = weight # apply weights boolean
- self.iter = range(n - 1) # iter object
- if weight:
- self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights
-
- def forward(self, x):
- y = x[0] # no weight
- if self.weight:
- w = torch.sigmoid(self.w) * 2
- for i in self.iter:
- y = y + x[i + 1] * w[i]
- else:
- for i in self.iter:
- y = y + x[i + 1]
- return y
-
-
-class MixConv2d(nn.Module):
- # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
- def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy
- super().__init__()
- n = len(k) # number of convolutions
- if equal_ch: # equal c_ per group
- i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices
- c_ = [(i == g).sum() for g in range(n)] # intermediate channels
- else: # equal weight.numel() per group
- b = [c2] + [0] * n
- a = np.eye(n + 1, n, k=-1)
- a -= np.roll(a, 1, axis=1)
- a *= np.array(k) ** 2
- a[0] = 1
- c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
-
- self.m = nn.ModuleList([
- nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
- self.bn = nn.BatchNorm2d(c2)
- self.act = nn.SiLU()
-
- def forward(self, x):
- return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
-
-
-class Ensemble(nn.ModuleList):
- # Ensemble of models
- def __init__(self):
- super().__init__()
-
- def forward(self, x, augment=False, profile=False, visualize=False):
- y = [module(x, augment, profile, visualize)[0] for module in self]
- # y = torch.stack(y).max(0)[0] # max ensemble
- # y = torch.stack(y).mean(0) # mean ensemble
- y = torch.cat(y, 1) # nms ensemble
- return y, None # inference, train output
-
-
-def attempt_load(weights, device=None, inplace=True, fuse=True):
- # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
- from models.yolo import Detect, Model
-
- model = Ensemble()
- for w in weights if isinstance(weights, list) else [weights]:
- ckpt = torch.load(attempt_download(w), map_location='cpu') # load
- ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model
- model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode
-
- # Compatibility updates
- for m in model.modules():
- t = type(m)
- if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
- m.inplace = inplace # torch 1.7.0 compatibility
- if t is Detect and not isinstance(m.anchor_grid, list):
- delattr(m, 'anchor_grid')
- setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
- elif t is Conv:
- m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility
- elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
- m.recompute_scale_factor = None # torch 1.11.0 compatibility
-
- if len(model) == 1:
- return model[-1] # return model
- print(f'Ensemble created with {weights}\n')
- for k in 'names', 'nc', 'yaml':
- setattr(model, k, getattr(model[0], k))
- model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
- assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
- return model # return ensemble
diff --git a/spaces/camenduru-com/sl/Dockerfile b/spaces/camenduru-com/sl/Dockerfile
deleted file mode 100644
index 70441e13d19ecaef382fe0abc3b0e98e40128a99..0000000000000000000000000000000000000000
--- a/spaces/camenduru-com/sl/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM alpine
-WORKDIR /app
-ADD app.py ./
-RUN apk update && \
- apk add --no-cache python3 py-pip py-flask py3-gunicorn git; \
- pip --no-cache-dir install \
- git+https://github.com/vstavrinov/streamlink.git; \
- apk del git
-CMD gunicorn --bind 0.0.0.0:7860 app:app
\ No newline at end of file
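The `app.py` this Dockerfile copies in is not part of this diff, so its contents are unknown. Purely as a hypothetical sketch of what a Flask + streamlink service matching `gunicorn app:app` could look like (every name below is an assumption, not the actual file):

```python
# Hypothetical app.py sketch -- not the real file from the image.
from flask import Flask, abort, request
import streamlink

app = Flask(__name__)

@app.route("/")
def resolve():
    url = request.args.get("url")
    if not url:
        abort(400, "missing ?url= parameter")
    streams = streamlink.streams(url)   # dict of quality name -> Stream
    if not streams:
        abort(404, "no streams found")
    return streams["best"].url          # direct URL (works for HTTP/HLS streams)
```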
diff --git a/spaces/captainChan/CaptainChan/modules/model_vision.py b/spaces/captainChan/CaptainChan/modules/model_vision.py
deleted file mode 100644
index a76a2c9b0af87b48a604dfae64532a70a049362a..0000000000000000000000000000000000000000
--- a/spaces/captainChan/CaptainChan/modules/model_vision.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import logging
-import torch.nn as nn
-from fastai.vision import *
-
-from modules.attention import *
-from modules.backbone import ResTranformer
-from modules.model import Model
-from modules.resnet import resnet45
-
-
-class BaseVision(Model):
- def __init__(self, config):
- super().__init__(config)
- self.loss_weight = ifnone(config.model_vision_loss_weight, 1.0)
- self.out_channels = ifnone(config.model_vision_d_model, 512)
-
- if config.model_vision_backbone == 'transformer':
- self.backbone = ResTranformer(config)
- else: self.backbone = resnet45()
-
- if config.model_vision_attention == 'position':
- mode = ifnone(config.model_vision_attention_mode, 'nearest')
- self.attention = PositionAttention(
- in_channels=self.out_channels,
- max_length=config.dataset_max_length + 1, # additional stop token
- mode=mode,
- )
- elif config.model_vision_attention == 'attention':
- self.attention = Attention(
- in_channels=self.out_channels,
- max_length=config.dataset_max_length + 1, # additional stop token
- n_feature=8*32,
- )
- else:
- raise Exception(f'{config.model_vision_attention} is not valid.')
- self.cls = nn.Linear(self.out_channels, self.charset.num_classes)
-
- if config.model_vision_checkpoint is not None:
- logging.info(f'Read vision model from {config.model_vision_checkpoint}.')
- self.load(config.model_vision_checkpoint)
-
- def _forward(self, b_features):
- attn_vecs, attn_scores = self.attention(b_features) # (N, T, E), (N, T, H, W)
- logits = self.cls(attn_vecs) # (N, T, C)
- pt_lengths = self._get_length(logits)
-
- return {'feature': attn_vecs, 'logits': logits, 'pt_lengths': pt_lengths,
- 'attn_scores': attn_scores, 'loss_weight':self.loss_weight, 'name': 'vision', 'b_features':b_features}
-
- def forward(self, images, *args, **kwargs):
- features = self.backbone(images, **kwargs) # (N, E, H, W)
- return self._forward(features)
-
-
-class BaseIterVision(BaseVision):
- def __init__(self, config):
- super().__init__(config)
- assert config.model_vision_backbone == 'transformer'
- self.iter_size = ifnone(config.model_vision_iter_size, 1)
- self.share_weights = ifnone(config.model_vision_share_weights, False)
- self.share_cnns = ifnone(config.model_vision_share_cnns, False)
- self.add_transformer = ifnone(config.model_vision_add_transformer, False)
- self.simple_trans = ifnone(config.model_vision_simple_trans, False)
- self.deep_supervision = ifnone(config.model_vision_deep_supervision, True)
- self.backbones = nn.ModuleList()
- self.trans = nn.ModuleList()
- for i in range(self.iter_size-1):
- B = None if self.share_weights else ResTranformer(config)
- if self.share_cnns:
- del B.resnet
- self.backbones.append(B)
- output_channel = self.out_channels
- if self.add_transformer:
- self.split_sizes = [output_channel]
- elif self.simple_trans:
- # self.split_sizes=[output_channel//16] + [0] * 5
- # self.split_sizes= [output_channel//16, output_channel//16, output_channel//8, output_channel//4, output_channel//2] + [0]
- self.split_sizes= [output_channel//16, output_channel//16, 0, output_channel//4, output_channel//2, output_channel]
- else:
- self.split_sizes=[output_channel//16, output_channel//16, output_channel//8, output_channel//4, output_channel//2, output_channel]
- self.trans.append(nn.Conv2d(output_channel, sum(self.split_sizes), 1))
- torch.nn.init.zeros_(self.trans[-1].weight)
-
- if config.model_vision_checkpoint is not None:
- logging.info(f'Read vision model from {config.model_vision_checkpoint}.')
- self.load(config.model_vision_checkpoint)
- cb_init = ifnone(config.model_vision_cb_init, True)
- if cb_init:
- self.cb_init()
-
- def load(self, source, device=None, strict=False):
- state = torch.load(source, map_location=device)
- msg = self.load_state_dict(state['model'], strict=strict)
- print(msg)
-
- def cb_init(self):
- model_state_dict = self.backbone.state_dict()
-
- for m in self.backbones:
- if m:
- print('cb_init')
- msg = m.load_state_dict(model_state_dict, strict=False)
- print(msg)
-
- def forward_test(self, images, *args):
- l_feats = self.backbone.resnet(images)
- b_feats = self.backbone.forward_transformer(l_feats)
- cnt = len(self.backbones)
- if cnt == 0:
- v_res = super()._forward(b_feats)
- for B,T in zip(self.backbones, self.trans):
- cnt -= 1
- extra_feats = T(b_feats).split(self.split_sizes, dim=1)
- if self.share_weights:
- v_res = super().forward(images, extra_feats=extra_feats)
- else:
- if self.add_transformer:
- if not self.share_cnns:
- l_feats = B.resnet(images)
- b_feats = B.forward_transformer(extra_feats[-1] + l_feats)
- else:
- b_feats = B(images, extra_feats=extra_feats)
- v_res = super()._forward(b_feats) if cnt==0 else None
- return v_res
-
- def forward_train(self, images, *args):
- l_feats = self.backbone.resnet(images)
- b_feats = self.backbone.forward_transformer(l_feats)
- v_res = super()._forward(b_feats)
- # v_res = super().forward(images)
- all_v_res = [v_res]
- for B,T in zip(self.backbones, self.trans):
- extra_feats = T(v_res['b_features']).split(self.split_sizes, dim=1)
- if self.share_weights:
- v_res = super().forward(images, extra_feats=extra_feats)
- else:
- if self.add_transformer:
- if not self.share_cnns:
- l_feats = B.resnet(images)
- b_feats = B.forward_transformer(extra_feats[-1] + l_feats)
- else:
- b_feats = B(images, extra_feats=extra_feats)
- v_res = super()._forward(b_feats)
- all_v_res.append(v_res)
- return all_v_res
-
- def forward(self, images, *args):
- if self.training and self.deep_supervision:
- return self.forward_train(images, *args)
- else:
- return self.forward_test(images, *args)
\ No newline at end of file
diff --git a/spaces/charbaaz356/Chat-GPT-LangChain-R/azure_utils.py b/spaces/charbaaz356/Chat-GPT-LangChain-R/azure_utils.py
deleted file mode 100644
index 4173eaa689abe9b7b6b66ed3fcf1ede591655a53..0000000000000000000000000000000000000000
--- a/spaces/charbaaz356/Chat-GPT-LangChain-R/azure_utils.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# This class stores Azure voice data. Specifically, it stores several records containing
-# language, gender and azure_voice (the Azure neural voice name). It also has a method to
-# return the azure_voice for a given language and gender.
-
-NEURAL_ENGINE = "neural"
-STANDARD_ENGINE = "standard"
-
-
-class AzureVoiceData:
- def get_voice(self, language, gender):
- for voice in self.voice_data:
- if voice['language'] == language and voice['gender'] == gender:
- return voice['azure_voice']
- return None
-
- def __init__(self):
- self.voice_data = [
- {'language': 'Arabic',
- 'azure_voice': 'ar-EG-ShakirNeural',
- 'gender': 'Male'},
- {'language': 'Arabic (Gulf)',
- 'azure_voice': 'ar-KW-FahedNeural',
- 'gender': 'Male'},
- {'language': 'Catalan',
- 'azure_voice': 'ca-ES-EnricNeural',
- 'gender': 'Male'},
- {'language': 'Chinese (Cantonese)',
- 'azure_voice': 'yue-CN-YunSongNeural',
- 'gender': 'Male'},
- {'language': 'Chinese (Mandarin)',
- 'azure_voice': 'zh-CN-YunxiNeural',
- 'gender': 'Male'},
- {'language': 'Danish',
- 'azure_voice': 'da-DK-JeppeNeural',
- 'gender': 'Male'},
- {'language': 'Dutch',
- 'azure_voice': 'nl-NL-MaartenNeural',
- 'gender': 'Male'},
- {'language': 'English (Australian)',
- 'azure_voice': 'en-AU-KenNeural',
- 'gender': 'Male'},
- {'language': 'English (British)',
- 'azure_voice': 'en-GB-RyanNeural',
- 'gender': 'Male'},
- {'language': 'English (Indian)',
- 'azure_voice': 'en-IN-PrabhatNeural',
- 'gender': 'Male'},
- {'language': 'English (New Zealand)',
- 'azure_voice': 'en-NZ-MitchellNeural',
- 'gender': 'Male'},
- {'language': 'English (South African)',
- 'azure_voice': 'en-ZA-LukeNeural',
- 'gender': 'Male'},
- {'language': 'English (US)',
- 'azure_voice': 'en-US-ChristopherNeural',
- 'gender': 'Male'},
- {'language': 'English (Welsh)',
- 'azure_voice': 'cy-GB-AledNeural',
- 'gender': 'Male'},
- {'language': 'Finnish',
- 'azure_voice': 'fi-FI-HarriNeural',
- 'gender': 'Male'},
- {'language': 'French',
- 'azure_voice': 'fr-FR-HenriNeural',
- 'gender': 'Male'},
- {'language': 'French (Canadian)',
- 'azure_voice': 'fr-CA-AntoineNeural',
- 'gender': 'Male'},
- {'language': 'German',
- 'azure_voice': 'de-DE-KlausNeural',
- 'gender': 'Male'},
- {'language': 'German (Austrian)',
- 'azure_voice': 'de-AT-JonasNeural',
- 'gender': 'Male'},
- {'language': 'Hindi',
- 'azure_voice': 'hi-IN-MadhurNeural',
- 'gender': 'Male'},
- {'language': 'Icelandic',
- 'azure_voice': 'is-IS-GunnarNeural',
- 'gender': 'Male'},
- {'language': 'Italian',
- 'azure_voice': 'it-IT-GianniNeural',
- 'gender': 'Male'},
- {'language': 'Japanese',
- 'azure_voice': 'ja-JP-KeitaNeural',
- 'gender': 'Male'},
- {'language': 'Korean',
- 'azure_voice': 'ko-KR-GookMinNeural',
- 'gender': 'Male'},
- {'language': 'Norwegian',
- 'azure_voice': 'nb-NO-FinnNeural',
- 'gender': 'Male'},
- {'language': 'Polish',
- 'azure_voice': 'pl-PL-MarekNeural',
- 'gender': 'Male'},
- {'language': 'Portuguese (Brazilian)',
- 'azure_voice': 'pt-BR-NicolauNeural',
- 'gender': 'Male'},
- {'language': 'Portuguese (European)',
- 'azure_voice': 'pt-PT-DuarteNeural',
- 'gender': 'Male'},
- {'language': 'Romanian',
- 'azure_voice': 'ro-RO-EmilNeural',
- 'gender': 'Male'},
- {'language': 'Russian',
- 'azure_voice': 'ru-RU-DmitryNeural',
- 'gender': 'Male'},
- {'language': 'Spanish (European)',
- 'azure_voice': 'es-ES-TeoNeural',
- 'gender': 'Male'},
- {'language': 'Spanish (Mexican)',
- 'azure_voice': 'es-MX-LibertoNeural',
- 'gender': 'Male'},
- {'language': 'Spanish (US)',
- 'azure_voice': 'es-US-AlonsoNeural"',
- 'gender': 'Male'},
- {'language': 'Swedish',
- 'azure_voice': 'sv-SE-MattiasNeural',
- 'gender': 'Male'},
- {'language': 'Turkish',
- 'azure_voice': 'tr-TR-AhmetNeural',
- 'gender': 'Male'},
- {'language': 'Welsh',
- 'azure_voice': 'cy-GB-AledNeural',
- 'gender': 'Male'},
- ]
-
-
-# Run from the command-line
-if __name__ == '__main__':
- azure_voice_data = AzureVoiceData()
-
- azure_voice = azure_voice_data.get_voice('English (US)', 'Male')
- print('English (US)', 'Male', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('English (US)', 'Female')
- print('English (US)', 'Female', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('French', 'Female')
- print('French', 'Female', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('French', 'Male')
- print('French', 'Male', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('Japanese', 'Female')
- print('Japanese', 'Female', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('Japanese', 'Male')
- print('Japanese', 'Male', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('Hindi', 'Female')
- print('Hindi', 'Female', azure_voice)
-
- azure_voice = azure_voice_data.get_voice('Hindi', 'Male')
- print('Hindi', 'Male', azure_voice)
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/data/datasets/voc.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/data/datasets/voc.py
deleted file mode 100644
index bdacd80191bc50b92185b73c97a68d792041feaa..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/data/datasets/voc.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Code are based on
-# https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
-# Copyright (c) Francisco Massa.
-# Copyright (c) Ellis Brown, Max deGroot.
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import os
-import os.path
-import pickle
-import xml.etree.ElementTree as ET
-
-import cv2
-import numpy as np
-
-from yolox.evaluators.voc_eval import voc_eval
-
-from .datasets_wrapper import CacheDataset, cache_read_img
-from .voc_classes import VOC_CLASSES
-
-
-class AnnotationTransform(object):
-
- """Transforms a VOC annotation into a Tensor of bbox coords and label index
- Initilized with a dictionary lookup of classnames to indexes
-
- Arguments:
- class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
- (default: alphabetic indexing of VOC's 20 classes)
- keep_difficult (bool, optional): keep difficult instances or not
- (default: True)
- height (int): height
- width (int): width
- """
-
- def __init__(self, class_to_ind=None, keep_difficult=True):
- self.class_to_ind = class_to_ind or dict(
- zip(VOC_CLASSES, range(len(VOC_CLASSES)))
- )
- self.keep_difficult = keep_difficult
-
- def __call__(self, target):
- """
- Arguments:
- target (annotation) : the target annotation to be made usable
- will be an ET.Element
- Returns:
- a list containing lists of bounding boxes [bbox coords, class name]
- """
- res = np.empty((0, 5))
- for obj in target.iter("object"):
- difficult = obj.find("difficult")
- if difficult is not None:
- difficult = int(difficult.text) == 1
- else:
- difficult = False
- if not self.keep_difficult and difficult:
- continue
- name = obj.find("name").text.strip()
- bbox = obj.find("bndbox")
-
- pts = ["xmin", "ymin", "xmax", "ymax"]
- bndbox = []
- for i, pt in enumerate(pts):
- cur_pt = int(float(bbox.find(pt).text)) - 1
- # scale height or width
- # cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
- bndbox.append(cur_pt)
- label_idx = self.class_to_ind[name]
- bndbox.append(label_idx)
- res = np.vstack((res, bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
- # img_id = target.find('filename').text[:-4]
-
- width = int(target.find("size").find("width").text)
- height = int(target.find("size").find("height").text)
- img_info = (height, width)
-
- return res, img_info
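A minimal usage sketch of the transform above (illustrative only; it assumes `AnnotationTransform` and `VOC_CLASSES` from this module are importable). Each `<object>` becomes a row `[xmin, ymin, xmax, ymax, label_idx]`, with coordinates shifted by -1:

```python
import xml.etree.ElementTree as ET

xml_str = """
<annotation>
  <size><width>100</width><height>80</height></size>
  <object>
    <name>dog</name>
    <difficult>0</difficult>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>60</xmax><ymax>70</ymax></bndbox>
  </object>
</annotation>
"""

target = ET.fromstring(xml_str)
res, img_info = AnnotationTransform()(target)
print(res)       # [[ 9. 19. 59. 69. <index of 'dog' in VOC_CLASSES>]]
print(img_info)  # (80, 100)
```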
-
-
-class VOCDetection(CacheDataset):
-
- """
- VOC Detection Dataset Object
-
- input is image, target is annotation
-
- Args:
- root (string): filepath to VOCdevkit folder.
- image_set (string): image set to use (e.g. 'train', 'val', 'test')
- transform (callable, optional): transformation to perform on the
- input image
- target_transform (callable, optional): transformation to perform on the
- target `annotation`
- (eg: take in caption string, return tensor of word indices)
- dataset_name (string, optional): which dataset to load
- (default: 'VOC2007')
- """
-
- def __init__(
- self,
- data_dir,
- image_sets=[("2007", "trainval"), ("2012", "trainval")],
- img_size=(416, 416),
- preproc=None,
- target_transform=AnnotationTransform(),
- dataset_name="VOC0712",
- cache=False,
- cache_type="ram",
- ):
- self.root = data_dir
- self.image_set = image_sets
- self.img_size = img_size
- self.preproc = preproc
- self.target_transform = target_transform
- self.name = dataset_name
- self._annopath = os.path.join("%s", "Annotations", "%s.xml")
- self._imgpath = os.path.join("%s", "JPEGImages", "%s.jpg")
- self._classes = VOC_CLASSES
- self.cats = [
- {"id": idx, "name": val} for idx, val in enumerate(VOC_CLASSES)
- ]
- self.class_ids = list(range(len(VOC_CLASSES)))
- self.ids = list()
- for (year, name) in image_sets:
- self._year = year
- rootpath = os.path.join(self.root, "VOC" + year)
- for line in open(
- os.path.join(rootpath, "ImageSets", "Main", name + ".txt")
- ):
- self.ids.append((rootpath, line.strip()))
- self.num_imgs = len(self.ids)
-
- self.annotations = self._load_coco_annotations()
-
- path_filename = [
- (self._imgpath % self.ids[i]).split(self.root + "/")[1]
- for i in range(self.num_imgs)
- ]
- super().__init__(
- input_dimension=img_size,
- num_imgs=self.num_imgs,
- data_dir=self.root,
- cache_dir_name=f"cache_{self.name}",
- path_filename=path_filename,
- cache=cache,
- cache_type=cache_type
- )
-
- def __len__(self):
- return self.num_imgs
-
- def _load_coco_annotations(self):
- return [self.load_anno_from_ids(_ids) for _ids in range(self.num_imgs)]
-
- def load_anno_from_ids(self, index):
- img_id = self.ids[index]
- target = ET.parse(self._annopath % img_id).getroot()
-
- assert self.target_transform is not None
- res, img_info = self.target_transform(target)
- height, width = img_info
-
- r = min(self.img_size[0] / height, self.img_size[1] / width)
- res[:, :4] *= r
- resized_info = (int(height * r), int(width * r))
-
- return (res, img_info, resized_info)
-
- def load_anno(self, index):
- return self.annotations[index][0]
-
- def load_resized_img(self, index):
- img = self.load_image(index)
- r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
- resized_img = cv2.resize(
- img,
- (int(img.shape[1] * r), int(img.shape[0] * r)),
- interpolation=cv2.INTER_LINEAR,
- ).astype(np.uint8)
-
- return resized_img
-
- def load_image(self, index):
- img_id = self.ids[index]
- img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
- assert img is not None, f"file named {self._imgpath % img_id} not found"
-
- return img
-
- @cache_read_img(use_cache=True)
- def read_img(self, index):
- return self.load_resized_img(index)
-
- def pull_item(self, index):
- """Returns the original image and target at an index for mixup
-
- Note: not using self.__getitem__(), as any transformations passed in
- could mess up this functionality.
-
- Argument:
- index (int): index of img to show
- Return:
- img, target
- """
- target, img_info, _ = self.annotations[index]
- img = self.read_img(index)
-
- return img, target, img_info, index
-
- @CacheDataset.mosaic_getitem
- def __getitem__(self, index):
- img, target, img_info, img_id = self.pull_item(index)
-
- if self.preproc is not None:
- img, target = self.preproc(img, target, self.input_dim)
-
- return img, target, img_info, img_id
-
- def evaluate_detections(self, all_boxes, output_dir=None):
- """
- all_boxes is a list of length number-of-classes.
- Each list element is a list of length number-of-images.
- Each of those list elements is either an empty list []
- or a numpy array of detection.
-
- all_boxes[class][image] = [] or np.array of shape #dets x 5
- """
- self._write_voc_results_file(all_boxes)
- IouTh = np.linspace(
- 0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True
- )
- mAPs = []
- for iou in IouTh:
- mAP = self._do_python_eval(output_dir, iou)
- mAPs.append(mAP)
-
- print("--------------------------------------------------------------")
- print("map_5095:", np.mean(mAPs))
- print("map_50:", mAPs[0])
- print("--------------------------------------------------------------")
- return np.mean(mAPs), mAPs[0]
-
- def _get_voc_results_file_template(self):
- filename = "comp4_det_test" + "_{:s}.txt"
- filedir = os.path.join(self.root, "results", "VOC" + self._year, "Main")
- if not os.path.exists(filedir):
- os.makedirs(filedir)
- path = os.path.join(filedir, filename)
- return path
-
- def _write_voc_results_file(self, all_boxes):
- for cls_ind, cls in enumerate(VOC_CLASSES):
- cls_ind = cls_ind
- if cls == "__background__":
- continue
- print("Writing {} VOC results file".format(cls))
- filename = self._get_voc_results_file_template().format(cls)
- with open(filename, "wt") as f:
- for im_ind, index in enumerate(self.ids):
- index = index[1]
- dets = all_boxes[cls_ind][im_ind]
- if len(dets) == 0:
- continue
- for k in range(dets.shape[0]):
- f.write(
- "{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
- index,
- dets[k, -1],
- dets[k, 0] + 1,
- dets[k, 1] + 1,
- dets[k, 2] + 1,
- dets[k, 3] + 1,
- )
- )
-
- def _do_python_eval(self, output_dir="output", iou=0.5):
- rootpath = os.path.join(self.root, "VOC" + self._year)
- name = self.image_set[0][1]
- annopath = os.path.join(rootpath, "Annotations", "{:s}.xml")
- imagesetfile = os.path.join(rootpath, "ImageSets", "Main", name + ".txt")
- cachedir = os.path.join(
- self.root, "annotations_cache", "VOC" + self._year, name
- )
- if not os.path.exists(cachedir):
- os.makedirs(cachedir)
- aps = []
- # The PASCAL VOC metric changed in 2010
- use_07_metric = True if int(self._year) < 2010 else False
- print("Eval IoU : {:.2f}".format(iou))
- if output_dir is not None and not os.path.isdir(output_dir):
- os.mkdir(output_dir)
- for i, cls in enumerate(VOC_CLASSES):
-
- if cls == "__background__":
- continue
-
- filename = self._get_voc_results_file_template().format(cls)
- rec, prec, ap = voc_eval(
- filename,
- annopath,
- imagesetfile,
- cls,
- cachedir,
- ovthresh=iou,
- use_07_metric=use_07_metric,
- )
- aps += [ap]
- if iou == 0.5:
- print("AP for {} = {:.4f}".format(cls, ap))
- if output_dir is not None:
- with open(os.path.join(output_dir, cls + "_pr.pkl"), "wb") as f:
- pickle.dump({"rec": rec, "prec": prec, "ap": ap}, f)
- if iou == 0.5:
- print("Mean AP = {:.4f}".format(np.mean(aps)))
- print("~~~~~~~~")
- print("Results:")
- for ap in aps:
- print("{:.3f}".format(ap))
- print("{:.3f}".format(np.mean(aps)))
- print("~~~~~~~~")
- print("")
- print("--------------------------------------------------------------")
- print("Results computed with the **unofficial** Python eval code.")
- print("Results should be very close to the official MATLAB eval code.")
- print("Recompute with `./tools/reval.py --matlab ...` for your paper.")
- print("-- Thanks, The Management")
- print("--------------------------------------------------------------")
-
- return np.mean(aps)
diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/image-classification/README.md b/spaces/chendl/compositional_test/transformers/examples/pytorch/image-classification/README.md
deleted file mode 100644
index 04b4748774ddf78fb727ea24c380f9586f8d920c..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/pytorch/image-classification/README.md
+++ /dev/null
@@ -1,211 +0,0 @@
-
-
-# Image classification examples
-
-This directory contains 2 scripts that showcase how to fine-tune any model supported by the [`AutoModelForImageClassification` API](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModelForImageClassification) (such as [ViT](https://huggingface.co/docs/transformers/main/en/model_doc/vit), [ConvNeXT](https://huggingface.co/docs/transformers/main/en/model_doc/convnext), [ResNet](https://huggingface.co/docs/transformers/main/en/model_doc/resnet), [Swin Transformer](https://huggingface.co/docs/transformers/main/en/model_doc/swin)...) using PyTorch. They can be used to fine-tune models both on [datasets from the hub](#using-datasets-from-hub) and on [your own custom data](#using-your-own-data).
-
-
-
-Try out the inference widget here: https://huggingface.co/google/vit-base-patch16-224
-
-Content:
-- [PyTorch version, Trainer](#pytorch-version-trainer)
-- [PyTorch version, no Trainer](#pytorch-version-no-trainer)
-
-## PyTorch version, Trainer
-
-Based on the script [`run_image_classification.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification.py).
-
-The script leverages the 🤗 [Trainer API](https://huggingface.co/docs/transformers/main_classes/trainer) to automatically take care of the training for you, running on distributed environments right away.
-
-### Using datasets from Hub
-
-Here we show how to fine-tune a Vision Transformer (`ViT`) on the [beans](https://huggingface.co/datasets/beans) dataset, to classify the disease type of bean leaves.
-
-```bash
-python run_image_classification.py \
- --dataset_name beans \
- --output_dir ./beans_outputs/ \
- --remove_unused_columns False \
- --do_train \
- --do_eval \
- --push_to_hub \
- --push_to_hub_model_id vit-base-beans \
- --learning_rate 2e-5 \
- --num_train_epochs 5 \
- --per_device_train_batch_size 8 \
- --per_device_eval_batch_size 8 \
- --logging_strategy steps \
- --logging_steps 10 \
- --evaluation_strategy epoch \
- --save_strategy epoch \
- --load_best_model_at_end True \
- --save_total_limit 3 \
- --seed 1337
-```
-
-👀 See the results here: [nateraw/vit-base-beans](https://huggingface.co/nateraw/vit-base-beans).
-
-Note that you can replace the model and dataset by simply setting the `model_name_or_path` and `dataset_name` arguments respectively, with any model or dataset from the [hub](https://huggingface.co/). For an overview of all possible arguments, we refer to the [docs](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) of the `TrainingArguments`, which can be passed as flags.
-
-> If your model classification head dimensions do not fit the number of labels in the dataset, you can specify `--ignore_mismatched_sizes` to adapt it.
-
-### Using your own data
-
-To use your own dataset, there are 2 ways:
-- you can either provide your own folders as `--train_dir` and/or `--validation_dir` arguments
-- you can upload your dataset to the hub (possibly as a private repo, if you prefer so), and simply pass the `--dataset_name` argument.
-
-Below, we explain both in more detail.
-
-#### Provide them as folders
-
-If you provide your own folders with images, the script expects the following directory structure:
-
-```bash
-root/dog/xxx.png
-root/dog/xxy.png
-root/dog/[...]/xxz.png
-
-root/cat/123.png
-root/cat/nsdf3.png
-root/cat/[...]/asd932_.png
-```
-
-In other words, you need to organize your images in subfolders, based on their class. You can then run the script like this:
-
-```bash
-python run_image_classification.py \
- --train_dir <path-to-train-root> \
- --output_dir ./outputs/ \
- --remove_unused_columns False \
- --do_train \
- --do_eval
-```
-
-Internally, the script will use the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature which will automatically turn the folders into 🤗 Dataset objects.
-
-##### 💡 The above will split the train dir into training and evaluation sets
- - To control the split amount, use the `--train_val_split` flag.
- - To provide your own validation split in its own directory, you can pass the `--validation_dir <path-to-val-root>` flag.
-
-#### Upload your data to the hub, as a (possibly private) repo
-
-It's very easy (and convenient) to upload your image dataset to the hub using the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature available in 🤗 Datasets. Simply do the following:
-
-```python
-from datasets import load_dataset
-
-# example 1: local folder
-dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")
-
-# example 2: local files (suppoted formats are tar, gzip, zip, xz, rar, zstd)
-dataset = load_dataset("imagefolder", data_files="path_to_zip_file")
-
-# example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd)
-dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip")
-
-# example 4: providing several splits
-dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]})
-```
-
-`ImageFolder` will create a `label` column, and the label name is based on the directory name.
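As a quick sanity check (a small sketch reusing the local-folder example above), you can confirm that the class names were inferred from the folder names:

```python
from datasets import load_dataset

dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")
print(dataset["train"].features["label"].names)  # e.g. ['cat', 'dog']
```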
-
-Next, push it to the hub!
-
-```python
-# assuming you have run the huggingface-cli login command in a terminal
-dataset.push_to_hub("name_of_your_dataset")
-
-# if you want to push to a private repo, simply pass private=True:
-dataset.push_to_hub("name_of_your_dataset", private=True)
-```
-
-and that's it! You can now train your model by simply setting the `--dataset_name` argument to the name of your dataset on the hub (as explained in [Using datasets from the 🤗 hub](#using-datasets-from-hub)).
-
-More on this can also be found in [this blog post](https://huggingface.co/blog/image-search-datasets).
-
-### Sharing your model on 🤗 Hub
-
-0. If you haven't already, [sign up](https://huggingface.co/join) for a 🤗 account
-
-1. Make sure you have `git-lfs` installed and git set up.
-
-```bash
-$ apt install git-lfs
-$ git config --global user.email "you@example.com"
-$ git config --global user.name "Your Name"
-```
-
-2. Log in with your HuggingFace account credentials using `huggingface-cli`:
-
-```bash
-$ huggingface-cli login
-# ...follow the prompts
-```
-
-3. When running the script, pass the following arguments:
-
-```bash
-python run_image_classification.py \
- --push_to_hub \
- --push_to_hub_model_id <name-of-your-model> \
- ...
-```
-
-## PyTorch version, no Trainer
-
-Based on the script [`run_image_classification_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py).
-
-Like `run_image_classification.py`, this script allows you to fine-tune any of the models on the [hub](https://huggingface.co/models) on an image classification task. The main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like.
-
-It offers fewer options than the script with `Trainer` (for instance you can easily change the options for the optimizer
-or the dataloaders directly in the script), but it still runs in a distributed setup and supports mixed precision by
-means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally
-after installing it:
-
-```bash
-pip install git+https://github.com/huggingface/accelerate
-```
-
-You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run
-
-```bash
-accelerate config
-```
-
-and reply to the questions asked. Then
-
-```bash
-accelerate test
-```
-
-that will check everything is ready for training. Finally, you can launch training with
-
-```bash
-accelerate launch run_image_classification_no_trainer.py
-```
-
-This command is the same and will work for:
-
-- single/multiple CPUs
-- single/multiple GPUs
-- TPUs
-
-Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problems using it.
-
-Regarding using custom data with this script, we refer to [using your own data](#using-your-own-data).
diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/text-classification/run_glue.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/text-classification/run_glue.py
deleted file mode 100644
index c14107d897424c5a81e3ba2a0e8b2b8d1ce14abf..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/pytorch/text-classification/run_glue.py
+++ /dev/null
@@ -1,626 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Finetuning the library models for sequence classification on GLUE."""
-# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
-
-import logging
-import os
-import random
-import sys
-from dataclasses import dataclass, field
-from typing import Optional
-
-import datasets
-import evaluate
-import numpy as np
-from datasets import load_dataset
-
-import transformers
-from transformers import (
- AutoConfig,
- AutoModelForSequenceClassification,
- AutoTokenizer,
- DataCollatorWithPadding,
- EvalPrediction,
- HfArgumentParser,
- PretrainedConfig,
- Trainer,
- TrainingArguments,
- default_data_collator,
- set_seed,
-)
-from transformers.trainer_utils import get_last_checkpoint
-from transformers.utils import check_min_version, send_example_telemetry
-from transformers.utils.versions import require_version
-
-
-# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
-check_min_version("4.28.0")
-
-require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
-
-task_to_keys = {
- "cola": ("sentence", None),
- "mnli": ("premise", "hypothesis"),
- "mrpc": ("sentence1", "sentence2"),
- "qnli": ("question", "sentence"),
- "qqp": ("question1", "question2"),
- "rte": ("sentence1", "sentence2"),
- "sst2": ("sentence", None),
- "stsb": ("sentence1", "sentence2"),
- "wnli": ("sentence1", "sentence2"),
-}
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class DataTrainingArguments:
- """
- Arguments pertaining to what data we are going to input our model for training and eval.
-
- Using `HfArgumentParser` we can turn this class
- into argparse arguments to be able to specify them on
- the command line.
- """
-
- task_name: Optional[str] = field(
- default=None,
- metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
- )
- dataset_name: Optional[str] = field(
- default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
- )
- dataset_config_name: Optional[str] = field(
- default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
- )
- max_seq_length: int = field(
- default=128,
- metadata={
- "help": (
- "The maximum total input sequence length after tokenization. Sequences longer "
- "than this will be truncated, sequences shorter will be padded."
- )
- },
- )
- overwrite_cache: bool = field(
- default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
- )
- pad_to_max_length: bool = field(
- default=True,
- metadata={
- "help": (
- "Whether to pad all samples to `max_seq_length`. "
- "If False, will pad the samples dynamically when batching to the maximum length in the batch."
- )
- },
- )
- max_train_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of training examples to this "
- "value if set."
- )
- },
- )
- max_eval_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
- "value if set."
- )
- },
- )
- max_predict_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of prediction examples to this "
- "value if set."
- )
- },
- )
- train_file: Optional[str] = field(
- default=None, metadata={"help": "A csv or a json file containing the training data."}
- )
- validation_file: Optional[str] = field(
- default=None, metadata={"help": "A csv or a json file containing the validation data."}
- )
- test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
-
- def __post_init__(self):
- if self.task_name is not None:
- self.task_name = self.task_name.lower()
- if self.task_name not in task_to_keys.keys():
- raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
- elif self.dataset_name is not None:
- pass
- elif self.train_file is None or self.validation_file is None:
- raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
- else:
- train_extension = self.train_file.split(".")[-1]
- assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
- validation_extension = self.validation_file.split(".")[-1]
- assert (
- validation_extension == train_extension
- ), "`validation_file` should have the same extension (csv or json) as `train_file`."
-
-
-@dataclass
-class ModelArguments:
- """
- Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
- """
-
- model_name_or_path: str = field(
- metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
- )
- config_name: Optional[str] = field(
- default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
- )
- tokenizer_name: Optional[str] = field(
- default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
- )
- cache_dir: Optional[str] = field(
- default=None,
- metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
- )
- use_fast_tokenizer: bool = field(
- default=True,
- metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
- )
- model_revision: str = field(
- default="main",
- metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
- )
- use_auth_token: bool = field(
- default=False,
- metadata={
- "help": (
- "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
- "with private models)."
- )
- },
- )
- ignore_mismatched_sizes: bool = field(
- default=False,
- metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
- )
-
-
-def main():
- # See all possible arguments in src/transformers/training_args.py
- # or by passing the --help flag to this script.
- # We now keep distinct sets of args, for a cleaner separation of concerns.
-
- parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
- if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
- # If we pass only one argument to the script and it's the path to a json file,
- # let's parse it to get our arguments.
- model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
- else:
- model_args, data_args, training_args = parser.parse_args_into_dataclasses()
-
- # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
- # information sent is the one passed as arguments along with your Python/PyTorch versions.
- send_example_telemetry("run_glue", model_args, data_args)
-
- # Setup logging
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- handlers=[logging.StreamHandler(sys.stdout)],
- )
-
- if training_args.should_log:
- # The default of training_args.log_level is passive, so we set log level at info here to have that default.
- transformers.utils.logging.set_verbosity_info()
-
- log_level = training_args.get_process_log_level()
- logger.setLevel(log_level)
- datasets.utils.logging.set_verbosity(log_level)
- transformers.utils.logging.set_verbosity(log_level)
- transformers.utils.logging.enable_default_handler()
- transformers.utils.logging.enable_explicit_format()
-
- # Log on each process the small summary:
- logger.warning(
- f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
- + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
- )
- logger.info(f"Training/evaluation parameters {training_args}")
-
- # Detecting last checkpoint.
- last_checkpoint = None
- if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
- last_checkpoint = get_last_checkpoint(training_args.output_dir)
- if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
- raise ValueError(
- f"Output directory ({training_args.output_dir}) already exists and is not empty. "
- "Use --overwrite_output_dir to overcome."
- )
- elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
- logger.info(
- f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
- "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
- )
-
- # Set seed before initializing model.
- set_seed(training_args.seed)
-
- # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
- # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
- #
- # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
- # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
- # label if at least two columns are provided.
- #
- # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
- # single column. You can easily tweak this behavior (see below)
- #
- # In distributed training, the load_dataset function guarantee that only one local process can concurrently
- # download the dataset.
- if data_args.task_name is not None:
- # Downloading and loading a dataset from the hub.
- raw_datasets = load_dataset(
- "glue",
- data_args.task_name,
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- elif data_args.dataset_name is not None:
- # Downloading and loading a dataset from the hub.
- raw_datasets = load_dataset(
- data_args.dataset_name,
- data_args.dataset_config_name,
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- else:
- # Loading a dataset from your local files.
- # CSV/JSON training and evaluation files are needed.
- data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
-
- # Get the test dataset: you can provide your own CSV/JSON test file (see below)
- # when you use `do_predict` without specifying a GLUE benchmark task.
- if training_args.do_predict:
- if data_args.test_file is not None:
- train_extension = data_args.train_file.split(".")[-1]
- test_extension = data_args.test_file.split(".")[-1]
- assert (
- test_extension == train_extension
- ), "`test_file` should have the same extension (csv or json) as `train_file`."
- data_files["test"] = data_args.test_file
- else:
- raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
-
- for key in data_files.keys():
- logger.info(f"load a local file for {key}: {data_files[key]}")
-
- if data_args.train_file.endswith(".csv"):
- # Loading a dataset from local csv files
- raw_datasets = load_dataset(
- "csv",
- data_files=data_files,
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- else:
- # Loading a dataset from local json files
- raw_datasets = load_dataset(
- "json",
- data_files=data_files,
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- # See more about loading any type of standard or custom dataset at
- # https://huggingface.co/docs/datasets/loading_datasets.html.
-
- # Labels
- if data_args.task_name is not None:
- is_regression = data_args.task_name == "stsb"
- if not is_regression:
- label_list = raw_datasets["train"].features["label"].names
- num_labels = len(label_list)
- else:
- num_labels = 1
- else:
- # Trying to have good defaults here, don't hesitate to tweak to your needs.
- is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
- if is_regression:
- num_labels = 1
- else:
- # A useful fast method:
- # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
- label_list = raw_datasets["train"].unique("label")
- label_list.sort() # Let's sort it for determinism
- num_labels = len(label_list)
-
- # Load pretrained model and tokenizer
- #
- # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
- # download model & vocab.
- config = AutoConfig.from_pretrained(
- model_args.config_name if model_args.config_name else model_args.model_name_or_path,
- num_labels=num_labels,
- finetuning_task=data_args.task_name,
- cache_dir=model_args.cache_dir,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- tokenizer = AutoTokenizer.from_pretrained(
- model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
- cache_dir=model_args.cache_dir,
- use_fast=model_args.use_fast_tokenizer,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- model = AutoModelForSequenceClassification.from_pretrained(
- model_args.model_name_or_path,
- from_tf=bool(".ckpt" in model_args.model_name_or_path),
- config=config,
- cache_dir=model_args.cache_dir,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
- )
-
- # Preprocessing the raw_datasets
- if data_args.task_name is not None:
- sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
- else:
- # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
- non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
- if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
- sentence1_key, sentence2_key = "sentence1", "sentence2"
- else:
- if len(non_label_column_names) >= 2:
- sentence1_key, sentence2_key = non_label_column_names[:2]
- else:
- sentence1_key, sentence2_key = non_label_column_names[0], None
-
- # Padding strategy
- if data_args.pad_to_max_length:
- padding = "max_length"
- else:
- # We will pad later, dynamically at batch creation, to the max sequence length in each batch
- padding = False
-
- # Some models have set the order of the labels to use, so let's make sure we do use it.
- label_to_id = None
- if (
- model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
- and data_args.task_name is not None
- and not is_regression
- ):
- # Some have all caps in their config, some don't.
- label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
- if sorted(label_name_to_id.keys()) == sorted(label_list):
- label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
- else:
- logger.warning(
- "Your model seems to have been trained with labels, but they don't match the dataset: "
- f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
- "\nIgnoring the model labels as a result.",
- )
- elif data_args.task_name is None and not is_regression:
- label_to_id = {v: i for i, v in enumerate(label_list)}
-
- if label_to_id is not None:
- model.config.label2id = label_to_id
- model.config.id2label = {id: label for label, id in config.label2id.items()}
- elif data_args.task_name is not None and not is_regression:
- model.config.label2id = {l: i for i, l in enumerate(label_list)}
- model.config.id2label = {id: label for label, id in config.label2id.items()}
-
- if data_args.max_seq_length > tokenizer.model_max_length:
- logger.warning(
- f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
- f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
- )
- max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
-
- def preprocess_function(examples):
- # Tokenize the texts
- args = (
- (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
- )
- result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
-
- # Map labels to IDs (not necessary for GLUE tasks)
- if label_to_id is not None and "label" in examples:
- result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
- return result
-
- with training_args.main_process_first(desc="dataset map pre-processing"):
- raw_datasets = raw_datasets.map(
- preprocess_function,
- batched=True,
- load_from_cache_file=not data_args.overwrite_cache,
- desc="Running tokenizer on dataset",
- )
- if training_args.do_train:
- if "train" not in raw_datasets:
- raise ValueError("--do_train requires a train dataset")
- train_dataset = raw_datasets["train"]
- if data_args.max_train_samples is not None:
- max_train_samples = min(len(train_dataset), data_args.max_train_samples)
- train_dataset = train_dataset.select(range(max_train_samples))
-
- if training_args.do_eval:
- if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
- raise ValueError("--do_eval requires a validation dataset")
- eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
- if data_args.max_eval_samples is not None:
- max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
- eval_dataset = eval_dataset.select(range(max_eval_samples))
-
- if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
- if "test" not in raw_datasets and "test_matched" not in raw_datasets:
- raise ValueError("--do_predict requires a test dataset")
- predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"]
- if data_args.max_predict_samples is not None:
- max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
- predict_dataset = predict_dataset.select(range(max_predict_samples))
-
- # Log a few random samples from the training set:
- if training_args.do_train:
- for index in random.sample(range(len(train_dataset)), 3):
- logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
-
- # Get the metric function
- if data_args.task_name is not None:
- metric = evaluate.load("glue", data_args.task_name)
- else:
- metric = evaluate.load("accuracy")
-
- # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
- # predictions and label_ids field) and has to return a dictionary string to float.
- def compute_metrics(p: EvalPrediction):
- preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
- preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
- if data_args.task_name is not None:
- result = metric.compute(predictions=preds, references=p.label_ids)
- if len(result) > 1:
- result["combined_score"] = np.mean(list(result.values())).item()
- return result
- elif is_regression:
- return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
- else:
- return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
-
- # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if
- # we already did the padding.
- if data_args.pad_to_max_length:
- data_collator = default_data_collator
- elif training_args.fp16:
- data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
- else:
- data_collator = None
-
- # Initialize our Trainer
- trainer = Trainer(
- model=model,
- args=training_args,
- train_dataset=train_dataset if training_args.do_train else None,
- eval_dataset=eval_dataset if training_args.do_eval else None,
- compute_metrics=compute_metrics,
- tokenizer=tokenizer,
- data_collator=data_collator,
- )
-
- # Training
- if training_args.do_train:
- checkpoint = None
- if training_args.resume_from_checkpoint is not None:
- checkpoint = training_args.resume_from_checkpoint
- elif last_checkpoint is not None:
- checkpoint = last_checkpoint
- train_result = trainer.train(resume_from_checkpoint=checkpoint)
- metrics = train_result.metrics
- max_train_samples = (
- data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
- )
- metrics["train_samples"] = min(max_train_samples, len(train_dataset))
-
- trainer.save_model() # Saves the tokenizer too for easy upload
-
- trainer.log_metrics("train", metrics)
- trainer.save_metrics("train", metrics)
- trainer.save_state()
-
- # Evaluation
- if training_args.do_eval:
- logger.info("*** Evaluate ***")
-
- # Loop to handle MNLI double evaluation (matched, mis-matched)
- tasks = [data_args.task_name]
- eval_datasets = [eval_dataset]
- if data_args.task_name == "mnli":
- tasks.append("mnli-mm")
- valid_mm_dataset = raw_datasets["validation_mismatched"]
- if data_args.max_eval_samples is not None:
- max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples)
- valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples))
- eval_datasets.append(valid_mm_dataset)
- combined = {}
-
- for eval_dataset, task in zip(eval_datasets, tasks):
- metrics = trainer.evaluate(eval_dataset=eval_dataset)
-
- max_eval_samples = (
- data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
- )
- metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
-
- if task == "mnli-mm":
- metrics = {k + "_mm": v for k, v in metrics.items()}
- if task is not None and "mnli" in task:
- combined.update(metrics)
-
- trainer.log_metrics("eval", metrics)
- trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics)
-
- if training_args.do_predict:
- logger.info("*** Predict ***")
-
- # Loop to handle MNLI double evaluation (matched, mis-matched)
- tasks = [data_args.task_name]
- predict_datasets = [predict_dataset]
- if data_args.task_name == "mnli":
- tasks.append("mnli-mm")
- predict_datasets.append(raw_datasets["test_mismatched"])
-
- for predict_dataset, task in zip(predict_datasets, tasks):
- # Removing the `label` columns because it contains -1 and Trainer won't like that.
- predict_dataset = predict_dataset.remove_columns("label")
- predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
- predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
-
- output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
- if trainer.is_world_process_zero():
- with open(output_predict_file, "w") as writer:
- logger.info(f"***** Predict results {task} *****")
- writer.write("index\tprediction\n")
- for index, item in enumerate(predictions):
- if is_regression:
- writer.write(f"{index}\t{item:3.3f}\n")
- else:
- item = label_list[item]
- writer.write(f"{index}\t{item}\n")
-
- kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
- if data_args.task_name is not None:
- kwargs["language"] = "en"
- kwargs["dataset_tags"] = "glue"
- kwargs["dataset_args"] = data_args.task_name
- kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
-
- if training_args.push_to_hub:
- trainer.push_to_hub(**kwargs)
- else:
- trainer.create_model_card(**kwargs)
-
-
-def _mp_fn(index):
- # For xla_spawn (TPUs)
- main()
-
-
-if __name__ == "__main__":
- main()
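The removed `run_glue.py` uses `task_to_keys` inside `preprocess_function` to decide whether a GLUE task is single-sentence or sentence-pair before tokenizing. The following standalone sketch (not part of the deleted script; the checkpoint name is only an example) reproduces that step for MRPC:

```python
# Minimal sketch of the preprocessing step from the removed run_glue.py:
# task_to_keys picks the text columns, then the tokenizer pads/truncates them.
from transformers import AutoTokenizer

task_to_keys = {"mrpc": ("sentence1", "sentence2"), "sst2": ("sentence", None)}
sentence1_key, sentence2_key = task_to_keys["mrpc"]

examples = {
    "sentence1": ["The cat sat on the mat."],
    "sentence2": ["A cat was sitting on a mat."],
}

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # example checkpoint
args = (
    (examples[sentence1_key],)
    if sentence2_key is None
    else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding="max_length", max_length=128, truncation=True)
print(len(result["input_ids"][0]))  # 128, mirroring the script's pad_to_max_length=True default
```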
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/longform-qa/eli5_utils.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/longform-qa/eli5_utils.py
deleted file mode 100644
index d4b235fdbaab26218c37f1b60d3142349c11b737..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/longform-qa/eli5_utils.py
+++ /dev/null
@@ -1,688 +0,0 @@
-import functools
-import math
-import os # noqa: F401
-from random import choice, randint
-from time import time
-
-import datasets # noqa: F401
-import faiss # noqa: F401
-import numpy as np
-import pandas as pd
-import torch
-import torch.utils.checkpoint as checkpoint
-from elasticsearch import Elasticsearch # noqa: F401
-from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401
-from torch import nn
-from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
-from tqdm import tqdm
-
-from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup
-
-
-pd.set_option("display.max_colwidth", None)
-
-
-###############
-# Sparse index
-###############
-def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"):
- index_config = {
- "settings": {
- "number_of_shards": 1,
- "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}},
- },
- "mappings": {
- "properties": {
- "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"},
- "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"},
- "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"},
- }
- },
- }
- es_client.indices.create(index=index_name, body=index_config)
- number_of_docs = passages_dset.num_rows
- progress = tqdm(unit="docs", total=number_of_docs)
- successes = 0
-
- def passage_generator():
- for passage in passages_dset:
- yield passage
-
- # create the ES index
- for ok, action in streaming_bulk(
- client=es_client,
- index=index_name,
- actions=passage_generator(),
- ):
- progress.update(1)
- successes += ok
- print("Indexed %d documents" % (successes,))
-
-
-def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20):
- q = question.lower()
- banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"]
- q = " ".join([w for w in q.split() if w not in banned])
- response = es_client.search(
- index=index_name,
- body={
- "query": {
- "multi_match": {
- "query": q,
- "fields": ["article_title", "section_title", "passage_text^2"],
- "type": "cross_fields",
- }
- },
- "size": 2 * n_results,
- },
- )
- hits = response["hits"]["hits"]
- support_doc = "<P> " + " <P> ".join([hit["_source"]["passage_text"] for hit in hits])
- res_list = [{k: hit["_source"][k] for k in hit["_source"] if k != "passage_text"} for hit in hits]
- for r, hit in zip(res_list, hits):
- r["passage_id"] = hit["_id"]
- r["score"] = hit["_score"]
- r["passage_text"] = hit["_source"]["passage_text"]
- res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results]
- return support_doc, res_list
-
-
-###############
-# ELI5 retriever training
-###############
-class ELI5DatasetQARetriver(Dataset):
- def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None):
- self.data = examples_array
- self.answer_thres = extra_answer_threshold
- self.min_length = min_answer_length
- self.training = training
- self.n_samples = self.data.num_rows if n_samples is None else n_samples
-
- def __len__(self):
- return self.n_samples
-
- def make_example(self, idx):
- example = self.data[idx]
- question = example["title"]
- if self.training:
- answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))]
- answer_tab = choice(answers).split(" ")
- start_idx = randint(0, max(0, len(answer_tab) - self.min_length))
- answer_span = " ".join(answer_tab[start_idx:])
- else:
- answer_span = example["answers"]["text"][0]
- return (question, answer_span)
-
- def __getitem__(self, idx):
- return self.make_example(idx % self.data.num_rows)
-
-
-class RetrievalQAEmbedder(nn.Module):
- def __init__(self, sent_encoder, dim):
- super(RetrievalQAEmbedder, self).__init__()
- self.sent_encoder = sent_encoder
- self.output_dim = 128
- self.project_q = nn.Linear(dim, self.output_dim, bias=False)
- self.project_a = nn.Linear(dim, self.output_dim, bias=False)
- self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
-
- def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1):
- # reproduces BERT forward pass with checkpointing
- if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
- return self.sent_encoder(input_ids, attention_mask=attention_mask)[1]
- else:
- # prepare implicit variables
- device = input_ids.device
- input_shape = input_ids.size()
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
- head_mask = [None] * self.sent_encoder.config.num_hidden_layers
- extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask(
- attention_mask, input_shape
- )
-
- # define function for checkpointing
- def partial_encode(*inputs):
- encoder_outputs = self.sent_encoder.encoder(
- inputs[0],
- attention_mask=inputs[1],
- head_mask=head_mask,
- )
- sequence_output = encoder_outputs[0]
- pooled_output = self.sent_encoder.pooler(sequence_output)
- return pooled_output
-
- # run embedding layer on everything at once
- embedding_output = self.sent_encoder.embeddings(
- input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
- )
- # run encoding and pooling on one mini-batch at a time
- pooled_output_list = []
- for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
- b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
- b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
- pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
- pooled_output_list.append(pooled_output)
- return torch.cat(pooled_output_list, dim=0)
-
- def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1):
- q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size)
- return self.project_q(q_reps)
-
- def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1):
- a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size)
- return self.project_a(a_reps)
-
- def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1):
- device = q_ids.device
- q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size)
- a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size)
- compare_scores = torch.mm(q_reps, a_reps.t())
- loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
- loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
- loss = (loss_qa + loss_aq) / 2
- return loss
-
-
-def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"):
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- bert_model = AutoModel.from_pretrained(model_name).to(device)
- # run bert_model on a dummy batch to get output dimension
- d_ids = torch.LongTensor(
- [[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]]
- ).to(device)
- d_mask = torch.LongTensor([[1]]).to(device)
- sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1]
- qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device)
- if from_file is not None:
- param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states
- qa_embedder.load_state_dict(param_dict["model"])
- return tokenizer, qa_embedder
-
-
-def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"):
- q_ls = [q for q, a in qa_list]
- a_ls = [a for q, a in qa_list]
- q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True)
- q_ids, q_mask = (
- torch.LongTensor(q_toks["input_ids"]).to(device),
- torch.LongTensor(q_toks["attention_mask"]).to(device),
- )
- a_toks = tokenizer(a_ls, max_length=max_len, padding="max_length", truncation=True)
- a_ids, a_mask = (
- torch.LongTensor(a_toks["input_ids"]).to(device),
- torch.LongTensor(a_toks["attention_mask"]).to(device),
- )
- return (q_ids, q_mask, a_ids, a_mask)
-
-
-def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0):
- model.train()
- # make iterator
- train_sampler = RandomSampler(dataset)
- model_collate_fn = functools.partial(
- make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
- )
- data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
- epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
- # accumulate loss since last print
- loc_steps = 0
- loc_loss = 0.0
- st_time = time()
- for step, batch in enumerate(epoch_iterator):
- q_ids, q_mask, a_ids, a_mask = batch
- pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
- loss = pre_loss.sum()
- # optimizer
- loss.backward()
- optimizer.step()
- scheduler.step()
- model.zero_grad()
- # some printing within the epoch
- loc_loss += loss.item()
- loc_steps += 1
- if step % args.print_freq == 0 or step == 1:
- print(
- "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
- e,
- step,
- len(dataset) // args.batch_size,
- loc_loss / loc_steps,
- time() - st_time,
- )
- )
- loc_loss = 0
- loc_steps = 0
-
-
-def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0):
- model.train()
- model_collate_fn = functools.partial(
- make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
- )
- # make iterator
- train_samplers = [RandomSampler(dataset) for dataset in dataset_list]
- data_loaders = [
- DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
- for dataset, train_sampler in zip(dataset_list, train_samplers)
- ]
- iterators = [iter(dloader) for dloader in data_loaders]
- joint_iter = zip(*iterators)
- # accumulate loss since last print
- loc_steps = 0
- loc_loss = 0.0
- st_time = time()
- for step, (batches,) in enumerate(zip(joint_iter)):
- for batch in batches:
- q_ids, q_mask, a_ids, a_mask = batch
- loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
- # optimizer
- loss.backward()
- optimizer.step()
- scheduler.step()
- model.zero_grad()
- # some printing within the epoch
- loc_loss += loss.item()
- loc_steps += 1
- if step % args.print_freq == 0:
- print(
- "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
- e,
- step,
- len(dataset_list[0]) // args.batch_size,
- loc_loss / loc_steps,
- time() - st_time,
- )
- )
- loc_loss = 0
- loc_steps = 0
-
-
-def evaluate_qa_retriever(model, dataset, tokenizer, args):
- model.eval()
- # make iterator
- eval_sampler = SequentialSampler(dataset)
- model_collate_fn = functools.partial(
- make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
- )
- data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn)
- epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
- tot_loss = 0.0
- with torch.no_grad():
- for step, batch in enumerate(epoch_iterator):
- q_ids, q_mask, a_ids, a_mask = batch
- loss = model(q_ids, q_mask, a_ids, a_mask)
- tot_loss += loss.item()
- return tot_loss / (step + 1)
-
-
-def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args):
- qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8)
- qar_scheduler = get_linear_schedule_with_warmup(
- qar_optimizer,
- num_warmup_steps=100,
- num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size),
- )
- for e in range(qar_args.num_epochs):
- train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e)
- m_save_dict = {
- "model": qar_model.state_dict(),
- "optimizer": qar_optimizer.state_dict(),
- "scheduler": qar_scheduler.state_dict(),
- }
- print("Saving model {}".format(qar_args.model_save_name))
- torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e))
- eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args)
- print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss))
-
-
-###############
-# ELI5 seq2seq model training
-###############
-class ELI5DatasetS2S(Dataset):
- def __init__(
- self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True
- ):
- self.training = training
- self.data = examples_array
- self.make_doc_function = make_doc_fun
- self.document_cache = {} if document_cache is None else document_cache
- assert not (make_doc_fun is None and document_cache is None)
- # make index of specific question-answer pairs from multi-answers
- if self.training:
- self.qa_id_list = [
- (i, j)
- for i, qa in enumerate(self.data)
- for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"]))
- if j == 0 or sc >= extra_answer_threshold
- ]
- else:
- self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)]
-
- def __len__(self):
- return len(self.qa_id_list)
-
- def make_example(self, idx):
- i, j = self.qa_id_list[idx]
- example = self.data[i]
- question = example["title"] + " " + example["selftext"]
- answer = example["answers"]["text"][j]
- q_id = example["q_id"]
- if self.make_doc_function is not None:
- self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"]))
- document = self.document_cache[q_id]
- in_st = "question: {} context: {}".format(
- question.lower().replace(" --t--", "").strip(),
- document.lower().strip(),
- )
- out_st = answer
- return (in_st, out_st)
-
- def __getitem__(self, idx):
- return self.make_example(idx)
-
-
-def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"):
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
- if from_file is not None:
- param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states
- model.load_state_dict(param_dict["model"])
- return tokenizer, model
-
-
-def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"):
- q_ls = [q for q, a in qa_list]
- a_ls = [a for q, a in qa_list]
- q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True)
- q_ids, q_mask = (
- torch.LongTensor(q_toks["input_ids"]).to(device),
- torch.LongTensor(q_toks["attention_mask"]).to(device),
- )
- a_toks = tokenizer(a_ls, max_length=min(max_len, max_a_len), padding="max_length", truncation=True)
- a_ids, a_mask = (
- torch.LongTensor(a_toks["input_ids"]).to(device),
- torch.LongTensor(a_toks["attention_mask"]).to(device),
- )
- lm_labels = a_ids[:, 1:].contiguous().clone()
- lm_labels[a_mask[:, 1:].contiguous() == 0] = -100
- model_inputs = {
- "input_ids": q_ids,
- "attention_mask": q_mask,
- "decoder_input_ids": a_ids[:, :-1].contiguous(),
- "lm_labels": lm_labels,
- }
- return model_inputs
-
-
-def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False):
- model.train()
- # make iterator
- if curriculum:
- train_sampler = SequentialSampler(dataset)
- else:
- train_sampler = RandomSampler(dataset)
- model_collate_fn = functools.partial(
- make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
- )
- data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
- epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
- # accumulate loss since last print
- loc_steps = 0
- loc_loss = 0.0
- st_time = time()
- for step, batch_inputs in enumerate(epoch_iterator):
- pre_loss = model(**batch_inputs)[0]
- loss = pre_loss.sum() / pre_loss.shape[0]
- loss.backward()
- # optimizer
- if step % args.backward_freq == 0:
- optimizer.step()
- scheduler.step()
- model.zero_grad()
- # some printing within the epoch
- loc_loss += loss.item()
- loc_steps += 1
- if step % args.print_freq == 0 or step == 1:
- print(
- "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
- e,
- step,
- len(dataset) // args.batch_size,
- loc_loss / loc_steps,
- time() - st_time,
- )
- )
- loc_loss = 0
- loc_steps = 0
-
-
-def eval_qa_s2s_epoch(model, dataset, tokenizer, args):
- model.eval()
- # make iterator
- train_sampler = SequentialSampler(dataset)
- model_collate_fn = functools.partial(
- make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
- )
- data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
- epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
- # accumulate loss since last print
- loc_steps = 0
- loc_loss = 0.0
- st_time = time()
- with torch.no_grad():
- for step, batch_inputs in enumerate(epoch_iterator):
- pre_loss = model(**batch_inputs)[0]
- loss = pre_loss.sum() / pre_loss.shape[0]
- loc_loss += loss.item()
- loc_steps += 1
- if step % args.print_freq == 0:
- print(
- "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
- step,
- len(dataset) // args.batch_size,
- loc_loss / loc_steps,
- time() - st_time,
- )
- )
- print(
- "Total \t L: {:.3f} \t -- {:.3f}".format(
- loc_loss / loc_steps,
- time() - st_time,
- )
- )
-
-
-def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args):
- s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8)
- s2s_scheduler = get_linear_schedule_with_warmup(
- s2s_optimizer,
- num_warmup_steps=400,
- num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size),
- )
- for e in range(s2s_args.num_epochs):
- train_qa_s2s_epoch(
- qa_s2s_model,
- s2s_train_dset,
- qa_s2s_tokenizer,
- s2s_optimizer,
- s2s_scheduler,
- s2s_args,
- e,
- curriculum=(e == 0),
- )
- m_save_dict = {
- "model": qa_s2s_model.state_dict(),
- "optimizer": s2s_optimizer.state_dict(),
- "scheduler": s2s_scheduler.state_dict(),
- }
- print("Saving model {}".format(s2s_args.model_save_name))
- eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args)
- torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e))
-
-
-# generate answer from input "question: ... context: <P> ..." using the seq2seq model trained above
-
-
-# retrieve supporting passages for a question from the dense (faiss) wiki index
-def query_qa_dense_index(question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20):
- q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder)
- D, I = wiki_index.search(q_rep, 2 * n_results)
- res_passages = [wiki_passages[int(i)] for i in I[0]]
- support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages])
- res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages]
- res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results]
- for r, sc in zip(res_list, D[0]):
- r["score"] = float(sc)
- return support_doc, res_list
-
-
-def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10):
- q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder)
- D, I = wiki_index.search(q_rep, n_results)
- res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I]
- support_doc_lst = [
- "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst
- ]
- all_res_lists = []
- for res_passages, dl in zip(res_passages_lst, D):
- res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages]
- for r, sc in zip(res_list, dl):
- r["score"] = float(sc)
- all_res_lists += [res_list[:]]
- return support_doc_lst, all_res_lists
-
-
-# find nearest neighbors of an answer or declarative text in Wikipedia snippets
-def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20):
- a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder)
- D, I = wiki_index.search(a_rep, 2 * n_results)
- res_passages = [wiki_passages[int(i)] for i in I[0]]
- support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages])
- res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages]
- res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results]
- for r, sc, i in zip(res_list, D[0], I[0]):
- r["passage_id"] = int(i)
- r["score"] = float(sc)
- return support_doc, res_list
-
-
-def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10):
- a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder)
- D, I = wiki_index.search(a_reps, n_results)
- res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I]
- support_doc_lst = [
- "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst
- ]
- all_res_lists = []
- for res_passages, dl, il in zip(res_passages_lst, D, I):
- res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages]
- for r, sc, i in zip(res_list, dl, il):
- r["passage_id"] = int(i)
- r["score"] = float(sc)
- all_res_lists += [res_list[:]]
- return support_doc_lst, all_res_lists
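The dense-retrieval helpers above (`query_qa_dense_index` and its batch variants) all follow the same pattern: embed the query with the retriever, then call `wiki_index.search` to get score/index arrays `D, I`. Here is a small self-contained sketch of that pattern with a toy faiss index; the inner-product index type and the random vectors are assumptions standing in for the real ELI5 passage embeddings.

```python
# Toy faiss index illustrating the (D, I) = wiki_index.search(...) pattern used above.
import faiss
import numpy as np

dim = 128  # matches RetrievalQAEmbedder.output_dim above
passage_reps = np.random.rand(1000, dim).astype("float32")  # stand-in for real passage embeddings
question_rep = np.random.rand(1, dim).astype("float32")     # stand-in for an embedded question

wiki_index = faiss.IndexFlatIP(dim)  # assumption: a flat inner-product index
wiki_index.add(passage_reps)

D, I = wiki_index.search(question_rep, 10)  # scores and passage ids, unpacked as in the code above
print(I[0][:3], D[0][:3])
```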
diff --git a/spaces/cihyFjudo/fairness-paper-search/Rogue One A Star Wars Story 1 Movie Download Torrent - What Critics and Fans Are Saying.md b/spaces/cihyFjudo/fairness-paper-search/Rogue One A Star Wars Story 1 Movie Download Torrent - What Critics and Fans Are Saying.md
deleted file mode 100644
index cf16fa898ae6144e21fdd51d9f467163bf6f6831..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Rogue One A Star Wars Story 1 Movie Download Torrent - What Critics and Fans Are Saying.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/The Guardians Tamil Dubbed Movie Free Download How to Get It in Minutes.md b/spaces/cihyFjudo/fairness-paper-search/The Guardians Tamil Dubbed Movie Free Download How to Get It in Minutes.md
deleted file mode 100644
index 84188de7ec33990ec9c272af2d86230b5189a048..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/The Guardians Tamil Dubbed Movie Free Download How to Get It in Minutes.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/__init__.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/__init__.py
deleted file mode 100644
index ed00764f7c193ca9bcd0bf67196da59c30048a28..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""fontTools.ttLib -- a package for dealing with TrueType fonts."""
-
-from fontTools.misc.loggingTools import deprecateFunction
-import logging
-
-
-log = logging.getLogger(__name__)
-
-
-class TTLibError(Exception):
- pass
-
-
-class TTLibFileIsCollectionError(TTLibError):
- pass
-
-
-@deprecateFunction("use logging instead", category=DeprecationWarning)
-def debugmsg(msg):
- import time
-
- print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
-
-
-from fontTools.ttLib.ttFont import *
-from fontTools.ttLib.ttCollection import TTCollection
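For context, the removed `__init__.py` re-exports `TTFont` via the star import above, so typical usage of the package looks like the following sketch (the font path is a placeholder):

```python
# Minimal usage sketch for fontTools.ttLib; "SomeFont.ttf" is a placeholder path.
from fontTools.ttLib import TTFont, TTLibError

try:
    font = TTFont("SomeFont.ttf")
    print(sorted(font.keys())[:5])  # a few of the font's table tags
except (TTLibError, OSError) as exc:
    print(f"could not open font: {exc}")
```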
diff --git a/spaces/codedog-ai/edu-assistant/examples/coding_problem_orm.py b/spaces/codedog-ai/edu-assistant/examples/coding_problem_orm.py
deleted file mode 100644
index de66de6eddb364fe4289c96be2aa3a5a85df5b32..0000000000000000000000000000000000000000
--- a/spaces/codedog-ai/edu-assistant/examples/coding_problem_orm.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import json
-
-from edu_assistant.learning_tasks.coding_problem import CodingProblem
-
-
-def load_problems(file_path: str):
- """load problems from json file and insert into redis orm.
-
- Args:
- file_path (str): file path to json file.
- """
- with open(file_path, "r", encoding="utf-8") as f:
- problems_data = json.load(f)
-
- CodingProblem.enable_redis_orm()
- problems = [CodingProblem.parse_obj(problem_data) for problem_data in problems_data]
- CodingProblem.insert(problems)
-
-
-if __name__ == "__main__":
- load_problems("examples/coding_problems.json")
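The loader above expects `examples/coding_problems.json` to be a JSON array whose entries can be parsed by `CodingProblem.parse_obj`. The exact schema lives in `edu_assistant.learning_tasks.coding_problem`, so the field names in this sketch are purely hypothetical:

```python
# Hypothetical shape of examples/coding_problems.json; every key here is an
# assumption, since the real CodingProblem fields are defined elsewhere in the repo.
import json

example_problems = [
    {
        "title": "Sum of two numbers",                      # assumed field
        "description": "Read two integers and print a+b.",  # assumed field
        "difficulty": "easy",                               # assumed field
    }
]

with open("examples/coding_problems.json", "w", encoding="utf-8") as f:
    json.dump(example_problems, f, indent=2)
```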
diff --git a/spaces/colakin/video-generater/classes/ElevenLabsApi.php b/spaces/colakin/video-generater/classes/ElevenLabsApi.php
deleted file mode 100644
index 94e002666f278441df812a222e58305dc7aa2d95..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/classes/ElevenLabsApi.php
+++ /dev/null
@@ -1,332 +0,0 @@
-<?php
-
-use GuzzleHttp\Client;
-use Psr\Http\Message\ResponseInterface as Response;
-
-class ElevenLabsApi {
- private const BASE_URL = 'https://api.elevenlabs.io';
-
- private $apiKey;
- private $client;
-
- public function __construct(string $apiKey, Client $client = null) {
- $this->apiKey = $apiKey;
- $this->client = $client ?? new Client();
- }
-
- /**
- * Returns metadata about all your generated audio.
- *
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "items": [
- * {
- * "id": "VW7YKqPnjY4h39yTbx2L",
- * "title": "Generated Audio 1",
- * "duration": 120,
- * "created_at": "2023-03-16T08:00:00Z",
- * "url": "https://download-link.example.com/your_history_item_audio.mp3"
- * },
- * {
- * "id": "AbCDeFgH1I2jK3LmN4O5",
- * "title": "Generated Audio 2",
- * "duration": 90,
- * "created_at": "2023-03-15T08:00:00Z",
- * "url": "https://download-link.example.com/your_history_item_audio2.mp3"
- * }
- * ]
- * }
- */
- public function getHistory(): Response {
- $url = self::BASE_URL . '/v1/history';
-
- return $this->makeRequest('GET', $url);
- }
-
- /**
- * Returns the audio of a history item.
- *
- * @param string $history_item_id The history item ID to get audio from.
- *
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "url": "https://download-link.example.com/your_history_item_audio.mp3"
- * }
- */
- public function getAudioFromHistoryItem(string $historyItemId): Response {
- $url = self::BASE_URL . "/v1/history/{$historyItemId}/audio";
-
- return $this->makeRequest('GET', $url);
- }
-
- /**
- * Delete a number of history items by their IDs.
- *
- * @param array $history_item_ids An array of history item IDs to delete.
- *
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "status": "success",
- * "message": "Selected history items deleted successfully."
- * }
- */
- public function deleteHistoryItems(array $historyItemIds): Response {
- $url = self::BASE_URL . '/v1/history/delete';
- $body = json_encode(['history_item_ids' => $historyItemIds]);
-
- return $this->makeRequest('POST', $url, $body);
- }
-
- /**
- * Delete a history item by its ID
- *
- * @param string $history_item_id The ID of the history item to be deleted
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "message": "History item deleted successfully"
- * }
- */
- public function deleteHistoryItem(string $historyItemId): Response {
- $url = self::BASE_URL . "/v1/history/{$historyItemId}";
-
- return $this->makeRequest('DELETE', $url);
- }
-
- /**
- * Download one or more history items.
- *
- * @param array $history_item_ids An array of history item IDs to download.
- *
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "url": "https://download-link.example.com/your_downloaded_audio.zip"
- * }
- */
- public function downloadHistoryItems(array $historyItemIds): Response {
- $url = self::BASE_URL . '/v1/history/download';
- $body = json_encode(['history_item_ids' => $historyItemIds]);
-
- return $this->makeRequest('POST', $url, $body);
- }
-
- /**
- * Convert text to speech
- *
- * @param string $text The text to be converted into speech
- * @param string $voiceId The ID of the voice to use for the conversion
- * @param array $data Request payload (the text to convert and any voice settings)
- *
- * Example of a successful response (200 OK):
- * {
- * "history_item_id": "VW7YKqPnjY4h39yTbx2L",
- * "text": "Hello, world!",
- * "options": {
- * "voice": "Joanna",
- * "language": "en-US",
- * "output_format": "mp3"
- * },
- * "audio_url": "https://api.elevenlabs.io/v1/history/VW7YKqPnjY4h39yTbx2L/audio"
- * }
- */
- public function textToSpeech(string $voiceId, array $data): Response {
- $url = self::BASE_URL . "/v1/text-to-speech/{$voiceId}/stream";
- $body = json_encode($data);
-
- return $this->makeRequest('POST', $url, $body);
- }
-
- /**
- * Convert text to speech with a specific Voice ID
- *
- * @param string $voiceId The ID of the voice to be used for TTS conversion
- * @param array $data Request payload (the text to convert and any voice settings)
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "history_item_id": "VW7YKqPnjY4h39yTbx2L",
- * "text": "Hello, world!",
- * "options": {
- * "voice_id": "Joanna",
- * "language": "en-US",
- * "output_format": "mp3"
- * },
- * "audio_url": "https://api.elevenlabs.io/v1/history/VW7YKqPnjY4h39yTbx2L/audio"
- * }
- */
- public function textToSpeechWithVoiceId(string $voiceId, array $data): Response {
- $url = self::BASE_URL . "/v1/text-to-speech/{$voiceId}";
- $body = json_encode($data);
-
- return $this->makeRequest('POST', $url, $body);
- }
-
- /**
- * Make a request to the ElevenLabs API.
- *
- * @param string $method
- * @param string $url
- * @param string|null $body
- * @return Response
- */
- private function makeRequest(string $method, string $url, string $body = null): Response {
- $options = [
- 'headers' => [
- 'Xi-Api-Key' => $this->apiKey,
- 'Content-Type' => 'application/json',
- ],
- ];
-
- if ($body !== null) {
- $options['body'] = $body;
- }
-
- return $this->client->request($method, $url, $options);
- }
-
- /**
- * Delete a sample attached to a voice.
- *
- * @param string $voiceId The ID of the voice the sample belongs to.
- * @param string $sampleId The ID of the sample to delete.
- *
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "status": "success",
- * "message": "Sample deleted successfully."
- * }
- */
- public function deleteSample(string $voiceId, string $sampleId): Response {
- $url = self::BASE_URL . "/v1/voices/{$voiceId}/samples/{$sampleId}";
-
- return $this->makeRequest('DELETE', $url);
- }
-
- /**
- * Returns the audio corresponding to a sample attached to a voice.
- *
- * @param string $voiceId
- * @param string $sampleId
- * @return Response
- * Example of a successful response (200 OK):
- * Content-Type: audio/mpeg
- * (Binary audio content)
- */
- public function getAudioFromSample(string $voiceId, string $sampleId): Response {
- $url = self::BASE_URL . "/v1/voices/{$voiceId}/samples/{$sampleId}/audio";
-
- return $this->makeRequest('GET', $url);
- }
-
- /**
- * Delete a history item by its ID.
- *
- * @param string $historyItemId
- * @return Response
- */
- public function deleteHistoryItemById(string $historyItemId): Response {
- $url = self::BASE_URL . "/v1/history/{$historyItemId}";
-
- return $this->makeRequest('DELETE', $url);
- }
-
- /**
- * Download one or more history items.
- *
- * @param array $historyItemIds
- * @return Response
- */
- public function downloadHistoryItemsByIds(array $historyItemIds): Response {
- $url = self::BASE_URL . '/v1/history/download';
- $body = json_encode(['history_item_ids' => $historyItemIds]);
-
- return $this->makeRequest('POST', $url, $body);
- }
-
- /**
- * Get metadata about all your generated audio.
- *
- * @return Response
- *
- * Example of a successful response (200 OK):
- * {
- * "items": [
- * {
- * "id": "VW7YKqPnjY4h39yTbx2L",
- * "created_at": "2023-03-16T12:30:00Z",
- * "request": {
- * "text": "Hello, world!",
- * "voice": "en-US-Wavenet-A",
- * "language_code": "en-US",
- * "speed": 1,
- * "pitch": 0,
- * "volume_gain_db": 0
- * },
- * "duration_seconds": 2.16
- * },
- * {
- * "id": "yv9dA7SxQx2zG8f4Zv1m",
- * "created_at": "2023-03-15T14:45:00Z",
- * "request": {
- * "text": "Good morning!",
- * "voice": "en-US-Wavenet-B",
- * "language_code": "en-US",
- * "speed": 1,
- * "pitch": 0,
- * "volume_gain_db": 0
- * },
- * "duration_seconds": 1.8
- * }
- * ]
- * }
- */
- public function getGeneratedItems(): Response {
- $url = self::BASE_URL . '/v1/history';
-
- return $this->makeRequest('GET', $url);
- }
-
- /**
- * Get the audio of a history item by its ID.
- *
- * @param string $historyItemId
- * @return Response
- */
- public function getAudioFromHistoryItemById(string $historyItemId): Response {
- $url = self::BASE_URL . "/v1/history/{$historyItemId}/audio";
-
- return $this->makeRequest('GET', $url);
- }
-
- /**
- * Delete a number of history items by their IDs.
- *
- * @param array $historyItemIds
- * @return Response
- */
- public function deleteHistoryItemsByIds(array $historyItemIds): Response {
- $url = self::BASE_URL . '/v1/history/delete';
- $body = json_encode(['history_item_ids' => $historyItemIds]);
-
- return $this->makeRequest('POST', $url, $body);
- }
-}
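Independently of the PHP wrapper, the requests it builds are plain HTTPS calls: `GET`/`POST`/`DELETE` against `https://api.elevenlabs.io` with an `Xi-Api-Key` header and a JSON body. A language-neutral sketch of the same call `getHistory()` issues (the API key is a placeholder, and whether these endpoints still match the live ElevenLabs API is not verified here):

```python
# Same request that ElevenLabsApi::getHistory() issues, expressed with requests.
import requests

BASE_URL = "https://api.elevenlabs.io"
API_KEY = "your-api-key-here"  # placeholder

resp = requests.get(
    f"{BASE_URL}/v1/history",
    headers={"Xi-Api-Key": API_KEY, "Content-Type": "application/json"},
    timeout=30,
)
print(resp.status_code)
print(resp.json() if resp.ok else resp.text)
```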
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/acelp_filters.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/acelp_filters.h
deleted file mode 100644
index fe86cb20974238a011004ed871ba66278cb875c0..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/acelp_filters.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * various filters for ACELP-based codecs
- *
- * Copyright (c) 2008 Vladimir Voroshilov
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_ACELP_FILTERS_H
-#define AVCODEC_ACELP_FILTERS_H
-
-#include <stdint.h>
-
-typedef struct ACELPFContext {
- /**
- * Floating point version of ff_acelp_interpolate()
- */
- void (*acelp_interpolatef)(float *out, const float *in,
- const float *filter_coeffs, int precision,
- int frac_pos, int filter_length, int length);
-
- /**
- * Apply an order 2 rational transfer function in-place.
- *
- * @param out output buffer for filtered speech samples
- * @param in input buffer containing speech data (may be the same as out)
- * @param zero_coeffs z^-1 and z^-2 coefficients of the numerator
- * @param pole_coeffs z^-1 and z^-2 coefficients of the denominator
- * @param gain scale factor for final output
- * @param mem intermediate values used by filter (should be 0 initially)
- * @param n number of samples (should be a multiple of eight)
- */
- void (*acelp_apply_order_2_transfer_function)(float *out, const float *in,
- const float zero_coeffs[2],
- const float pole_coeffs[2],
- float gain,
- float mem[2], int n);
-
-}ACELPFContext;
-
-/**
- * Initialize ACELPFContext.
- */
-void ff_acelp_filter_init(ACELPFContext *c);
-void ff_acelp_filter_init_mips(ACELPFContext *c);
-
-/**
- * low-pass Finite Impulse Response filter coefficients.
- *
- * Hamming windowed sinc filter with cutoff freq 3/40 of the sampling freq,
- * the coefficients are scaled by 2^15.
- * This array only contains the right half of the filter.
- * This filter is likely identical to the one used in G.729, though this
- * could not be determined from the original comments with certainty.
- */
-extern const int16_t ff_acelp_interp_filter[61];
-
-/**
- * Generic FIR interpolation routine.
- * @param[out] out buffer for interpolated data
- * @param in input data
- * @param filter_coeffs interpolation filter coefficients (0.15)
- * @param precision sub sample factor, that is the precision of the position
- * @param frac_pos fractional part of position [0..precision-1]
- * @param filter_length filter length
- * @param length length of output
- *
- * filter_coeffs contains coefficients of the right half of the symmetric
- * interpolation filter. filter_coeffs[0] should the central (unpaired) coefficient.
- * See ff_acelp_interp_filter for an example.
- */
-void ff_acelp_interpolate(int16_t* out, const int16_t* in,
- const int16_t* filter_coeffs, int precision,
- int frac_pos, int filter_length, int length);
-
-/**
- * Floating point version of ff_acelp_interpolate()
- */
-void ff_acelp_interpolatef(float *out, const float *in,
- const float *filter_coeffs, int precision,
- int frac_pos, int filter_length, int length);
-
-
-/**
- * high-pass filtering and upscaling (4.2.5 of G.729).
- * @param[out] out output buffer for filtered speech data
- * @param[in,out] hpf_f past filtered data from previous (2 items long)
- * frames (-0x20000000 <= (14.13) < 0x20000000)
- * @param in speech data to process
- * @param length input data size
- *
- * out[i] = 0.93980581 * in[i] - 1.8795834 * in[i-1] + 0.93980581 * in[i-2] +
- * 1.9330735 * out[i-1] - 0.93589199 * out[i-2]
- *
- * The filter has a cut-off frequency of 1/80 of the sampling freq
- *
- * @note Two items before the top of the in buffer must contain two items from the
- * tail of the previous subframe.
- *
- * @remark It is safe to pass the same array in in and out parameters.
- *
- * @remark AMR uses mostly the same filter (cut-off frequency 60Hz, same formula,
- * but constants differs in 5th sign after comma). Fortunately in
- * fixed-point all coefficients are the same as in G.729. Thus this
- * routine can be used for the fixed-point AMR decoder, too.
- */
-void ff_acelp_high_pass_filter(int16_t* out, int hpf_f[2],
- const int16_t* in, int length);
-
-/**
- * Apply an order 2 rational transfer function in-place.
- *
- * @param out output buffer for filtered speech samples
- * @param in input buffer containing speech data (may be the same as out)
- * @param zero_coeffs z^-1 and z^-2 coefficients of the numerator
- * @param pole_coeffs z^-1 and z^-2 coefficients of the denominator
- * @param gain scale factor for final output
- * @param mem intermediate values used by filter (should be 0 initially)
- * @param n number of samples
- */
-void ff_acelp_apply_order_2_transfer_function(float *out, const float *in,
- const float zero_coeffs[2],
- const float pole_coeffs[2],
- float gain,
- float mem[2], int n);
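-
-/* Illustrative direct-form II sketch only (not part of the API) of
- *   H(z) = gain * (1 + zero_coeffs[0]*z^-1 + zero_coeffs[1]*z^-2) /
- *                 (1 + pole_coeffs[0]*z^-1 + pole_coeffs[1]*z^-2)
- * mem[0] and mem[1] hold the two previous internal-state values.  The real
- * routine may order the operations differently (e.g. apply the gain at the
- * input), which only changes the scaling of mem[]. */
-static inline void order_2_transfer_sketch(float *out, const float *in,
-                                           const float zero_coeffs[2],
-                                           const float pole_coeffs[2],
-                                           float gain, float mem[2], int n)
-{
-    for (int i = 0; i < n; i++) {
-        float w = in[i] - pole_coeffs[0] * mem[0] - pole_coeffs[1] * mem[1];
-        out[i] = gain * (w + zero_coeffs[0] * mem[0] + zero_coeffs[1] * mem[1]);
-        mem[1] = mem[0];
-        mem[0] = w;
-    }
-}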
-
-/**
- * Apply the tilt compensation filter 1 - tilt * z^-1.
- *
- * @param mem pointer to the filter's state (one single float)
- * @param tilt tilt factor
- * @param samples array where the filter is applied
- * @param size the size of the samples array
- */
-void ff_tilt_compensation(float *mem, float tilt, float *samples, int size);
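-
-/* Illustrative sketch only (not part of the API): y[i] = x[i] - tilt * x[i-1],
- * applied in place, with *mem carrying the last input sample across calls. */
-static inline void tilt_compensation_sketch(float *mem, float tilt,
-                                            float *samples, int size)
-{
-    for (int i = 0; i < size; i++) {
-        float prev_in = samples[i];
-        samples[i] -= tilt * *mem;
-        *mem = prev_in;
-    }
-}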
-
-
-#endif /* AVCODEC_ACELP_FILTERS_H */
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Use Nova Launcher to Transform Your Android Experience.md b/spaces/congsaPfin/Manga-OCR/logs/How to Use Nova Launcher to Transform Your Android Experience.md
deleted file mode 100644
index 21e697871bd94ac38937a898342fa679dfeab016..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Use Nova Launcher to Transform Your Android Experience.md
+++ /dev/null
@@ -1,180 +0,0 @@
-
-
Nova Launcher: The Ultimate Guide to Customizing Your Android Home Screen
-
If you are looking for a way to make your Android device more personal and unique, you might want to try Nova Launcher. Nova Launcher is a powerful, customizable, and versatile home screen replacement that lets you tweak and transform your home screen to your liking. Whether you want to change the icons, the layout, the gestures, or the colors, Nova Launcher has you covered.
-
In this article, we will show you everything you need to know about Nova Launcher, from what it is and why you should use it, to how to install and set it up, to how to customize your home screen with it. We will also show you how to access the latest beta version of Nova Launcher and what's new in it. By the end of this article, you will be able to create your own amazing home screen with Nova Launcher.
What Is Nova Launcher and Why Should You Use It?
-
Nova Launcher is a third-party launcher app that replaces your default home screen, app drawer, dock, and folders with a more customizable and flexible interface. Nova Launcher has been around since 2011 and has over 50 million downloads on Google Play. It is widely regarded as one of the best launchers for Android, thanks to its rich features, smooth performance, and frequent updates.
-
Some of the reasons why you should use Nova Launcher are:
-
-
It gives you more control over your home screen appearance and functionality. You can change the icon themes, sizes, shapes, labels, colors, and animations. You can also adjust the desktop grid, margin, padding, scroll effect, page indicator, dock icons, and more.
-
It lets you organize your apps better with folders and tabs in the app drawer. You can create custom categories, sort apps alphabetically or by usage, hide apps you don't want to see, and swipe up or down to access different tabs.
-
It allows you to use gestures and actions to launch apps or perform tasks quickly. You can swipe up, down, left, right, pinch, double tap, or use two-finger gestures on the home screen or the app drawer. You can also assign custom actions to buttons or icons.
-
It supports backup and restore of your settings and layouts. You can easily save and load any of your custom setups with a few taps. You can also share your backups with others or import backups from other launchers.
-
It offers a beta program that lets you try new features and improvements before they are released to all users. You can join the beta program on Google Play or download the beta APK from the website.
-
-
How to Install and Set Up Nova Launcher
-
Installing and setting up Nova Launcher is easy and straightforward. Here are the steps you need to follow:
-
-
Download Nova Launcher from Google Play or from the official website. If you download it from the website, make sure you enable "Unknown sources" in your device settings before installing it.
-
Open Nova Launcher and tap "Start". You will be asked to choose your preset theme (Light, Dark, or System) and app drawer style (Immersive or Card). You can change these later in the settings.
-
You will also be asked to choose your default launcher. Tap "Nova Launcher" and then "Always". This will make Nova Launcher your default home screen app.
-
You are now ready to use Nova Launcher.
How to Customize Your Home Screen with Nova Launcher
-
Now that you have installed and set up Nova Launcher, you can start customizing your home screen to your liking. Nova Launcher has a lot of options and settings that you can tweak and adjust, but we will focus on some of the most common and useful ones in this section.
-
How to Change Icon Themes and Sizes
-
One of the easiest ways to change the look of your home screen is to change the icon themes and sizes. Nova Launcher supports thousands of icon packs that you can download from Google Play or other sources. You can also use the built-in icon themes that come with Nova Launcher.
-
To change the icon themes and sizes, follow these steps:
-
-
-
Long-press on an empty space on your home screen and tap "Settings".
-
Tap "Look & feel" and then "Icon style".
-
Tap "Icon theme" and choose the icon pack you want to use. You can also tap "System" to use the default icons of your device.
-
Tap "Icon size" and drag the slider to adjust the size of your icons. You can also tap "Normalize icon size" to make all icons the same size.
-
Tap "Done" to save your changes.
-
-
How to Adjust Desktop Grid and Layout
-
Another way to customize your home screen is to adjust the desktop grid and layout. The desktop grid determines how many rows and columns of icons you can fit on your home screen. The layout determines how much space you have between the icons, the dock, and the edges of the screen.
-
To adjust the desktop grid and layout, follow these steps:
-
-
Long-press on an empty space on your home screen and tap "Settings".
-
Tap "Home screen" and then "Desktop grid".
-
Drag the sliders to change the number of rows and columns on your home screen. You can also tap "Snap to grid" to align your icons with the grid.
-
Tap "Done" to save your changes.
-
Tap "Home screen" again and then "Layout".
-
Drag the sliders to change the padding, margin, and dock height of your home screen. You can also tap "Dock background" to change the color or shape of your dock.
-
Tap "Done" to save your changes.
-
-
How to Add and Remove Widgets and Shortcuts
-
Widgets are mini-apps that display information or perform functions on your home screen. Shortcuts are icons that launch apps or actions with a single tap. You can add or remove widgets and shortcuts on your home screen to make it more functional and convenient.
-
To add or remove widgets and shortcuts, follow these steps:
-
-
Long-press on an empty space on your home screen and tap "Widgets".
-
Scroll through the list of widgets and find the one you want to add. You can also use the search bar at the top to find a specific widget.
-
Long-press on the widget and drag it to an empty space on your home screen. You can also resize or move the widget by dragging its edges or corners.
-
To remove a widget, long-press on it and drag it to the "Remove" icon at the top of the screen.
-
To add a shortcut, long-press on an app icon in the app drawer or dock and drag it to an empty space on your home screen.
-
To remove a shortcut, long-press on it and drag it to the "Remove" icon at the top of the screen.
-
-
How to Create Folders and Tabs in the App Drawer
-
Folders are groups of apps that you can create in the app drawer or on the home screen. Tabs are categories of apps that you can create in the app drawer. You can use folders and tabs to organize your apps better and find them faster.
-
To create folders and tabs in the app drawer, follow these steps:
-
-
Open the app drawer by swiping up from the bottom of the screen or tapping the app drawer icon.
-
To create a folder, long-press on an app icon and drag it over another app icon. A folder will be created with both apps inside. You can also rename the folder by tapping on its name.
-
To create a tab, tap on the three-dot menu icon at the top right corner of the app drawer and then tap "Drawer groups".
-
Tap on "Add tab" and enter a name for the tab and choose an icon for it. You can also select which apps you want to include in the tab by tapping on them.
-
To switch between tabs, swipe left or right on the app drawer or tap on the tab icons at the top of the app drawer.
-
To remove a folder or a tab, long-press on it and drag it to the "Remove" icon at the top of the screen.
-
-
How to Use Gestures and Actions
-
Gestures and actions are shortcuts that let you launch apps or perform tasks quickly by using simple motions on your home screen or app drawer. You can assign different gestures and actions to different parts of your screen, such as swiping, pinching, double tapping, or using two-finger gestures.
-
To use gestures and actions, follow these steps:
-
-
Long-press on an empty space on your home screen and tap "Settings".
-
Tap "Gestures & inputs" and then tap on the gesture or action you want to customize.
-
Choose an app, shortcut, or action from the list that you want to launch or perform with that gesture or action.
-
Repeat the steps for any other gesture or action you want to customize.
-
To use a gesture or action, perform the motion on your home screen or app drawer as you have assigned it.
-
-
How to Backup and Restore Your Settings
-
Backup and restore is a feature that lets you save and load your Nova Launcher settings and layouts. This is useful if you want to switch devices, reset your device, share your setup with others, or try different setups without losing your current one.
-
To backup and restore your settings, follow these steps:
-
-
Long-press on an empty space on your home screen and tap "Settings".
-
Tap "Backup & import settings" and then tap "Backup".
-
Choose where you want to save your backup file. You can save it on your device, Google Drive, Dropbox, or any other cloud service.
-
Enter a name for your backup file and tap "OK".
-
To restore your settings, tap "Backup & import settings" again and then tap "Restore or manage backups".
-
Choose the backup file you want to restore from the list. You can also browse for backup files from other sources by tapping on the folder icon at the top right corner.
-
Tap "Restore" and confirm your choice. Your Nova Launcher settings and layouts will be restored.
-
-
How to Access Nova Launcher Beta and Latest Updates
-
Nova Launcher Beta is a version of Nova Launcher that lets you try new features and improvements before they are released to all users. Nova Launcher Beta is updated frequently with bug fixes and enhancements. If you want to be among the first to experience the latest Nova Launcher features, you can join the beta program in two ways: through Google Play or through the website.
-
How to Join the Beta Program on Google Play
-
To join the beta program on Google Play, follow these steps:
-
-
Open Google Play and search for Nova Launcher.
-
Scroll down to the bottom of the app page and tap "Join the beta".
-
Wait for a few minutes until you see a message that says "You're a beta tester".
-
Update Nova Launcher to the latest beta version by tapping "Update".
-
You are now part of the beta program. You will receive beta updates automatically through Google Play.
-
-
How to Download the Beta APK from the Website
-
To download the beta APK from the website, follow these steps:
-
-
Open your browser and go to the official Nova Launcher website.
-
Tap on "Download" and then tap on "Beta APK".
-
Download the APK file to your device. Make sure you enable "Unknown sources" in your device settings before installing it.
-
Open the APK file and install it. You may need to allow permissions for Nova Launcher to access your device.
-
You are now using the latest beta version of Nova Launcher. You will need to manually check for updates on the website and download them when they are available.
-
-
What's New in Nova Launcher 8.0 Beta
-
Nova Launcher 8.0 Beta is the latest beta version of Nova Launcher as of June 2023. It brings some new features and improvements that make Nova Launcher even better and more customizable. Some of the new features and improvements are:
-
-
A new adaptive icon shape called "Flower". This shape adds a floral touch to your icons and looks great with light or dark themes.
-
A new option to change the color of the notification dots. You can choose from a variety of colors or use the app's icon color.
-
A new option to hide the status bar on the home screen. This gives you more screen space and a cleaner look.
-
A new option to enable vertical scrolling in the app drawer. This makes it easier to browse through your apps and tabs.
-
A new option to disable the swipe up gesture to open the app drawer. This prevents accidental opening of the app drawer when you swipe up from the bottom of the screen.
-
A new option to customize the swipe down gesture on the home screen. You can choose to open the notification panel, the quick settings, the search bar, or any app or shortcut.
-
A new option to use a custom font for the app labels. You can choose from a list of fonts or use your own font file.
-
A new option to adjust the icon padding in the dock. You can make the icons closer or farther apart from each other.
-
A new option to show or hide the Google Discover page on the left of the home screen. You can also change the icon and color of the Google Discover button.
-
A new option to enable or disable haptic feedback for gestures and actions. You can also adjust the vibration intensity and duration.
-
-
These are just some of the new features and improvements in Nova Launcher 8.0 Beta. There are also many bug fixes and performance enhancements that make Nova Launcher smoother and faster than ever.
-
Conclusion and FAQs
-
Nova Launcher is a powerful, customizable, and versatile home screen replacement that lets you tweak and transform your home screen to your liking. Whether you want to change the icons, the layout, the gestures, or the colors, Nova Launcher has you covered. You can also join the beta program to try new features and improvements before they are released to all users.
-
If you have any questions about Nova Launcher, you might find the answers in these FAQs:
-
Q: How do I uninstall Nova Launcher?
-
A: To uninstall Nova Launcher, you need to first switch back to your default launcher. To do this, go to your device settings, tap on "Apps", find Nova Launcher, tap on it, and then tap on "Clear defaults". Then, go back to your home screen and tap on your home button. You will be asked to choose your default launcher. Choose your original launcher and then tap "Always". After that, you can uninstall Nova Launcher like any other app by long-pressing on its icon and dragging it to the "Uninstall" icon at the top of the screen.
-
Q: How do I update Nova Launcher?
-
A: If you are using Nova Launcher from Google Play, you will receive updates automatically through Google Play. You can also check for updates manually by opening Google Play, tapping on the menu icon at the top left corner, tapping on "My apps & games", and then tapping on "Update" next to Nova Launcher. If you are using Nova Launcher Beta from Google Play, you will receive beta updates automatically through Google Play as well. If you are using Nova Launcher Beta from the website, you will need to manually check for updates on the website and download them when they are available.
-
Q: How do I contact Nova Launcher support?
-
A: If you have any issues or feedback about Nova Launcher, you can contact Nova Launcher support by sending an email to support@teslacoilsw.com. You can also visit their help page or their Reddit community for more information and tips.
-
Q: How do I get more icon packs for Nova Launcher?
-
A: There are thousands of icon packs that you can download from Google Play or other sources that are compatible with Nova Launcher. You can search for "icon pack" on Google Play or browse through popular icon pack websites like Iconfinder or Flaticon. You can also create your own icon pack using apps like Icon Pack Studio or Adapticons. To apply an icon pack, go to Nova Launcher settings, tap on "Look & feel", tap on "Icon style", tap on "Icon theme", and choose the icon pack you want to use.
-
Q: How do I get more widgets for Nova Launcher?
-
A: There are many widgets that you can download from Google Play or other sources that are compatible with Nova Launcher . You can search for "widget" on Google Play or browse through popular widget apps like KWGT, Zooper, or UCCW. You can also create your own widgets using apps like KWGT or Zooper. To add a widget, long-press on an empty space on your home screen, tap on "Widgets", scroll through the list of widgets, long-press on the widget you want to add, and drag it to an empty space on your home screen. You can also resize or move the widget by dragging its edges or corners.
-
I hope you enjoyed this article and learned how to use Nova Launcher to customize your Android home screen. Nova Launcher is a great app that lets you express your creativity and personality with your device. If you have any questions or comments, feel free to leave them below. Thanks for reading!
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Magisk - The Most Powerful and Versatile Rooting Solution for Android - Download Here.md b/spaces/congsaPfin/Manga-OCR/logs/Magisk - The Most Powerful and Versatile Rooting Solution for Android - Download Here.md
deleted file mode 100644
index 339217af205623c70821456872b034c794456caa..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Magisk - The Most Powerful and Versatile Rooting Solution for Android - Download Here.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
Magic APK: What is it and how to download it?
-
If you are looking for a way to play the popular trading card game Magic: The Gathering on your Android device, or if you want to customize your device with root access and modules, you might have heard of Magic APK. But what is Magic APK and how can you download it? In this article, we will explain what Magic APK is, how to download it, how to install it, and how to use it.
-
What is Magic APK?
-
Magic APK is not a single app, but a term that refers to two different apps that share the same name. One is Magic: The Gathering Arena, the official app for playing the card game online. The other is Magisk, a tool for rooting and modifying your Android device. Let's take a closer look at each of them.
Magic: The Gathering Arena
-
Magic: The Gathering Arena is the original trading card game, and now you can download and start playing for free with your friends from anywhere! Magic: The Gathering Arena empowers you to discover your own unique playstyle as you collect cards from every set in Magic's history. You can build your own decks, challenge other players in various modes, and earn rewards as you progress. Whether you are a beginner or a veteran, Magic: The Gathering Arena has something for everyone.
-
Magisk
-
Magisk is a tool that allows you to root your Android device without modifying the system partition. This means that you can still receive OTA updates and use apps that require SafetyNet verification, such as Google Pay and Netflix. Magisk also provides a framework for installing modules that can enhance your device's functionality and performance. For example, you can install modules that enable systemless ad-blocking, emoji replacement, camera tweaks, and more.
-
How to download Magic APK?
-
Depending on which app you want to download, there are different ways to get Magic APK. Here are the steps for each app:
-
Downloading Magic: The Gathering Arena APK
-
The easiest way to download Magic: The Gathering Arena APK is to visit the official Google Play Store page and tap on the Install button. Alternatively, you can also download the APK file from a trusted third-party source, such as APKPure or APKMirror. However, make sure that you download the latest version and check the file's integrity before installing it.
-
Downloading Magisk APK
-
The official source for downloading Magisk APK is the GitHub page of its developer, topjohnwu. There, you can find the latest stable and beta versions of Magisk, as well as the changelog and instructions. You can also download Magisk Manager, which is an app that helps you manage Magisk and its modules. Again, make sure that you download the correct version and verify the file's authenticity before installing it.
-
How to install Magic APK?
-
Once you have downloaded the Magic APK file of your choice, you need to install it on your device. Here are the steps for each app:
-
Installing Magic: The Gathering Arena APK
-
If you downloaded the app from the Google Play Store, you don't need to do anything else. The app will be automatically installed on your device. If you downloaded the app from another source, you need to enable Unknown Sources in your device's settings. This will allow you to install apps from outside the Play Store. Then, locate the downloaded file in your file manager and tap on it to start the installation process. Follow the on-screen prompts and grant the necessary permissions to complete the installation.
-
-
-Installing Magisk APK
-
Before you install Magisk APK, you need to make sure that your device is compatible and has an unlocked bootloader. You also need to have a custom recovery installed, such as TWRP or OrangeFox. Then, you need to boot your device into recovery mode and flash the Magisk ZIP file that you downloaded from GitHub. This will install Magisk on your device's boot partition. After that, you can reboot your device and install the Magisk Manager app that you downloaded from GitHub. This will allow you to manage Magisk and its modules.
-
How to use Magic APK?
-
After you have installed Magic APK, you can start using it on your device. Here are some tips for each app:
-
Using Magic: The Gathering Arena APK
-
To use Magic: The Gathering Arena APK, you need to create an account or log in with your existing one. Then, you can access the main menu, where you can choose from different modes, such as Play, Ranked, Draft, Events, and Store. You can also view your collection, edit your decks, and chat with other players. To play a match, you need to select a mode and a deck, and then wait for an opponent. You can also challenge your friends directly by using their usernames or codes. To play the game, you need to follow the rules of Magic: The Gathering and use your cards wisely to defeat your opponent.
-
Using Magisk APK
-
To use Magisk APK, you need to open the Magisk Manager app and grant it root access. Then, you can access the main menu, where you can see your device's status, check for updates, and install modules. You can also access the settings, where you can customize Magisk's features, such as hiding root from certain apps, enabling systemless hosts, and changing the update channel. To install a module, you need to tap on the Modules tab and then on the plus icon. You can then browse for the module's ZIP file or download it from the online repository. To use a module, you need to enable it and reboot your device.
-
Conclusion
-
Magic APK is a term that refers to two different apps that have the same name: Magic: The Gathering Arena and Magisk. Both apps are useful and popular among Android users who want to play the card game online or root their devices without modifying the system partition. To download Magic APK, you need to visit the official sources or trusted third-party sites. To install Magic APK, you need to enable Unknown Sources or flash the ZIP file in recovery mode. To use Magic APK, you need to create an account or grant root access and then enjoy the features of each app.
-
FAQs
-
-
Q: Is Magic APK safe to download and install?
-
A: Yes, as long as you download it from the official sources or trusted third-party sites. You should also check the file's integrity and authenticity before installing it.
-
Q: Is Magic APK free to use?
-
A: Yes, both Magic: The Gathering Arena and Magisk are free to use. However, some features may require in-app purchases or donations.
-
Q: What are the benefits of using Magic APK?
-
A: The benefits of using Magic APK are that you can play the card game online with your friends or other players from anywhere, or that you can root your device and install modules that can enhance its functionality and performance.
-
Q: What are the drawbacks of using Magic APK?
-
A: The drawbacks of using Magic APK are that you may encounter some bugs or compatibility issues with some devices or apps, or that you may void your device's warranty or lose some features by rooting it.
-
Q: How can I update Magic APK?
-
A: You can update Magic APK by checking for updates in the app itself or by downloading the latest version from the official sources or trusted third-party sites.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/The benefits of playing 3D Soccer APK - Improve your skills and have fun.md b/spaces/congsaPfin/Manga-OCR/logs/The benefits of playing 3D Soccer APK - Improve your skills and have fun.md
deleted file mode 100644
index b0a049f057c4993c44e36dea33026394c8da816b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/The benefits of playing 3D Soccer APK - Improve your skills and have fun.md
+++ /dev/null
@@ -1,131 +0,0 @@
-
-
What is 3D Soccer APK?
-
If you are a fan of soccer games and you want to enjoy a realistic and immersive experience on your Android device, you should try 3D Soccer APK. This is a free soccer game that lets you play in first-person view, as well as in third-person, top, and stadium view. You can control the ball with advanced techniques for dribbling and kicking, and play in various modes from 4 vs 4 to 11 vs 11. You can also become any player on the field, play as a goalkeeper, practice free kicks and corner kicks, and challenge other players online or offline.
How to download and install 3D Soccer APK on Android?
-
Downloading and installing 3D Soccer APK on your Android device is very easy. Just follow these simple steps:
-
-
Go to and tap on the Download APK button.
-
Wait for the download to finish and then open the file.
-
If you see a warning message about installing apps from unknown sources, go to your device settings and enable the option to allow it.
-
Follow the instructions on the screen to install the game.
-
Launch the game and enjoy!
-
-
You can also see some screenshots of the game below:
-
-
-
-
How to play 3D Soccer APK on Android?
-
Playing 3D Soccer APK on your Android device is very fun and intuitive. You can choose from different camera angles and control options to suit your preference. Here are some basic controls and gameplay modes:
-
-
To move or run, use the left joystick or tilt your device.
-
To kick the ball, tap on the screen or use the right joystick.
-
To dribble or pass the ball, use the A button or swipe on the screen.
-
To change players or camera views, use the B button or double-tap on the screen.
-
To slow down time or pause the game, use the C button or tap on the clock icon.
-
-
You can also play in different modes such as:
-
-
Free style: Practice your skills without any rules or opponents.
-
Ball spin: Try to spin the ball as much as possible by kicking it with different angles and power.
-
Free kicks: Aim for the goal from different distances and angles.
-
Corner kicks: Try to score from corner kicks by crossing or shooting.
-
Against the wall: Test your accuracy by hitting targets on a wall.
-
Match: Play a match against the computer or another player online or offline.
-
Tournament: Compete in a series of matches and try to win the trophy.
-
-
What are the benefits of playing 3D Soccer APK on Android?
-
Playing 3D Soccer APK on your Android device has many benefits. Here are some of them:
-
-
You can enjoy a realistic and immersive soccer experience with 3D graphics, animations, and sounds.
-
You can customize your players, teams, stadiums, and balls with various options and colors.
-
You can experience different weather conditions, time of day, and seasons in the game.
-
You can play with realistic physics and ball dynamics that affect the gameplay.
-
You can challenge yourself and others with different difficulty levels and game modes.
-
You can have fun and socialize with other players online or offline.
-
-
What are some tips and tricks for playing 3D Soccer APK on Android?
-
Playing 3D Soccer APK on your Android device can be challenging and rewarding. Here are some tips and tricks to help you improve your skills, score goals, and win matches:
Practice your skills in the free style mode and learn how to control the ball, dribble, pass, and shoot.
-
Use the ball spin mode to master the art of spinning the ball with different angles and power.
-
Use the free kicks and corner kicks modes to practice your aiming and shooting skills from different situations.
-
Use the against the wall mode to improve your accuracy and precision by hitting targets on a wall.
-
Use the match mode to test your skills against the computer or another player. Try to use different strategies and tactics to outsmart your opponent.
-
Use the tournament mode to challenge yourself and compete in a series of matches. Try to win the trophy by beating all the other teams.
-
-
What are some alternatives to 3D Soccer APK on Android?
-
If you are looking for other free soccer games for your Android device, you might want to check out these alternatives:
-
-
Name
Description
-
Dream League Soccer
A popular soccer game that lets you build your own team, train your players, and compete in various leagues and cups.
-
FIFA Soccer
A famous soccer game that features real players, teams, leagues, and stadiums from around the world.
-
PES 2021
A realistic soccer game that offers high-quality graphics, gameplay, and modes. You can also play online with other players.
-
Soccer Stars
A simple but addictive soccer game that uses a physics-based gameplay. You can play with different types of balls and tables.
-
Soccer Manager 2021
A soccer management game that lets you take charge of your favorite team, sign players, set tactics, and win trophies.
-
-
Conclusion
-
3D Soccer APK is a free soccer game for Android devices that offers a realistic and immersive experience. You can play in various modes, customize your settings, enjoy 3D graphics and physics, and challenge other players online or offline. If you love soccer games, you should definitely give it a try. You can download it from or scan the QR code below:
-
-
FAQs
-
Q: Is 3D Soccer APK safe to download and install?
-
A: Yes, 3D Soccer APK is safe to download and install. It does not contain any viruses or malware. However, you should always download it from a trusted source like or scan the QR code above.
-
Q: How much space does 3D Soccer APK take on my device?
-
A: 3D Soccer APK takes about 100 MB of space on your device. You should make sure you have enough storage space before downloading and installing it.
-
Q: Can I play 3D Soccer APK offline?
-
A: Yes, you can play 3D Soccer APK offline. You can play in any mode except for online multiplayer. You can also save your progress offline and resume it later when you go online.
-
Q: Can I play 3D Soccer APK with a controller?
-
-
Q: How can I contact the developer of 3D Soccer APK?
-
A: If you have any questions, feedback, or suggestions for 3D Soccer APK, you can contact the developer by email at . You can also visit their website at or follow them on social media.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/A Little Agency Sierra Model Sets 0125 Plus 3 Custom Setsrar ((HOT)).md b/spaces/contluForse/HuggingGPT/assets/A Little Agency Sierra Model Sets 0125 Plus 3 Custom Setsrar ((HOT)).md
deleted file mode 100644
index 91292f4cdb9f8b3cfcd58d97af33260d0340ad30..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/A Little Agency Sierra Model Sets 0125 Plus 3 Custom Setsrar ((HOT)).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
pkg: i have a 2gb file, but the whole. in this way, the program is easy to use for everyone, regardless of their technical knowledge. a little agency sierra model sets 0125 plus 3 custom setsrar . you will download the tool in the. this feature is for your exclusive use and may not be shared on. a little agency sierra model sets 0125 plus 3 custom setsrar the program is very easy to use, you can even use it without any technical knowledge. -a-little-agency-sierra-model-sets-0125-plus-3-custom-setsrar-file. you can open the file with. the program will help you to open the file and all of the files contained in the archive will be listed in. a little agency sierra model sets 0125 plus 3 custom setsrar this feature is for your exclusive use and may not be shared on. when you want to unzip a. you can unzip files one by one or, by enabling the. a little agency sierra model sets 0125 plus 3 custom setsrar this is the default tool for the. a little agency sierra model sets 0125 plus 3 custom setsrar even the most advanced users may find that they need to update to. a little agency sierra model sets 0125 plus 3 custom setsrar you will download the tool in the. a little agency sierra model sets 0125 plus 3 custom setsrar stuff that's new in the latest version of this piece of software : version 2. the most accurate measurement tool on the market.
-
A Little Agency Sierra Model Sets 0125 Plus 3 Custom Setsrar
if you're looking for a simple yet powerful app to run your carputer, check out the lightweight and easy-to-use carbuntu. while this is not a full-fledged, and. a little agency sierra model sets 0125 plus 3 custom setsrar . you will need to know this as it will also be. . mp3 download free a little agency - sierra model - sets 01-25 (plus 3 custom sets).rar
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Cewek Smp Bugil Di Perkosa Keluar Darah Perawan __TOP__.md b/spaces/contluForse/HuggingGPT/assets/Cewek Smp Bugil Di Perkosa Keluar Darah Perawan __TOP__.md
deleted file mode 100644
index 1112583d998da8105207b2f81d53ac7956b7d3b4..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Cewek Smp Bugil Di Perkosa Keluar Darah Perawan __TOP__.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-May 13, 2563 BE - Vehicle Diagnostics and Troubleshooting SIS. Components - Repair Instructions Diesel Engine Specifications - ESI[tronic]-C Archive - "Car ... Vehicle Diagnostics and Troubleshooting SIS.
-Components - Repair Instructions Diesel Engine Specifications - ESI[tronic]-C Archive - "An automobile is a machine," said Karl-Friedrich Benz when he was still just the owner of Benz.
-Now we say "car." 8a78ff9644
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Download Auto Keyboard 9.0 Crack.md b/spaces/diacanFperku/AutoGPT/Download Auto Keyboard 9.0 Crack.md
deleted file mode 100644
index f0134b05fa788259ce0a21ae60530a1d5b3a756d..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Download Auto Keyboard 9.0 Crack.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
Download Auto Keyboard 9.0 Crack: A Tool to Automate Your Keystrokes and Mouse Actions
-
-
Do you often have to perform repetitive tasks on your computer that involve pressing the same keys or clicking the same buttons over and over again? Do you wish there was a way to automate these tasks and save your time and energy? If yes, then you might want to download auto keyboard 9.0 crack, a tool that can simulate keyboard keystrokes and mouse actions for you.
Auto Keyboard 9.0 is a software that can imitate keyboard keystrokes and mouse actions for you. You can enter a group of keystrokes or mouse actions and then it will automatically and repeatedly generate them for you. You can also set the delay time between each keystroke or mouse action.
-
-
Auto Keyboard 9.0 can be useful for various purposes, such as playing games, testing software, filling forms, creating macros, and more. It can help you economize a lot of time and effort if you have a lot of repetitive keyboard or mouse work to do.
-
-
What is Auto Keyboard 9.0 Crack?
-
-
Auto Keyboard 9.0 Crack is a modified version of Auto Keyboard 9.0 that allows you to use the software for free without paying for it. The original version of Auto Keyboard 9.0 is a shareware that requires you to purchase a license key after a 15-day trial period. However, with Auto Keyboard 9.0 Crack, you can bypass this limitation and use the software indefinitely.
-
-
Auto Keyboard 9.0 Crack also provides some additional features that are not available in the original version, such as modifying keystroke, interface, mouse, variable, hotkey, and text.
-
-
How to Download Auto Keyboard 9.0 Crack?
-
-
If you want to download auto keyboard 9.0 crack, you need to follow these steps:
-
-
-
-
Find a reliable source that offers Auto Keyboard 9.0 Crack for download. You can search online for torrent sites or direct links that provide this software.
-
Download the file to your computer. Make sure you scan it with an antivirus program before opening it to avoid any viruses or malware.
-
Extract the file using a software like WinRAR or 7-Zip. You will find a folder named CODEX that contains the cracked files.
-
Copy the contents of the CODEX folder to your Auto Keyboard 9.0 installation directory. This will overwrite some files and apply the crack to your software.
-
Run the software and enjoy using Auto Keyboard 9.0 Crack.
-
-
-
Note: This method is only compatible with the PC version of Auto Keyboard 9.0. It may not work with other versions or platforms. It may also cause crashes, glitches, or bans if you use it online. Use it at your own risk and discretion.
-
-
Conclusion
-
-
Auto Keyboard 9.0 Crack is a tool that can help you automate your keystrokes and mouse actions on your computer. It can be useful for various purposes, such as playing games, testing software, filling forms, creating macros, and more. It can also save you a lot of time and effort if you have a lot of repetitive keyboard or mouse work to do.
-
-
If you want to download auto keyboard 9.0 crack, you can follow the steps given above and enjoy using this software for free without paying for it. However, be careful when using this software as it may have some negative consequences.
-
-
If you want to learn more about download auto keyboard 9.0 crack, you can visit the official website of the software or read some reviews online. You can also watch some videos or streams on YouTube or Twitch that show how to use this software effectively.
-
What are the advantages and disadvantages of using Auto Keyboard 9.0 Crack?
-
-
Using Auto Keyboard 9.0 Crack can have some advantages and disadvantages for the users. Some of them are:
-
-
Advantages
-
-
-
You can use Auto Keyboard 9.0 for free without paying for it. The original version of Auto Keyboard 9.0 is a shareware that requires you to purchase a license key after a 15-day trial period. However, with Auto Keyboard 9.0 Crack, you can bypass this limitation and use the software indefinitely.
-
You can access some additional features that are not available in the original version, such as modifying keystroke, interface, mouse, variable, hotkey, and text.
-
You can automate your keystrokes and mouse actions on your computer and save your time and energy. You can enter a group of keystrokes or mouse actions and then it will automatically and repeatedly generate them for you. You can also set the delay time between each keystroke or mouse action.
-
You can use Auto Keyboard 9.0 for various purposes, such as playing games, testing software, filling forms, creating macros, and more. It can help you economize a lot of time and effort if you have a lot of repetitive keyboard or mouse work to do.
-
-
-
Disadvantages
-
-
-
You may face some legal issues if you use Auto Keyboard 9.0 Crack. The original version of Auto Keyboard 9.0 is a shareware that is protected by copyright laws. Using Auto Keyboard 9.0 Crack without paying for it may violate these laws and result in fines or lawsuits.
-
You may encounter some technical issues if you use Auto Keyboard 9.0 Crack. The cracked version of Auto Keyboard 9.0 may not be compatible with your system or other software. It may also cause crashes, glitches, or errors that may affect your performance or data.
-
You may risk your security if you use Auto Keyboard 9.0 Crack. The cracked version of Auto Keyboard 9.0 may contain viruses or malware that may harm your computer or steal your information. You may also expose yourself to hackers or scammers who may try to exploit your system or data.
-
You may lose your enjoyment if you use Auto Keyboard 9.0 Crack. The cracked version of Auto Keyboard 9.0 may make your tasks too easy or boring by automating them for you. You may also miss out on the updates or support that the original version of Auto Keyboard 9.0 provides.
-
-
-
Conclusion
-
-
Auto Keyboard 9.0 Crack is a tool that can help you automate your keystrokes and mouse actions on your computer. It can be useful for various purposes, such as playing games, testing software, filling forms, creating macros, and more. It can also save you a lot of time and effort if you have a lot of repetitive keyboard or mouse work to do.
-
-
If you want to download auto keyboard 9.0 crack, you can follow the steps given above and enjoy using this software for free without paying for it. However, be careful when using this software as it may have some negative consequences.
-
-
If you want to learn more about download auto keyboard 9.0 crack, you can visit the official website of the software or read some reviews online. You can also watch some videos or streams on YouTube or Twitch that show how to use this software effectively.
-
What are some alternatives to Auto Keyboard 9.0 Crack?
-
-
If you are looking for some alternatives to Auto Keyboard 9.0 Crack, you may want to check out some of these tools that can also help you automate your keystrokes and mouse actions on your computer. Some of them are:
-
-
-
Macro Toolworks: This is a powerful software that is available for Windows PCs and laptops. It allows you to create macros that can perform various tasks, such as keyboard shortcuts, text insertion, mouse clicks, file operations, and more. You can also schedule your macros to run at specific times or events.
-
AutoHotkey: This is a free, open-source utility for Windows that lets you create scripts that can automate almost anything on your computer. You can use it to create keyboard shortcuts, remap keys, manipulate windows, control the mouse, and more. You can also use it to create simple games or applications.
-
TinyTask: This is a lightweight, portable tool that can record and playback your keystrokes and mouse actions. It has a simple interface that shows only one button for recording and one button for playback. You can also adjust the speed and loop options of your recordings.
-
Free Virtual Keyboard: This is a tool that provides you with an on-screen keyboard that you can use instead of your physical keyboard. It is useful for situations where you don't have access to your keyboard or when you want to avoid typing errors or keyloggers. You can also customize the size, color, and layout of the virtual keyboard.
-
Cok Free Auto Typer: This is a tool that allows you to type text automatically by using a hotkey or a predefined text. You can use it to fill forms, send messages, write documents, and more. You can also set the interval time between each word or sentence.
-
-
-
Conclusion
-
-
Auto Keyboard 9.0 Crack is a tool that can help you automate your keystrokes and mouse actions on your computer. It can be useful for various purposes, such as playing games, testing software, filling forms, creating macros, and more. It can also save you a lot of time and effort if you have a lot of repetitive keyboard or mouse work to do.
-
-
If you want to download auto keyboard 9.0 crack, you can follow the steps given above and enjoy using this software for free without paying for it. However, be careful when using this software as it may have some negative consequences.
-
-
If you want to learn more about download auto keyboard 9.0 crack, you can visit the official website of the software or read some reviews online. You can also watch some videos or streams on YouTube or Twitch that show how to use this software effectively.
-
-
If you are looking for some alternatives to Auto Keyboard 9.0 Crack, you may want to check out some of these tools that can also help you automate your keystrokes and mouse actions on your computer. Some of them are Macro Toolworks, AutoHotkey, TinyTask, Free Virtual Keyboard, and Cok Free Auto Typer.
-
How to use Auto Keyboard 9.0 Crack effectively?
-
-
If you have downloaded auto keyboard 9.0 crack and want to use it effectively, you need to follow some tips and tricks that can help you get the most out of this tool. Here are some of them:
-
-
-
Choose the right keys or mouse actions to automate. You should select the keys or mouse actions that are relevant to your task and that can be repeated easily. For example, if you want to play a game that requires you to press the spacebar repeatedly, you can use Auto Keyboard 9.0 Crack to automate this action.
-
Set the right delay time and repeat frequency. You should adjust the delay time and repeat frequency according to your needs and preferences. The delay time is the time interval between each keystroke or mouse action, while the repeat frequency is the number of times the keystroke or mouse action is repeated. For example, if you want to press the spacebar every 0.5 seconds for 10 times, you can set the delay time to 0.5 seconds and the repeat frequency to 10.
-
Use hotkeys to start and stop the automation. You should use hotkeys to control the automation process easily and conveniently. You can assign a hotkey to start or stop the automation, or to pause or resume it. For example, you can use F1 to start the automation, F2 to stop it, F3 to pause it, and F4 to resume it.
-
Test your automation before using it. You should test your automation before using it for your actual task. You can use a text editor or a notepad to test your keystrokes or mouse actions and see if they work as expected. You can also check for any errors or glitches that may occur during the automation.
-
Use Auto Keyboard 9.0 Crack wisely and responsibly. You should use Auto Keyboard 9.0 Crack wisely and responsibly for your own benefit and convenience. You should not use it for illegal or unethical purposes, such as cheating, hacking, spamming, or harassing others. You should also respect the rules and policies of the websites or applications that you use with Auto Keyboard 9.0 Crack.
-
-
-
Conclusion
-
-
Auto Keyboard 9.0 Crack is a tool that can help you automate your keystrokes and mouse actions on your computer. It can be useful for various purposes, such as playing games, testing software, filling forms, creating macros, and more. It can also save you a lot of time and effort if you have a lot of repetitive keyboard or mouse work to do.
-
-
If you want to download auto keyboard 9.0 crack, you can follow the steps given above and enjoy using this software for free without paying for it. However, be careful when using this software as it may have some negative consequences.
-
-
If you want to learn more about download auto keyboard 9.0 crack, you can visit the official website of the software or read some reviews online. You can also watch some videos or streams on YouTube or Twitch that show how to use this software effectively.
-
-
If you are looking for some alternatives to Auto Keyboard 9.0 Crack, you may want to check out some of these tools that can also help you automate your keystrokes and mouse actions on your computer. Some of them are Macro Toolworks, AutoHotkey, TinyTask, Free Virtual Keyboard, and Cok Free Auto Typer.
-
-
If you want to use Auto Keyboard 9.0 Crack effectively, you need to follow some tips and tricks that can help you get the most out of this tool. Some of them are choosing the right keys or mouse actions to automate, setting the right delay time and repeat frequency, using hotkeys to start and stop the automation, testing your automation before using it, and using Auto Keyboard 9.0 Crack wisely and responsibly.
-
Conclusion
-
-
In this article, we have discussed download auto keyboard 9.0 crack, a tool that can help you automate your keystrokes and mouse actions on your computer. We have explained what it is, how to download it, what are its advantages and disadvantages, what are some alternatives to it, and how to use it effectively. We hope this article has been informative and helpful for you.
-
-
If you have any questions or feedback about download auto keyboard 9.0 crack, feel free to leave a comment below. We would love to hear from you and assist you in any way we can. Thank you for reading and have a great day!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Easerasystunewith[UPDATED] Crack.md b/spaces/diacanFperku/AutoGPT/Easerasystunewith[UPDATED] Crack.md
deleted file mode 100644
index 97e75e5f687b344ed0c27786f1c9d667f6a57435..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Easerasystunewith[UPDATED] Crack.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-EASERA SysTune (James Kang) It's very good for real-time measurement, the use of ... of a code-writer, which can be helpful when you're trying to crack a code. 4d29de3e1b
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Endnote X6 ((EXCLUSIVE)) Full Crack For Windows 7 Download.md b/spaces/diacanFperku/AutoGPT/Endnote X6 ((EXCLUSIVE)) Full Crack For Windows 7 Download.md
deleted file mode 100644
index 6a16531787a0020bcd1405bfc8435dcdb1ce12c7..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Endnote X6 ((EXCLUSIVE)) Full Crack For Windows 7 Download.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
How to Download Endnote X6 Full Crack for Windows 7
-
Endnote is a popular reference management software that helps researchers organize, cite, and share their sources. Endnote X6 is the latest version of the software that was released in 2012. It has many features and improvements over the previous versions, such as:
-
-
Syncing your Endnote library across your desktop, iPad, and online.
-
Searching and importing references from online databases and catalogs.
-
Creating and formatting citations and bibliographies in Microsoft Word, OpenOffice, and LaTeX.
-
Sharing your Endnote library with up to 14 colleagues or collaborators.
-
Accessing your Endnote library from anywhere with Endnote Web.
-
-
However, Endnote X6 is not a free software. You need to purchase a license to use it. The official price of Endnote X6 is $249.95 for a single user. If you are a student or an academic, you may be eligible for a discount. You can check the pricing and availability of Endnote X6 on the official website: https://endnote.com/buy/.
But what if you want to use Endnote X6 without paying for it? Is there a way to download Endnote X6 full crack for Windows 7? The answer is yes, but it is not recommended. Downloading and using cracked software is illegal and unethical. It may also expose your computer to viruses, malware, or other security risks. You may also face legal consequences if you are caught using pirated software.
-
Therefore, we do not advise you to download Endnote X6 full crack for Windows 7. Instead, we suggest you to use one of the following alternatives:
-
-
Use the free trial version of Endnote X6. You can download it from the official website: https://endnote.com/downloads/30-day-trial/. The trial version allows you to use all the features of Endnote X6 for 30 days. After that, you need to purchase a license or uninstall the software.
-
Use a free or open source reference management software. There are many options available online, such as Zotero, Mendeley, BibTeX, etc. These software have similar functions as Endnote X6, but they are free to use and distribute. You can find a comparison of different reference management software here: https://en.wikipedia.org/wiki/Comparison_of_reference_management_software.
-
Use an online citation generator. If you only need to create citations and bibliographies for your papers or projects, you can use an online tool that does the job for you. For example, you can use Cite This For Me: https://www.citethisforme.com/. This website allows you to choose from various citation styles, such as APA, MLA, Chicago, etc., and generate citations and bibliographies for your sources.
-
-
We hope this article has helped you understand how to download Endnote X6 full crack for Windows 7 and why you should not do it. We also hope you have found some useful alternatives to Endnote X6 that suit your needs and budget. Remember, using cracked software is not worth the risk. Be smart and ethical in your research endeavors.
- d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Friends Season 8 720p Bluray X264 MrLss.md b/spaces/diacanFperku/AutoGPT/Friends Season 8 720p Bluray X264 MrLss.md
deleted file mode 100644
index 880549a5b3fb71048cacac5e0c8a8c6891957e8a..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Friends Season 8 720p Bluray X264 MrLss.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
"
-
-examples = [
- ['Paris is the [MASK] of France.','bert-base-cased']
-]
-
-
-io1 = gr.Interface.load("huggingface/bert-base-cased")
-
-io2 = gr.Interface.load("huggingface/bert-base-uncased")
-
-
-def inference(inputtext, model):
- if model == "bert-base-cased":
- outlabel = io1(inputtext)
- else:
- outlabel = io2(inputtext)
- return outlabel
-
-
-gr.Interface(
- inference,
- [gr.inputs.Textbox(label="Context",lines=10),gr.inputs.Dropdown(choices=["bert-base-cased","bert-base-uncased"], type="value", default="bert-base-cased", label="model")],
- [gr.outputs.Label(label="Output")],
- examples=examples,
- article=article,
- title=title,
- description=description).launch(enable_queue=True)
\ No newline at end of file
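The deleted Space script above survives only as a fragment: it references `gradio`, `title`, `description`, and `article` without the lines that defined them. Below is a minimal, self-contained sketch of the same two-model masked-word demo, assuming gradio 3.x-style components; the metadata strings are placeholders rather than the original values.

```python
import gradio as gr

# Placeholder metadata -- the original definitions are not in the surviving fragment.
title = "Masked word prediction"
description = "Fill in the [MASK] token with bert-base-cased or bert-base-uncased."
article = ""

examples = [
    ["Paris is the [MASK] of France.", "bert-base-cased"],
]

# Load both hosted models once so the handler can route between them.
io1 = gr.Interface.load("huggingface/bert-base-cased")
io2 = gr.Interface.load("huggingface/bert-base-uncased")


def inference(inputtext, model):
    # Dispatch to whichever hosted model the user picked in the dropdown.
    if model == "bert-base-cased":
        return io1(inputtext)
    return io2(inputtext)


gr.Interface(
    inference,
    [
        gr.Textbox(label="Context", lines=10),
        gr.Dropdown(choices=["bert-base-cased", "bert-base-uncased"],
                    value="bert-base-cased", label="model"),
    ],
    gr.Label(label="Output"),
    examples=examples,
    article=article,
    title=title,
    description=description,
).launch()
```

Each loaded interface is called directly as a function, which is also how the original `io1`/`io2` objects are used.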
diff --git a/spaces/dreambooth-hackathon/leaderboard/README.md b/spaces/dreambooth-hackathon/leaderboard/README.md
deleted file mode 100644
index 50c57a58b753c965a7bba000c7a3637fa05d1017..0000000000000000000000000000000000000000
--- a/spaces/dreambooth-hackathon/leaderboard/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Leaderboard
-emoji: 😻
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.14.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo/USE_POLICY.md b/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo/USE_POLICY.md
deleted file mode 100644
index abbcc199b2d1e4feb5d7e40c0bd67e1b0ce29e97..0000000000000000000000000000000000000000
--- a/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo/USE_POLICY.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Llama 2 Acceptable Use Policy
-
-Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
-
-## Prohibited Uses
-We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
-
-1. Violate the law or others’ rights, including to:
- 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
- 1. Violence or terrorism
- 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
- 3. Human trafficking, exploitation, and sexual violence
- 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
- 5. Sexual solicitation
- 6. Any other criminal activity
- 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
- 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
- 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
- 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
- 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
- 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
-
-
-
-2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
- 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
- 2. Guns and illegal weapons (including weapon development)
- 3. Illegal drugs and regulated/controlled substances
- 4. Operation of critical infrastructure, transportation technologies, or heavy machinery
- 5. Self-harm or harm to others, including suicide, cutting, and eating disorders
- 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
-
-
-
-3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
- 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
- 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
- 3. Generating, promoting, or further distributing spam
- 4. Impersonating another individual without consent, authorization, or legal right
- 5. Representing that the use of Llama 2 or outputs are human-generated
- 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
-4. Fail to appropriately disclose to end users any known dangers of your AI system
-
-Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
-
-* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
-* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
-* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
-* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [LlamaUseReport@meta.com](mailto:LlamaUseReport@meta.com)
-
diff --git a/spaces/ennet/ChatDev/camel/messages/chat_messages.py b/spaces/ennet/ChatDev/camel/messages/chat_messages.py
deleted file mode 100644
index 1a9406344fe519d47d90c987fdd9fc6e91bdad72..0000000000000000000000000000000000000000
--- a/spaces/ennet/ChatDev/camel/messages/chat_messages.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-# Licensed under the Apache License, Version 2.0 (the “License”);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an “AS IS” BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from dataclasses import dataclass
-from typing import Dict, Optional
-
-from camel.messages import BaseMessage
-from camel.typing import RoleType
-
-
-@dataclass
-class ChatMessage(BaseMessage):
- r"""Base class for chat messages used in CAMEL chat system.
-
- Args:
- role_name (str): The name of the user or assistant role.
- role_type (RoleType): The type of role, either
- :obj:`RoleType.ASSISTANT` or :obj:`RoleType.USER`.
- meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
- for the message.
- role (str): The role of the message in OpenAI chat system.
- content (str): The content of the message. (default: :obj:`""`)
- """
- role_name: str
- role_type: RoleType
- meta_dict: Optional[Dict[str, str]]
- role: str
- content: str = ""
-
- def set_user_role_at_backend(self: BaseMessage):
- return self.__class__(
- role_name=self.role_name,
- role_type=self.role_type,
- meta_dict=self.meta_dict,
- role="user",
- content=self.content,
- )
-
-
-@dataclass
-class AssistantChatMessage(ChatMessage):
- r"""Class for chat messages from the assistant role used in CAMEL chat
- system.
-
- Attributes:
- role_name (str): The name of the assistant role.
- role_type (RoleType): The type of role, always
- :obj:`RoleType.ASSISTANT`.
- meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
- for the message.
- role (str): The role of the message in OpenAI chat system.
- (default: :obj:`"assistant"`)
- content (str): The content of the message. (default: :obj:`""`)
- """
- role_name: str
- role_type: RoleType = RoleType.ASSISTANT
- meta_dict: Optional[Dict[str, str]] = None
-    role: str = "assistant"
- content: str = ""
-
-
-@dataclass
-class UserChatMessage(ChatMessage):
- r"""Class for chat messages from the user role used in CAMEL chat system.
-
- Args:
- role_name (str): The name of the user role.
- role_type (RoleType): The type of role, always :obj:`RoleType.USER`.
- meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
- for the message.
- role (str): The role of the message in OpenAI chat system.
- (default: :obj:`"user"`)
- content (str): The content of the message. (default: :obj:`""`)
- """
- role_name: str
- role_type: RoleType = RoleType.USER
- meta_dict: Optional[Dict[str, str]] = None
- role: str = "user"
- content: str = ""
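As a usage illustration, here is a hedged, dependency-free sketch of the pattern these dataclasses implement: a message record whose backend-facing role can be rewritten to "user" via a copying method. The `DemoChatMessage` class and the sample values are illustrative stand-ins, not part of the CAMEL API.

```python
from dataclasses import dataclass, replace
from enum import Enum
from typing import Dict, Optional


class RoleType(Enum):
    ASSISTANT = "assistant"
    USER = "user"


@dataclass
class DemoChatMessage:
    # Mirrors the fields of ChatMessage above.
    role_name: str
    role_type: RoleType
    meta_dict: Optional[Dict[str, str]]
    role: str
    content: str = ""

    def set_user_role_at_backend(self) -> "DemoChatMessage":
        # Return a copy whose backend-facing role is forced to "user",
        # analogous to ChatMessage.set_user_role_at_backend above.
        return replace(self, role="user")


msg = DemoChatMessage(
    role_name="Stock Trader",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    role="assistant",
    content="Buy low, sell high.",
)
print(msg.set_user_role_at_backend().role)  # -> "user"
```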
diff --git a/spaces/erastorgueva-nv/NeMo-Forced-Aligner/app.py b/spaces/erastorgueva-nv/NeMo-Forced-Aligner/app.py
deleted file mode 100644
index 335b101c739f000a36f479413e2f763c53496409..0000000000000000000000000000000000000000
--- a/spaces/erastorgueva-nv/NeMo-Forced-Aligner/app.py
+++ /dev/null
@@ -1,317 +0,0 @@
-import gradio as gr
-import librosa
-import soundfile
-import tempfile
-import os
-import uuid
-import json
-
-import jieba
-
-import nemo.collections.asr as nemo_asr
-from nemo.collections.asr.models import ASRModel
-from nemo.utils import logging
-
-from align import main, AlignmentConfig, ASSFileConfig
-
-
-SAMPLE_RATE = 16000
-
-# Pre-download and cache the model in disk space
-logging.setLevel(logging.ERROR)
-for tmp_model_name in [
- "stt_en_fastconformer_hybrid_large_pc",
- "stt_de_fastconformer_hybrid_large_pc",
- "stt_es_fastconformer_hybrid_large_pc",
- "stt_fr_conformer_ctc_large",
- "stt_zh_citrinet_1024_gamma_0_25",
-]:
- tmp_model = ASRModel.from_pretrained(tmp_model_name, map_location='cpu')
- del tmp_model
-logging.setLevel(logging.INFO)
-
-
-def get_audio_data_and_duration(file):
- data, sr = librosa.load(file)
-
- if sr != SAMPLE_RATE:
- data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
-
- # monochannel
- data = librosa.to_mono(data)
-
- duration = librosa.get_duration(y=data, sr=SAMPLE_RATE)
- return data, duration
-
-
-def get_char_tokens(text, model):
- tokens = []
- for character in text:
- if character in model.decoder.vocabulary:
- tokens.append(model.decoder.vocabulary.index(character))
- else:
- tokens.append(len(model.decoder.vocabulary)) # return unk token (same as blank token)
-
- return tokens
-
-
-def get_S_prime_and_T(text, model_name, model, audio_duration):
-
- # estimate T
- if "citrinet" in model_name or "_fastconformer_" in model_name:
- output_timestep_duration = 0.08
- elif "_conformer_" in model_name:
- output_timestep_duration = 0.04
- elif "quartznet" in model_name:
- output_timestep_duration = 0.02
- else:
- raise RuntimeError("unexpected model name")
-
- T = int(audio_duration / output_timestep_duration) + 1
-
- # calculate S_prime = num tokens + num repetitions
- if hasattr(model, 'tokenizer'):
- all_tokens = model.tokenizer.text_to_ids(text)
- elif hasattr(model.decoder, "vocabulary"): # i.e. tokenization is simply character-based
- all_tokens = get_char_tokens(text, model)
- else:
- raise RuntimeError("cannot obtain tokens from this model")
-
- n_token_repetitions = 0
- for i_tok in range(1, len(all_tokens)):
- if all_tokens[i_tok] == all_tokens[i_tok - 1]:
- n_token_repetitions += 1
-
- S_prime = len(all_tokens) + n_token_repetitions
-
- return S_prime, T
-
-
-def hex_to_rgb_list(hex_string):
- hex_string = hex_string.lstrip("#")
- r = int(hex_string[:2], 16)
- g = int(hex_string[2:4], 16)
- b = int(hex_string[4:], 16)
- return [r, g, b]
-
-def delete_mp4s_except_given_filepath(filepath):
- files_in_dir = os.listdir()
- mp4_files_in_dir = [x for x in files_in_dir if x.endswith(".mp4")]
- for mp4_file in mp4_files_in_dir:
- if mp4_file != filepath:
- os.remove(mp4_file)
-
-
-
-
-def align(lang, Microphone, File_Upload, text, col1, col2, col3, progress=gr.Progress()):
- # Create utt_id, specify output_video_filepath and delete any MP4s
- # that are not that filepath. These stray MP4s can be created
- # if a user refreshes or exits the page while this 'align' function is executing.
- # This deletion will not delete any other users' video as long as this 'align' function
- # is run one at a time.
- utt_id = uuid.uuid4()
- output_video_filepath = f"{utt_id}.mp4"
- delete_mp4s_except_given_filepath(output_video_filepath)
-
- output_info = ""
-
- progress(0, desc="Validating input")
-
- # choose model
- if lang in ["en", "de", "es"]:
- model_name = f"stt_{lang}_fastconformer_hybrid_large_pc"
- elif lang in ["fr"]:
- model_name = f"stt_{lang}_conformer_ctc_large"
- elif lang in ["zh"]:
- model_name = f"stt_{lang}_citrinet_1024_gamma_0_25"
-
- # decide which of Mic / File_Upload is used as input & do error handling
- if (Microphone is not None) and (File_Upload is not None):
- raise gr.Error("Please use either the microphone or file upload input - not both")
-
- elif (Microphone is None) and (File_Upload is None):
- raise gr.Error("You have to either use the microphone or upload an audio file")
-
- elif Microphone is not None:
- file = Microphone
- else:
- file = File_Upload
-
- # check audio is not too long
- audio_data, duration = get_audio_data_and_duration(file)
-
- if duration > 4 * 60:
- raise gr.Error(
- f"Detected that uploaded audio has duration {duration/60:.1f} mins - please only upload audio of less than 4 mins duration"
- )
-
- # loading model
- progress(0.1, desc="Loading speech recognition model")
- model = ASRModel.from_pretrained(model_name)
-
- if text: # check input text is not too long compared to audio
- S_prime, T = get_S_prime_and_T(text, model_name, model, duration)
-
- if S_prime > T:
- raise gr.Error(
- f"The number of tokens in the input text is too long compared to the duration of the audio."
- f" This model can handle {T} tokens + token repetitions at most. You have provided {S_prime} tokens + token repetitions. "
- f" (Adjacent tokens that are not in the model's vocabulary are also counted as a token repetition.)"
- )
-
- with tempfile.TemporaryDirectory() as tmpdir:
- audio_path = os.path.join(tmpdir, f'{utt_id}.wav')
- soundfile.write(audio_path, audio_data, SAMPLE_RATE)
-
- # getting the text if it hasn't been provided
- if not text:
- progress(0.2, desc="Transcribing audio")
- text = model.transcribe([audio_path])[0]
- if 'hybrid' in model_name:
- text = text[0]
-
- if text == "":
- raise gr.Error(
- "ERROR: the ASR model did not detect any speech in the input audio. Please upload audio with speech."
- )
-
- output_info += (
- "You did not enter any input text, so the ASR model's transcription will be used:\n"
- "--------------------------\n"
- f"{text}\n"
- "--------------------------\n"
- f"You could try pasting the transcription into the text input box, correcting any"
- " transcription errors, and clicking 'Submit' again."
- )
-
- if lang == "zh" and " " not in text:
- # use jieba to add spaces between zh characters
- text = " ".join(jieba.cut(text))
-
- data = {
- "audio_filepath": audio_path,
- "text": text,
- }
- manifest_path = os.path.join(tmpdir, f"{utt_id}_manifest.json")
- with open(manifest_path, 'w') as fout:
- fout.write(f"{json.dumps(data)}\n")
-
- # run alignment
- if "|" in text:
- resegment_text_to_fill_space = False
- else:
- resegment_text_to_fill_space = True
-
- alignment_config = AlignmentConfig(
- pretrained_name=model_name,
- manifest_filepath=manifest_path,
- output_dir=f"{tmpdir}/nfa_output/",
- audio_filepath_parts_in_utt_id=1,
- batch_size=1,
- use_local_attention=True,
- additional_segment_grouping_separator="|",
- # transcribe_device='cpu',
- # viterbi_device='cpu',
- save_output_file_formats=["ass"],
- ass_file_config=ASSFileConfig(
- fontsize=45,
- resegment_text_to_fill_space=resegment_text_to_fill_space,
- max_lines_per_segment=4,
- text_already_spoken_rgb=hex_to_rgb_list(col1),
- text_being_spoken_rgb=hex_to_rgb_list(col2),
- text_not_yet_spoken_rgb=hex_to_rgb_list(col3),
- ),
- )
-
- progress(0.5, desc="Aligning audio")
-
- main(alignment_config)
-
- progress(0.95, desc="Saving generated alignments")
-
-
- if lang=="zh":
- # make video file from the token-level ASS file
- ass_file_for_video = f"{tmpdir}/nfa_output/ass/tokens/{utt_id}.ass"
- else:
- # make video file from the word-level ASS file
- ass_file_for_video = f"{tmpdir}/nfa_output/ass/words/{utt_id}.ass"
-
- ffmpeg_command = (
- f"ffmpeg -y -i {audio_path} "
- "-f lavfi -i color=c=white:s=1280x720:r=50 "
- "-crf 1 -shortest -vcodec libx264 -pix_fmt yuv420p "
- f"-vf 'ass={ass_file_for_video}' "
- f"{output_video_filepath}"
- )
-
- os.system(ffmpeg_command)
-
- return output_video_filepath, gr.update(value=output_info, visible=True), output_video_filepath
-
-
-def delete_non_tmp_video(video_path):
- if video_path:
- if os.path.exists(video_path):
- os.remove(video_path)
- return None
-
-
-with gr.Blocks(title="NeMo Forced Aligner", theme="huggingface") as demo:
- non_tmp_output_video_filepath = gr.State([])
-
- with gr.Row():
- with gr.Column():
- gr.Markdown("# NeMo Forced Aligner")
- gr.Markdown(
- "Demo for [NeMo Forced Aligner](https://github.com/NVIDIA/NeMo/tree/main/tools/nemo_forced_aligner) (NFA). "
- "Upload audio and (optionally) the text spoken in the audio to generate a video where each part of the text will be highlighted as it is spoken. ",
- )
-
- with gr.Row():
-
- with gr.Column(scale=1):
- gr.Markdown("## Input")
- lang_drop = gr.Dropdown(choices=["de", "en", "es", "fr", "zh"], value="en", label="Audio language",)
-
- mic_in = gr.Audio(source="microphone", type='filepath', label="Microphone input (max 4 mins)")
- audio_file_in = gr.Audio(source="upload", type='filepath', label="File upload (max 4 mins)")
- ref_text = gr.Textbox(
- label="[Optional] The reference text. Use '|' separators to specify which text will appear together. "
- "Leave this field blank to use an ASR model's transcription as the reference text instead."
- )
-
- gr.Markdown("[Optional] For fun - adjust the colors of the text in the output video")
- with gr.Row():
- col1 = gr.ColorPicker(label="text already spoken", value="#fcba03")
- col2 = gr.ColorPicker(label="text being spoken", value="#bf45bf")
- col3 = gr.ColorPicker(label="text to be spoken", value="#3e1af0")
-
- submit_button = gr.Button("Submit")
-
- with gr.Column(scale=1):
- gr.Markdown("## Output")
- video_out = gr.Video(label="output video")
- text_out = gr.Textbox(label="output info", visible=False)
-
- with gr.Row():
- gr.HTML(
-            ""
- )
-
- submit_button.click(
- fn=align,
- inputs=[lang_drop, mic_in, audio_file_in, ref_text, col1, col2, col3,],
- outputs=[video_out, text_out, non_tmp_output_video_filepath],
- ).then(
- fn=delete_non_tmp_video, inputs=[non_tmp_output_video_filepath], outputs=None,
- )
-
-demo.queue()
-demo.launch()
-
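The `get_S_prime_and_T` helper above encodes the feasibility check that forced alignment needs at least as many output frames as emitted tokens, counting a repetition of the previous token as one extra frame. Here is a standalone sketch of that arithmetic, using the 0.08 s frame duration from the citrinet/fastconformer branch and a toy character vocabulary (both purely for illustration):

```python
# Standalone sketch of the S' <= T feasibility check used in the app above.

def tokens_for(text, vocabulary):
    # Character-based tokenization: unknown characters map to a shared extra id.
    return [vocabulary.index(c) if c in vocabulary else len(vocabulary) for c in text]

def is_alignable(text, audio_duration_s, vocabulary, frame_s=0.08):
    T = int(audio_duration_s / frame_s) + 1          # output frames available
    tokens = tokens_for(text, vocabulary)
    repeats = sum(1 for a, b in zip(tokens, tokens[1:]) if a == b)
    S_prime = len(tokens) + repeats                  # frames the text needs
    return S_prime <= T, S_prime, T

vocab = list("abcdefghijklmnopqrstuvwxyz ")
ok, s_prime, t = is_alignable("hello world", audio_duration_s=2.0, vocabulary=vocab)
print(ok, s_prime, t)  # True 12 26
```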
diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_4/test.py b/spaces/eson/tokenizer-arena/vocab/gpt_4/test.py
deleted file mode 100644
index d3a936bf6ad655b8f834c1b8758381ec7157d7bd..0000000000000000000000000000000000000000
--- a/spaces/eson/tokenizer-arena/vocab/gpt_4/test.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-See gpt_35_turbo for details.
-"""
-
-import tiktoken
-
-
-enc = tiktoken.encoding_for_model("gpt-4")
-
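The deleted stub only constructs the encoding. A short sketch of how it would typically be exercised, round-tripping a string through `encode` and `decode`, might look like this:

```python
import tiktoken

enc = tiktoken.encoding_for_model("gpt-4")

# Round-trip a sample string and report its token count.
text = "Hello, world!"
token_ids = enc.encode(text)
print(len(token_ids), token_ids)
assert enc.decode(token_ids) == text
```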
diff --git a/spaces/eubinecto/idiomify/idiomify/urls.py b/spaces/eubinecto/idiomify/idiomify/urls.py
deleted file mode 100644
index 08a7291a5ef456f2351825947800796862d87980..0000000000000000000000000000000000000000
--- a/spaces/eubinecto/idiomify/idiomify/urls.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-# EPIE dataset
-EPIE_IMMUTABLE_IDIOMS_TAGS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Static_Idioms_Corpus/Static_Idioms_Tags.txt" # noqa
-EPIE_IMMUTABLE_IDIOMS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Static_Idioms_Corpus/Static_Idioms_Candidates.txt" # noqa
-EPIE_IMMUTABLE_IDIOMS_CONTEXTS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Static_Idioms_Corpus/Static_Idioms_Words.txt" # noqa
-EPIE_MUTABLE_IDIOMS_TAGS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Formal_Idioms_Corpus/Formal_Idioms_Tags.txt" # noqa
-EPIE_MUTABLE_IDIOMS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Formal_Idioms_Corpus/Formal_Idioms_Candidates.txt" # noqa
-EPIE_MUTABLE_IDIOMS_CONTEXTS_URL = "https://github.com/prateeksaxena2809/EPIE_Corpus/blob/master/Formal_Idioms_Corpus/Formal_Idioms_Words.txt" # noqa
-
-# PIE dataset (Zhou, 2021)
-# https://aclanthology.org/2021.mwe-1.5/
-# right, let's just work on it.
-PIE_URL = "https://raw.githubusercontent.com/zhjjn/MWE_PIE/main/data_cleaned.csv"
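These constants are plain URL strings. A minimal sketch of how two of the EPIE files could be fetched and paired up is shown below; it assumes, as the file names suggest, that candidates and tags are stored one example per line, and the variable names are illustrative rather than part of the original module.

```python
import requests

EPIE_IMMUTABLE_IDIOMS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Static_Idioms_Corpus/Static_Idioms_Candidates.txt"  # noqa
EPIE_IMMUTABLE_IDIOMS_TAGS_URL = "https://raw.githubusercontent.com/prateeksaxena2809/EPIE_Corpus/master/Static_Idioms_Corpus/Static_Idioms_Tags.txt"  # noqa

# Fetch both files and split into lines so candidates and tag sequences line up.
idioms = requests.get(EPIE_IMMUTABLE_IDIOMS_URL, timeout=30).text.splitlines()
tags = requests.get(EPIE_IMMUTABLE_IDIOMS_TAGS_URL, timeout=30).text.splitlines()

# Show the first few candidate/tag pairs.
for idiom, tag_seq in list(zip(idioms, tags))[:3]:
    print(idiom, "->", tag_seq)
```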
diff --git a/spaces/exbert-project/exbert/client/src/ts/api/responses.ts b/spaces/exbert-project/exbert/client/src/ts/api/responses.ts
deleted file mode 100644
index 52ea5e7ddb562863a588ef3caeabd59d9f8ebcc9..0000000000000000000000000000000000000000
--- a/spaces/exbert-project/exbert/client/src/ts/api/responses.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-import * as tp from '../etc/types'
-
-/**
- * All responses will contain a "status" member. The meaning of the number is detailed below
- *
- * - 405: Requested model did not exist
- * - 406: Requested corpus did not exist
- */
-interface BaseResponse {
- status: number
-}
-
-export interface ModelDetailResponse extends BaseResponse {
- payload: tp.ModelInfo
-}
-
-export interface AttentionDetailsResponse extends BaseResponse {
- payload: tp.AttentionResponse
-}
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/Ableton Live 9 Authorization File.md b/spaces/falterWliame/Face_Mask_Detection/Ableton Live 9 Authorization File.md
deleted file mode 100644
index 17c5f78a43730b1b475f653fa80b545aebe4c8d8..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Ableton Live 9 Authorization File.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
How to Authorize Ableton Live 9 Offline
-
If you want to use Ableton Live 9 on a computer that is not connected to the internet, or if you have trouble authorizing online, you can follow these steps to authorize Live 9 offline.
Ableton Live 9 is a powerful music production software that allows you to create, record, edit, and perform your musical ideas. However, before you can use it, you need to authorize it with your Ableton user account. This ensures that you have a valid license and prevents piracy.
-
Normally, you can authorize Live 9 online by clicking on "Authorize with ableton.com" in the authorization dialog that appears when you launch Live 9. This will open your web browser and take you to your Ableton user account, where you can select your Live 9 license and complete the authorization process.
-
However, if your computer is not connected to the internet, or if you encounter any issues with the online authorization, you can also authorize Live 9 offline. This requires a few more steps, but it is still relatively easy to do. Here is how:
-
-
-
Install Live 9 on the computer that you want to use it on.
-
When Live 9 starts, you will see an authorization dialog. Click on "No Internet on this computer".
-
Live 9 will display another window in which the hardware code of your computer is shown, as well as further information regarding the offline authorization process.
-
Note down the hardware code (you will need to enter this number in your Ableton.com account). You can also save this information as a text file to make it easier to find later.
-
On an internet-connected computer, log in to your Ableton user account. Note: You do not necessarily need to use a second computer to authorize offline. Offline authorization can be performed on the same computer, as long as it has an internet connection.
-
Once you are logged in, select your Live 9 license from the list and click "offline authorization".
-
Enter your hardware code from Live 9 and click "Download". The Ableton server will generate an authorization (.auz) file for the computer you want to authorize.
-
If using an offline machine, you can save the authorization file to your portable storage device and bring it back to the computer where Live 9 is installed.
-
Open Live 9 and double click the .auz file or drag it onto the authorization dialog.
-
-
Congratulations! You have successfully authorized Live 9 offline. You can now enjoy all the features and functions of this amazing software.
-
If you need more help or have any questions about authorizing Live 9 offline, you can check out these resources:
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/E89382 Motherboard Schematic Pdf 24 __HOT__.md b/spaces/falterWliame/Face_Mask_Detection/E89382 Motherboard Schematic Pdf 24 __HOT__.md
deleted file mode 100644
index 4ebbab78b6790a4cbf5b4b0b692383ed1ab76c9e..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/E89382 Motherboard Schematic Pdf 24 __HOT__.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
E89382 Motherboard Schematic PDF 24: What You Need to Know
-
-
If you are looking for a reliable and detailed source of information about the E89382 motherboard schematic pdf 24, you have come to the right place. In this article, we will explain what this schematic is, why it is important, and how you can download it for free.
E89382 motherboard schematic pdf 24 is a document that contains the technical specifications and diagrams of the E89382 motherboard, which is a common component in many laptops. The schematic shows the layout of the motherboard, the connections between the various components, and the values of the resistors, capacitors, and other electronic parts.
-
-
The E89382 motherboard schematic pdf 24 is useful for anyone who wants to repair, upgrade, or troubleshoot their laptop. It can help you identify the problem areas, find the right replacement parts, and follow the correct steps to fix your laptop. It can also help you learn more about how your laptop works and how to optimize its performance.
-
-
Why is E89382 Motherboard Schematic PDF 24 Important?
-
-
E89382 motherboard schematic pdf 24 is important because it can save you time, money, and hassle when dealing with laptop issues. Instead of taking your laptop to a professional service center, which can be costly and time-consuming, you can use the schematic to diagnose and solve the problem yourself. You can also use the schematic to upgrade your laptop with new components or features, such as adding more memory, installing a faster processor, or enhancing the audio quality.
-
-
E89382 motherboard schematic pdf 24 is also important because it can help you avoid damaging your laptop or making things worse. If you try to fix your laptop without knowing what you are doing, you might end up breaking something or causing a short circuit. By using the schematic, you can ensure that you are following the right procedures and using the right tools.
-
-
How to Download E89382 Motherboard Schematic PDF 24 for Free?
-
-
If you want to download E89382 motherboard schematic pdf 24 for free, you have several options. One option is to use a website that offers free downloads of laptop schematics, such as Scribd or AllDatasheet. These websites have a large collection of schematics for various laptop models and brands. You can search for your specific model or browse through their categories. Once you find the schematic you need, you can download it as a PDF file or view it online.
-
-
Another option is to use a torrent site that allows peer-to-peer file sharing, such as The Pirate Bay or Kickass Torrents. These sites have millions of users who upload and download files of all kinds, including laptop schematics. You can use a torrent client software, such as BitTorrent or uTorrent, to access these sites and download the files you want. However, be careful when using torrent sites, as they might contain viruses or malware that can harm your computer.
-
-
-
A third option is to use a search engine, such as Google or Bing, to look for E89382 motherboard schematic pdf 24. You can type in the keyword and see what results come up. You might find some websites that offer free downloads of schematics or some forums where users share their schematics with others. However, be wary of some websites that might ask you to register or pay before downloading anything.
-
-
Conclusion
-
-
E89382 motherboard schematic pdf 24 is a valuable resource for anyone who owns or works with laptops. It can help you repair, upgrade, or troubleshoot your laptop with ease and confidence. It can also help you learn more about how your laptop functions and how to improve its performance.
-
-
If you want to download E89382 motherboard schematic pdf 24 for free, you have several options to choose from. You can use a website that offers free downloads of laptop schematics, such as Scribd or AllDatasheet. You can use a torrent site that allows peer-to-peer file sharing, such as The Pirate Bay or Kickass Torrents. Or you can use a search engine, such as Google or Bing, to look for E89382 motherboard schematic pdf 24.
-
-
Whatever option you choose, make sure that you are careful and cautious when downloading anything from the internet. Always scan your files for viruses or malware before opening them. And always backup your data before making any changes to your laptop.
-
How to Read E89382 Motherboard Schematic PDF 24?
-
-
Reading E89382 motherboard schematic pdf 24 might seem daunting at first, but it is not as hard as it looks. You just need to know some basic symbols and terms that are used in the schematic. Here are some tips to help you read E89382 motherboard schematic pdf 24:
-
-
-
Look at the title page of the schematic, which usually contains the model name, the revision number, and the date of the schematic. This can help you identify the exact version of the motherboard you are dealing with.
-
Look at the index page of the schematic, which usually lists the main sections of the schematic, such as power supply, CPU, memory, audio, video, etc. This can help you navigate through the schematic and find the part you are looking for.
-
Look at the symbols and labels used in the schematic, such as resistors, capacitors, transistors, diodes, connectors, etc. These symbols represent the physical components on the motherboard and their values or functions. You can use a reference guide or a website to learn more about these symbols and their meanings.
-
Look at the lines and arrows used in the schematic, which indicate the connections and directions of the current flow between the components. These lines and arrows can help you trace the circuit and understand how it works.
-
Look at the notes and comments used in the schematic, which usually provide additional information or explanations about the components or the circuit. These notes and comments can help you clarify any doubts or questions you might have about the schematic.
-
-
-
How to Use E89382 Motherboard Schematic PDF 24?
-
-
Using E89382 motherboard schematic pdf 24 can help you perform various tasks related to your laptop. Here are some examples of how you can use E89382 motherboard schematic pdf 24:
-
-
-
You can use E89382 motherboard schematic pdf 24 to repair your laptop if it is not working properly or has some damage. You can use the schematic to identify the faulty component, find its location on the motherboard, and replace it with a new one.
-
You can use E89382 motherboard schematic pdf 24 to upgrade your laptop if you want to improve its performance or functionality. You can use the schematic to find out what components are compatible with your laptop, where they are located on the motherboard, and how to install them.
-
You can use E89382 motherboard schematic pdf 24 to troubleshoot your laptop if it is having some issues or errors. You can use the schematic to test the voltage, resistance, or continuity of the components or circuits on the motherboard and find out where the problem lies.
-
You can use E89382 motherboard schematic pdf 24 to learn more about your laptop if you are curious or interested in how it works. You can use the schematic to study the design and architecture of the motherboard and gain a deeper understanding of its functions and features.
-
-
-
Conclusion
-
-
E89382 motherboard schematic pdf 24 is a valuable resource for anyone who owns or works with laptops. It can help you repair, upgrade, troubleshoot, or learn more about your laptop with ease and confidence. It can also help you avoid damaging your laptop or making things worse.
-
-
If you want to download E89382 motherboard schematic pdf 24 for free, you have several options to choose from. You can use a website that offers free downloads of laptop schematics, such as Scribd or AllDatasheet. You can use a torrent site that allows peer-to-peer file sharing, such as The Pirate Bay or Kickass Torrents. Or you can use a search engine, such as Google or Bing, to look for E89382 motherboard schematic pdf 24.
-
-
Whatever option you choose, make sure that you are careful and cautious when downloading anything from the internet. Always scan your files for viruses or malware before opening them. And always backup your data before making any changes to your laptop.
-
Where to Find E89382 Motherboard Schematic PDF 24?
-
-
If you already have a laptop that uses the E89382 motherboard, you might be wondering where to find the schematic pdf 24 for it. There are a few ways to find the schematic pdf 24 for your laptop, depending on your situation:
-
-
-
If you have the original packaging or documentation of your laptop, you might find the schematic pdf 24 included in them. Some laptop manufacturers provide the schematic pdf 24 as a part of their customer service or warranty policy.
-
If you have access to the internet, you might be able to find the schematic pdf 24 on the official website of your laptop manufacturer or model. Some laptop manufacturers provide the schematic pdf 24 as a downloadable file or an online viewer on their website.
-
If you have a friend or a colleague who has the same or a similar laptop model as yours, you might be able to borrow or copy their schematic pdf 24. Some laptop users share their schematic pdf 24 with others who need them.
-
If none of the above options work for you, you might have to buy the schematic pdf 24 from a third-party source, such as an online store or a repair shop. Some third-party sources sell the schematic pdf 24 for various laptop models and brands.
-
-
-
How to Print E89382 Motherboard Schematic PDF 24?
-
-
If you want to print E89382 motherboard schematic pdf 24, you need to have a printer that can handle PDF files and large-format printing. You also need to have enough paper and ink to print the whole schematic pdf 24, which can be up to 32 pages long. Here are some steps to print E89382 motherboard schematic pdf 24:
-
-
-
Open the schematic pdf 24 file on your computer using a PDF reader software, such as Adobe Acrobat Reader or Foxit Reader.
-
Select the print option from the file menu or use the keyboard shortcut Ctrl+P.
-
Choose your printer from the list of available printers and adjust the print settings according to your preferences. You can choose the paper size, orientation, quality, scaling, margins, etc.
-
Click on the print preview button to see what the schematic pdf 24 will look like when printed. You can zoom in or out, rotate, or move the pages around to fit them on the paper.
-
Click on the print button to start printing the schematic pdf 24. Make sure you have enough paper and ink in your printer and wait for the printing process to finish.
-
-
-
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/ESET NOD32 Antivirus 4.0.314 Full TOP Licencia Hasta 2050 Full TOP Version.md b/spaces/falterWliame/Face_Mask_Detection/ESET NOD32 Antivirus 4.0.314 Full TOP Licencia Hasta 2050 Full TOP Version.md
deleted file mode 100644
index b045ee6df52df0039cedce33f069eac11e8c11e0..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/ESET NOD32 Antivirus 4.0.314 Full TOP Licencia Hasta 2050 Full TOP Version.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
Virus species filter applied to the system Archived ESET NOD32 Antivirus 18 AVT CPA ESET NOD32 Antivirus 20/2020 FULL licensed. The license key is eu. Please contact your ESET agent for help.
-
eset nod32 antivirus keygen bluetooth modem mode 2. This ESET NOD32 Antivirus keygen is free. No key or serial number is required. Users can get the full version of this ESET Antivirus from their ESET support.
-
ESET NOD32 Antivirus 4.0.314 FULL Licencia Hasta 2050 Full Version
ESET NOD32 Antivirus 12 is a Full program, an Eset Antivirus in its latest 2020 version, which protects the data of millions of users. license key eset nod32 cannot be used period. the license type is merely a usage license per the documentation. and the range of dates allows it. license eset nod32 how to unlock whatsapp account pc app license. License Key ESET NOD32 Antivirus Internet Security 2020 License: ESET NOD32 Antivirus 4 Serial Number Full Version License key eset nod32 license.
-
350891 records. Eset nod32 antivirus 12 serial numbers are presented here. The ESET NOD32 Antivirus license is a security tool to protect your computer network from errors. license eset nod32 cannot be used period. the license type is merely a usage license per the documentation. and the range of dates allows it. license eset nod32 how to unlock whatsapp account pc app license. License Key ESET NOD32 Antivirus Internet Security 2020 License: ESET NOD32 Antivirus 4 Serial Number Full Version License key eset nod32 license.
-
License key eset nod32 how to unlock whatsapp account pc app license. License Key ESET NOD32 Antivirus Internet Security 2020 License: ESET NOD32 Antivirus 4 Serial Number Full Version License key eset nod32 license.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Asia Empire 2027 MOD APK The Best Way to Play the Game with All Skins Characters and Levels Unlocked.md b/spaces/fatiXbelha/sd/Asia Empire 2027 MOD APK The Best Way to Play the Game with All Skins Characters and Levels Unlocked.md
deleted file mode 100644
index c620fd6df26c3653c03f80126ae573f100b7ce8f..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Asia Empire 2027 MOD APK The Best Way to Play the Game with All Skins Characters and Levels Unlocked.md
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
Asia Empire 2027 Mod Unlocked APK: How to Download and Play
-
If you are a fan of strategy games and want to experience what it is like to lead an Asian empire in the year 2027, then you should try Asia Empire 2027. This is a turn-based strategy game that lets you choose from 50 countries and compete against smart AI enemies. However, if you want to unlock all the features and countries in the game, you will need to use a mod unlocked apk. In this article, we will show you how to download and install the mod unlocked apk for Asia Empire 2027, and how to play the game with it.
-
What is Asia Empire 2027?
-
A turn-based strategy game set in the year 2027
-
Asia Empire 2027 is a game developed by iGindis Games, a company that specializes in creating strategy games for mobile devices. The game is set in the year 2027, when a big uprising overthrew the existing government in your country. As the leader of the uprising, you have unlimited authority in the country and your goal is to make it into an empire of Asia. You can select your country from 50 available options, each with its own strengths and weaknesses. You will have to deal with various challenges, such as diplomacy, economy, technology, war, spy, and world news. You will also have to face smart AI enemies that will try to stop you from achieving your goal.
Asia Empire 2027 has many features that make it an engaging and realistic strategy game. Some of these features are:
-
-
Weapons Suppliers: You can buy weapons from different suppliers around the world, such as USA, EU, Russia, and China. You can choose from various types of weapons, such as mercenaries, tanks, jets, ships, submarines, robots, drones, carriers, and missiles.
-
Spy Center: You can use spies to gather information about your enemies, sabotage their plans, or assassinate their leaders. You can also counter-spy your enemies and protect your secrets.
-
War Room: You can plan and execute your military operations in the war room. You can use different strategies and tactics to win battles and wars. You can also use nuclear weapons as a last resort.
-
Diplomats: You can use diplomats to improve your relations with other countries, form alliances, or declare war. You can also use them to influence the United Nations and other international organizations.
-
United Nations: You can participate in the United Nations and vote on important issues that affect the world. You can also use your influence to pass resolutions that benefit you or harm your enemies.
-
Economy system: You can manage your economy by setting taxes, spending budget, investing in technology, trading with other countries, or taking loans. You have to balance your income and expenses and avoid inflation or debt.
-
Technology: You can research and develop new technologies that will give you an edge over your enemies. You can choose from different fields of technology, such as military, economy, energy, health, or education.
-
World News Distribution: You can follow the world news that affect your country and the world. You can see how your actions influence the economy, relations
at the same time. You also need to defend your regions from enemy attacks or rebellions.
-
Improve your relations with other countries: You have a diplomacy system in the game that shows your relations with other countries. You can improve your relations by sending diplomats, making trade deals, forming alliances, or supporting their causes. You can also worsen your relations by declaring war, breaking treaties, or interfering in their affairs. You need to maintain good relations with your allies and avoid making too many enemies.
-
-
Strategies and tactics to win the game
-
To win the game, you need to follow these strategies and tactics:
-
-
Use your weapons effectively: You have a variety of weapons in the game that you can use to attack or defend your regions. You need to use your weapons effectively and choose the right ones for each situation. For example, you can use mercenaries to infiltrate enemy regions, tanks to break through enemy defenses, jets to bomb enemy bases, ships to blockade enemy ports, submarines to sneak attack enemy ships, robots to fight in urban areas, drones to spy on enemy movements, carriers to launch air strikes, and missiles to destroy enemy targets.
-
Use your spies wisely: You have a spy center in the game that you can use to gather information or sabotage your enemies. You need to use your spies wisely and assign them to different missions. For example, you can use spies to reveal enemy secrets, disrupt enemy economy, damage enemy weapons, steal enemy technology, incite enemy rebellions, or assassinate enemy leaders. You also need to protect your spies from being caught or killed by enemy counter-spies.
-
Use your diplomacy smartly: You have a diplomat system in the game that you can use to influence other countries or international organizations. You need to use your diplomacy smartly and make the right decisions. For example, you can use diplomats to improve your relations with other countries, form alliances or coalitions, declare war or peace, negotiate trade deals or treaties, influence the United Nations or other organizations, or ask for help or support.
-
Use your technology efficiently: You have a technology system in the game that you can use to research and develop new technologies that will give you an edge over your enemies. You need to use your technology efficiently and invest in the right fields. For example, you can research and develop technologies that will improve your economy, energy, health, education, military, or espionage.
-
Use your news strategically: You have a news system in the game that you can use to follow the world news that affect your country and the world. You need to use your news strategically and react accordingly. For example, you can use news to monitor your popularity, economy, relations, and events. You can also use propaganda to manipulate the public opinion and gain support.
-
-
Conclusion
-
Asia Empire 2027 is a turn-based strategy game that lets you lead an Asian empire in the year 2027. You can choose from 50 countries and compete against smart AI enemies. However, if you want to unlock all the features and countries in the game, you will need to use a mod unlocked apk. In this article, we showed you how to download and install the mod unlocked apk for Asia Empire 2027, and how to play the game with it. We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please let us know in the comments below.
-
FAQs
-
Here are some frequently asked questions about Asia Empire 2027 mod unlocked apk:
-
-
Is Asia Empire 2027 mod unlocked apk safe?: Yes, as long as you download it from a reliable source and verify it before installing it on your device. However, you should always be careful when downloading and installing any mod apk from outside sources.
-
Is Asia Empire 2027 mod unlocked apk legal?: No, using a mod apk is not legal as it violates the terms and conditions of the original game developer. However, it is unlikely that you will face any legal consequences for using a mod apk as long as you do not distribute it or use it for commercial purposes.
-
Is Asia Empire 2027 mod unlocked apk compatible with my device?: It depends on your device model and specifications. The mod apk should work on most Android devices that support the original game. However, some devices might not be compatible with the mod apk due to different hardware or software configurations.
-
Can I play Asia Empire 2027 mod unlocked apk online?: No, you cannot play Asia Empire 2027 mod unlocked apk online as it is a modified version of the original game that does not support online features. You can only play the mod apk offline with AI enemies.
-
Can I update Asia Empire 2027 mod unlocked apk?: No, you cannot update the mod apk as it is not connected to the official game server. You can only play the mod apk with the version that you downloaded. If you want to update the game, you will need to download and install the latest mod apk from a reliable source.
-
-
asia empire 2027 mod apk unlimited money
-asia empire 2027 hack mod apk download
-asia empire 2027 mod apk latest version
-asia empire 2027 mod apk android 1
-asia empire 2027 mod apk revdl
-asia empire 2027 mod apk free shopping
-asia empire 2027 mod apk happymod
-asia empire 2027 mod apk rexdl
-asia empire 2027 mod apk no ads
-asia empire 2027 mod apk offline
-asia empire 2027 mod apk unlimited diamonds
-asia empire 2027 mod apk all countries unlocked
-asia empire 2027 mod apk unlimited everything
-asia empire 2027 mod apk premium
-asia empire 2027 mod apk vip
-asia empire 2027 mod apk full version
-asia empire 2027 mod apk unlimited resources
-asia empire 2027 mod apk unlimited troops
-asia empire 2027 mod apk unlimited weapons
-asia empire 2027 mod apk unlimited energy
-asia empire 2027 mod apk mega mod
-asia empire 2027 mod apk pro
-asia empire 2027 mod apk plus
-asia empire 2027 mod apk gold
-asia empire 2027 mod apk cracked
-asia empire 2027 cheat mod apk
-asia empire 2027 strategy game mod apk
-download game asia empire 2027 mod apk
-download asia empire 2027 hack mod apk
-download asia empire 2027 full unlocked mod apk
-how to download asia empire 2027 mod apk
-how to install asia empire 2027 mod apk
-how to play asia empire 2027 mod apk
-how to update asia empire 2027 mod apk
-how to get asia empire 2027 mod apk for free
-where to download asia empire 2027 mod apk
-where to find asia empire 2027 mod apk
-what is the best site to download asia empire 2027 mod apk
-what is the latest version of asia empire 2027 mod apk
-what is the difference between asia empire 2027 and asia empire 2027 mod apk
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Atomic Heart Deutsche Sprachdatei downloaden und einfgen.md b/spaces/fatiXbelha/sd/Atomic Heart Deutsche Sprachdatei downloaden und einfgen.md
deleted file mode 100644
index 72bd1279c2b60290fcfe7183b11113962c412286..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Atomic Heart Deutsche Sprachdatei downloaden und einfgen.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
Atomic Heart: How to Download and Install the German Language File
-
Atomic Heart is an action-packed role-playing game set in an alternate history of the Soviet Union. In this game you take on the role of a KGB agent who has to investigate a secret military base that has been overrun by mutated creatures, horrific machines, and overpowered robots. You have to use your combat skills, your surroundings, and your equipment to survive and uncover the truth behind this utopian world.
Atomic Heart is fully voiced in German. Most players never hear any of it, though, because the game automatically starts with English voice-over and only shows German subtitles. What the game does not tell you is that you can hear all the dialogue, cutscenes, and the delightfully absurd banter between Charles and P-3 in German. All you need is one small trick: download and install the German language file.
-
Where to Find the German Language File
-
To get the German language file for Atomic Heart, follow these steps:
-
-
Visit the official Atomic Heart page at [4](https://store.steampowered.com/app/668580/Atomic_Heart/).
-
Click the "Sprachpakete" (language packs) button below the game banner.
-
Select "Deutsch" from the list of available languages.
-
Click the "Herunterladen" (download) button and save the file to your computer.
-
Open the folder in which you installed the game (e.g. C:\Program Files (x86)\Steam\steamapps\common\Atomic Heart).
-
Copy the downloaded file "de.lpk" into the subfolder "Data\Localization" (see the sketch after this list).
-
-
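For the copy step referenced above, here is a minimal sketch in Python; the paths are the ones named in the guide and are assumptions about a default Steam install, so adjust them to your setup.

```python
import shutil
from pathlib import Path

# Paths from the guide above -- adjust if your game or download folder differs.
game_dir = Path(r"C:\Program Files (x86)\Steam\steamapps\common\Atomic Heart")
language_pack = Path.home() / "Downloads" / "de.lpk"

# Copy the downloaded language pack into the game's localization folder.
shutil.copy(language_pack, game_dir / "Data" / "Localization" / "de.lpk")
```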
How to Switch the Language In-Game
-
To switch the language from English to German, follow these steps:
-
Start the game and go to the main menu.
-
Click "Optionen" (options) and then "Audio".
-
Select "Deutsch" from the "Sprache" (language) drop-down menu.
-
Click "Übernehmen" (apply) and then "Zurück" (back).
-
Enjoy the game with German voice-over.
-
-
Conclusion
-
Atomic Heart is an exciting, atmospheric game that transports you to an alternate Soviet Union where nothing is quite what it seems. If you want to experience the game in its full glory, we recommend downloading and installing the German language file. That way you not only get to enjoy the excellent voice acting, you will also catch many of the game's humorous and bizarre details. So what are you waiting for? Download the German language file for Atomic Heart today and dive into this crazy world!
-
FAQs
-
Is Atomic Heart fully voiced in German?
-
Yes, it is. All dialogue, cutscenes, and character commentary are spoken in German. You only need to download and install the German language file to hear them.
-
Can the language also be changed while playing?
-
No, it cannot. You have to select the language in the main menu and restart the game to change it.
-
-
Are there other languages available for Atomic Heart?
-
Yes, there are several other languages that you can download and install for Atomic Heart, including English, French, Italian, Spanish, Russian, Chinese, and Japanese.
-
How large is the German language file?
-
The German language file is about 1.5 GB in size, so you need enough free space on your hard drive to store it.
-
Do you need a special patch or mod for the German language file?
-
No, you do not. You only need to download and install the German language file from the game's official website. No further steps are required.
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download The Seven Deadly Sins Grand Cross APK Mod and Join the Epic Adventure.md b/spaces/fatiXbelha/sd/Download The Seven Deadly Sins Grand Cross APK Mod and Join the Epic Adventure.md
deleted file mode 100644
index d224188c616c43aed85f016122eb7fdf9b20108a..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download The Seven Deadly Sins Grand Cross APK Mod and Join the Epic Adventure.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
The Seven Deadly Sins: Grand Cross APK Mod - What You Need to Know
-
If you are a fan of the popular anime and manga series The Seven Deadly Sins, you might have heard of the mobile game The Seven Deadly Sins: Grand Cross. This is a turn-based RPG that lets you relive the epic story of the seven knights who rebelled against the Holy Knights and saved the kingdom of Liones. You can also create your own team of heroes, customize their outfits, and engage in thrilling battles with other players online.
But what if you want to enjoy the game without spending real money or waiting for hours to unlock new characters and items? Well, there is a way to do that with an APK mod. In this article, we will tell you everything you need to know about The Seven Deadly Sins: Grand Cross APK mod, including its features, benefits, and how to download and install it on your device.
-
Introduction
-
What is The Seven Deadly Sins: Grand Cross?
-
The Seven Deadly Sins: Grand Cross is a mobile game developed by Netmarble and based on the anime and manga series of the same name. It was released in 2020 for Android and iOS devices and has since gained millions of fans worldwide. The game features stunning 3D graphics, cinematic cutscenes, original voice acting, and a faithful adaptation of the original story. You can also explore various locations from the series, interact with different characters, collect items, and complete quests.
-
What is an APK mod?
-
An APK mod is a modified version of an original APK file. APK stands for Android Package Kit, which is the file format used by Android devices to install and run applications. An APK mod can alter or enhance the features of an application, such as adding unlimited resources, unlocking premium content, removing ads, or bypassing restrictions. However, not all APK mods are safe or legal, so you should always be careful when downloading them from unknown sources.
-
Why use an APK mod for The Seven Deadly Sins: Grand Cross?
-
There are many reasons why you might want to use an APK mod for The Seven Deadly Sins: Grand Cross. For one thing, it can save you a lot of time and money that you would otherwise spend on buying in-game currency or waiting for energy to refill. For another thing, it can make the game more fun and exciting by giving you access to all the characters and costumes that you want, as well as boosting your combat performance. Of course, using an APK mod also comes with some risks, such as getting banned from the game or exposing your device to malware. Therefore, you should always use it at your own discretion and responsibility.
-
Features of The Seven Deadly Sins: Grand Cross APK Mod
-
Unlimited money and diamonds
-
One of the most appealing features of The Seven Deadly Sins: Grand Cross APK mod is that it gives you unlimited money and diamonds. Money is the basic currency in the game that you can use to buy items, upgrade equipment, or enhance your heroes. Diamonds are the premium currency that you can use to summon new heroes, buy costumes, or refill your energy. Normally, you would have to earn money and diamonds by completing missions, participating in events, or spending real cash. But with the APK mod, you can get as much money and diamonds as you want without any hassle.
-
Unlocked characters and costumes
-
Another great feature of The Seven Deadly Sins: Grand Cross APK mod is that it unlocks all the characters and costumes in the game. The game has over 200 characters from the series, each with their own skills, stats, and personalities. You can also customize their appearance with various costumes, accessories, and hairstyles. However, some of the characters and costumes are only available through limited-time events or special offers. With the APK mod, you can get them all for free and enjoy the full roster of heroes.
-
High damage and defense
-
If you want to dominate the battles in The Seven Deadly Sins: Grand Cross, you will love the high damage and defense feature of the APK mod. This feature allows you to deal massive damage to your enemies and reduce the damage you receive from their attacks. You can also use powerful skills and ultimate moves without any cooldown or cost. This way, you can easily defeat any opponent, whether it is a boss, a player, or a monster. You can also clear the stages faster and earn more rewards.
-
-
No ads and root required
-
Finally, The Seven Deadly Sins: Grand Cross APK mod has no ads and no root required. This means that you can play the game without any annoying interruptions or pop-ups that might ruin your immersion or experience. You also don't need to root your device to install or run the APK mod, which can be risky or complicated for some users. All you need is a compatible Android device with enough storage space and internet connection.
-
How to Download and Install The Seven Deadly Sins: Grand Cross APK Mod
-
Step 1: Download the APK file from a trusted source
-
The first step to download and install The Seven Deadly Sins: Grand Cross APK mod is to find a reliable source that offers the latest version of the mod. You can search online for various websites or blogs that provide links to download the APK file. However, you should be careful not to download any fake or malicious files that might harm your device or steal your data. You should also check the reviews and ratings of the source before downloading anything.
-
Step 2: Enable unknown sources on your device
-
The next step is to enable unknown sources on your device. This is a security setting that prevents you from installing applications from sources other than the official Google Play Store. To enable unknown sources, you need to go to your device's settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". You might see a warning message that says installing from unknown sources can be dangerous, but you can ignore it if you trust the source of the APK file.
-
Step 3: Install the APK file and launch the game
-
The final step is to install the APK file and launch the game. To do this, you need to locate the downloaded APK file on your device's file manager or downloads folder. Then, tap on it and follow the instructions on the screen to install it. Once the installation is complete, you can open the game and enjoy The Seven Deadly Sins: Grand Cross APK mod.
-
Conclusion
-
Summary of the main points
-
The Seven Deadly Sins: Grand Cross is a mobile game based on the anime and manga series of the same name. It is a turn-based RPG that lets you relive the epic story of the seven knights who rebelled against the Holy Knights and saved the kingdom of Liones. You can also create your own team of heroes, customize their outfits, and engage in thrilling battles with other players online.
-
An APK mod is a modified version of an original APK file that can alter or enhance the features of an application. The Seven Deadly Sins: Grand Cross APK mod has many features that make the game more fun and easy, such as unlimited money and diamonds, unlocked characters and costumes, high damage and defense, no ads and root required.
-
To download and install The Seven Deadly Sins: Grand Cross APK mod, you need to find a trusted source that offers the latest version of the mod, enable unknown sources on your device, and install the APK file on your device.
-
Call to action and disclaimer
-
If you are interested in trying out The Seven Deadly Sins: Grand Cross APK mod, you can follow the steps above and start playing right away. However, you should be aware that using an APK mod can have some risks, such as getting banned from the game or exposing your device to malware. Therefore, you should always use it at your own discretion and responsibility.
-
We hope this article has been helpful for you in learning more about The Seven Deadly Sins: Grand Cross APK mod. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and have a great day!
-
FAQs
-
What are the requirements to play The Seven Deadly Sins: Grand Cross APK mod?
-
To play The Seven Deadly Sins: Grand Cross APK mod, you need an Android device with at least 4 GB of RAM and 4 GB of free storage space. You also need a stable internet connection and a compatible operating system (Android 4.4 or higher).
-
Is The Seven Deadly Sins: Grand Cross APK mod safe to use?
-
The Seven Deadly Sins: Grand Cross APK mod is generally safe to use, as long as you download it from a trusted source and scan it for viruses before installing it. However, there is always a possibility that the mod might contain malware or spyware that could harm your device or steal your data. Therefore, you should always use it at your own risk and discretion.
-
Will I get banned from the game if I use The Seven Deadly Sins: Grand Cross APK mod?
-
There is a chance that you might get banned from the game if you use The Seven Deadly Sins: Grand Cross APK mod, especially if you abuse the features or cheat in online modes. The game developers have the right to detect and ban any players who violate the terms of service or use unauthorized modifications. Therefore, you should always be careful and respectful when playing the game with the mod.
-
Can I update The Seven Deadly Sins: Grand Cross APK mod?
-
You can update The Seven Deadly Sins: Grand Cross APK mod whenever there is a new version available from the source that you downloaded it from. However, you should always backup your data before updating, as some updates might cause compatibility issues or data loss. You should also check the changelog and reviews of the new version before installing it, as some updates might remove or change some features of the mod.
-
Can I play The Seven Deadly Sins: Grand Cross APK mod with my friends?
-
You can play The Seven Deadly Sins: Grand Cross APK mod with your friends, as long as they also have the same version of the mod installed on their devices. You can invite them to join your team, chat with them, or challenge them to duels. However, you should be aware that playing with the mod might give you an unfair advantage over other players who are playing with the original version of the game. Therefore, you should always be respectful and fair when playing with others.
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/FIFA Mobile Download the Best Football Game for Android and iOS.md b/spaces/fatiXbelha/sd/FIFA Mobile Download the Best Football Game for Android and iOS.md
deleted file mode 100644
index 0ce78697cea7f0526e723e5ac2a20d0c0be5f12f..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/FIFA Mobile Download the Best Football Game for Android and iOS.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
How to Download FIFA Mobile and Enjoy the Ultimate Soccer Experience
-
If you are a fan of soccer, you might have heard of FIFA Mobile, a mobile game based on the popular FIFA franchise. FIFA Mobile is a free-to-play game that lets you build your dream team of soccer stars, compete in various modes, and experience realistic graphics and sound effects. In this article, we will show you how to download FIFA Mobile for your Android, iOS, or PC devices, as well as some tips and tricks for playing the game.
-
What is FIFA Mobile?
-
FIFA Mobile is a mobile game developed by EA Sports that brings you the ultimate soccer experience on your smartphone or tablet. The game features over 15,000 authentic players from over 600 teams and 30 leagues, including the Premier League, LaLiga Santander, Bundesliga, Serie A TIM, Ligue 1 Uber Eats, and more. You can choose from some of the world's best players, such as Kylian Mbappé, Christian Pulisic, Vinicius Jr, Son Heung-min, and more.
One of the most exciting features of FIFA Mobile is the official FIFA World Cup 2022 mode, where you can replay the official tournament brackets with any of the 32 qualified nations. You can also relive some of soccer's most memorable moments with new Heroes, representing career-making unforgettable moments from fan favorites like Solskjær and Di Natale. Additionally, you can build a team full of soccer legends with over 100 ICONs, such as Zidane, Beckham, Ronaldo, Maldini, and more.
-
Why Download FIFA Mobile?
-
There are many reasons why you should download FIFA Mobile if you love soccer. Here are some of them:
-
-
You can build your dream team with soccer stars from around the world. You can train your players, increase their stats and OVR, and customize your formation and tactics. You can also collect Player Items from various events and modes to upgrade your team.
-
You can compete in various modes, such as Head-to-Head, VS Attack, Manager Mode, UEFA Champions League, UEFA Europa League, UEFA Conference, and more. You can also join a League or create your own to play with other players online and earn rewards.
-
You can experience realistic graphics, sound effects, and commentary. The game uses the Frostbite engine to deliver stunning visuals and animations. You can also hear the roar of the crowd, the whistle of the referee, and the voice of the commentators as you play.
-
-
With FIFA Mobile, you can enjoy the ultimate soccer experience anytime, anywhere.
-
How to Download FIFA Mobile for Android Devices
-
If you have an Android device, you can download FIFA Mobile from the Google Play Store. Here are the steps to follow:
Open the Google Play Store on your device and search for FIFA Soccer.
-
Tap on the Install button and wait for the download to finish. The game requires about 1.5 GB of free space on your device.
-
Open the app and follow the instructions to set up your account and team. You can use your Facebook, Google, or EA account to log in or create a new one.
-
-
Congratulations! You have successfully downloaded FIFA Mobile for your Android device. You can now start playing and enjoy the game.
-
-
How to Download FIFA Mobile for iOS Devices
-
If you have an iOS device, you can download FIFA Mobile from the App Store. Here are the steps to follow:
Open the App Store on your device and search for FIFA Soccer.
-
Tap on the Get button and enter your Apple ID password if prompted. The game requires about 1.5 GB of free space on your device.
-
Wait for the app to download and install on your device.
-
Launch the app and follow the steps to create your account and team. You can use your Facebook, Game Center, or EA account to log in or create a new one.
-
-
Congratulations! You have successfully downloaded FIFA Mobile for your iOS device. You can now start playing and enjoy the game.
-
How to Download FIFA Mobile for PC or Mac
-
If you want to play FIFA Mobile on a bigger screen with keyboard and mouse controls, you can download it for your PC or Mac using an Android emulator. An Android emulator is a software that allows you to run Android apps on your computer. There are many Android emulators available, but we recommend using BlueStacks or NoxPlayer as they are easy to use and compatible with most games. Here are the steps to follow:
Download and install BlueStacks or NoxPlayer from its official website.
-
Launch the emulator and sign in with your Google account. If you don't have one, you can create one for free.
-
Go to the Google Play Store within the emulator and search for FIFA Soccer. You can also use this link: FIFA Soccer - Apps on Google Play.
-
Install the app and open it from the emulator's home screen.
-
Follow the instructions to set up your account and team. You can use your Facebook, Google, or EA account to log in or create a new one.
-
-
Congratulations! You have successfully downloaded FIFA Mobile for your PC or Mac. You can now start playing and enjoy the game.
-
Tips and Tricks for Playing FIFA Mobile
-
To help you get started with FIFA Mobile, here are some tips and tricks that will improve your gameplay and performance:
-
-
Train your players regularly to improve their stats and OVR. You can use Training XP, Skill Boosts, Rank Up Tokens, and Player Items to train your players. Training XP can be earned from various events and modes, Skill Boosts can be collected from Daily Quests and VS Attack mode, Rank Up Tokens can be obtained from League Tournaments and Achievements, and Player Items can be acquired from various sources such as Scouting, Market, Packs, Events, etc.
-
Use the Advanced Passing system to create more scoring opportunities. You can swipe on the screen to pass the ball with precision and power. You can also tap on a teammate to make a quick pass or double tap to make a through pass. You can also drag and drop a player on the screen to make a pass to him. The Advanced Passing system gives you more control and creativity over your passing game.
-
Collect Player Items from various events and modes to upgrade your team. You can find Player Items of different rarities, such as Bronze, Silver, Gold, Elite, Master, and Legendary. You can also find Player Items of different types, such as Base, Campaign, Event, Icon, Hero, etc. You can use Player Items to train your players, rank them up, or exchange them for other rewards.
-
Join a League or create your own to play with other players online. You can join an existing League or create your own with up to 32 members. You can chat with your League members, play friendly matches, and participate in League Tournaments. League Tournaments are a mode where you can compete against other Leagues for fame and glory. You can earn League Points, League Tokens, and other rewards from League Tournaments.
-
-
With these tips and tricks, you can improve your skills and have more fun playing FIFA Mobile.
-
Conclusion
-
FIFA Mobile is a mobile game that lets you enjoy the ultimate soccer experience on your smartphone or tablet. You can download FIFA Mobile for free from the Google Play Store, the App Store, or using an Android emulator for your PC or Mac. You can build your dream team of soccer stars, compete in various modes, and experience realistic graphics and sound effects. You can also join a League or create your own to play with other players online. FIFA Mobile is a game that will keep you entertained and engaged for hours.
-
FAQs
-
Here are some frequently asked questions about FIFA Mobile:
-
-
How much space does FIFA Mobile require on my device?
-
FIFA Mobile requires about 1.5 GB of free space on your device. However, this may vary depending on your device model and operating system.
-
How do I update FIFA Mobile?
-
FIFA Mobile updates automatically when you launch the app if there is a new version available. You can also check for updates manually by going to the Google Play Store or the App Store and tapping on the Update button if there is one.
-
How do I contact EA Sports for support or feedback?
-
You can contact EA Sports through EA Help, their official support website.
-
How do I get more coins and gems in FIFA Mobile?
-
You can get more coins and gems in FIFA Mobile by playing various events and modes, completing Daily Quests and Achievements, participating in League Tournaments, watching ads, or purchasing them with real money.
-
How do I change my team name, logo, or kit in FIFA Mobile?
-
You can change your team name, logo, or kit in FIFA Mobile by going to the My Team menu and tapping on the Edit button. You can choose from a variety of options or create your own custom ones.
-
-
\ No newline at end of file
diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/Langchain\347\237\245\350\257\206\345\272\223.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/Langchain\347\237\245\350\257\206\345\272\223.py"
deleted file mode 100644
index 31c459aa1fe5ba35efb85988ff18528d4851f2e5..0000000000000000000000000000000000000000
--- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/Langchain\347\237\245\350\257\206\345\272\223.py"
+++ /dev/null
@@ -1,107 +0,0 @@
-from toolbox import CatchException, update_ui, ProxyNetworkActivate
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
-
-
-
-@CatchException
-def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- """
- txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
- llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
- plugin_kwargs 插件模型的参数,暂时没有用武之地
- chatbot 聊天显示框的句柄,用于显示给用户
- history 聊天历史,前情提要
- system_prompt 给gpt的静默提醒
- web_port 当前软件运行的端口号
- """
- history = [] # 清空历史,以免输入溢出
- chatbot.append(("这是什么功能?", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。"))
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
- # resolve deps
- try:
- from zh_langchain import construct_vector_store
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
- from .crazy_utils import knowledge_archive_interface
- except Exception as e:
- chatbot.append(
- ["依赖不足",
- "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."]
- )
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- from .crazy_utils import try_install_deps
- try_install_deps(['zh_langchain==0.2.1'])
-
- # < -------------------- read parameters --------------- >
- if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
- kai_id = plugin_kwargs.get("advanced_arg", 'default')
-
- # < -------------------- read files --------------- >
- file_manifest = []
- spl = ["txt", "doc", "docx", "email", "epub", "html", "json", "md", "msg", "pdf", "ppt", "pptx", "rtf"]
- for sp in spl:
- _, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}')
- file_manifest += file_manifest_tmp
-
- if len(file_manifest) == 0:
- chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-
- # < ------------------- warm up the text embedding module --------------- >
- chatbot.append([' '.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- print('Checking Text2vec ...')
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
- with ProxyNetworkActivate(): # temporarily enable the proxy network
- HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
-
- # < ------------------- build the knowledge base --------------- >
- chatbot.append([' '.join(file_manifest), "正在构建知识库..."])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- print('Establishing knowledge archive ...')
- with ProxyNetworkActivate(): # temporarily enable the proxy network
- kai = knowledge_archive_interface()
- kai.feed_archive(file_manifest=file_manifest, id=kai_id)
- kai_files = kai.get_loaded_file()
- kai_files = ' '.join(kai_files)
- # chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
- # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
- # chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
- # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
- chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # the GPT request takes a while, so update the interface once right away
-
-@CatchException
-def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
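- # Retrieve relevant context from a previously built knowledge archive (vector store), then ask the LLM to answer with it.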
- # resolve deps
- try:
- from zh_langchain import construct_vector_store
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
- from .crazy_utils import knowledge_archive_interface
- except Exception as e:
- chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- from .crazy_utils import try_install_deps
- try_install_deps(['zh_langchain==0.2.1'])
-
- # < ------------------- --------------- >
- kai = knowledge_archive_interface()
-
- if 'langchain_plugin_embedding' in chatbot._cookies:
- resp, prompt = kai.answer_with_archive_by_id(txt, chatbot._cookies['langchain_plugin_embedding'])
- else:
- if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
- kai_id = plugin_kwargs.get("advanced_arg", 'default')
- resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
-
- chatbot.append((txt, '[Local Message] ' + prompt))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # the GPT request takes a while, so update the interface once right away
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=prompt, inputs_show_user=txt,
- llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
- sys_prompt=system_prompt
- )
- history.extend((prompt, gpt_say))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # the GPT request takes a while, so update the interface once right away
diff --git a/spaces/fclong/summary/fengshen/examples/clue1.1/predict2submit/chid_submit.py b/spaces/fclong/summary/fengshen/examples/clue1.1/predict2submit/chid_submit.py
deleted file mode 100644
index 156bcd7955ebb4e1bc22fdcf1c04364e7094312b..0000000000000000000000000000000000000000
--- a/spaces/fclong/summary/fengshen/examples/clue1.1/predict2submit/chid_submit.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import json
-from tqdm import tqdm
-import argparse
-import numpy as np
-
-def save_data(data,file_path):
- with open(file_path, 'w', encoding='utf8') as f:
- json_data=json.dumps(data,ensure_ascii=False)
- f.write(json_data+'\n')
-
-
-def load_data(file_path,is_training=False):
- with open(file_path, 'r', encoding='utf8') as f:
- lines = f.readlines()
- result=[]
- for l,line in tqdm(enumerate(lines)):
- data = json.loads(line)
- result.append(data)
- return result
-
-
-def recls(line):
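- # Greedy assignment: repeatedly take the highest remaining score in the
- # candidate-by-label score matrix, fix that (candidate, label) pair, then zero
- # out its row and column so neither the candidate nor the label is reused.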
- mat=[]
- for l in line:
- s=[v for v in l['score'].values()]
- mat.append(s)
- mat=np.array(mat)
- batch,num_labels=mat.shape
- for i in range(len(line)):
- index = np.unravel_index(np.argmax(mat, axis=None), mat.shape)
- line[index[0]]['label'] = int(index[1])
- mat[index[0],:] = np.zeros((num_labels,))
- mat[:,index[1]] = np.zeros((batch,))
- return line
-
-
-
-
-
-
-def chid_m(data):
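- # Group predictions by passage ('line_id'), then assign a distinct label to each candidate within a passage.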
- lines={}
- for d in data:
- if d['line_id'] not in lines.keys():
- lines[d['line_id']]=[]
- lines[d['line_id']].append(d)
- result=[]
- for k,v in lines.items():
- result.extend(recls(v))
- return result
-
-
-
-def submit(file_path):
- lines = chid_m(load_data(file_path))
- result={}
- for line in tqdm(lines):
- data = line
- result[data['id']]=data['label']
- return result
-
-
-if __name__=="__main__":
- parser = argparse.ArgumentParser(description="train")
- parser.add_argument("--data_path", type=str,default="")
- parser.add_argument("--save_path", type=str,default="")
-
- args = parser.parse_args()
- save_data(submit(args.data_path), args.save_path)
-
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/After Motion X Mod APK Create Stunning Videos with Pro Features.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/After Motion X Mod APK Create Stunning Videos with Pro Features.md
deleted file mode 100644
index 6f4daf1de4294388324b47b746529bfa00addfb6..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/After Motion X Mod APK Create Stunning Videos with Pro Features.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
After Motion X Mod APK: A New Way to Create Stunning Videos
-
Do you love making videos and sharing them with your friends and followers? Do you want to unleash your creativity and impress everyone with your amazing video editing skills? If yes, then you need to try After Motion X Mod APK, a new and powerful video editing app that lets you create stunning videos in minutes.
After Motion X is a video editing app that allows you to create professional-looking videos with ease. You can use it to edit videos from your gallery or record new ones with your camera. You can also add music, voice-overs, stickers, text, and more to your videos. You can customize your videos with various effects and transitions, such as glitch, blur, zoom, fade, and more. You can also adjust the speed, brightness, contrast, saturation, and other parameters of your videos. You can preview your videos before saving them and export them in high quality. You can also share your videos directly to social media platforms, such as Instagram, Facebook, YouTube, TikTok, and more.
-
Features of After Motion X
-
After Motion X has many features that make it a great video editing app. Some of them are:
-
-
Easy and intuitive interface: You can easily navigate through the app and access all the tools and options you need.
-
Multiple video sources: You can edit videos from your gallery or record new ones with your camera.
-
Multiple audio sources: You can add music from your device or online sources, or record your own voice-overs.
-
Multiple stickers and text: You can add stickers and text to your videos to make them more fun and expressive.
-
Multiple effects and transitions: You can apply various effects and transitions to your videos to make them more dynamic and attractive.
-
Multiple editing options: You can trim, crop, rotate, flip, split, merge, duplicate, and reverse your videos as you wish.
-
Multiple output options: You can save your videos in different resolutions and formats, such as MP4, MOV, GIF, etc.
-
Multiple sharing options: You can share your videos directly to social media platforms or other apps.
-
-
How to use After Motion X
-
To use After Motion X, you need to follow these simple steps:
-
-
Download and install After Motion X Mod APK from a trusted source.
-
Open the app and grant the necessary permissions.
-
Select a video from your gallery or record a new one with your camera.
-
Edit your video using the tools and options available.
-
Preview your video and make any changes if needed.
-
Save your video and share it with others.
-
-
Why download After Motion X Mod APK?
-
If you are wondering why you should download After Motion X Mod APK instead of the original version of the app, here are some reasons:
-
-
Benefits of After Motion X Mod APK
-
After Motion X Mod APK has some benefits that make it better than the original version of the app. Some of them are:
-
-
No ads: You can enjoy using the app without any annoying ads interrupting you.
-
No watermark: You can save and share your videos without any watermark on them.
-
No limitations: You can use all the features and options of the app without any restrictions or limitations.
-
No root: You do not need to root your device to use the app.
-
How to download and install After Motion X Mod APK
-
To download and install After Motion X Mod APK, you need to follow these simple steps:
-
-
Click on the link below to download the APK file of After Motion X Mod APK.
-
Allow your device to install apps from unknown sources.
-
Locate the downloaded APK file and tap on it.
-
Follow the instructions on the screen to complete the installation.
-
If you want to make the most out of After Motion X Mod APK, here are some tips and tricks that you can use:
-
How to edit videos with After Motion X Mod APK
-
To edit videos with After Motion X Mod APK, you can use the following tools and options:
-
-
Cut: You can cut your video into smaller segments and delete the unwanted parts.
-
Crop: You can crop your video to fit different aspect ratios and remove the unwanted edges.
-
Rotate: You can rotate your video by 90, 180, or 270 degrees.
-
Flip: You can flip your video horizontally or vertically.
-
Split: You can split your video into two or more parts and rearrange them as you wish.
-
Merge: You can merge two or more videos into one and adjust the duration and order of each part.
-
Duplicate: You can duplicate your video and use it as a template for other edits.
-
Reverse: You can reverse your video and play it backwards.
-
-
How to add effects and transitions with After Motion X Mod APK
-
To add effects and transitions with After Motion X Mod APK, you can use the following tools and options:
-
-
Effects: You can apply various effects to your video, such as glitch, blur, zoom, fade, etc. You can also adjust the intensity and duration of each effect.
-
Transitions: You can add various transitions between your video segments, such as wipe, slide, dissolve, etc. You can also adjust the speed and direction of each transition.
-
-
How to export and share videos with After Motion X Mod APK
-
To export and share videos with After Motion X Mod APK, you can use the following tools and options:
-
-
Export: You can export your video in different resolutions and formats, such as MP4, MOV, GIF, etc. You can also choose the quality and frame rate of your video.
-
Share: You can share your video directly to social media platforms or other apps, such as Instagram, Facebook, YouTube, TikTok, etc. You can also save your video to your device or cloud storage.
-
-
Conclusion
-
After Motion X Mod APK is a new and powerful video editing app that lets you create stunning videos in minutes. You can use it to edit videos from your gallery or record new ones with your camera. You can also add music, voice-overs, stickers, text, and more to your videos. You can customize your videos with various effects and transitions, such as glitch, blur, zoom, fade, etc. You can also adjust the speed, brightness, contrast, saturation, and other parameters of your videos. You can preview your videos before saving them and export them in high quality. You can also share your videos directly to social media platforms or other apps.
-
If you want to enjoy using this app without any ads, watermarks, limitations, or root requirements, you should download After Motion X Mod APK from a trusted source. This modded version of the app gives you access to all the features and options of the app without any restrictions or limitations. You can download it from the link below and follow the instructions to install it on your device.
-
We hope this article has helped you learn more about After Motion X Mod APK and how to use it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
Here are some frequently asked questions about After Motion X Mod APK:
-
-
Is After Motion X Mod APK safe to use?
-
Yes, After Motion X Mod APK is safe to use as long as you download it from a trusted source. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before using them.
-
Is After Motion X Mod APK compatible with all devices?
-
After Motion X Mod APK is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may not support some features or options of the app due to hardware or software limitations.
-
Is After Motion X Mod APK legal to use?
-
After Motion X Mod APK is not an official version of the app and it may violate some terms and conditions of the original app or the app store. Therefore, you should use it at your own risk and discretion. We do not encourage or endorse any illegal or unethical use of the app.
-
How can I update After Motion X Mod APK?
-
After Motion X Mod APK may not receive regular updates from the original app developer. Therefore, you may need to check for updates from the source where you downloaded it from. You can also visit our website regularly to get the latest version of the app.
-
How can I contact the developer of After Motion X Mod APK?
-
After Motion X Mod APK is not developed by the original app developer, but by a third-party modder. Therefore, you may not be able to contact them directly. However, you can leave a comment on the source where you downloaded it from or on our website and we will try to help you as much as we can.
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Beautiful Black Wallpaper Images for Free - Pexels.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Beautiful Black Wallpaper Images for Free - Pexels.md
deleted file mode 100644
index 44fa98d7d960258015e19fb7deb6ef4734ccbb42..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Beautiful Black Wallpaper Images for Free - Pexels.md
+++ /dev/null
@@ -1,145 +0,0 @@
-
-
Free Download Black Background: Why and How to Use It
-
If you are looking for a simple yet effective way to enhance your digital design, you might want to consider using a black background. A black background can create a striking contrast, reduce eye strain, save battery life, and convey a sense of elegance and sophistication. In this article, we will explain what a black background is, why you should use it, how to download free black background images and wallpapers, and how to create your own black background images.
-
What is a black background and why use it?
-
Definition and benefits of a black background
-
A black background is a dark mode web design that uses a light-on-dark color scheme, meaning that the text and icons are light-colored while the background is dark or black. A black background can have different shades of darkness, ranging from pure black (#000000) to warm or cool blacks with different hues.
There are many benefits of using a black background, such as:
-
-
Cutting glare and reducing blue light exposure. A black background can help avoid screen glare, especially in low-light environments, and reduce the amount of blue light that can interfere with your sleep cycle and cause digital eye strain.
-
Extending battery life. A black background can save battery power by up to 30%, especially if your device has an OLED or AMOLED screen. This is because each pixel on an OLED screen produces its own light, and if it doesn't light up (and it's just black), it's off and not using power.
-
Making images pop. A black background can create a stunning contrast and make your images stand out more. This is why many photographers and artists use black backgrounds to showcase their work.
-
Conveying style and emotion. A black background can add character, impact, emphasis, and some emotional response to your web design. It can also convey a sense of elegance, sophistication, mystery, or power.
-
-
Examples of black background websites and photos
-
Many famous artists and photographers have used black backgrounds across different periods and genres. A few well-known examples from portrait photography are Annie Leibovitz in her self-portrait; Sharon Stone, photographed by Peter Lindbergh; and Audrey Hepburn, by Richard Avedon.
-
Black backgrounds are also popular in web design, especially for dark mode websites. Here are some examples of websites with black backgrounds that work well:
-
| Name | Description | URL |
| --- | --- | --- |
| Mirazur | A French restaurant that uses an earthy coffee-black for its dark background color to create a luxurious dining experience. | [Mirazur](^9^) |
| Pexels | A stock photo website that offers free download of high-quality black background images and wallpapers. | [Pexels](^1^) |
| Shotkit | A photography website that provides tips and tutorials on how to create a black background for photos. | [Shotkit](^10^) |
| TBH Creative | A web design agency that showcases examples of dark mode web design with different shades of black backgrounds. | [TBH Creative](^9^) |
| UX Movement | A user experience website that explains why you should never use pure black for text or backgrounds. | [UX Movement](^7^) |
-
How to download free black background images and wallpapers
Sources and tips for finding high-quality black background images
-
If you want to download free black background images and wallpapers, you have plenty of options to choose from. There are many websites that offer free stock photos, vectors, and illustrations that you can use for personal or commercial purposes. Some of the most popular ones are:
-
-
Pexels. This website has a large collection of free black background images and wallpapers that you can download in different resolutions and formats. You can also browse by categories, such as abstract, nature, technology, or art.
-
Unsplash. This website has over 2 million high-resolution images that you can download and use for free. You can find stunning black background images and wallpapers in various themes, such as space, animals, architecture, or fashion.
-
Pixabay. This website has over 1.8 million free images, videos, and music that you can use without attribution. You can find amazing black background images and wallpapers in different styles, such as minimalist, grunge, or vintage.
-
Freepik. This website has over 10 million free vectors, icons, photos, and PSD files that you can use for your design projects. You can find creative black background images and wallpapers in different shapes, patterns, and textures.
-
-
When looking for free black background images and wallpapers, here are some tips to keep in mind:
-
-
Check the license and attribution requirements. Some websites may require you to credit the author or source of the image, while others may not. Make sure you read the terms and conditions before downloading and using any image.
-
Choose the right resolution and format. Depending on your device or website, you may need a different resolution or format for your black background image or wallpaper. For example, if you want to use it for your smartphone, you may need a portrait orientation and a JPG or PNG format. If you want to use it for your desktop, you may need a landscape orientation and a BMP or TIFF format.
-
Optimize the image size and quality. To avoid slowing down your device or website, you may want to reduce the file size of your black background image or wallpaper without compromising its quality. You can use online tools such as TinyPNG or Compress JPEG to compress your image files.
-
-
How to set a black background on your device or website
-
Once you have downloaded your preferred black background image or wallpaper, you can easily set it as your device or website background. Here are some simple steps to follow:
-
-
-
For your smartphone or tablet:
-
-
Open the Settings app on your device.
-
Tap on Display or Wallpaper.
-
Select Choose a New Wallpaper or Change Wallpaper.
-
Browse your photo library or gallery and select the black background image or wallpaper that you downloaded.
-
Adjust the position and size of the image as you like.
-
Tap on Set or Apply to confirm your choice.
-
-
For your desktop or laptop:
-
-
Right-click on an empty area of your desktop screen.
-
Select Personalize or Properties.
-
Select Background or Desktop Background.
-
Browse your computer folders and select the black background image or wallpaper that you downloaded.
-
Select Fill, Fit, Stretch, Tile, or Center as the picture position option.
-
Click on Save Changes or OK to confirm your choice.
-
-
For your website:
-
-
Open the HTML file of your website in a text editor or a web design software.
-
Find the <body> tag of your HTML code.
-
Add the following CSS code inside the <body> tag: <style> body { background-image: url("your-image-url"); } </style> (see the complete example after these steps).
-
Replace "your-image-url" with the URL of the black background image or wallpaper that you downloaded. You can upload it to an online hosting service such as Imgur to get a URL for your image file.
-
Save and upload your HTML file to your web server.
-
-
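To put the steps above together, here is a minimal, self-contained sketch of an HTML page that uses a black background image. The file name black-background.jpg and the light text color are placeholder assumptions; swap in the image you actually downloaded.
-
```html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Black background demo</title>
  <style>
    /* black-background.jpg is a placeholder - use the file you downloaded */
    body {
      background-image: url("black-background.jpg");
      background-color: #000000;   /* solid fallback while the image loads */
      background-size: cover;      /* scale the image to fill the viewport */
      background-repeat: no-repeat;
      color: #f5f5f5;              /* light text for contrast on the dark background */
    }
  </style>
</head>
<body>
  <h1>Hello on a black background</h1>
</body>
</html>
```
-
Using background-size: cover keeps the image filling the screen on different devices, and the solid #000000 fallback keeps the page dark even if the image fails to load.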
-
-
\ No newline at end of file
diff --git a/spaces/fffffu/bing/src/app/loading.css b/spaces/fffffu/bing/src/app/loading.css
deleted file mode 100644
index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000
--- a/spaces/fffffu/bing/src/app/loading.css
+++ /dev/null
@@ -1,68 +0,0 @@
-::-webkit-scrollbar {
- width: 10px;
- height: 10px;
- display: none;
-}
-
-::-webkit-scrollbar-button:start:decrement,
-::-webkit-scrollbar-button:end:increment {
- height: 30px;
- background-color: transparent;
-}
-
-::-webkit-scrollbar-track-piece {
- background-color: #3b3b3b;
- -webkit-border-radius: 16px;
-}
-
-::-webkit-scrollbar-thumb:vertical {
- height: 50px;
- background-color: #666;
- border: 1px solid #eee;
- -webkit-border-radius: 6px;
-}
-
-/* loading start */
-.loading-spinner {
- display: flex;
- justify-content: center;
- align-items: center;
- height: 100vh;
- opacity: 1;
- transition: opacity .8s ease-out;
-}
-
-.loading-spinner.hidden {
- opacity: 0;
-}
-
-.loading-spinner>div {
- width: 30px;
- height: 30px;
- background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%);
-
- border-radius: 100%;
- display: inline-block;
- animation: sk-bouncedelay 1.4s infinite ease-in-out both;
-}
-
-.loading-spinner .bounce1 {
- animation-delay: -0.32s;
-}
-
-.loading-spinner .bounce2 {
- animation-delay: -0.16s;
-}
-
-@keyframes sk-bouncedelay {
-
- 0%,
- 80%,
- 100% {
- transform: scale(0);
- }
-
- 40% {
- transform: scale(1.0);
- }
-}
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/router/route.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/router/route.js
deleted file mode 100644
index cc643ac8bdb024f98348083432d0553f7ac30ea7..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/express/lib/router/route.js
+++ /dev/null
@@ -1,225 +0,0 @@
-/*!
- * express
- * Copyright(c) 2009-2013 TJ Holowaychuk
- * Copyright(c) 2013 Roman Shtylman
- * Copyright(c) 2014-2015 Douglas Christopher Wilson
- * MIT Licensed
- */
-
-'use strict';
-
-/**
- * Module dependencies.
- * @private
- */
-
-var debug = require('debug')('express:router:route');
-var flatten = require('array-flatten');
-var Layer = require('./layer');
-var methods = require('methods');
-
-/**
- * Module variables.
- * @private
- */
-
-var slice = Array.prototype.slice;
-var toString = Object.prototype.toString;
-
-/**
- * Module exports.
- * @public
- */
-
-module.exports = Route;
-
-/**
- * Initialize `Route` with the given `path`,
- *
- * @param {String} path
- * @public
- */
-
-function Route(path) {
- this.path = path;
- this.stack = [];
-
- debug('new %o', path)
-
- // route handlers for various http methods
- this.methods = {};
-}
-
-/**
- * Determine if the route handles a given method.
- * @private
- */
-
-Route.prototype._handles_method = function _handles_method(method) {
- if (this.methods._all) {
- return true;
- }
-
- var name = method.toLowerCase();
-
- if (name === 'head' && !this.methods['head']) {
- name = 'get';
- }
-
- return Boolean(this.methods[name]);
-};
-
-/**
- * @return {Array} supported HTTP methods
- * @private
- */
-
-Route.prototype._options = function _options() {
- var methods = Object.keys(this.methods);
-
- // append automatic head
- if (this.methods.get && !this.methods.head) {
- methods.push('head');
- }
-
- for (var i = 0; i < methods.length; i++) {
- // make upper case
- methods[i] = methods[i].toUpperCase();
- }
-
- return methods;
-};
-
-/**
- * dispatch req, res into this route
- * @private
- */
-
-Route.prototype.dispatch = function dispatch(req, res, done) {
- var idx = 0;
- var stack = this.stack;
- var sync = 0
-
- if (stack.length === 0) {
- return done();
- }
-
- var method = req.method.toLowerCase();
- if (method === 'head' && !this.methods['head']) {
- method = 'get';
- }
-
- req.route = this;
-
- next();
-
- function next(err) {
- // signal to exit route
- if (err && err === 'route') {
- return done();
- }
-
- // signal to exit router
- if (err && err === 'router') {
- return done(err)
- }
-
- // max sync stack
- if (++sync > 100) {
- return setImmediate(next, err)
- }
-
- var layer = stack[idx++]
-
- // end of layers
- if (!layer) {
- return done(err)
- }
-
- if (layer.method && layer.method !== method) {
- next(err)
- } else if (err) {
- layer.handle_error(err, req, res, next);
- } else {
- layer.handle_request(req, res, next);
- }
-
- sync = 0
- }
-};
-
-/**
- * Add a handler for all HTTP verbs to this route.
- *
- * Behaves just like middleware and can respond or call `next`
- * to continue processing.
- *
- * You can use multiple `.all` call to add multiple handlers.
- *
- * function check_something(req, res, next){
- * next();
- * };
- *
- * function validate_user(req, res, next){
- * next();
- * };
- *
- * route
- * .all(validate_user)
- * .all(check_something)
- * .get(function(req, res, next){
- * res.send('hello world');
- * });
- *
- * @param {function} handler
- * @return {Route} for chaining
- * @api public
- */
-
-Route.prototype.all = function all() {
- var handles = flatten(slice.call(arguments));
-
- for (var i = 0; i < handles.length; i++) {
- var handle = handles[i];
-
- if (typeof handle !== 'function') {
- var type = toString.call(handle);
- var msg = 'Route.all() requires a callback function but got a ' + type
- throw new TypeError(msg);
- }
-
- var layer = Layer('/', {}, handle);
- layer.method = undefined;
-
- this.methods._all = true;
- this.stack.push(layer);
- }
-
- return this;
-};
-
-methods.forEach(function(method){
- Route.prototype[method] = function(){
- var handles = flatten(slice.call(arguments));
-
- for (var i = 0; i < handles.length; i++) {
- var handle = handles[i];
-
- if (typeof handle !== 'function') {
- var type = toString.call(handle);
- var msg = 'Route.' + method + '() requires a callback function but got a ' + type
- throw new Error(msg);
- }
-
- debug('%s %o', method, this.path)
-
- var layer = Layer('/', {}, handle);
- layer.method = method;
-
- this.methods[method] = true;
- this.stack.push(layer);
- }
-
- return this;
- };
-});
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/toidentifier/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/toidentifier/index.js
deleted file mode 100644
index 9295d024a8c94ee27b3e2b437769599ac5f2b65d..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/toidentifier/index.js
+++ /dev/null
@@ -1,32 +0,0 @@
-/*!
- * toidentifier
- * Copyright(c) 2016 Douglas Christopher Wilson
- * MIT Licensed
- */
-
-'use strict'
-
-/**
- * Module exports.
- * @public
- */
-
-module.exports = toIdentifier
-
-/**
- * Transform the given string into a JavaScript identifier
- *
- * @param {string} str
- * @returns {string}
- * @public
- */
-
-function toIdentifier (str) {
- return str
- .split(' ')
- .map(function (token) {
- return token.slice(0, 1).toUpperCase() + token.slice(1)
- })
- .join('')
- .replace(/[^ _0-9a-z]/gi, '')
-}
diff --git a/spaces/fffiloni/simple-animation-doodle/AnimSys.js b/spaces/fffiloni/simple-animation-doodle/AnimSys.js
deleted file mode 100644
index 3ed41ee8b19561adefd4fb884607286d3e9433f7..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/simple-animation-doodle/AnimSys.js
+++ /dev/null
@@ -1,274 +0,0 @@
-class AnimSys {
-
- constructor(fps, frame_limit, onions_tint, initial_frame_nb){
- this.fps = fps;
- this.frame_limit = frame_limit;
- this.onions_tint = onions_tint;
- this.initial_frame_nb = initial_frame_nb;
- this.framesList = [];
-
- this.frameGraphics = createGraphics(width, height);
- this.onionGraphics = createGraphics(width, height);
-
- this.UI = createDiv('');
- this.UI.id('ui-container');
-
- this.createFrame_btn = createButton('');
- this.createFrame_btn.mousePressed(this.create_new_frame.bind(this));
- //this.createFrame_btn.parent(this.UI);
- this.createFrame_btn.parent('right-panel');
-
- this.show_onion_btn = createButton('');
- this.show_onion_btn.mousePressed(this.switch_onions.bind(this));
- //this.show_onion_btn.parent(this.UI);
- this.show_onion_btn.parent('right-panel');
-
- this.hide_onion_btn = createButton('').hide();
- this.hide_onion_btn.id('hide-onion-btn');
- this.hide_onion_btn.mousePressed(this.switch_onions.bind(this));
- //this.hide_onion_btn.parent(this.UI);
- this.hide_onion_btn.parent('right-panel');
-
- this.clear_frame_btn = createButton('');
- this.clear_frame_btn.mousePressed(this.clear_frame.bind(this));
- //this.clear_frame_btn.parent(this.UI);
- this.clear_frame_btn.parent('left-panel');
-
- this.play_btn = createButton('');
- this.play_btn.id('play-btn');
- this.play_btn.mousePressed(this.play_anim.bind(this));
- //this.play_btn.parent(this.UI);
- this.play_btn.parent('right-panel');
-
- this.stop_btn = createButton('').hide();
- this.stop_btn.id('stop-btn');
- this.stop_btn.mousePressed(this.play_anim.bind(this));
- //this.stop_btn.parent(this.UI);
- this.stop_btn.parent('right-panel');
-
- this.timeline = createDiv('timeline');
- this.timeline.id('timeline');
- this.timeline.parent('timeline-ctn');
-
- this.frame_displayed = 0;
-
- this.isPlaying = false;
- this.play_interval = null;
-
- this.showOnions = false;
-
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
-
- update_frame_list(){
- //flush timeline elements
- this.timeline.html('');
- //insert frames + new one in timeline
- for (let [i,frame] of this.framesList.entries()){
- let frameDiv = createDiv(i+1);
- frameDiv.id('frame-number-' + i);
- frameDiv.class('aframe');
- frameDiv.parent(this.timeline);
- frameDiv.mousePressed( () =>{
- this.display_frame(i);
- })
- redraw();
- }
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
-
- create_new_frame(data){
-
- if(this.framesList.length == this.frame_limit){
- console.warn('you cannot create more than ' + this.frame_limit + ' frames for this project')
- } else {
- let image64;
- if(data){
- image64 = data.image64;
- } else {
- this.frameGraphics.clear();
- this.frameGraphics.loadPixels();
- image64 = this.frameGraphics.canvas.toDataURL('image/png');
- }
-
- let new_frame = {
- "img_data": image64
- };
- this.framesList.push(new_frame);
-
- this.update_frame_list();
-
- if(this.framesList.length > 0){
- this.frame_displayed = this.framesList.length -1;
- }
-
- this.display_frame(this.frame_displayed);
- }
-
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
-
- display_frame(frame_index){
-
- if(this.isPlaying == true){
-
- if(frame_index == this.framesList.length - 1){
- frame_index = 0;
- } else {
- frame_index++;
- }
-
- }
-
- this.frame_displayed = frame_index;
-
- let getAllDiv = document.querySelectorAll('.aframe');
- getAllDiv.forEach(aframe => {
- aframe.classList.remove('current-frame');
- });
-
- let getDiv = select('#frame-number-' + this.frame_displayed);
- getDiv.addClass('current-frame');
-
- if(this.framesList[frame_index].img_data !== undefined){
-
- let display = loadImage(this.framesList[frame_index].img_data, function(){
-
- this.frameGraphics.clear();
- this.frameGraphics.image(display, 0, 0);
-
- }.bind(this));
-
- }
-
-
- //redraw();
-    // TODO: handle the onion skins in a separate function for better efficiency
-
-
-
- if( (this.isPlaying == false && this.showOnions == true) || (this.isPlaying == true && this.showOnions == true) ){
-
- let onion_index;
-
- if (frame_index == 0){
-
- onion_index = this.framesList.length - 1;
-
- } else {
-
- onion_index = frame_index - 1;
-
- }
-
- let displayOnions = loadImage(this.framesList[onion_index].img_data, function(){
-
- this.onionGraphics.clear();
- this.onionGraphics.tint(255, this.onions_tint);
- this.onionGraphics.image(displayOnions, 0, 0);
-
- }.bind(this));
- }
-
-
-
-
- setTimeout(redraw, 10)
-
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
- update_frame(){
-
- this.frameGraphics.loadPixels();
- let image64 = this.frameGraphics.canvas.toDataURL('image/png');
-
- let data = {"image64": image64}
-
- if(this.framesList.length == 0){
-
- this.create_new_frame(data);
-
- } else {
-
- this.framesList[this.frame_displayed].img_data = image64;
-
- }
-
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
- clear_frame(){
-    // add here all the graphics layers you need to clear
- this.frameGraphics.clear();
- this.update_frame();
- redraw();
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
- switch_onions(){
- if(this.showOnions == false){
-
- this.showOnions = true;
- this.show_onion_btn.hide();
- this.hide_onion_btn.show();
-
- } else {
-
- this.showOnions = false;
- this.hide_onion_btn.hide();
- this.show_onion_btn.show();
-
- }
- if(this.framesList.length != 0){
- setTimeout(function(){
- this.display_frame(this.frame_displayed)
- }.bind(this), 10);
- }
-
-
- }
-
- // -----------------------------------------
- // -----------------------------------------
-
- play_anim(){
- if(this.framesList.length > 0){
- if (this.isPlaying == false){
- if(this.showOnions == true){
- this.switch_onions();
- }
- this.isPlaying = true;
- this.play_interval = setInterval(function(){
- this.display_frame(this.frame_displayed)
- }.bind(this), 1000/this.fps);
- this.play_btn.hide();
- this.stop_btn.show();
- } else {
- clearInterval(this.play_interval);
- this.isPlaying = false;
- this.stop_btn.hide();
- this.play_btn.show();
-
- }
- } else {
- console.log("Create a first capture before playing")
- }
- }
-
-}
\ No newline at end of file
diff --git a/spaces/fffiloni/stable-diffusion-img2img/README.md b/spaces/fffiloni/stable-diffusion-img2img/README.md
deleted file mode 100644
index d01cc45b9f3623326df8c751a3d65f6850172a77..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/stable-diffusion-img2img/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stable Diffusion Img2img CPU
-emoji: 🎨🌠
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/fkhuggingme/gpt-academic/request_llm/bridge_chatglm.py b/spaces/fkhuggingme/gpt-academic/request_llm/bridge_chatglm.py
deleted file mode 100644
index 7c86a22316cda8d6568afbd27e7d6e652703fb7f..0000000000000000000000000000000000000000
--- a/spaces/fkhuggingme/gpt-academic/request_llm/bridge_chatglm.py
+++ /dev/null
@@ -1,160 +0,0 @@
-
-from transformers import AutoModel, AutoTokenizer
-import time
-import threading
-import importlib
-from toolbox import update_ui, get_conf
-from multiprocessing import Process, Pipe
-
-load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
-
-#################################################################################
-class GetGLMHandle(Process):
- def __init__(self):
- super().__init__(daemon=True)
- self.parent, self.child = Pipe()
- self.chatglm_model = None
- self.chatglm_tokenizer = None
- self.info = ""
- self.success = True
- self.check_dependency()
- self.start()
- self.threadLock = threading.Lock()
-
- def check_dependency(self):
- try:
- import sentencepiece
- self.info = "依赖检测通过"
- self.success = True
- except:
- self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
- self.success = False
-
- def ready(self):
- return self.chatglm_model is not None
-
- def run(self):
-        # Runs in the child process
-        # First run: load the model parameters
- retry = 0
- while True:
- try:
- if self.chatglm_model is None:
- self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
- device, = get_conf('LOCAL_MODEL_DEVICE')
- if device=='cpu':
- self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
- else:
- self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
- self.chatglm_model = self.chatglm_model.eval()
- break
- else:
- break
- except:
- retry += 1
- if retry > 3:
- self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
- raise RuntimeError("不能正常加载ChatGLM的参数!")
-
- while True:
-            # Wait for the next task
- kwargs = self.child.recv()
-            # Message received, start the request
- try:
- for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
- self.child.send(response)
-                # # Optionally receive a terminate command mid-stream (if any)
- # if self.child.poll():
- # command = self.child.recv()
- # if command == '[Terminate]': break
- except:
- self.child.send('[Local Message] Call ChatGLM fail.')
-            # Request finished, start the next loop
- self.child.send('[Finish]')
-
- def stream_chat(self, **kwargs):
-        # Runs in the main process
- self.threadLock.acquire()
- self.parent.send(kwargs)
- while True:
- res = self.parent.recv()
- if res != '[Finish]':
- yield res
- else:
- break
- self.threadLock.release()
-
-global glm_handle
-glm_handle = None
-#################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
- """
-    Multi-threaded method.
-    See request_llm/bridge_all.py for the function documentation.
- """
- global glm_handle
- if glm_handle is None:
- glm_handle = GetGLMHandle()
- observe_window[0] = load_message + "\n\n" + glm_handle.info
- if not glm_handle.success:
- error = glm_handle.info
- glm_handle = None
- raise RuntimeError(error)
-
-    # ChatGLM has no sys_prompt interface, so fold the system prompt into the history
- history_feedin = []
- history_feedin.append(["What can I do?", sys_prompt])
- for i in range(len(history)//2):
- history_feedin.append([history[2*i], history[2*i+1]] )
-
-    watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
- response = ""
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- observe_window[0] = response
- if len(observe_window) >= 2:
- if (time.time()-observe_window[1]) > watch_dog_patience:
- raise RuntimeError("程序终止。")
- return response
-
-
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
- """
-    Single-threaded method.
-    See request_llm/bridge_all.py for the function documentation.
- """
- chatbot.append((inputs, ""))
-
- global glm_handle
- if glm_handle is None:
- glm_handle = GetGLMHandle()
- chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
- yield from update_ui(chatbot=chatbot, history=[])
- if not glm_handle.success:
- glm_handle = None
- return
-
- if additional_fn is not None:
- import core_functional
-        importlib.reload(core_functional)    # hot-reload the prompt templates
- core_functional = core_functional.get_core_functions()
- if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
-    # Build the conversation history
- history_feedin = []
- history_feedin.append(["What can I do?", system_prompt] )
- for i in range(len(history)//2):
- history_feedin.append([history[2*i], history[2*i+1]] )
-
-    # Start receiving ChatGLM's streamed reply
- response = "[Local Message]: 等待ChatGLM响应中 ..."
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- chatbot[-1] = (inputs, response)
- yield from update_ui(chatbot=chatbot, history=history)
-
-    # Final output summary
- if response == "[Local Message]: 等待ChatGLM响应中 ...":
- response = "[Local Message]: ChatGLM响应异常 ..."
- history.extend([inputs, response])
- yield from update_ui(chatbot=chatbot, history=history)
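The deleted bridge_chatglm.py keeps the model in a separate process and streams partial replies back over a multiprocessing Pipe, ending each request with a '[Finish]' sentinel. A minimal, self-contained sketch of that parent/child streaming pattern, with a dummy generator standing in for ChatGLM's stream_chat (the real model and the gpt-academic plumbing are not reproduced here), might look like this:

```python
# Sketch only: a worker process streams partial results over a Pipe and marks
# the end of each request with a "[Finish]" sentinel, mirroring GetGLMHandle.
# dummy_stream() is a stand-in for ChatGLM's stream_chat(), not the real model.
from multiprocessing import Pipe, Process


def dummy_stream(prompt):
    # Pretend to generate a reply one word at a time.
    reply = ""
    for word in ["Echo:", *prompt.split()]:
        reply = (reply + " " + word).strip()
        yield reply


def worker(conn):
    while True:
        prompt = conn.recv()                  # wait for the next task
        for partial in dummy_stream(prompt):  # stream partial replies back
            conn.send(partial)
        conn.send("[Finish]")                 # request finished


def stream_chat(parent_conn, prompt):
    parent_conn.send(prompt)
    while True:
        res = parent_conn.recv()
        if res == "[Finish]":
            break
        yield res


if __name__ == "__main__":
    parent, child = Pipe()
    Process(target=worker, args=(child,), daemon=True).start()
    for partial in stream_chat(parent, "hello pipe streaming"):
        print(partial)
```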
diff --git a/spaces/flax-community/Multilingual-VQA/sections/challenges.md b/spaces/flax-community/Multilingual-VQA/sections/challenges.md
deleted file mode 100644
index ba358cae24cfa9fb4c2b6d75c87e3dc09ebd3f5d..0000000000000000000000000000000000000000
--- a/spaces/flax-community/Multilingual-VQA/sections/challenges.md
+++ /dev/null
@@ -1,13 +0,0 @@
-We faced challenges at every step of the way, despite having some example scripts and models in Flax already provided by the 🤗 team.
-
- The dataset we used, Conceptual 12M, took 2-3 days to translate using MBart (since we didn't have Marian at the time). The major bottleneck was implementing the translation efficiently; we tried `mtranslate` first, but it turned out to be too slow, even with multiprocessing.
-
- Translations from deep learning models aren't as "perfect" as those from translation APIs like Google and Yandex, which could lead to poorer downstream performance.
-
- We wrote the model and config classes from scratch, basing them on the `CLIP Vision` and `BERT` implementations in Flax. The major challenge was that the ViT embeddings have to be used inside the BERT embeddings class.
-
- We prepared training scripts for image-text MLM and sequence classification, which we based on the hybrid CLIP, masked LM, and text classification examples.
-
- We were only able to get around 1.5 days of training time on TPUs due to the above-mentioned challenges, so we were unable to perform hyperparameter tuning. Our [loss curves on the pre-training](https://huggingface.co/flax-community/multilingual-vqa/tensorboard) show that training hasn't converged, and we could still see the MLM accuracy improving.
-
- The VQA dataset, despite having many examples (and 4x as many after translation), is still small, and the model overfits. Addressing this requires more multilingual data and lighter models, both of which are major challenges right now.
\ No newline at end of file
diff --git a/spaces/freddyaboulton/git-large-coco/app.py b/spaces/freddyaboulton/git-large-coco/app.py
deleted file mode 100644
index 810d5243ebcc5f8a1a73fc17dcd30ca138111e27..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/git-large-coco/app.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import gradio as gr
-from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, VisionEncoderDecoderModel
-import torch
-
-torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
-torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
-torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
-
-git_processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-git_model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-git_model.to(device)
-
-def generate_caption(processor, model, image):
- inputs = processor(images=image, return_tensors="pt").to(device)
- generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
- generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
- return generated_caption
-
-
-def generate_captions(image):
- return generate_caption(git_processor, git_model, image)
-
-
-examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated by GIT-Large coco")]
-
-title = "Interactive demo: GIT-Large coco image captioning"
-description = "GIT-Large coco image captioning"
-article = "
"
-
-interface = gr.Interface(fn=generate_captions,
- inputs=gr.inputs.Image(type="pil"),
- outputs=outputs,
- examples=examples,
- title=title,
- description=description,
- article=article,
- enable_queue=True)
-interface.launch(debug=True)
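Stripped of the Gradio UI, the captioning step above reduces to one processor call plus model.generate. A minimal sketch reusing the same calls and checkpoint (assuming a local cats.jpg such as the one the app downloads, and enough RAM to load GIT-Large on CPU):

```python
# Caption a single local image with the same GIT-Large COCO checkpoint as above.
# "cats.jpg" is assumed to exist locally (the deleted app downloads it from COCO).
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")

image = Image.open("cats.jpg")
inputs = processor(images=image, return_tensors="pt")
generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```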
diff --git a/spaces/gagan3012/T5-Summarization/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/gagan3012/T5-Summarization/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index bbcbbe7d61558adde3cbfd0c7a63a67c27ed6d30..0000000000000000000000000000000000000000
--- a/spaces/gagan3012/T5-Summarization/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/spaces/gatilin/damo-yolo-webui/app.py b/spaces/gatilin/damo-yolo-webui/app.py
deleted file mode 100644
index 80dc51faad957a6285dc6c3340aee9766b09f145..0000000000000000000000000000000000000000
--- a/spaces/gatilin/damo-yolo-webui/app.py
+++ /dev/null
@@ -1,92 +0,0 @@
-
-import os
-os.system("pip install tensorflow")
-os.system("pip install modelscope")
-os.system("pip install thop")
-os.system("pip install easydict ")
-
-import gradio as gr
-import PIL.Image as Image
-import torch
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-import cv2
-import numpy as np
-import random
-
-import warnings
-
-warnings.filterwarnings("ignore")
-
-def object_detection(img_pil, confidence_threshold, device):
-    # Load the model
- p = pipeline(task='image-object-detection', model='damo/cv_tinynas_object-detection_damoyolo', device=device)
-
-    # Run inference on the image
- result = p(img_pil)
-    # Convert the PIL image to an OpenCV (BGR) array
- img_cv = cv2.cvtColor(np.asarray(img_pil), cv2.COLOR_RGB2BGR)
-    # Get the boxes, scores and class labels
- scores = result['scores']
- boxes = result['boxes']
- labels = result['labels']
-    # Iterate over each bbox
- for i in range(len(scores)):
-        # Only draw boxes whose confidence exceeds the threshold
- if scores[i] > confidence_threshold:
-            # Pick a random color for this box
- class_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
-            # Get the bbox coordinates
- x1, y1, x2, y2 = boxes[i]
- x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-            # Draw the bbox
- cv2.rectangle(img_cv, (x1, y1), (x2, y2), class_color, thickness=2)
-            # Draw the class label
- label = f"{labels[i]}: {scores[i]:.2f}"
- cv2.putText(img_cv, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, class_color, thickness=2)
- img_pil = Image.fromarray(cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB))
- return img_pil
-
-
-def download_test_image():
- # Images
- torch.hub.download_url_to_file(
- 'https://user-images.githubusercontent.com/59380685/266264420-21575a83-4057-41cf-8a4a-b3ea6f332d79.jpg',
- 'bus.jpg')
- torch.hub.download_url_to_file(
- 'https://user-images.githubusercontent.com/59380685/266264536-82afdf58-6b9a-4568-b9df-551ee72cb6d9.jpg',
- 'dogs.jpg')
- torch.hub.download_url_to_file(
- 'https://user-images.githubusercontent.com/59380685/266264600-9d0c26ca-8ba6-45f2-b53b-4dc98460c43e.jpg',
- 'zidane.jpg')
-
-
-if __name__ == '__main__':
- download_test_image()
-    # Define the Gradio inputs and outputs
- input_image = gr.inputs.Image(type='pil')
- input_slide = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.5, label="Confidence Threshold")
- input_device = gr.inputs.Radio(["cpu", "cuda", "gpu"], default="cpu")
- output_image = gr.outputs.Image(type='pil')
-
- examples = [['bus.jpg', 0.45, "cpu"],
- ['dogs.jpg', 0.45, "cpu"],
- ['zidane.jpg', 0.45, "cpu"]]
- title = "DAMO-YOLO web demo"
- description = "
" \
- "
DAMO-YOLO DAMO-YOLO DAMO-YOLO DAMO-YOLO:一种快速准确的目标检测方法,采用了一些新技术,包括 NAS 主干、高效的 RepGFPN、ZeroHead、AlignedOTA 和蒸馏增强。" \
- "DAMO-YOLO: a fast and accurate object detection method with some new techs, including NAS backbones, efficient RepGFPN, ZeroHead, AlignedOTA, and distillation enhancement..
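For reference, the detection call that this app wraps in Gradio reduces to a single ModelScope pipeline invocation. A minimal sketch reusing the same task and model identifiers (assuming modelscope and its dependencies are installed and a local bus.jpg exists):

```python
# Run the DAMO-YOLO detector on one image and print detections above a threshold,
# using the same task/model identifiers as the deleted app.py. "bus.jpg" is assumed local.
from modelscope.pipelines import pipeline

detector = pipeline(task='image-object-detection',
                    model='damo/cv_tinynas_object-detection_damoyolo',
                    device='cpu')
result = detector('bus.jpg')
for score, box, label in zip(result['scores'], result['boxes'], result['labels']):
    if score > 0.45:  # confidence threshold, matching the demo's example value
        print(f"{label}: {score:.2f} at {box}")
```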
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/IMacros Enterprise Edition Unlimited V8.03.2216 Serial Key.md b/spaces/inplisQlawa/anything-midjourney-v4-1/IMacros Enterprise Edition Unlimited V8.03.2216 Serial Key.md
deleted file mode 100644
index 6efc9ba1f8577152ecbb371259113d019f9a6801..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/IMacros Enterprise Edition Unlimited V8.03.2216 Serial Key.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-IMacros Enterprise Edition Unlimited V8.03.2216 Serial Key
-
-Download. The Enterprise Player Edition includes free unlimited player licenses for use inside your ... Your existing license key will simply work ... 1fdad05405
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Breakaway Live 09096 Serial Crack VERIFIED.md b/spaces/inreVtussa/clothingai/Examples/Breakaway Live 09096 Serial Crack VERIFIED.md
deleted file mode 100644
index ec12558bf51ea3f90c5798eece305f9d507687f0..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Breakaway Live 09096 Serial Crack VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Signing up for 3- digit codes will result in a larger number of notifications ... Reptiles, Live ... LANYARDS, ID BADGE, BREAKAWAY TYPE, HOOK FASTENER ... 09096. Wrapping Machines, Bakery. 095. BARBER AND BEAUTY SHOP EQUIPMENT AND SUPPLIES ... CARDS, SERIAL AND PARALLEL, MICROCOMPUTER. 1fdad05405
-
-
-
diff --git a/spaces/jackli888/stable-diffusion-webui/javascript/textualInversion.js b/spaces/jackli888/stable-diffusion-webui/javascript/textualInversion.js
deleted file mode 100644
index 1103cf6fb1c0d9f0fd6f22dd3d66e8c9d1edbe6c..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/javascript/textualInversion.js
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-
-function start_training_textual_inversion(){
- gradioApp().querySelector('#ti_error').innerHTML=''
-
- var id = randomId()
- requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function(){}, function(progress){
- gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo
- })
-
- var res = args_to_array(arguments)
-
- res[0] = id
-
- return res
-}
diff --git a/spaces/jackli888/stable-diffusion-webui/scripts/postprocessing_gfpgan.py b/spaces/jackli888/stable-diffusion-webui/scripts/postprocessing_gfpgan.py
deleted file mode 100644
index 9f7c2baaa28333958818d332324b34bcb8bce3ca..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/scripts/postprocessing_gfpgan.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from PIL import Image
-import numpy as np
-
-from modules import scripts_postprocessing, gfpgan_model
-import gradio as gr
-
-from modules.ui_components import FormRow
-
-
-class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
- name = "GFPGAN"
- order = 2000
-
- def ui(self):
- with FormRow():
- gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")
-
- return {
- "gfpgan_visibility": gfpgan_visibility,
- }
-
- def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
- if gfpgan_visibility == 0:
- return
-
- restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
- res = Image.fromarray(restored_img)
-
- if gfpgan_visibility < 1.0:
- res = Image.blend(pp.image, res, gfpgan_visibility)
-
- pp.image = res
- pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3)
diff --git a/spaces/jessica198601/jzlqy/README.md b/spaces/jessica198601/jzlqy/README.md
deleted file mode 100644
index a22ec0637867c54a3672818cff55152ae5bf4cb0..0000000000000000000000000000000000000000
--- a/spaces/jessica198601/jzlqy/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Jzlqy
-emoji: 🔥
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.38.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/CSYNC.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/CSYNC.py
deleted file mode 100644
index 315da9ffc77ce493e413b9faa3329f602383f508..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/CSYNC.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
-
-# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose with or without fee is hereby granted,
-# provided that the above copyright notice and this permission notice
-# appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import struct
-
-import dns.exception
-import dns.immutable
-import dns.name
-import dns.rdata
-import dns.rdatatype
-import dns.rdtypes.util
-
-
-@dns.immutable.immutable
-class Bitmap(dns.rdtypes.util.Bitmap):
- type_name = "CSYNC"
-
-
-@dns.immutable.immutable
-class CSYNC(dns.rdata.Rdata):
-
- """CSYNC record"""
-
- __slots__ = ["serial", "flags", "windows"]
-
- def __init__(self, rdclass, rdtype, serial, flags, windows):
- super().__init__(rdclass, rdtype)
- self.serial = self._as_uint32(serial)
- self.flags = self._as_uint16(flags)
- if not isinstance(windows, Bitmap):
- windows = Bitmap(windows)
- self.windows = tuple(windows.windows)
-
- def to_text(self, origin=None, relativize=True, **kw):
- text = Bitmap(self.windows).to_text()
- return "%d %d%s" % (self.serial, self.flags, text)
-
- @classmethod
- def from_text(
- cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
- ):
- serial = tok.get_uint32()
- flags = tok.get_uint16()
- bitmap = Bitmap.from_text(tok)
- return cls(rdclass, rdtype, serial, flags, bitmap)
-
- def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
- file.write(struct.pack("!IH", self.serial, self.flags))
- Bitmap(self.windows).to_wire(file)
-
- @classmethod
- def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
- (serial, flags) = parser.get_struct("!IH")
- bitmap = Bitmap.from_wire_parser(parser)
- return cls(rdclass, rdtype, serial, flags, bitmap)
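As a quick reference for the format this class implements, a CSYNC rdata's presentation form is the SOA serial, the flags word, and an NSEC-style type bitmap. A minimal sketch with dnspython (the string-accepting dns.rdata.from_text of dnspython 2.x is assumed):

```python
# Parse a CSYNC record from presentation format and read its fields back.
# Text form is "<SOA serial> <flags> <type bitmap>", matching from_text()/to_text() above.
import dns.rdata

rdata = dns.rdata.from_text("IN", "CSYNC", "66 3 A NS AAAA")
print(rdata.serial, rdata.flags)  # -> 66 3
print(rdata.to_text())            # -> 66 3 A NS AAAA
```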
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/asyn.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/asyn.py
deleted file mode 100644
index 347e262ad054a6d5c06c85c894c873b58b340c00..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/asyn.py
+++ /dev/null
@@ -1,1091 +0,0 @@
-import asyncio
-import asyncio.events
-import functools
-import inspect
-import io
-import numbers
-import os
-import re
-import threading
-from contextlib import contextmanager
-from glob import has_magic
-from typing import TYPE_CHECKING, Iterable
-
-from .callbacks import _DEFAULT_CALLBACK
-from .exceptions import FSTimeoutError
-from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
-from .spec import AbstractBufferedFile, AbstractFileSystem
-from .utils import is_exception, other_paths
-
-private = re.compile("_[^_]")
-iothread = [None] # dedicated fsspec IO thread
-loop = [None] # global event loop for any non-async instance
-_lock = None # global lock placeholder
-get_running_loop = asyncio.get_running_loop
-
-
-def get_lock():
- """Allocate or return a threading lock.
-
- The lock is allocated on first use to allow setting one lock per forked process.
- """
- global _lock
- if not _lock:
- _lock = threading.Lock()
- return _lock
-
-
-def reset_lock():
- """Reset the global lock.
-
- This should be called only on the init of a forked process to reset the lock to
- None, enabling the new forked process to get a new lock.
- """
- global _lock
-
- iothread[0] = None
- loop[0] = None
- _lock = None
-
-
-async def _runner(event, coro, result, timeout=None):
- timeout = timeout if timeout else None # convert 0 or 0.0 to None
- if timeout is not None:
- coro = asyncio.wait_for(coro, timeout=timeout)
- try:
- result[0] = await coro
- except Exception as ex:
- result[0] = ex
- finally:
- event.set()
-
-
-def sync(loop, func, *args, timeout=None, **kwargs):
- """
- Make loop run coroutine until it returns. Runs in other thread
-
- Examples
- --------
- >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
- timeout=timeout, **kwargs)
- """
- timeout = timeout if timeout else None # convert 0 or 0.0 to None
- # NB: if the loop is not running *yet*, it is OK to submit work
- # and we will wait for it
- if loop is None or loop.is_closed():
- raise RuntimeError("Loop is not running")
- try:
- loop0 = asyncio.events.get_running_loop()
- if loop0 is loop:
- raise NotImplementedError("Calling sync() from within a running loop")
- except NotImplementedError:
- raise
- except RuntimeError:
- pass
- coro = func(*args, **kwargs)
- result = [None]
- event = threading.Event()
- asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
- while True:
- # this loops allows thread to get interrupted
- if event.wait(1):
- break
- if timeout is not None:
- timeout -= 1
- if timeout < 0:
- raise FSTimeoutError
-
- return_result = result[0]
- if isinstance(return_result, asyncio.TimeoutError):
- # suppress asyncio.TimeoutError, raise FSTimeoutError
- raise FSTimeoutError from return_result
- elif isinstance(return_result, BaseException):
- raise return_result
- else:
- return return_result
-
-
-def sync_wrapper(func, obj=None):
- """Given a function, make so can be called in async or blocking contexts
-
- Leave obj=None if defining within a class. Pass the instance if attaching
- as an attribute of the instance.
- """
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- self = obj or args[0]
- return sync(self.loop, func, *args, **kwargs)
-
- return wrapper
-
-
-@contextmanager
-def _selector_policy():
- original_policy = asyncio.get_event_loop_policy()
- try:
- if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
- yield
- finally:
- asyncio.set_event_loop_policy(original_policy)
-
-
-def get_loop():
- """Create or return the default fsspec IO loop
-
- The loop will be running on a separate thread.
- """
- if loop[0] is None:
- with get_lock():
- # repeat the check just in case the loop got filled between the
- # previous two calls from another thread
- if loop[0] is None:
- with _selector_policy():
- loop[0] = asyncio.new_event_loop()
- th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
- th.daemon = True
- th.start()
- iothread[0] = th
- return loop[0]
-
-
-if TYPE_CHECKING:
- import resource
-
- ResourceError = resource.error
-else:
- try:
- import resource
- except ImportError:
- resource = None
- ResourceError = OSError
- else:
- ResourceError = getattr(resource, "error", OSError)
-
-_DEFAULT_BATCH_SIZE = 128
-_NOFILES_DEFAULT_BATCH_SIZE = 1280
-
-
-def _get_batch_size(nofiles=False):
- from fsspec.config import conf
-
- if nofiles:
- if "nofiles_gather_batch_size" in conf:
- return conf["nofiles_gather_batch_size"]
- else:
- if "gather_batch_size" in conf:
- return conf["gather_batch_size"]
- if nofiles:
- return _NOFILES_DEFAULT_BATCH_SIZE
- if resource is None:
- return _DEFAULT_BATCH_SIZE
-
- try:
- soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
- except (ImportError, ValueError, ResourceError):
- return _DEFAULT_BATCH_SIZE
-
- if soft_limit == resource.RLIM_INFINITY:
- return -1
- else:
- return soft_limit // 8
-
-
-def running_async() -> bool:
- """Being executed by an event loop?"""
- try:
- asyncio.get_running_loop()
- return True
- except RuntimeError:
- return False
-
-
-async def _run_coros_in_chunks(
- coros,
- batch_size=None,
- callback=_DEFAULT_CALLBACK,
- timeout=None,
- return_exceptions=False,
- nofiles=False,
-):
- """Run the given coroutines in chunks.
-
- Parameters
- ----------
- coros: list of coroutines to run
- batch_size: int or None
- Number of coroutines to submit/wait on simultaneously.
- If -1, then it will not be any throttling. If
- None, it will be inferred from _get_batch_size()
- callback: fsspec.callbacks.Callback instance
- Gets a relative_update when each coroutine completes
- timeout: number or None
- If given, each coroutine times out after this time. Note that, since
- there are multiple batches, the total run time of this function will in
- general be longer
- return_exceptions: bool
- Same meaning as in asyncio.gather
- nofiles: bool
- If inferring the batch_size, does this operation involve local files?
- If yes, you normally expect smaller batches.
- """
-
- if batch_size is None:
- batch_size = _get_batch_size(nofiles=nofiles)
-
- if batch_size == -1:
- batch_size = len(coros)
-
- assert batch_size > 0
- results = []
- for start in range(0, len(coros), batch_size):
- chunk = [
- asyncio.Task(asyncio.wait_for(c, timeout=timeout))
- for c in coros[start : start + batch_size]
- ]
- if callback is not _DEFAULT_CALLBACK:
- [
- t.add_done_callback(lambda *_, **__: callback.relative_update(1))
- for t in chunk
- ]
- results.extend(
- await asyncio.gather(*chunk, return_exceptions=return_exceptions),
- )
- return results
-
-
-# these methods should be implemented as async by any async-able backend
-async_methods = [
- "_ls",
- "_cat_file",
- "_get_file",
- "_put_file",
- "_rm_file",
- "_cp_file",
- "_pipe_file",
- "_expand_path",
- "_info",
- "_isfile",
- "_isdir",
- "_exists",
- "_walk",
- "_glob",
- "_find",
- "_du",
- "_size",
- "_mkdir",
- "_makedirs",
-]
-
-
-class AsyncFileSystem(AbstractFileSystem):
- """Async file operations, default implementations
-
- Passes bulk operations to asyncio.gather for concurrent operation.
-
- Implementations that have concurrent batch operations and/or async methods
- should inherit from this class instead of AbstractFileSystem. Docstrings are
- copied from the un-underscored method in AbstractFileSystem, if not given.
- """
-
- # note that methods do not have docstring here; they will be copied
- # for _* methods and inferred for overridden methods.
-
- async_impl = True
- mirror_sync_methods = True
- disable_throttling = False
-
- def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
- self.asynchronous = asynchronous
- self._pid = os.getpid()
- if not asynchronous:
- self._loop = loop or get_loop()
- else:
- self._loop = None
- self.batch_size = batch_size
- super().__init__(*args, **kwargs)
-
- @property
- def loop(self):
- if self._pid != os.getpid():
- raise RuntimeError("This class is not fork-safe")
- return self._loop
-
- async def _rm_file(self, path, **kwargs):
- raise NotImplementedError
-
- async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
- # TODO: implement on_error
- batch_size = batch_size or self.batch_size
- path = await self._expand_path(path, recursive=recursive)
- return await _run_coros_in_chunks(
- [self._rm_file(p, **kwargs) for p in reversed(path)],
- batch_size=batch_size,
- nofiles=True,
- )
-
- async def _cp_file(self, path1, path2, **kwargs):
- raise NotImplementedError
-
- async def _copy(
- self,
- path1,
- path2,
- recursive=False,
- on_error=None,
- maxdepth=None,
- batch_size=None,
- **kwargs,
- ):
- if on_error is None and recursive:
- on_error = "ignore"
- elif on_error is None:
- on_error = "raise"
-
- if isinstance(path1, list) and isinstance(path2, list):
- # No need to expand paths when both source and destination
- # are provided as lists
- paths1 = path1
- paths2 = path2
- else:
- source_is_str = isinstance(path1, str)
- paths1 = await self._expand_path(
- path1, maxdepth=maxdepth, recursive=recursive
- )
- if source_is_str and (not recursive or maxdepth is not None):
- # Non-recursive glob does not copy directories
- paths1 = [
- p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
- ]
- if not paths1:
- return
-
- source_is_file = len(paths1) == 1
- dest_is_dir = isinstance(path2, str) and (
- trailing_sep(path2) or await self._isdir(path2)
- )
-
- exists = source_is_str and (
- (has_magic(path1) and source_is_file)
- or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
- )
- paths2 = other_paths(
- paths1,
- path2,
- exists=exists,
- flatten=not source_is_str,
- )
-
- batch_size = batch_size or self.batch_size
- coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
- result = await _run_coros_in_chunks(
- coros, batch_size=batch_size, return_exceptions=True, nofiles=True
- )
-
- for ex in filter(is_exception, result):
- if on_error == "ignore" and isinstance(ex, FileNotFoundError):
- continue
- raise ex
-
- async def _pipe_file(self, path, value, **kwargs):
- raise NotImplementedError
-
- async def _pipe(self, path, value=None, batch_size=None, **kwargs):
- if isinstance(path, str):
- path = {path: value}
- batch_size = batch_size or self.batch_size
- return await _run_coros_in_chunks(
- [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
- batch_size=batch_size,
- nofiles=True,
- )
-
- async def _process_limits(self, url, start, end):
- """Helper for "Range"-based _cat_file"""
- size = None
- suff = False
- if start is not None and start < 0:
- # if start is negative and end None, end is the "suffix length"
- if end is None:
- end = -start
- start = ""
- suff = True
- else:
- size = size or (await self._info(url))["size"]
- start = size + start
- elif start is None:
- start = 0
- if not suff:
- if end is not None and end < 0:
- if start is not None:
- size = size or (await self._info(url))["size"]
- end = size + end
- elif end is None:
- end = ""
- if isinstance(end, numbers.Integral):
- end -= 1 # bytes range is inclusive
- return "bytes=%s-%s" % (start, end)
-
- async def _cat_file(self, path, start=None, end=None, **kwargs):
- raise NotImplementedError
-
- async def _cat(
- self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
- ):
- paths = await self._expand_path(path, recursive=recursive)
- coros = [self._cat_file(path, **kwargs) for path in paths]
- batch_size = batch_size or self.batch_size
- out = await _run_coros_in_chunks(
- coros, batch_size=batch_size, nofiles=True, return_exceptions=True
- )
- if on_error == "raise":
- ex = next(filter(is_exception, out), False)
- if ex:
- raise ex
- if (
- len(paths) > 1
- or isinstance(path, list)
- or paths[0] != self._strip_protocol(path)
- ):
- return {
- k: v
- for k, v in zip(paths, out)
- if on_error != "omit" or not is_exception(v)
- }
- else:
- return out[0]
-
- async def _cat_ranges(
- self,
- paths,
- starts,
- ends,
- max_gap=None,
- batch_size=None,
- on_error="return",
- **kwargs,
- ):
- # TODO: on_error
- if max_gap is not None:
- # use utils.merge_offset_ranges
- raise NotImplementedError
- if not isinstance(paths, list):
- raise TypeError
- if not isinstance(starts, Iterable):
- starts = [starts] * len(paths)
- if not isinstance(ends, Iterable):
-            ends = [ends] * len(paths)
- if len(starts) != len(paths) or len(ends) != len(paths):
- raise ValueError
- coros = [
- self._cat_file(p, start=s, end=e, **kwargs)
- for p, s, e in zip(paths, starts, ends)
- ]
- batch_size = batch_size or self.batch_size
- return await _run_coros_in_chunks(
- coros, batch_size=batch_size, nofiles=True, return_exceptions=True
- )
-
- async def _put_file(self, lpath, rpath, **kwargs):
- raise NotImplementedError
-
- async def _put(
- self,
- lpath,
- rpath,
- recursive=False,
- callback=_DEFAULT_CALLBACK,
- batch_size=None,
- maxdepth=None,
- **kwargs,
- ):
- """Copy file(s) from local.
-
- Copies a specific file or tree of files (if recursive=True). If rpath
- ends with a "/", it will be assumed to be a directory, and target files
- will go within.
-
-        The put_file method will be called concurrently on a batch of files. The
-        batch_size option can configure the number of futures that can be executed
-        at the same time. If it is -1, then all the files will be uploaded concurrently.
-        The default can be set for this instance by passing "batch_size" in the
-        constructor, or for all instances by setting the "gather_batch_size" key
-        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
- """
- if isinstance(lpath, list) and isinstance(rpath, list):
- # No need to expand paths when both source and destination
- # are provided as lists
- rpaths = rpath
- lpaths = lpath
- else:
- source_is_str = isinstance(lpath, str)
- if source_is_str:
- lpath = make_path_posix(lpath)
- fs = LocalFileSystem()
- lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
- if source_is_str and (not recursive or maxdepth is not None):
- # Non-recursive glob does not copy directories
- lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
- if not lpaths:
- return
-
- source_is_file = len(lpaths) == 1
- dest_is_dir = isinstance(rpath, str) and (
- trailing_sep(rpath) or await self._isdir(rpath)
- )
-
- rpath = self._strip_protocol(rpath)
- exists = source_is_str and (
- (has_magic(lpath) and source_is_file)
- or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
- )
- rpaths = other_paths(
- lpaths,
- rpath,
- exists=exists,
- flatten=not source_is_str,
- )
-
- is_dir = {l: os.path.isdir(l) for l in lpaths}
- rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
- file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
-
- await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
- batch_size = batch_size or self.batch_size
-
- coros = []
- callback.set_size(len(file_pairs))
- for lfile, rfile in file_pairs:
- callback.branch(lfile, rfile, kwargs)
- coros.append(self._put_file(lfile, rfile, **kwargs))
-
- return await _run_coros_in_chunks(
- coros, batch_size=batch_size, callback=callback
- )
-
- async def _get_file(self, rpath, lpath, **kwargs):
- raise NotImplementedError
-
- async def _get(
- self,
- rpath,
- lpath,
- recursive=False,
- callback=_DEFAULT_CALLBACK,
- maxdepth=None,
- **kwargs,
- ):
- """Copy file(s) to local.
-
- Copies a specific file or tree of files (if recursive=True). If lpath
- ends with a "/", it will be assumed to be a directory, and target files
- will go within. Can submit a list of paths, which may be glob-patterns
- and will be expanded.
-
-        The get_file method will be called concurrently on a batch of files. The
-        batch_size option can configure the number of futures that can be executed
-        at the same time. If it is -1, then all the files will be downloaded concurrently.
-        The default can be set for this instance by passing "batch_size" in the
-        constructor, or for all instances by setting the "gather_batch_size" key
-        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
- """
- if isinstance(lpath, list) and isinstance(rpath, list):
- # No need to expand paths when both source and destination
- # are provided as lists
- rpaths = rpath
- lpaths = lpath
- else:
- source_is_str = isinstance(rpath, str)
- # First check for rpath trailing slash as _strip_protocol removes it.
- source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
- rpath = self._strip_protocol(rpath)
- rpaths = await self._expand_path(
- rpath, recursive=recursive, maxdepth=maxdepth
- )
- if source_is_str and (not recursive or maxdepth is not None):
- # Non-recursive glob does not copy directories
- rpaths = [
- p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
- ]
- if not rpaths:
- return
-
- lpath = make_path_posix(lpath)
- source_is_file = len(rpaths) == 1
- dest_is_dir = isinstance(lpath, str) and (
- trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
- )
-
- exists = source_is_str and (
- (has_magic(rpath) and source_is_file)
- or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
- )
- lpaths = other_paths(
- rpaths,
- lpath,
- exists=exists,
- flatten=not source_is_str,
- )
-
-        for lp in lpaths:
-            os.makedirs(os.path.dirname(lp), exist_ok=True)
- batch_size = kwargs.pop("batch_size", self.batch_size)
-
- coros = []
- callback.set_size(len(lpaths))
- for lpath, rpath in zip(lpaths, rpaths):
- callback.branch(rpath, lpath, kwargs)
- coros.append(self._get_file(rpath, lpath, **kwargs))
- return await _run_coros_in_chunks(
- coros, batch_size=batch_size, callback=callback
- )
-
- async def _isfile(self, path):
- try:
- return (await self._info(path))["type"] == "file"
- except: # noqa: E722
- return False
-
- async def _isdir(self, path):
- try:
- return (await self._info(path))["type"] == "directory"
- except OSError:
- return False
-
- async def _size(self, path):
- return (await self._info(path)).get("size", None)
-
- async def _sizes(self, paths, batch_size=None):
- batch_size = batch_size or self.batch_size
- return await _run_coros_in_chunks(
- [self._size(p) for p in paths], batch_size=batch_size
- )
-
- async def _exists(self, path):
- try:
- await self._info(path)
- return True
- except FileNotFoundError:
- return False
-
- async def _info(self, path, **kwargs):
- raise NotImplementedError
-
- async def _ls(self, path, detail=True, **kwargs):
- raise NotImplementedError
-
- async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
- if maxdepth is not None and maxdepth < 1:
- raise ValueError("maxdepth must be at least 1")
-
- path = self._strip_protocol(path)
- full_dirs = {}
- dirs = {}
- files = {}
-
- detail = kwargs.pop("detail", False)
- try:
- listing = await self._ls(path, detail=True, **kwargs)
- except (FileNotFoundError, OSError) as e:
- if on_error == "raise":
- raise
- elif callable(on_error):
- on_error(e)
- if detail:
- yield path, {}, {}
- else:
- yield path, [], []
- return
-
- for info in listing:
-            # each info name must be at least [path]/part, but here
-            # we also check for names like [path]/part/
- pathname = info["name"].rstrip("/")
- name = pathname.rsplit("/", 1)[-1]
- if info["type"] == "directory" and pathname != path:
- # do not include "self" path
- full_dirs[name] = pathname
- dirs[name] = info
- elif pathname == path:
-                # file-like with same name as given path
- files[""] = info
- else:
- files[name] = info
-
- if detail:
- yield path, dirs, files
- else:
- yield path, list(dirs), list(files)
-
- if maxdepth is not None:
- maxdepth -= 1
- if maxdepth < 1:
- return
-
- for d in dirs:
- async for _ in self._walk(
- full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
- ):
- yield _
-
- async def _glob(self, path, maxdepth=None, **kwargs):
- if maxdepth is not None and maxdepth < 1:
- raise ValueError("maxdepth must be at least 1")
-
- import re
-
- ends = path.endswith("/")
- path = self._strip_protocol(path)
- idx_star = path.find("*") if path.find("*") >= 0 else len(path)
- idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
- idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
-
- min_idx = min(idx_star, idx_qmark, idx_brace)
-
- detail = kwargs.pop("detail", False)
-
- if not has_magic(path):
- if await self._exists(path):
- if not detail:
- return [path]
- else:
- return {path: await self._info(path)}
- else:
- if not detail:
- return [] # glob of non-existent returns empty
- else:
- return {}
- elif "/" in path[:min_idx]:
- min_idx = path[:min_idx].rindex("/")
- root = path[: min_idx + 1]
- depth = path[min_idx + 1 :].count("/") + 1
- else:
- root = ""
- depth = path[min_idx + 1 :].count("/") + 1
-
- if "**" in path:
- if maxdepth is not None:
- idx_double_stars = path.find("**")
- depth_double_stars = path[idx_double_stars:].count("/") + 1
- depth = depth - depth_double_stars + maxdepth
- else:
- depth = None
-
- allpaths = await self._find(
- root, maxdepth=depth, withdirs=True, detail=True, **kwargs
- )
- # Escape characters special to python regex, leaving our supported
- # special characters in place.
- # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
- # for shell globbing details.
- pattern = (
- "^"
- + (
- path.replace("\\", r"\\")
- .replace(".", r"\.")
- .replace("+", r"\+")
- .replace("//", "/")
- .replace("(", r"\(")
- .replace(")", r"\)")
- .replace("|", r"\|")
- .replace("^", r"\^")
- .replace("$", r"\$")
- .replace("{", r"\{")
- .replace("}", r"\}")
- .rstrip("/")
- .replace("?", ".")
- )
- + "$"
- )
- pattern = re.sub("/[*]{2}", "=SLASH_DOUBLE_STARS=", pattern)
- pattern = re.sub("[*]{2}/?", "=DOUBLE_STARS=", pattern)
- pattern = re.sub("[*]", "[^/]*", pattern)
- pattern = re.sub("=SLASH_DOUBLE_STARS=", "(|/.*)", pattern)
- pattern = re.sub("=DOUBLE_STARS=", ".*", pattern)
- pattern = re.compile(pattern)
- out = {
- p: allpaths[p]
- for p in sorted(allpaths)
- if pattern.match(p.replace("//", "/").rstrip("/"))
- }
-
-        # Return directories only when the glob ends with a slash
-        # This is needed for posix glob compliance
- if ends:
- out = {k: v for k, v in out.items() if v["type"] == "directory"}
-
- if detail:
- return out
- else:
- return list(out)
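
To make the pattern construction above easier to follow, here is a reduced standalone sketch (the name `simple_glob_regex` is hypothetical; it only handles `.`, `?`, `*` and `**`, skipping the other escape rules):

```python
import re

def simple_glob_regex(path):
    """Reduced sketch of the glob-to-regex translation in _glob above."""
    pat = "^" + path.replace(".", r"\.").rstrip("/").replace("?", ".") + "$"
    pat = re.sub("/[*]{2}", "=SLASH_DOUBLE_STARS=", pat)   # "/**" -> placeholder
    pat = re.sub("[*]{2}/?", "=DOUBLE_STARS=", pat)        # "**"  -> placeholder
    pat = re.sub("[*]", "[^/]*", pat)                      # "*"   -> anything but "/"
    pat = pat.replace("=SLASH_DOUBLE_STARS=", "(|/.*)")    # "/**" -> optional subtree
    pat = pat.replace("=DOUBLE_STARS=", ".*")              # "**"  -> anything
    return re.compile(pat)

print(simple_glob_regex("data/*.csv").pattern)     # ^data/[^/]*\.csv$
print(simple_glob_regex("data/**/*.csv").pattern)  # ^data(|/.*)/[^/]*\.csv$
```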
-
- async def _du(self, path, total=True, maxdepth=None, **kwargs):
- sizes = {}
- # async for?
- for f in await self._find(path, maxdepth=maxdepth, **kwargs):
- info = await self._info(f)
- sizes[info["name"]] = info["size"]
- if total:
- return sum(sizes.values())
- else:
- return sizes
-
- async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
- path = self._strip_protocol(path)
- out = {}
- detail = kwargs.pop("detail", False)
-
- # Add the root directory if withdirs is requested
- # This is needed for posix glob compliance
- if withdirs and path != "" and await self._isdir(path):
- out[path] = await self._info(path)
-
- # async for?
- async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
- if withdirs:
- files.update(dirs)
- out.update({info["name"]: info for name, info in files.items()})
- if not out and (await self._isfile(path)):
- # walk works on directories, but find should also return [path]
- # when path happens to be a file
- out[path] = {}
- names = sorted(out)
- if not detail:
- return names
- else:
- return {name: out[name] for name in names}
-
- async def _expand_path(self, path, recursive=False, maxdepth=None):
- if maxdepth is not None and maxdepth < 1:
- raise ValueError("maxdepth must be at least 1")
-
- if isinstance(path, str):
- out = await self._expand_path([path], recursive, maxdepth)
- else:
- out = set()
- path = [self._strip_protocol(p) for p in path]
- for p in path: # can gather here
- if has_magic(p):
- bit = set(await self._glob(p, maxdepth=maxdepth))
- out |= bit
- if recursive:
- # glob call above expanded one depth so if maxdepth is defined
- # then decrement it in expand_path call below. If it is zero
- # after decrementing then avoid expand_path call.
- if maxdepth is not None and maxdepth <= 1:
- continue
- out |= set(
- await self._expand_path(
- list(bit),
- recursive=recursive,
- maxdepth=maxdepth - 1 if maxdepth is not None else None,
- )
- )
- continue
- elif recursive:
- rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
- out |= rec
- if p not in out and (recursive is False or (await self._exists(p))):
- # should only check once, for the root
- out.add(p)
- if not out:
- raise FileNotFoundError(path)
- return sorted(out)
-
- async def _mkdir(self, path, create_parents=True, **kwargs):
- pass # not necessary to implement, may not have directories
-
- async def _makedirs(self, path, exist_ok=False):
- pass # not necessary to implement, may not have directories
-
- async def open_async(self, path, mode="rb", **kwargs):
- if "b" not in mode or kwargs.get("compression"):
- raise ValueError
- raise NotImplementedError
-
-
-def mirror_sync_methods(obj):
-    """Populate sync and async methods for obj
-
-    For each method, create a sync version if the name refers to an async method
-    (coroutine) and there is no override in the child class; create an async
-    method for the corresponding sync method if there is no implementation.
-
-    Uses the methods specified in
-    - async_methods: the set that an implementation is expected to provide
-    - default_async_methods: those that can be derived from their sync version in
-      AbstractFileSystem
-    - AsyncFileSystem: async-specific default coroutines
-    """
- from fsspec import AbstractFileSystem
-
- for method in async_methods + dir(AsyncFileSystem):
- if not method.startswith("_"):
- continue
- smethod = method[1:]
- if private.match(method):
- isco = inspect.iscoroutinefunction(getattr(obj, method, None))
- unsync = getattr(getattr(obj, smethod, False), "__func__", None)
- is_default = unsync is getattr(AbstractFileSystem, smethod, "")
- if isco and is_default:
- mth = sync_wrapper(getattr(obj, method), obj=obj)
- setattr(obj, smethod, mth)
- if not mth.__doc__:
- mth.__doc__ = getattr(
- getattr(AbstractFileSystem, smethod, None), "__doc__", ""
- )
-
-
-class FSSpecCoroutineCancel(Exception):
- pass
-
-
-def _dump_running_tasks(
- printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
-):
- import traceback
-
- tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
- if printout:
- [task.print_stack() for task in tasks]
- out = [
- {
- "locals": task._coro.cr_frame.f_locals,
- "file": task._coro.cr_frame.f_code.co_filename,
- "firstline": task._coro.cr_frame.f_code.co_firstlineno,
-            "lineno": task._coro.cr_frame.f_lineno,
- "stack": traceback.format_stack(task._coro.cr_frame),
- "task": task if with_task else None,
- }
- for task in tasks
- ]
- if cancel:
- for t in tasks:
- cbs = t._callbacks
- t.cancel()
- asyncio.futures.Future.set_exception(t, exc)
- asyncio.futures.Future.cancel(t)
- [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
- try:
- t._coro.throw(exc) # exits coro, unless explicitly handled
- except exc:
- pass
- return out
-
-
-class AbstractAsyncStreamedFile(AbstractBufferedFile):
- # no read buffering, and always auto-commit
- # TODO: readahead might still be useful here, but needs async version
-
- async def read(self, length=-1):
- """
- Return data from cache, or fetch pieces as necessary
-
- Parameters
- ----------
- length: int (-1)
- Number of bytes to read; if <0, all remaining bytes.
- """
- length = -1 if length is None else int(length)
- if self.mode != "rb":
- raise ValueError("File not in read mode")
- if length < 0:
- length = self.size - self.loc
- if self.closed:
- raise ValueError("I/O operation on closed file.")
- if length == 0:
- # don't even bother calling fetch
- return b""
- out = await self._fetch_range(self.loc, self.loc + length)
- self.loc += len(out)
- return out
-
- async def write(self, data):
- """
- Write data to buffer.
-
- Buffer only sent on flush() or if buffer is greater than
- or equal to blocksize.
-
- Parameters
- ----------
- data: bytes
- Set of bytes to be written.
- """
- if self.mode not in {"wb", "ab"}:
- raise ValueError("File not in write mode")
- if self.closed:
- raise ValueError("I/O operation on closed file.")
- if self.forced:
- raise ValueError("This file has been force-flushed, can only close")
- out = self.buffer.write(data)
- self.loc += out
- if self.buffer.tell() >= self.blocksize:
- await self.flush()
- return out
-
- async def close(self):
- """Close file
-
- Finalizes writes, discards cache
- """
- if getattr(self, "_unclosable", False):
- return
- if self.closed:
- return
- if self.mode == "rb":
- self.cache = None
- else:
- if not self.forced:
- await self.flush(force=True)
-
- if self.fs is not None:
- self.fs.invalidate_cache(self.path)
- self.fs.invalidate_cache(self.fs._parent(self.path))
-
- self.closed = True
-
- async def flush(self, force=False):
- if self.closed:
- raise ValueError("Flush on closed file")
- if force and self.forced:
- raise ValueError("Force flush cannot be called more than once")
- if force:
- self.forced = True
-
- if self.mode not in {"wb", "ab"}:
- # no-op to flush on read-mode
- return
-
- if not force and self.buffer.tell() < self.blocksize:
- # Defer write on small block
- return
-
- if self.offset is None:
- # Initialize a multipart upload
- self.offset = 0
- try:
- await self._initiate_upload()
- except: # noqa: E722
- self.closed = True
- raise
-
- if await self._upload_chunk(final=force) is not False:
- self.offset += self.buffer.seek(0, 2)
- self.buffer = io.BytesIO()
-
- async def __aenter__(self):
- return self
-
- async def __aexit__(self, exc_type, exc_val, exc_tb):
- await self.close()
-
- async def _fetch_range(self, start, end):
- raise NotImplementedError
-
- async def _initiate_upload(self):
- pass
-
- async def _upload_chunk(self, final=False):
- raise NotImplementedError
diff --git a/spaces/johnberg/CLIPInverter/models/stylegan2/model_remapper.py b/spaces/johnberg/CLIPInverter/models/stylegan2/model_remapper.py
deleted file mode 100644
index 42a0c0e80cbbed97ef962a7bafefa4c193aea6e8..0000000000000000000000000000000000000000
--- a/spaces/johnberg/CLIPInverter/models/stylegan2/model_remapper.py
+++ /dev/null
@@ -1,694 +0,0 @@
-import math
-import random
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from models.stylegan2.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
-
-
-class PixelNorm(nn.Module):
- def __init__(self):
- super().__init__()
-
- def forward(self, input):
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
- k = torch.tensor(k, dtype=torch.float32)
-
- if k.ndim == 1:
- k = k[None, :] * k[:, None]
-
- k /= k.sum()
-
- return k
-
-
-class Upsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel) * (factor ** 2)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
- return out
-
-
-class Downsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
- return out
-
-
-class Blur(nn.Module):
- def __init__(self, kernel, pad, upsample_factor=1):
- super().__init__()
-
- kernel = make_kernel(kernel)
-
- if upsample_factor > 1:
- kernel = kernel * (upsample_factor ** 2)
-
- self.register_buffer('kernel', kernel)
-
- self.pad = pad
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, pad=self.pad)
-
- return out
-
-
-class EqualConv2d(nn.Module):
- def __init__(
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
- ):
- super().__init__()
-
- self.weight = nn.Parameter(
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
- )
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
- self.stride = stride
- self.padding = padding
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_channel))
-
- else:
- self.bias = None
-
- def forward(self, input):
- out = F.conv2d(
- input,
- self.weight * self.scale,
- bias=self.bias,
- stride=self.stride,
- padding=self.padding,
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
- f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
- )
-
-
-class EqualLinear(nn.Module):
- def __init__(
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
- ):
- super().__init__()
-
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
- else:
- self.bias = None
-
- self.activation = activation
-
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
- self.lr_mul = lr_mul
-
- def forward(self, input):
- if self.activation:
- out = F.linear(input, self.weight * self.scale)
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
- else:
- out = F.linear(
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
- )
-
-
-class ScaledLeakyReLU(nn.Module):
- def __init__(self, negative_slope=0.2):
- super().__init__()
-
- self.negative_slope = negative_slope
-
- def forward(self, input):
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
-
- return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- demodulate=True,
- upsample=False,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- ):
- super().__init__()
-
- self.eps = 1e-8
- self.kernel_size = kernel_size
- self.in_channel = in_channel
- self.out_channel = out_channel
- self.upsample = upsample
- self.downsample = downsample
-
- if upsample:
- factor = 2
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2 + 1
-
- self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
- fan_in = in_channel * kernel_size ** 2
- self.scale = 1 / math.sqrt(fan_in)
- self.padding = kernel_size // 2
-
- self.weight = nn.Parameter(
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
- )
-
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-
- self.demodulate = demodulate
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
- f'upsample={self.upsample}, downsample={self.downsample})'
- )
-
- def forward(self, input, style):
- batch, in_channel, height, width = input.shape
-
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
- weight = self.scale * self.weight * style
-
- if self.demodulate:
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
- weight = weight.view(
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
-
- if self.upsample:
- input = input.view(1, batch * in_channel, height, width)
- weight = weight.view(
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
- weight = weight.transpose(1, 2).reshape(
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
- )
- out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
- out = self.blur(out)
-
- elif self.downsample:
- input = self.blur(input)
- _, _, height, width = input.shape
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- else:
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=self.padding, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- return out
-
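
As a rough usage sketch (hypothetical sizes, and assuming this module and its custom `op` kernels import cleanly), the modulated convolution takes a feature map plus one style vector per sample and returns a feature map with `out_channel` channels:

```python
import torch

# Hypothetical sizes: batch=2, 512 -> 256 channels, 3x3 kernel, 512-d style vectors.
conv = ModulatedConv2d(in_channel=512, out_channel=256, kernel_size=3, style_dim=512)
x = torch.randn(2, 512, 16, 16)   # per-sample feature maps
s = torch.randn(2, 512)           # per-sample style vectors
y = conv(x, s)
print(y.shape)                    # torch.Size([2, 256, 16, 16])
```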
-
-class NoiseInjection(nn.Module):
- def __init__(self):
- super().__init__()
-
- self.weight = nn.Parameter(torch.zeros(1))
-
- def forward(self, image, noise=None):
- if noise is None:
- batch, _, height, width = image.shape
- noise = image.new_empty(batch, 1, height, width).normal_()
-
- return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
- def __init__(self, channel, size=4):
- super().__init__()
-
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
-
- def forward(self, input):
- batch = input.shape[0]
- out = self.input.repeat(batch, 1, 1, 1)
-
- return out
-
-
-class StyledConv(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=False,
- blur_kernel=[1, 3, 3, 1],
- demodulate=True,
- ):
- super().__init__()
-
- self.conv = ModulatedConv2d(
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=upsample,
- blur_kernel=blur_kernel,
- demodulate=demodulate,
- )
-
- self.noise = NoiseInjection()
- # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
- # self.activate = ScaledLeakyReLU(0.2)
- self.activate = FusedLeakyReLU(out_channel)
-
- def forward(self, input, style, noise=None):
- out = self.conv(input, style)
- out = self.noise(out, noise=noise)
- # out = out + self.bias
- out = self.activate(out)
-
- return out
-
-
-class ToRGB(nn.Module):
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- if upsample:
- self.upsample = Upsample(blur_kernel)
-
- self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
- def forward(self, input, style, skip=None):
- out = self.conv(input, style)
- out = out + self.bias
-
- if skip is not None:
- skip = self.upsample(skip)
-
- out = out + skip
-
- return out
-
-# Generator with the remapper layers
-class Generator(nn.Module):
- def __init__(
- self,
- size,
- style_dim,
- n_mlp,
- channel_multiplier=2,
- blur_kernel=[1, 3, 3, 1],
- lr_mlp=0.01,
- ):
- super().__init__()
-
- self.size = size
-
- self.style_dim = style_dim
-
- layers = [PixelNorm()]
-
- for i in range(n_mlp):
- layers.append(
- EqualLinear(
- style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
- )
- )
-
- self.style = nn.Sequential(*layers)
-
- self.channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- self.input = ConstantInput(self.channels[4])
- self.conv1 = StyledConv(
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
- )
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
- self.log_size = int(math.log(size, 2))
- self.num_layers = (self.log_size - 2) * 2 + 1
-
- self.convs = nn.ModuleList()
- self.upsamples = nn.ModuleList()
- self.to_rgbs = nn.ModuleList()
- self.noises = nn.Module()
-
- in_channel = self.channels[4]
-
- for layer_idx in range(self.num_layers):
- res = (layer_idx + 5) // 2
- shape = [1, 1, 2 ** res, 2 ** res]
- self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
-
- for i in range(3, self.log_size + 1):
- out_channel = self.channels[2 ** i]
-
- self.convs.append(
- StyledConv(
- in_channel,
- out_channel,
- 3,
- style_dim,
- upsample=True,
- blur_kernel=blur_kernel,
- )
- )
-
- self.convs.append(
- StyledConv(
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
- )
- )
-
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
-
- in_channel = out_channel
-
- self.n_latent = self.log_size * 2 - 2
-
- # CLIPREMAPPER
- cond_size = style_dim
- self.affine_mlps = nn.ModuleList()
- self.lambdas = nn.ParameterList()
-        for i in range(1, 9):
-            self.lambdas.append(nn.Parameter(torch.tensor(0.05), requires_grad=True))
-            self.lambdas.append(nn.Parameter(torch.tensor(0.05), requires_grad=True))
-            self.affine_mlps.append(nn.Sequential(
-                nn.Linear(cond_size, cond_size), nn.ReLU(), nn.Linear(cond_size, style_dim),
-                nn.LayerNorm(style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim)))
-            self.affine_mlps.append(nn.Sequential(
-                nn.Linear(cond_size, cond_size), nn.ReLU(), nn.Linear(cond_size, style_dim),
-                nn.LayerNorm(style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim)))
-
- def make_noise(self):
- device = self.input.input.device
-
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
-
- for i in range(3, self.log_size + 1):
- for _ in range(2):
- noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
-
- return noises
-
- def mean_latent(self, n_latent):
- latent_in = torch.randn(
- n_latent, self.style_dim, device=self.input.input.device
- )
- latent = self.style(latent_in).mean(0, keepdim=True)
-
- return latent
-
- def get_latent(self, input):
- return self.style(input)
-
- def forward(
- self,
- styles,
- return_latents=False,
- return_features=False,
- inject_index=None,
- truncation=1,
- truncation_latent=None,
- input_is_latent=False,
- noise=None,
- randomize_noise=True,
- txt_embed=None
- ):
- if not input_is_latent:
- styles = [self.style(s) for s in styles]
-
- if noise is None:
- if randomize_noise:
- noise = [None] * self.num_layers
- else:
- noise = [
- getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
- ]
-
- if truncation < 1:
- style_t = []
-
- for style in styles:
- style_t.append(
- truncation_latent + truncation * (style - truncation_latent)
- )
-
- styles = style_t
-
- if len(styles) < 2:
- inject_index = self.n_latent
-
- if styles[0].ndim < 3:
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles[0]
-
- else:
- if inject_index is None:
- inject_index = random.randint(1, self.n_latent - 1)
-
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-
- latent = torch.cat([latent, latent2], 1)
-
- out = self.input(latent)
- out = self.conv1(out, latent[:, 0], noise=noise[0])
-
- skip = self.to_rgb1(out, latent[:, 1])
-
- i = 1
- for affine1, affine2, conv1, conv2, noise1, noise2, to_rgb in zip(
-            self.affine_mlps[::2], self.affine_mlps[1::2], self.convs[::2],
-            self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
- ):
- if txt_embed is None:
- out = conv1(out, latent[:, i], noise=noise1)
- out = conv2(out, latent[:, i + 1], noise=noise2)
- else:
- latent_pr_1 = affine1(txt_embed)
- lambda1 = self.lambdas[i-1] * 0.9
- inject_latent1 = latent[:, i] * (1-lambda1) + latent_pr_1 * (lambda1)
- out = conv1(out, inject_latent1, noise=noise1)
- latent_pr_2 = affine2(txt_embed)
- lambda2 = self.lambdas[i] * 0.9
- inject_latent2 = latent[:, i + 1] * (1-lambda2) + latent_pr_2 * (lambda2)
- out = conv2(out, inject_latent2, noise=noise2)
- skip = to_rgb(out, latent[:, i + 2], skip)
-
- i += 2
-
- image = skip
-
- if return_latents:
- return image, latent
- elif return_features:
- return image, out
- else:
- return image, None
-
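
The text conditioning above boils down to a per-layer convex blend between one W+ latent slice and an MLP projection of the text embedding; a minimal sketch of just that step (standalone tensors, hypothetical shapes):

```python
import torch

w_slice = torch.randn(2, 512)    # latent[:, i], one W+ row per sample
w_text = torch.randn(2, 512)     # affine_mlps[k](txt_embed)
lam = torch.tensor(0.05) * 0.9   # learned lambda, scaled as in forward()
w_mixed = w_slice * (1 - lam) + w_text * lam
```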
-
-class ConvLayer(nn.Sequential):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- bias=True,
- activate=True,
- ):
- layers = []
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
- stride = 2
- self.padding = 0
-
- else:
- stride = 1
- self.padding = kernel_size // 2
-
- layers.append(
- EqualConv2d(
- in_channel,
- out_channel,
- kernel_size,
- padding=self.padding,
- stride=stride,
- bias=bias and not activate,
- )
- )
-
- if activate:
- if bias:
- layers.append(FusedLeakyReLU(out_channel))
-
- else:
- layers.append(ScaledLeakyReLU(0.2))
-
- super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
- self.skip = ConvLayer(
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
- )
-
- def forward(self, input):
- out = self.conv1(input)
- out = self.conv2(out)
-
- skip = self.skip(input)
- out = (out + skip) / math.sqrt(2)
-
- return out
-
-
-class Discriminator(nn.Module):
- def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- convs = [ConvLayer(3, channels[size], 1)]
-
- log_size = int(math.log(size, 2))
-
- in_channel = channels[size]
-
- for i in range(log_size, 2, -1):
- out_channel = channels[2 ** (i - 1)]
-
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
- in_channel = out_channel
-
- self.convs = nn.Sequential(*convs)
-
- self.stddev_group = 4
- self.stddev_feat = 1
-
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
- self.final_linear = nn.Sequential(
- EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
- EqualLinear(channels[4], 1),
- )
-
- def forward(self, input):
- out = self.convs(input)
-
- batch, channel, height, width = out.shape
- group = min(batch, self.stddev_group)
- stddev = out.view(
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
- )
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
- stddev = stddev.repeat(group, 1, height, width)
- out = torch.cat([out, stddev], 1)
-
- out = self.final_conv(out)
-
- out = out.view(batch, -1)
- out = self.final_linear(out)
-
- return out
diff --git a/spaces/jonas/sdg-policy-tracing/app.py b/spaces/jonas/sdg-policy-tracing/app.py
deleted file mode 100644
index 67055efe1fccaf575f4d646265391d17469356da..0000000000000000000000000000000000000000
--- a/spaces/jonas/sdg-policy-tracing/app.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import analyse_site
-import main_site
-import check_site
-from multiapp import MultiApp
-import streamlit as st
-
-st.set_page_config('SDSN x GIZ Policy Tracing', layout="wide")
-
-app = MultiApp()
-
-app.add_app("SDSN X GIZ Policy Tracing", main_site.app)
-app.add_app("Analyse Policy Document", analyse_site.app)
-app.add_app("Check Coherence", check_site.app)
-
-app.run()
\ No newline at end of file
diff --git a/spaces/jone/Music_Source_Separation/bytesep/optimizers/lr_schedulers.py b/spaces/jone/Music_Source_Separation/bytesep/optimizers/lr_schedulers.py
deleted file mode 100644
index 018dafa9275bcfe03bea92c64aee25a5db996b8f..0000000000000000000000000000000000000000
--- a/spaces/jone/Music_Source_Separation/bytesep/optimizers/lr_schedulers.py
+++ /dev/null
@@ -1,20 +0,0 @@
-def get_lr_lambda(step, warm_up_steps: int, reduce_lr_steps: int):
- r"""Get lr_lambda for LambdaLR. E.g.,
-
- .. code-block: python
- lr_lambda = lambda step: get_lr_lambda(step, warm_up_steps=1000, reduce_lr_steps=10000)
-
- from torch.optim.lr_scheduler import LambdaLR
- LambdaLR(optimizer, lr_lambda)
-
-    Args:
-        warm_up_steps: int, number of steps for linear warm up
-        reduce_lr_steps: int, multiply the learning rate by 0.9 every #reduce_lr_steps steps
-
-    Returns:
-        learning rate multiplier: float
-    """
- if step <= warm_up_steps:
- return step / warm_up_steps
- else:
- return 0.9 ** (step // reduce_lr_steps)
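
A quick check of the multiplier this lambda yields (the warm-up/decay settings are just the ones from the docstring example; values follow directly from the function):

```python
print(get_lr_lambda(500, warm_up_steps=1000, reduce_lr_steps=10000))    # 0.5  (linear warm-up)
print(get_lr_lambda(1000, warm_up_steps=1000, reduce_lr_steps=10000))   # 1.0  (warm-up done)
print(get_lr_lambda(25000, warm_up_steps=1000, reduce_lr_steps=10000))  # 0.81 (0.9 ** 2)
```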
diff --git a/spaces/julien-c/nllb-translation-in-browser/style.css b/spaces/julien-c/nllb-translation-in-browser/style.css
deleted file mode 100644
index da653b5c49f795eaf78a33b5857ea064b3bcfc6e..0000000000000000000000000000000000000000
--- a/spaces/julien-c/nllb-translation-in-browser/style.css
+++ /dev/null
@@ -1,29 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 820px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-textarea {
- width: 100%;
- min-height: 100px;
-}
\ No newline at end of file
diff --git a/spaces/kangvcar/RealChar/realtime_ai_character/main.py b/spaces/kangvcar/RealChar/realtime_ai_character/main.py
deleted file mode 100644
index 0268607508e995d70049178b28fe0f9651053af7..0000000000000000000000000000000000000000
--- a/spaces/kangvcar/RealChar/realtime_ai_character/main.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import warnings
-
-from dotenv import load_dotenv
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles
-
-from realtime_ai_character.audio.speech_to_text import get_speech_to_text
-from realtime_ai_character.audio.text_to_speech import get_text_to_speech
-from realtime_ai_character.character_catalog.catalog_manager import CatalogManager
-from realtime_ai_character.restful_routes import router as restful_router
-from realtime_ai_character.utils import ConnectionManager
-from realtime_ai_character.websocket_routes import router as websocket_router
-
-load_dotenv()
-
-app = FastAPI()
-
-app.add_middleware(
- CORSMiddleware,
- # Change to domains if you deploy this to production
- allow_origins=['*'],
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
-)
-
-app.include_router(restful_router)
-app.include_router(websocket_router)
-app.mount("/static", StaticFiles(directory=os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'static')), name="static")
-
-
-# initializations
-CatalogManager.initialize(overwrite=True)
-ConnectionManager.initialize()
-get_text_to_speech()
-get_speech_to_text()
-
-# suppress deprecation warnings
-warnings.filterwarnings("ignore", module="whisper")
diff --git a/spaces/katasou/Music-discord-bot/README.md b/spaces/katasou/Music-discord-bot/README.md
deleted file mode 100644
index 597d1fe272189a44f7595afe1336ee10dc4add16..0000000000000000000000000000000000000000
--- a/spaces/katasou/Music-discord-bot/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Music Discord Bot
-emoji: 🏢
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kaveh/wsi-generator/app.py b/spaces/kaveh/wsi-generator/app.py
deleted file mode 100644
index 9e5262d55bc6487a8ad1d88baf17d914c7bf4d41..0000000000000000000000000000000000000000
--- a/spaces/kaveh/wsi-generator/app.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from diffusers import DiffusionPipeline
-import gradio as gr
-import sys
-
-generator = DiffusionPipeline.from_pretrained("kaveh/wsi_generator")
-
-def generate(n_samples=1):
- images = []
- for i in range(n_samples):
- image = generator().images[0]
- images.append(image)
- return images
-
-with gr.Blocks() as demo:
- with gr.Column(variant="panel"):
- with gr.Row(variant="compact"):
- n_s = gr.Slider(1, 4, label='Number of Samples', value=1, step=1.0, show_label=True).style(container=False)
- btn = gr.Button("Generate image").style(full_width=False)
-
- gallery = gr.Gallery(
- label="Generated images", show_label=False, elem_id="gallery").style(columns=[2], rows=[2], object_fit="contain", height="auto", preview=True)
-
- btn.click(generate, n_s, gallery)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/kazuk/youtube-whisper-02/app.py b/spaces/kazuk/youtube-whisper-02/app.py
deleted file mode 100644
index 4a61dc561a016c53ad93a3c556b0ef7bafa964eb..0000000000000000000000000000000000000000
--- a/spaces/kazuk/youtube-whisper-02/app.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import gradio as gr
-import whisper
-from pytube import YouTube
-
-def get_audio(url):
- yt = YouTube(url)
- return yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4")
-
-def get_transcript(url, model_size, lang, format):
-
- model = whisper.load_model(model_size)
-
- if lang == "None":
- lang = None
-
- result = model.transcribe(get_audio(url), fp16=False, language=lang)
-
- if format == "None":
- return result["text"]
- elif format == ".srt":
- return format_to_srt(result["segments"])
-
-def format_to_srt(segments):
- output = ""
- for i, segment in enumerate(segments):
- output += f"{i + 1}\n"
- output += f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
- output += f"{segment['text']}\n\n"
- return output
-
-def format_timestamp(t):
- hh = t//3600
- mm = (t - hh*3600)//60
- ss = t - hh*3600 - mm*60
- mi = (t - int(t))*1000
- return f"{int(hh):02d}:{int(mm):02d}:{int(ss):02d},{int(mi):03d}"
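
For example, the helper above renders seconds as SRT timestamps like so (hand-checked against the arithmetic):

```python
print(format_timestamp(3661.5))  # 01:01:01,500
print(format_timestamp(59.25))   # 00:00:59,250
```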
-
-
-langs = ["None"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))
-model_size = list(whisper._MODELS.keys())
-
-with gr.Blocks() as demo:
-
- with gr.Row():
-
- with gr.Column():
-
- with gr.Row():
- url = gr.Textbox(placeholder='Youtube video URL', label='URL')
-
- with gr.Row():
-
- model_size = gr.Dropdown(choices=model_size, value='tiny', label="Model")
- lang = gr.Dropdown(choices=langs, value="None", label="Language (Optional)")
- format = gr.Dropdown(choices=["None", ".srt"], value="None", label="Timestamps? (Optional)")
-
- with gr.Row():
- gr.Markdown("Larger models are more accurate, but slower. For 1min video, it'll take ~30s (tiny), ~1min (base), ~3min (small), ~5min (medium), etc.")
- transcribe_btn = gr.Button('Transcribe')
-
- with gr.Column():
- outputs = gr.Textbox(placeholder='Transcription of the video', label='Transcription')
-
- transcribe_btn.click(get_transcript, inputs=[url, model_size, lang, format], outputs=outputs)
-
-demo.launch(debug=True)
diff --git a/spaces/kbora/minerva-generate-docker/blocks/inpainting.py b/spaces/kbora/minerva-generate-docker/blocks/inpainting.py
deleted file mode 100644
index b68217e21e62e6fa36bcde32ef3e24abe5cf9843..0000000000000000000000000000000000000000
--- a/spaces/kbora/minerva-generate-docker/blocks/inpainting.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import gradio as gr
-from diffusers import DiffusionPipeline,StableDiffusionInpaintPipeline
-import torch
-from .utils.prompt2prompt import generate
-from .utils.device import get_device
-from .utils.schedulers import SCHEDULER_LIST, get_scheduler_list
-from .download import get_share_js, CSS, get_community_loading_icon
-
-INPAINT_MODEL_LIST = {
- "Stable Diffusion 2" : "stabilityai/stable-diffusion-2-inpainting",
- "Stable Diffusion 1" : "runwayml/stable-diffusion-inpainting",
-}
-
-class StableDiffusionInpaintGenerator:
- def __init__(self):
- self.pipe = None
-
- def load_model(self, model_path, scheduler):
- model_path = INPAINT_MODEL_LIST[model_path]
- if self.pipe is None:
- self.pipe = StableDiffusionInpaintPipeline.from_pretrained(
- model_path, torch_dtype=torch.float32
- )
- device = get_device()
- self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
- self.pipe.to(device)
- self.pipe.enable_attention_slicing()
- return self.pipe
-
- def generate_image(
- self,
- pil_image: str,
- model_path: str,
- prompt: str,
- negative_prompt: str,
- scheduler: str,
- guidance_scale: int,
- num_inference_step: int,
- height: int,
- width: int,
- seed_generator=0,
- ):
-
- image = pil_image["image"].convert("RGB").resize((width, height))
- mask_image = pil_image["mask"].convert("RGB").resize((width, height))
-
- pipe = self.load_model(model_path,scheduler)
-
- if seed_generator == 0:
- random_seed = torch.randint(0, 1000000, (1,))
- generator = torch.manual_seed(random_seed)
- else:
- generator = torch.manual_seed(seed_generator)
-
- output = pipe(
- prompt=prompt,
- image=image,
- mask_image=mask_image,
- negative_prompt=negative_prompt,
- num_images_per_prompt=1,
- num_inference_steps=num_inference_step,
- guidance_scale=guidance_scale,
- generator=generator,
- ).images
-
- return output
-
-
- def app():
- demo = gr.Blocks(css=CSS)
- with demo:
- with gr.Row():
- with gr.Column():
- stable_diffusion_inpaint_image_file = gr.Image(
- source="upload",
- tool="sketch",
- elem_id="image-upload-inpainting",
- type="pil",
- label="Upload",
-
- ).style(height=260)
-
- stable_diffusion_inpaint_prompt = gr.Textbox(
- lines=1,
-                        placeholder="Prompt, keywords that explain how you want to modify the image.",
- show_label=False,
- elem_id="prompt-text-input-inpainting",
- value=''
- )
-
- stable_diffusion_inpaint_negative_prompt = gr.Textbox(
- lines=1,
- placeholder="Negative Prompt, keywords that describe what you don't want in your image",
- show_label=False,
- elem_id = "negative-prompt-text-input-inpainting",
- value=''
- )
- # add button for generating a prompt from the prompt
- stable_diffusion_inpaint_generate = gr.Button(
- label="Generate Prompt",
- type="primary",
- align="center",
- value = "Generate Prompt"
- )
-
- # show a text box with the generated prompt
- stable_diffusion_inpaint_generated_prompt = gr.Textbox(
- lines=1,
- placeholder="Generated Prompt",
- show_label=False,
- info="Auto generated prompts for inspiration.",
-
- )
-
- stable_diffusion_inpaint_model_id = gr.Dropdown(
- choices=list(INPAINT_MODEL_LIST.keys()),
- value=list(INPAINT_MODEL_LIST.keys())[0],
- label="Inpaint Model Selection",
- elem_id="model-dropdown-inpainting",
- info="Select the model you want to use for inpainting."
- )
-
- stable_diffusion_inpaint_scheduler = gr.Dropdown(
- choices=SCHEDULER_LIST,
- value=SCHEDULER_LIST[0],
- label="Scheduler",
- elem_id="scheduler-dropdown-inpainting",
-                        info="Scheduler list for models. Different schedulers result in different outputs."
- )
-
-
- stable_diffusion_inpaint_guidance_scale = gr.Slider(
- minimum=0.1,
- maximum=15,
- step=0.1,
- value=7.5,
- label="Guidance Scale",
- elem_id = "guidance-scale-slider-inpainting",
-                        info="Guidance scale determines how much the prompt will affect the image. The higher the value, the stronger the effect."
-
- )
-
- stable_diffusion_inpaint_num_inference_step = gr.Slider(
- minimum=1,
- maximum=100,
- step=1,
- value=50,
- label="Num Inference Step",
- elem_id = "num-inference-step-slider-inpainting",
-                        info="Number of inference steps determines the quality of the image. The higher the number, the better the quality."
-
- )
-
- stable_diffusion_inpaint_size = gr.Slider(
- minimum=128,
- maximum=1280,
- step=32,
- value=512,
- label="Image Size",
- elem_id="image-size-slider-inpainting",
-                        info="Image size determines the height and width of the generated image. The higher the value, the better the quality, but the slower the computation."
-
- )
-
- stable_diffusion_inpaint_seed_generator = gr.Slider(
- label="Seed(0 for random)",
- minimum=0,
- maximum=1000000,
- value=0,
- elem_id="seed-slider-inpainting",
- info="Set the seed to a specific value to reproduce the results."
- )
-
- stable_diffusion_inpaint_predict = gr.Button(
- value="Generate image"
- )
-
- with gr.Column():
- output_image = gr.Gallery(
- label="Generated images",
- show_label=False,
- elem_id="gallery-inpainting",
- ).style(grid=(1, 2))
-
- with gr.Group(elem_id="container-advanced-btns"):
- with gr.Group(elem_id="share-btn-container"):
- community_icon_html, loading_icon_html = get_community_loading_icon("inpainting")
- community_icon = gr.HTML(community_icon_html)
- loading_icon = gr.HTML(loading_icon_html)
- share_button = gr.Button("Save artwork", elem_id="share-btn-inpainting")
-
- gr.HTML(
-                    """
-                    <h4>Inpainting Models</h4>
-                    <p>Inpainting models will take a masked image and modify the masked image with the given prompt.</p>
-                    <p>Prompt should describe how you want to modify the image. For example, if you want to modify the image to have a blue sky, you can use the prompt "sky is blue".</p>
-                    <p>Negative prompt should describe what you don't want in your image. For example, if you don't want the image to have a red sky, you can use the negative prompt "sky is red".</p>
-                    <p>Stable Diffusion 1 & 2: Default model for many tasks.</p>
-                    """
- )
- stable_diffusion_inpaint_predict.click(
- fn=StableDiffusionInpaintGenerator().generate_image,
- inputs=[
- stable_diffusion_inpaint_image_file,
- stable_diffusion_inpaint_model_id,
- stable_diffusion_inpaint_prompt,
- stable_diffusion_inpaint_negative_prompt,
- stable_diffusion_inpaint_scheduler,
- stable_diffusion_inpaint_guidance_scale,
- stable_diffusion_inpaint_num_inference_step,
- stable_diffusion_inpaint_size,
- stable_diffusion_inpaint_size,
- stable_diffusion_inpaint_seed_generator,
- ],
- outputs=[output_image],
- )
-
- stable_diffusion_inpaint_generate.click(
- fn=generate,
- inputs=[stable_diffusion_inpaint_prompt],
- outputs=[stable_diffusion_inpaint_generated_prompt],
- )
-
-
-
-
- return demo
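
Presumably this block is mounted by the surrounding app; a minimal standalone launch would look like the sketch below (assuming the package's relative imports resolve):

```python
if __name__ == "__main__":
    demo = StableDiffusionInpaintGenerator.app()  # app() takes no self, so call it on the class
    demo.launch()
```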
diff --git a/spaces/kepl/gpt/g4f/Provider/Providers/Xiaor.py b/spaces/kepl/gpt/g4f/Provider/Providers/Xiaor.py
deleted file mode 100644
index 5757f9971157116cbbfabbe5420e3b7e88fed4e7..0000000000000000000000000000000000000000
--- a/spaces/kepl/gpt/g4f/Provider/Providers/Xiaor.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import requests
-import os
-import json
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://xiaor.eu.org'
-model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k',
- 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
- headers = {
- 'Content-Type': 'application/json',
- }
- data = {
- 'model': model,
- 'temperature': 0.7,
- 'presence_penalty': 0,
- 'messages': messages,
- }
- response = requests.post(url + '/p1/v1/chat/completions',
- json=data, stream=True)
-
- if stream:
- for chunk in response.iter_content(chunk_size=None):
- chunk = chunk.decode('utf-8')
- if chunk.strip():
- message = json.loads(chunk)['choices'][0]['message']['content']
- yield message
- else:
- message = response.json()['choices'][0]['message']['content']
- yield message
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/checkpoint/__init__.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/checkpoint/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/ms1mv3_mbf.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/ms1mv3_mbf.py
deleted file mode 100644
index b8a00d6305eeda5a94788017afc1cda0d4a4cd2a..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/ms1mv3_mbf.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from easydict import EasyDict as edict
-
-# make training faster
-# our RAM is 256G
-# mount -t tmpfs -o size=140G tmpfs /train_tmp
-
-config = edict()
-config.loss = "arcface"
-config.network = "mbf"
-config.resume = False
-config.output = None
-config.embedding_size = 512
-config.sample_rate = 1.0
-config.fp16 = True
-config.momentum = 0.9
-config.weight_decay = 2e-4
-config.batch_size = 128
-config.lr = 0.1 # batch size is 512
-
-config.rec = "/train_tmp/ms1m-retinaface-t1"
-config.num_classes = 93431
-config.num_image = 5179510
-config.num_epoch = 30
-config.warmup_epoch = -1
-config.decay_epoch = [10, 20, 25]
-config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/hifigan/train.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/hifigan/train.py
deleted file mode 100644
index 7e9c2f2cc69afec4762bf3b354f5a07982f70d38..0000000000000000000000000000000000000000
--- a/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/hifigan/train.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import warnings
-warnings.simplefilter(action='ignore', category=FutureWarning)
-import itertools
-import os
-import time
-import argparse
-import json
-import torch
-import torch.nn.functional as F
-from torch.utils.tensorboard import SummaryWriter
-from torch.utils.data import DistributedSampler, DataLoader
-import torch.multiprocessing as mp
-from torch.distributed import init_process_group
-from torch.nn.parallel import DistributedDataParallel
-from vocoder.hifigan.meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
-from vocoder.hifigan.models import Generator, MultiPeriodDiscriminator, MultiScaleDiscriminator, feature_loss, generator_loss,\
- discriminator_loss
-from vocoder.hifigan.utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
-
-torch.backends.cudnn.benchmark = True
-
-
-def train(rank, a, h):
-
- a.checkpoint_path = a.models_dir.joinpath(a.run_id+'_hifigan')
- a.checkpoint_path.mkdir(exist_ok=True)
- a.training_epochs = 3100
- a.stdout_interval = 5
- a.checkpoint_interval = a.backup_every
- a.summary_interval = 5000
- a.validation_interval = 1000
- a.fine_tuning = True
-
- a.input_wavs_dir = a.syn_dir.joinpath("audio")
- a.input_mels_dir = a.syn_dir.joinpath("mels")
-
- if h.num_gpus > 1:
- init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
- world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
-
- torch.cuda.manual_seed(h.seed)
- device = torch.device('cuda:{:d}'.format(rank))
-
- generator = Generator(h).to(device)
- mpd = MultiPeriodDiscriminator().to(device)
- msd = MultiScaleDiscriminator().to(device)
-
- if rank == 0:
- print(generator)
- os.makedirs(a.checkpoint_path, exist_ok=True)
- print("checkpoints directory : ", a.checkpoint_path)
-
- if os.path.isdir(a.checkpoint_path):
- cp_g = scan_checkpoint(a.checkpoint_path, 'g_hifigan_')
- cp_do = scan_checkpoint(a.checkpoint_path, 'do_hifigan_')
-
- steps = 0
- if cp_g is None or cp_do is None:
- state_dict_do = None
- last_epoch = -1
- else:
- state_dict_g = load_checkpoint(cp_g, device)
- state_dict_do = load_checkpoint(cp_do, device)
- generator.load_state_dict(state_dict_g['generator'])
- mpd.load_state_dict(state_dict_do['mpd'])
- msd.load_state_dict(state_dict_do['msd'])
- steps = state_dict_do['steps'] + 1
- last_epoch = state_dict_do['epoch']
-
- if h.num_gpus > 1:
- generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)
- mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
- msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
-
- optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
- optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
- h.learning_rate, betas=[h.adam_b1, h.adam_b2])
-
- if state_dict_do is not None:
- optim_g.load_state_dict(state_dict_do['optim_g'])
- optim_d.load_state_dict(state_dict_do['optim_d'])
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
-
- training_filelist, validation_filelist = get_dataset_filelist(a)
-
- # print(training_filelist)
- # exit()
-
- trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels,
- h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
- shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,
- fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir)
-
- train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
-
- train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,
- sampler=train_sampler,
- batch_size=h.batch_size,
- pin_memory=True,
- drop_last=True)
-
- if rank == 0:
- validset = MelDataset(validation_filelist, h.segment_size, h.n_fft, h.num_mels,
- h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
- fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
- base_mels_path=a.input_mels_dir)
- validation_loader = DataLoader(validset, num_workers=1, shuffle=False,
- sampler=None,
- batch_size=1,
- pin_memory=True,
- drop_last=True)
-
- sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
-
- generator.train()
- mpd.train()
- msd.train()
- for epoch in range(max(0, last_epoch), a.training_epochs):
- if rank == 0:
- start = time.time()
- print("Epoch: {}".format(epoch+1))
-
- if h.num_gpus > 1:
- train_sampler.set_epoch(epoch)
-
- for i, batch in enumerate(train_loader):
- if rank == 0:
- start_b = time.time()
- x, y, _, y_mel = batch
- x = torch.autograd.Variable(x.to(device, non_blocking=True))
- y = torch.autograd.Variable(y.to(device, non_blocking=True))
- y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
- y = y.unsqueeze(1)
-
- y_g_hat = generator(x)
- y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,
- h.fmin, h.fmax_for_loss)
- if steps > h.disc_start_step:
- optim_d.zero_grad()
-
- # MPD
- y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
- loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
-
- # MSD
- y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
- loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
-
- loss_disc_all = loss_disc_s + loss_disc_f
-
- loss_disc_all.backward()
- optim_d.step()
-
- # Generator
- optim_g.zero_grad()
-
- # L1 Mel-Spectrogram Loss
- loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
-
- if steps > h.disc_start_step:
- y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
- y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
- loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
- loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
- loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
- loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
- loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
- else:
- loss_gen_all = loss_mel
-
- loss_gen_all.backward()
- optim_g.step()
-
- if rank == 0:
- # STDOUT logging
- if steps % a.stdout_interval == 0:
- with torch.no_grad():
- mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
-
- print('Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'.
- format(steps, loss_gen_all, mel_error, time.time() - start_b))
-
- # checkpointing
- if steps % a.checkpoint_interval == 0 and steps != 0:
- checkpoint_path = "{}/g_hifigan_{:08d}.pt".format(a.checkpoint_path, steps)
- save_checkpoint(checkpoint_path,
- {'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
- checkpoint_path = "{}/do_hifigan_{:08d}.pt".format(a.checkpoint_path, steps)
- save_checkpoint(checkpoint_path,
- {'mpd': (mpd.module if h.num_gpus > 1 else mpd).state_dict(),
- 'msd': (msd.module if h.num_gpus > 1 else msd).state_dict(),
- 'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(), 'steps': steps,
- 'epoch': epoch})
-
- # Tensorboard summary logging
- if steps % a.summary_interval == 0:
- sw.add_scalar("training/gen_loss_total", loss_gen_all, steps)
- sw.add_scalar("training/mel_spec_error", mel_error, steps)
-
-
-                # save intermediate hifigan model
- if steps % a.save_every == 0:
- checkpoint_path = "{}/g_hifigan.pt".format(a.checkpoint_path)
- save_checkpoint(checkpoint_path,
- {'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
- checkpoint_path = "{}/do_hifigan.pt".format(a.checkpoint_path)
- save_checkpoint(checkpoint_path,
- {'mpd': (mpd.module if h.num_gpus > 1 else mpd).state_dict(),
- 'msd': (msd.module if h.num_gpus > 1 else msd).state_dict(),
- 'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(), 'steps': steps,
- 'epoch': epoch})
-
- # Validation
- if steps % a.validation_interval == 0: # and steps != 0:
- generator.eval()
- torch.cuda.empty_cache()
- val_err_tot = 0
- with torch.no_grad():
- for j, batch in enumerate(validation_loader):
- x, y, _, y_mel = batch
- y_g_hat = generator(x.to(device))
- y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
- y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate,
- h.hop_size, h.win_size,
- h.fmin, h.fmax_for_loss)
-                            val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item()  # accumulate so the average logged below is meaningful
-
- if j <= 4:
- if steps == 0:
- sw.add_audio('gt/y_{}'.format(j), y[0], steps, h.sampling_rate)
- sw.add_figure('gt/y_spec_{}'.format(j), plot_spectrogram(x[0]), steps)
-
- sw.add_audio('generated/y_hat_{}'.format(j), y_g_hat[0], steps, h.sampling_rate)
- y_hat_spec = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels,
- h.sampling_rate, h.hop_size, h.win_size,
- h.fmin, h.fmax)
- sw.add_figure('generated/y_hat_spec_{}'.format(j),
- plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), steps)
-
- val_err = val_err_tot / (j+1)
- sw.add_scalar("validation/mel_spec_error", val_err, steps)
-
- generator.train()
-
- steps += 1
-
- scheduler_g.step()
- scheduler_d.step()
-
- if rank == 0:
- print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
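
For reference, the generator objective assembled in the loop above (once `steps > h.disc_start_step`; before that only the mel term is optimized) is the usual HiFi-GAN combination of adversarial, feature-matching and L1 mel-spectrogram losses:

$$
\mathcal{L}_G = \mathcal{L}_{adv}^{MPD} + \mathcal{L}_{adv}^{MSD} + \mathcal{L}_{FM}^{MPD} + \mathcal{L}_{FM}^{MSD} + 45\,\lVert \operatorname{mel}(y) - \operatorname{mel}(\hat{y}) \rVert_1 ,
$$

with the discriminators trained on $\mathcal{L}_D = \mathcal{L}_{disc}^{MPD} + \mathcal{L}_{disc}^{MSD}$, exactly mirroring `loss_gen_all` and `loss_disc_all` in the code.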
diff --git a/spaces/kitkatchoco/openjourn/app.py b/spaces/kitkatchoco/openjourn/app.py
deleted file mode 100644
index 2193905172b6fb6d868bff88cc8311f491ec13b3..0000000000000000000000000000000000000000
--- a/spaces/kitkatchoco/openjourn/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/prompthero/openjourney").launch()
\ No newline at end of file
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates.py
deleted file mode 100644
index 5e970be1a8f641b75e88121c617e304dbfe62da2..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates.py
+++ /dev/null
@@ -1,574 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Callable
-
-import numpy as np
-from PIL.Image import Image
-
-from gradio import components
-
-
-class TextArea(components.Textbox):
- """
- Sets: lines=7
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Callable | None = "",
- *,
- lines: int = 7,
- max_lines: int = 20,
- placeholder: str | None = None,
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- lines=lines,
- max_lines=max_lines,
- placeholder=placeholder,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- **kwargs,
- )
-
-
-class Webcam(components.Image):
- """
- Sets: source="webcam", interactive=True
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Image | np.ndarray | None = None,
- *,
- shape: tuple[int, int] | None = None,
- image_mode: str = "RGB",
- invert_colors: bool = False,
- source: str = "webcam",
- tool: str | None = None,
- type: str = "numpy",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = True,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- brush_radius: float | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- shape=shape,
- image_mode=image_mode,
- invert_colors=invert_colors,
- source=source,
- tool=tool,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- brush_radius=brush_radius,
- **kwargs,
- )
-
-
-class Sketchpad(components.Image):
- """
- Sets: image_mode="L", source="canvas", shape=(28, 28), invert_colors=True, interactive=True
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Image | np.ndarray | None = None,
- *,
- shape: tuple[int, int] = (28, 28),
- image_mode: str = "L",
- invert_colors: bool = True,
- source: str = "canvas",
- tool: str | None = None,
- type: str = "numpy",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = True,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- brush_radius: float | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- shape=shape,
- image_mode=image_mode,
- invert_colors=invert_colors,
- source=source,
- tool=tool,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- brush_radius=brush_radius,
- **kwargs,
- )
-
-
-class Paint(components.Image):
- """
- Sets: source="canvas", tool="color-sketch", interactive=True
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Image | np.ndarray | None = None,
- *,
- shape: tuple[int, int] | None = None,
- image_mode: str = "RGB",
- invert_colors: bool = False,
- source: str = "canvas",
- tool: str = "color-sketch",
- type: str = "numpy",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = True,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- brush_radius: float | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- shape=shape,
- image_mode=image_mode,
- invert_colors=invert_colors,
- source=source,
- tool=tool,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- brush_radius=brush_radius,
- **kwargs,
- )
-
-
-class ImageMask(components.Image):
- """
- Sets: source="upload", tool="sketch", interactive=True
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Image | np.ndarray | None = None,
- *,
- shape: tuple[int, int] | None = None,
- image_mode: str = "RGB",
- invert_colors: bool = False,
- source: str = "upload",
- tool: str = "sketch",
- type: str = "numpy",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = True,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- brush_radius: float | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- shape=shape,
- image_mode=image_mode,
- invert_colors=invert_colors,
- source=source,
- tool=tool,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- brush_radius=brush_radius,
- **kwargs,
- )
-
-
-class ImagePaint(components.Image):
- """
- Sets: source="upload", tool="color-sketch", interactive=True
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Image | np.ndarray | None = None,
- *,
- shape: tuple[int, int] | None = None,
- image_mode: str = "RGB",
- invert_colors: bool = False,
- source: str = "upload",
- tool: str = "color-sketch",
- type: str = "numpy",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = True,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- brush_radius: float | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- shape=shape,
- image_mode=image_mode,
- invert_colors=invert_colors,
- source=source,
- tool=tool,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- brush_radius=brush_radius,
- **kwargs,
- )
-
-
-class Pil(components.Image):
- """
- Sets: type="pil"
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Image | np.ndarray | None = None,
- *,
- shape: tuple[int, int] | None = None,
- image_mode: str = "RGB",
- invert_colors: bool = False,
- source: str = "upload",
- tool: str | None = None,
- type: str = "pil",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- brush_radius: float | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- shape=shape,
- image_mode=image_mode,
- invert_colors=invert_colors,
- source=source,
- tool=tool,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- brush_radius=brush_radius,
- **kwargs,
- )
-
-
-class PlayableVideo(components.Video):
- """
- Sets: format="mp4"
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | Callable | None = None,
- *,
- format: str | None = "mp4",
- source: str = "upload",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- include_audio: bool | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- format=format,
- source=source,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- mirror_webcam=mirror_webcam,
- include_audio=include_audio,
- **kwargs,
- )
-
-
-class Microphone(components.Audio):
- """
- Sets: source="microphone"
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | tuple[int, np.ndarray] | Callable | None = None,
- *,
- source: str = "microphone",
- type: str = "numpy",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- source=source,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- streaming=streaming,
- elem_id=elem_id,
- **kwargs,
- )
-
-
-class Files(components.File):
- """
- Sets: file_count="multiple"
- """
-
- is_template = True
-
- def __init__(
- self,
- value: str | list[str] | Callable | None = None,
- *,
- file_count: str = "multiple",
- type: str = "file",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- super().__init__(
- value=value,
- file_count=file_count,
- type=type,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- **kwargs,
- )
-
-
-class Numpy(components.Dataframe):
- """
- Sets: type="numpy"
- """
-
- is_template = True
-
- def __init__(
- self,
- value: list[list[Any]] | Callable | None = None,
- *,
- headers: list[str] | None = None,
- row_count: int | tuple[int, str] = (1, "dynamic"),
- col_count: int | tuple[int, str] | None = None,
- datatype: str | list[str] = "str",
- type: str = "numpy",
- max_rows: int | None = 20,
- max_cols: int | None = None,
- overflow_row_behaviour: str = "paginate",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- wrap: bool = False,
- **kwargs,
- ):
- super().__init__(
- value=value,
- headers=headers,
- row_count=row_count,
- col_count=col_count,
- datatype=datatype,
- type=type,
- max_rows=max_rows,
- max_cols=max_cols,
- overflow_row_behaviour=overflow_row_behaviour,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- wrap=wrap,
- **kwargs,
- )
-
-
-class Matrix(components.Dataframe):
- """
- Sets: type="array"
- """
-
- is_template = True
-
- def __init__(
- self,
- value: list[list[Any]] | Callable | None = None,
- *,
- headers: list[str] | None = None,
- row_count: int | tuple[int, str] = (1, "dynamic"),
- col_count: int | tuple[int, str] | None = None,
- datatype: str | list[str] = "str",
- type: str = "array",
- max_rows: int | None = 20,
- max_cols: int | None = None,
- overflow_row_behaviour: str = "paginate",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- wrap: bool = False,
- **kwargs,
- ):
- super().__init__(
- value=value,
- headers=headers,
- row_count=row_count,
- col_count=col_count,
- datatype=datatype,
- type=type,
- max_rows=max_rows,
- max_cols=max_cols,
- overflow_row_behaviour=overflow_row_behaviour,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- wrap=wrap,
- **kwargs,
- )
-
-
-class List(components.Dataframe):
- """
- Sets: type="array", col_count=1
- """
-
- is_template = True
-
- def __init__(
- self,
- value: list[list[Any]] | Callable | None = None,
- *,
- headers: list[str] | None = None,
- row_count: int | tuple[int, str] = (1, "dynamic"),
- col_count: int | tuple[int, str] = 1,
- datatype: str | list[str] = "str",
- type: str = "array",
- max_rows: int | None = 20,
- max_cols: int | None = None,
- overflow_row_behaviour: str = "paginate",
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- wrap: bool = False,
- **kwargs,
- ):
- super().__init__(
- value=value,
- headers=headers,
- row_count=row_count,
- col_count=col_count,
- datatype=datatype,
- type=type,
- max_rows=max_rows,
- max_cols=max_cols,
- overflow_row_behaviour=overflow_row_behaviour,
- label=label,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- wrap=wrap,
- **kwargs,
- )
-
-
-Mic = Microphone
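
Each template above is a thin subclass that only bakes a few defaults into its base component. A rough equivalence, as a sketch for the Gradio 3.x series this file belongs to (not code from the file itself):

```python
import gradio as gr

# Using the template ...
sketch_a = gr.Sketchpad()

# ... is roughly equivalent to configuring the base Image component by hand,
# with the defaults listed in the Sketchpad docstring above.
sketch_b = gr.Image(
    source="canvas",
    image_mode="L",
    shape=(28, 28),
    invert_colors=True,
    interactive=True,
)
```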
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js
deleted file mode 100644
index ce429997c6a40f1a00872003f0b0dd26a7c47dfd..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js
+++ /dev/null
@@ -1,2 +0,0 @@
-function l(e,n,a){if(e==null)return null;if(typeof e=="string")return{name:"file_data",data:e};if(Array.isArray(e)){const s=[];for(const t of e)t===null?s.push(null):s.push(l(t,n,a));return s}else e.is_file&&(a==null?e.data=n+"/file="+e.name:e.data="/proxy="+a+"/file="+e.name);return e}const r=e=>{const n=new FileReader;return n.readAsDataURL(e),new Promise(a=>{n.onloadend=()=>{a(n.result)}})};export{r as b,l as n};
-//# sourceMappingURL=ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js.map
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/idna-3.4.dist-info/LICENSE.md b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/idna-3.4.dist-info/LICENSE.md
deleted file mode 100644
index b6f87326ffb36158c33f5e6dc9d6175262050cea..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/idna-3.4.dist-info/LICENSE.md
+++ /dev/null
@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2013-2021, Kim Davies
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/idna/core.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/idna/core.py
deleted file mode 100644
index 4f3003711020eac05ef5a19ab29ba5670d89f642..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/idna/core.py
+++ /dev/null
@@ -1,400 +0,0 @@
-from . import idnadata
-import bisect
-import unicodedata
-import re
-from typing import Union, Optional
-from .intranges import intranges_contain
-
-_virama_combining_class = 9
-_alabel_prefix = b'xn--'
-_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
-
-class IDNAError(UnicodeError):
- """ Base exception for all IDNA-encoding related problems """
- pass
-
-
-class IDNABidiError(IDNAError):
- """ Exception when bidirectional requirements are not satisfied """
- pass
-
-
-class InvalidCodepoint(IDNAError):
- """ Exception when a disallowed or unallocated codepoint is used """
- pass
-
-
-class InvalidCodepointContext(IDNAError):
- """ Exception when the codepoint is not valid in the context it is used """
- pass
-
-
-def _combining_class(cp: int) -> int:
- v = unicodedata.combining(chr(cp))
- if v == 0:
- if not unicodedata.name(chr(cp)):
- raise ValueError('Unknown character in unicodedata')
- return v
-
-def _is_script(cp: str, script: str) -> bool:
- return intranges_contain(ord(cp), idnadata.scripts[script])
-
-def _punycode(s: str) -> bytes:
- return s.encode('punycode')
-
-def _unot(s: int) -> str:
- return 'U+{:04X}'.format(s)
-
-
-def valid_label_length(label: Union[bytes, str]) -> bool:
- if len(label) > 63:
- return False
- return True
-
-
-def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
- if len(label) > (254 if trailing_dot else 253):
- return False
- return True
-
-
-def check_bidi(label: str, check_ltr: bool = False) -> bool:
- # Bidi rules should only be applied if string contains RTL characters
- bidi_label = False
- for (idx, cp) in enumerate(label, 1):
- direction = unicodedata.bidirectional(cp)
- if direction == '':
- # String likely comes from a newer version of Unicode
- raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
- if direction in ['R', 'AL', 'AN']:
- bidi_label = True
- if not bidi_label and not check_ltr:
- return True
-
- # Bidi rule 1
- direction = unicodedata.bidirectional(label[0])
- if direction in ['R', 'AL']:
- rtl = True
- elif direction == 'L':
- rtl = False
- else:
- raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))
-
- valid_ending = False
- number_type = None # type: Optional[str]
- for (idx, cp) in enumerate(label, 1):
- direction = unicodedata.bidirectional(cp)
-
- if rtl:
- # Bidi rule 2
- if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
- raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
- # Bidi rule 3
- if direction in ['R', 'AL', 'EN', 'AN']:
- valid_ending = True
- elif direction != 'NSM':
- valid_ending = False
- # Bidi rule 4
- if direction in ['AN', 'EN']:
- if not number_type:
- number_type = direction
- else:
- if number_type != direction:
- raise IDNABidiError('Can not mix numeral types in a right-to-left label')
- else:
- # Bidi rule 5
- if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
- raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
- # Bidi rule 6
- if direction in ['L', 'EN']:
- valid_ending = True
- elif direction != 'NSM':
- valid_ending = False
-
- if not valid_ending:
- raise IDNABidiError('Label ends with illegal codepoint directionality')
-
- return True
-
-
-def check_initial_combiner(label: str) -> bool:
- if unicodedata.category(label[0])[0] == 'M':
- raise IDNAError('Label begins with an illegal combining character')
- return True
-
-
-def check_hyphen_ok(label: str) -> bool:
- if label[2:4] == '--':
- raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
- if label[0] == '-' or label[-1] == '-':
- raise IDNAError('Label must not start or end with a hyphen')
- return True
-
-
-def check_nfc(label: str) -> None:
- if unicodedata.normalize('NFC', label) != label:
- raise IDNAError('Label must be in Normalization Form C')
-
-
-def valid_contextj(label: str, pos: int) -> bool:
- cp_value = ord(label[pos])
-
- if cp_value == 0x200c:
-
- if pos > 0:
- if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
- return True
-
- ok = False
- for i in range(pos-1, -1, -1):
- joining_type = idnadata.joining_types.get(ord(label[i]))
- if joining_type == ord('T'):
- continue
- if joining_type in [ord('L'), ord('D')]:
- ok = True
- break
-
- if not ok:
- return False
-
- ok = False
- for i in range(pos+1, len(label)):
- joining_type = idnadata.joining_types.get(ord(label[i]))
- if joining_type == ord('T'):
- continue
- if joining_type in [ord('R'), ord('D')]:
- ok = True
- break
- return ok
-
- if cp_value == 0x200d:
-
- if pos > 0:
- if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
- return True
- return False
-
- else:
-
- return False
-
-
-def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
- cp_value = ord(label[pos])
-
- if cp_value == 0x00b7:
- if 0 < pos < len(label)-1:
- if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
- return True
- return False
-
- elif cp_value == 0x0375:
- if pos < len(label)-1 and len(label) > 1:
- return _is_script(label[pos + 1], 'Greek')
- return False
-
- elif cp_value == 0x05f3 or cp_value == 0x05f4:
- if pos > 0:
- return _is_script(label[pos - 1], 'Hebrew')
- return False
-
- elif cp_value == 0x30fb:
- for cp in label:
- if cp == '\u30fb':
- continue
- if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
- return True
- return False
-
- elif 0x660 <= cp_value <= 0x669:
- for cp in label:
- if 0x6f0 <= ord(cp) <= 0x06f9:
- return False
- return True
-
- elif 0x6f0 <= cp_value <= 0x6f9:
- for cp in label:
- if 0x660 <= ord(cp) <= 0x0669:
- return False
- return True
-
- return False
-
-
-def check_label(label: Union[str, bytes, bytearray]) -> None:
- if isinstance(label, (bytes, bytearray)):
- label = label.decode('utf-8')
- if len(label) == 0:
- raise IDNAError('Empty Label')
-
- check_nfc(label)
- check_hyphen_ok(label)
- check_initial_combiner(label)
-
- for (pos, cp) in enumerate(label):
- cp_value = ord(cp)
- if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
- continue
- elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
- try:
- if not valid_contextj(label, pos):
- raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
- _unot(cp_value), pos+1, repr(label)))
- except ValueError:
- raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
- _unot(cp_value), pos+1, repr(label)))
- elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
- if not valid_contexto(label, pos):
- raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
- else:
- raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
-
- check_bidi(label)
-
-
-def alabel(label: str) -> bytes:
- try:
- label_bytes = label.encode('ascii')
- ulabel(label_bytes)
- if not valid_label_length(label_bytes):
- raise IDNAError('Label too long')
- return label_bytes
- except UnicodeEncodeError:
- pass
-
- if not label:
- raise IDNAError('No Input')
-
- label = str(label)
- check_label(label)
- label_bytes = _punycode(label)
- label_bytes = _alabel_prefix + label_bytes
-
- if not valid_label_length(label_bytes):
- raise IDNAError('Label too long')
-
- return label_bytes
-
-
-def ulabel(label: Union[str, bytes, bytearray]) -> str:
- if not isinstance(label, (bytes, bytearray)):
- try:
- label_bytes = label.encode('ascii')
- except UnicodeEncodeError:
- check_label(label)
- return label
- else:
- label_bytes = label
-
- label_bytes = label_bytes.lower()
- if label_bytes.startswith(_alabel_prefix):
- label_bytes = label_bytes[len(_alabel_prefix):]
- if not label_bytes:
- raise IDNAError('Malformed A-label, no Punycode eligible content found')
- if label_bytes.decode('ascii')[-1] == '-':
- raise IDNAError('A-label must not end with a hyphen')
- else:
- check_label(label_bytes)
- return label_bytes.decode('ascii')
-
- try:
- label = label_bytes.decode('punycode')
- except UnicodeError:
- raise IDNAError('Invalid A-label')
- check_label(label)
- return label
-
-
-def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
- """Re-map the characters in the string according to UTS46 processing."""
- from .uts46data import uts46data
- output = ''
-
- for pos, char in enumerate(domain):
- code_point = ord(char)
- try:
- uts46row = uts46data[code_point if code_point < 256 else
- bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
- status = uts46row[1]
- replacement = None # type: Optional[str]
- if len(uts46row) == 3:
- replacement = uts46row[2] # type: ignore
- if (status == 'V' or
- (status == 'D' and not transitional) or
- (status == '3' and not std3_rules and replacement is None)):
- output += char
- elif replacement is not None and (status == 'M' or
- (status == '3' and not std3_rules) or
- (status == 'D' and transitional)):
- output += replacement
- elif status != 'I':
- raise IndexError()
- except IndexError:
- raise InvalidCodepoint(
- 'Codepoint {} not allowed at position {} in {}'.format(
- _unot(code_point), pos + 1, repr(domain)))
-
- return unicodedata.normalize('NFC', output)
-
-
-def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
- if isinstance(s, (bytes, bytearray)):
- try:
- s = s.decode('ascii')
- except UnicodeDecodeError:
- raise IDNAError('should pass a unicode string to the function rather than a byte string.')
- if uts46:
- s = uts46_remap(s, std3_rules, transitional)
- trailing_dot = False
- result = []
- if strict:
- labels = s.split('.')
- else:
- labels = _unicode_dots_re.split(s)
- if not labels or labels == ['']:
- raise IDNAError('Empty domain')
- if labels[-1] == '':
- del labels[-1]
- trailing_dot = True
- for label in labels:
- s = alabel(label)
- if s:
- result.append(s)
- else:
- raise IDNAError('Empty label')
- if trailing_dot:
- result.append(b'')
- s = b'.'.join(result)
- if not valid_string_length(s, trailing_dot):
- raise IDNAError('Domain too long')
- return s
-
-
-def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
- try:
- if isinstance(s, (bytes, bytearray)):
- s = s.decode('ascii')
- except UnicodeDecodeError:
- raise IDNAError('Invalid ASCII in A-label')
- if uts46:
- s = uts46_remap(s, std3_rules, False)
- trailing_dot = False
- result = []
- if not strict:
- labels = _unicode_dots_re.split(s)
- else:
- labels = s.split('.')
- if not labels or labels == ['']:
- raise IDNAError('Empty domain')
- if not labels[-1]:
- del labels[-1]
- trailing_dot = True
- for label in labels:
- s = ulabel(label)
- if s:
- result.append(s)
- else:
- raise IDNAError('Empty label')
- if trailing_dot:
- result.append('')
- return '.'.join(result)
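
A quick illustration of the module's entry points (a hedged sketch; the expected values follow the standard punycode example for "bücher"):

```python
from idna.core import alabel, decode, encode, ulabel

# Whole domains: Unicode -> ASCII-compatible (ACE) form and back.
print(encode("bücher.example"))          # expected: b'xn--bcher-kva.example'
print(decode(b"xn--bcher-kva.example"))  # expected: 'bücher.example'

# Per-label helpers used internally by encode()/decode().
print(alabel("bücher"))                  # expected: b'xn--bcher-kva'
print(ulabel(b"xn--bcher-kva"))          # expected: 'bücher'
```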
diff --git a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/app-txt2imglora.py b/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/app-txt2imglora.py
deleted file mode 100644
index c1ce457cab341cdab80447b01dc2223cff98fdfa..0000000000000000000000000000000000000000
--- a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/app-txt2imglora.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import asyncio
-import json
-import logging
-import traceback
-from pydantic import BaseModel
-
-from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import (
- StreamingResponse,
- JSONResponse,
- HTMLResponse,
- FileResponse,
-)
-
-from diffusers import DiffusionPipeline, LCMScheduler, AutoencoderTiny
-from compel import Compel
-import torch
-
-try:
- import intel_extension_for_pytorch as ipex
-except:
- pass
-from PIL import Image
-import numpy as np
-import gradio as gr
-import io
-import uuid
-import os
-import time
-import psutil
-
-
-MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
-TIMEOUT = float(os.environ.get("TIMEOUT", 0))
-SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
-HF_TOKEN = os.environ.get("HF_TOKEN", None)
-
-WIDTH = 512
-HEIGHT = 512
-
-# check if MPS is available (macOS on Apple Silicon M1/M2/M3 chips only)
-mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
-xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
-device = torch.device(
- "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
-)
-torch_device = device
-# change to torch.float16 to save GPU memory
-torch_dtype = torch.float
-
-print(f"TIMEOUT: {TIMEOUT}")
-print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
-print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
-print(f"device: {device}")
-
-if mps_available:
- device = torch.device("mps")
- torch_device = "cpu"
- torch_dtype = torch.float32
-
-model_id = "wavymulder/Analog-Diffusion"
-lcm_lora_id = "lcm-sd/lcm-sd1.5-lora"
-
-if SAFETY_CHECKER == "True":
- pipe = DiffusionPipeline.from_pretrained(model_id)
-else:
- pipe = DiffusionPipeline.from_pretrained(model_id, safety_checker=None)
-
-
-pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-pipe.set_progress_bar_config(disable=True)
-pipe.to(device=torch_device, dtype=torch_dtype).to(device)
-pipe.unet.to(memory_format=torch.channels_last)
-
-# enable attention slicing when the machine has less than 64GB of RAM (checked via psutil)
-if psutil.virtual_memory().total < 64 * 1024**3:
- pipe.enable_attention_slicing()
-
-if TORCH_COMPILE:
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
- pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
-
- pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
-
-# Load LCM LoRA
-pipe.load_lora_weights(
- lcm_lora_id,
- weight_name="lcm_sd_lora.safetensors",
- adapter_name="lcm",
- use_auth_token=HF_TOKEN,
-)
-
-compel_proc = Compel(
- tokenizer=pipe.tokenizer,
- text_encoder=pipe.text_encoder,
- truncate_long_prompts=False,
-)
-user_queue_map = {}
-
-
-class InputParams(BaseModel):
- seed: int = 2159232
- prompt: str
- guidance_scale: float = 0.5
- strength: float = 0.5
- steps: int = 4
- lcm_steps: int = 50
- width: int = WIDTH
- height: int = HEIGHT
-
-
-def predict(params: InputParams):
- generator = torch.manual_seed(params.seed)
- prompt_embeds = compel_proc(params.prompt)
- results = pipe(
- prompt_embeds=prompt_embeds,
- generator=generator,
- num_inference_steps=params.steps,
- guidance_scale=params.guidance_scale,
- width=params.width,
- height=params.height,
- # original_inference_steps=params.lcm_steps,
- output_type="pil",
- )
- nsfw_content_detected = (
- results.nsfw_content_detected[0]
- if "nsfw_content_detected" in results
- else False
- )
- if nsfw_content_detected:
- return None
- return results.images[0]
-
-
-app = FastAPI()
-app.add_middleware(
- CORSMiddleware,
- allow_origins=["*"],
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
-)
-
-
-@app.websocket("/ws")
-async def websocket_endpoint(websocket: WebSocket):
- await websocket.accept()
- if MAX_QUEUE_SIZE > 0 and len(user_queue_map) >= MAX_QUEUE_SIZE:
- print("Server is full")
- await websocket.send_json({"status": "error", "message": "Server is full"})
- await websocket.close()
- return
-
- try:
- uid = str(uuid.uuid4())
- print(f"New user connected: {uid}")
- await websocket.send_json(
- {"status": "success", "message": "Connected", "userId": uid}
- )
- user_queue_map[uid] = {
- "queue": asyncio.Queue(),
- }
- await websocket.send_json(
- {"status": "start", "message": "Start Streaming", "userId": uid}
- )
- await handle_websocket_data(websocket, uid)
- except WebSocketDisconnect as e:
- logging.error(f"WebSocket Error: {e}, {uid}")
- traceback.print_exc()
- finally:
- print(f"User disconnected: {uid}")
- queue_value = user_queue_map.pop(uid, None)
- queue = queue_value.get("queue", None)
- if queue:
- while not queue.empty():
- try:
- queue.get_nowait()
- except asyncio.QueueEmpty:
- continue
-
-
-@app.get("/queue_size")
-async def get_queue_size():
- queue_size = len(user_queue_map)
- return JSONResponse({"queue_size": queue_size})
-
-
-@app.get("/stream/{user_id}")
-async def stream(user_id: uuid.UUID):
- uid = str(user_id)
- try:
- user_queue = user_queue_map[uid]
- queue = user_queue["queue"]
-
- async def generate():
- while True:
- params = await queue.get()
- if params is None:
- continue
-
- image = predict(params)
- if image is None:
- continue
- frame_data = io.BytesIO()
- image.save(frame_data, format="JPEG")
- frame_data = frame_data.getvalue()
- if frame_data is not None and len(frame_data) > 0:
- yield b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + frame_data + b"\r\n"
-
- await asyncio.sleep(1.0 / 120.0)
-
- return StreamingResponse(
- generate(), media_type="multipart/x-mixed-replace;boundary=frame"
- )
- except Exception as e:
- logging.error(f"Streaming Error: {e}, {user_queue_map}")
- traceback.print_exc()
- return HTTPException(status_code=404, detail="User not found")
-
-
-async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
- uid = str(user_id)
- user_queue = user_queue_map[uid]
- queue = user_queue["queue"]
- if not queue:
- return HTTPException(status_code=404, detail="User not found")
- last_time = time.time()
- try:
- while True:
- params = await websocket.receive_json()
- params = InputParams(**params)
- while not queue.empty():
- try:
- queue.get_nowait()
- except asyncio.QueueEmpty:
- continue
- await queue.put(params)
- if TIMEOUT > 0 and time.time() - last_time > TIMEOUT:
- await websocket.send_json(
- {
- "status": "timeout",
- "message": "Your session has ended",
- "userId": uid,
- }
- )
- await websocket.close()
- return
-
- except Exception as e:
- logging.error(f"Error: {e}")
- traceback.print_exc()
-
-
-@app.get("/", response_class=HTMLResponse)
-async def root():
- return FileResponse("./static/txt2imglora.html")
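
The protocol implemented above: a client opens `/ws`, reads its `userId` from the first JSON message, keeps pushing `InputParams` JSON over the socket (only the latest item stays in the queue), and views the generated frames as a multipart JPEG stream at `/stream/{userId}`. A minimal hypothetical client sketch (the `websockets` dependency and the prompt text are assumptions, not part of the app):

```python
import asyncio
import json

import websockets  # assumed third-party client library


async def drive(base="localhost:7860"):
    async with websockets.connect(f"ws://{base}/ws") as ws:
        hello = json.loads(await ws.recv())   # {"status": "success", "userId": ...}
        await ws.recv()                       # {"status": "start", ...}
        user_id = hello["userId"]

        params = {"prompt": "analog style portrait, 35mm", "seed": 1234,
                  "guidance_scale": 0.5, "steps": 4}
        await ws.send(json.dumps(params))     # replaces whatever is currently queued

        print(f"open http://{base}/stream/{user_id} to watch the MJPEG stream")
        await asyncio.sleep(10)               # keep the socket (and the queue entry) alive


asyncio.run(drive())
```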
diff --git a/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/app-controlnet.py b/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/app-controlnet.py
deleted file mode 100644
index 5c5534314d2ad069011aba99b7a34a51ec02559d..0000000000000000000000000000000000000000
--- a/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/app-controlnet.py
+++ /dev/null
@@ -1,322 +0,0 @@
-import asyncio
-import json
-import logging
-import traceback
-from pydantic import BaseModel
-
-from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import (
- StreamingResponse,
- JSONResponse,
- HTMLResponse,
- FileResponse,
-)
-
-from diffusers import AutoencoderTiny, ControlNetModel
-from latent_consistency_controlnet import LatentConsistencyModelPipeline_controlnet
-from compel import Compel
-import torch
-
-from canny_gpu import SobelOperator
-
-# from controlnet_aux import OpenposeDetector
-# import cv2
-
-try:
- import intel_extension_for_pytorch as ipex
-except:
- pass
-from PIL import Image
-import numpy as np
-import gradio as gr
-import io
-import uuid
-import os
-import time
-import psutil
-
-
-MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
-TIMEOUT = float(os.environ.get("TIMEOUT", 0))
-SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
-WIDTH = 512
-HEIGHT = 512
-# the tiny autoencoder trades output quality for speed; set USE_TINY_AUTOENCODER = False for better quality
-USE_TINY_AUTOENCODER = True
-
-# check if MPS is available (macOS on Apple Silicon M1/M2/M3 chips only)
-mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
-xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
-device = torch.device(
- "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
-)
-
-# change to torch.float16 to save GPU memory
-torch_dtype = torch.float16
-
-print(f"TIMEOUT: {TIMEOUT}")
-print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
-print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
-print(f"device: {device}")
-
-if mps_available:
- device = torch.device("mps")
- device = "cpu"
- torch_dtype = torch.float32
-
-controlnet_canny = ControlNetModel.from_pretrained(
- "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch_dtype
-).to(device)
-
-canny_torch = SobelOperator(device=device)
-# controlnet_pose = ControlNetModel.from_pretrained(
-# "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch_dtype
-# ).to(device)
-# controlnet_depth = ControlNetModel.from_pretrained(
-# "lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch_dtype
-# ).to(device)
-
-
-# pose_processor = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
-
-if SAFETY_CHECKER == "True":
- pipe = LatentConsistencyModelPipeline_controlnet.from_pretrained(
- "SimianLuo/LCM_Dreamshaper_v7",
- controlnet=controlnet_canny,
- scheduler=None,
- )
-else:
- pipe = LatentConsistencyModelPipeline_controlnet.from_pretrained(
- "SimianLuo/LCM_Dreamshaper_v7",
- safety_checker=None,
- controlnet=controlnet_canny,
- scheduler=None,
- )
-
-if USE_TINY_AUTOENCODER:
- pipe.vae = AutoencoderTiny.from_pretrained(
- "madebyollin/taesd", torch_dtype=torch_dtype, use_safetensors=True
- )
-pipe.set_progress_bar_config(disable=True)
-pipe.to(device=device, dtype=torch_dtype).to(device)
-pipe.unet.to(memory_format=torch.channels_last)
-
-if psutil.virtual_memory().total < 64 * 1024**3:
- pipe.enable_attention_slicing()
-
-compel_proc = Compel(
- tokenizer=pipe.tokenizer,
- text_encoder=pipe.text_encoder,
- truncate_long_prompts=False,
-)
-if TORCH_COMPILE:
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
- pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
-
- pipe(
- prompt="warmup",
- image=[Image.new("RGB", (768, 768))],
- control_image=[Image.new("RGB", (768, 768))],
- )
-
-
-user_queue_map = {}
-
-
-class InputParams(BaseModel):
- seed: int = 2159232
- prompt: str
- guidance_scale: float = 8.0
- strength: float = 0.5
- steps: int = 4
- lcm_steps: int = 50
- width: int = WIDTH
- height: int = HEIGHT
- controlnet_scale: float = 0.8
- controlnet_start: float = 0.0
- controlnet_end: float = 1.0
- canny_low_threshold: float = 0.31
- canny_high_threshold: float = 0.78
- debug_canny: bool = False
-
-
-def predict(
- input_image: Image.Image, params: InputParams, prompt_embeds: torch.Tensor = None
-):
- generator = torch.manual_seed(params.seed)
-
- control_image = canny_torch(
- input_image, params.canny_low_threshold, params.canny_high_threshold
- )
- results = pipe(
- control_image=control_image,
- prompt_embeds=prompt_embeds,
- generator=generator,
- image=input_image,
- strength=params.strength,
- num_inference_steps=params.steps,
- guidance_scale=params.guidance_scale,
- width=params.width,
- height=params.height,
- lcm_origin_steps=params.lcm_steps,
- output_type="pil",
- controlnet_conditioning_scale=params.controlnet_scale,
- control_guidance_start=params.controlnet_start,
- control_guidance_end=params.controlnet_end,
- )
- nsfw_content_detected = (
- results.nsfw_content_detected[0]
- if "nsfw_content_detected" in results
- else False
- )
- if nsfw_content_detected:
- return None
- result_image = results.images[0]
- if params.debug_canny:
- # paste control_image on top of result_image
- w0, h0 = (200, 200)
- control_image = control_image.resize((w0, h0))
- w1, h1 = result_image.size
- result_image.paste(control_image, (w1 - w0, h1 - h0))
-
- return result_image
-
-
-app = FastAPI()
-app.add_middleware(
- CORSMiddleware,
- allow_origins=["*"],
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
-)
-
-
-@app.websocket("/ws")
-async def websocket_endpoint(websocket: WebSocket):
- await websocket.accept()
- if MAX_QUEUE_SIZE > 0 and len(user_queue_map) >= MAX_QUEUE_SIZE:
- print("Server is full")
- await websocket.send_json({"status": "error", "message": "Server is full"})
- await websocket.close()
- return
-
- try:
- uid = str(uuid.uuid4())
- print(f"New user connected: {uid}")
- await websocket.send_json(
- {"status": "success", "message": "Connected", "userId": uid}
- )
- user_queue_map[uid] = {"queue": asyncio.Queue()}
- await websocket.send_json(
- {"status": "start", "message": "Start Streaming", "userId": uid}
- )
- await handle_websocket_data(websocket, uid)
- except WebSocketDisconnect as e:
- logging.error(f"WebSocket Error: {e}, {uid}")
- traceback.print_exc()
- finally:
- print(f"User disconnected: {uid}")
- queue_value = user_queue_map.pop(uid, None)
- queue = queue_value.get("queue", None)
- if queue:
- while not queue.empty():
- try:
- queue.get_nowait()
- except asyncio.QueueEmpty:
- continue
-
-
-@app.get("/queue_size")
-async def get_queue_size():
- queue_size = len(user_queue_map)
- return JSONResponse({"queue_size": queue_size})
-
-
-@app.get("/stream/{user_id}")
-async def stream(user_id: uuid.UUID):
- uid = str(user_id)
- try:
- user_queue = user_queue_map[uid]
- queue = user_queue["queue"]
-
- async def generate():
- last_prompt: str = None
- prompt_embeds: torch.Tensor = None
- while True:
- data = await queue.get()
- input_image = data["image"]
- params = data["params"]
- if input_image is None:
- continue
- # avoid recalculate prompt embeds
- if last_prompt != params.prompt:
- print("new prompt")
- prompt_embeds = compel_proc(params.prompt)
- last_prompt = params.prompt
-
- image = predict(
- input_image,
- params,
- prompt_embeds,
- )
- if image is None:
- continue
- frame_data = io.BytesIO()
- image.save(frame_data, format="JPEG")
- frame_data = frame_data.getvalue()
- if frame_data is not None and len(frame_data) > 0:
- yield b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + frame_data + b"\r\n"
-
- await asyncio.sleep(1.0 / 120.0)
-
- return StreamingResponse(
- generate(), media_type="multipart/x-mixed-replace;boundary=frame"
- )
- except Exception as e:
- logging.error(f"Streaming Error: {e}, {user_queue_map}")
- traceback.print_exc()
- return HTTPException(status_code=404, detail="User not found")
-
-
-async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
- uid = str(user_id)
- user_queue = user_queue_map[uid]
- queue = user_queue["queue"]
- if not queue:
- return HTTPException(status_code=404, detail="User not found")
- last_time = time.time()
- try:
- while True:
- data = await websocket.receive_bytes()
- params = await websocket.receive_json()
- params = InputParams(**params)
- pil_image = Image.open(io.BytesIO(data))
-
- while not queue.empty():
- try:
- queue.get_nowait()
- except asyncio.QueueEmpty:
- continue
- await queue.put({"image": pil_image, "params": params})
- if TIMEOUT > 0 and time.time() - last_time > TIMEOUT:
- await websocket.send_json(
- {
- "status": "timeout",
- "message": "Your session has ended",
- "userId": uid,
- }
- )
- await websocket.close()
- return
-
- except Exception as e:
- logging.error(f"Error: {e}")
- traceback.print_exc()
-
-
-@app.get("/", response_class=HTMLResponse)
-async def root():
- return FileResponse("./static/controlnet.html")
diff --git a/spaces/lavanjv/falcon-mini/README.md b/spaces/lavanjv/falcon-mini/README.md
deleted file mode 100644
index 2297acf53f8c047a018a45c3b722219f18306626..0000000000000000000000000000000000000000
--- a/spaces/lavanjv/falcon-mini/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: falcon-mini
-emoji: 🦅💸
-colorFrom: red
-colorTo: blue
-sdk: docker
-pinned: false
-app_port: 7860
-license: apache-2.0
-duplicated_from: matthoffner/falcon-mini
----
-
-# falcon-7b-instruct
-
-## ggllm.cpp
-## ctransformers
\ No newline at end of file
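
The README only names the two runtimes; a rough ctransformers sketch for serving falcon-7b-instruct might look like the following (the GGML checkpoint name and generation arguments are assumptions, not taken from this Space):

```python
from ctransformers import AutoModelForCausalLM

# Hypothetical GGML build of falcon-7b-instruct; substitute the file this Space actually bundles.
llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/falcon-7b-instruct-GGML",
    model_type="falcon",
)
print(llm("What is a falcon?", max_new_tokens=64))
```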
diff --git a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/example_batch.py b/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/example_batch.py
deleted file mode 100644
index 669862676eee7edf4e4a0b5a4d5320e3b1442679..0000000000000000000000000000000000000000
--- a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/example_batch.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from model import ExLlama, ExLlamaCache, ExLlamaConfig
-from tokenizer import ExLlamaTokenizer
-from generator import ExLlamaGenerator
-import os, glob
-
-# Directory containing model, tokenizer, generator
-
-model_directory = "/mnt/str/models/llama-13b-4bit-128g/"
-
-# Locate files we need within that directory
-
-tokenizer_path = os.path.join(model_directory, "tokenizer.model")
-model_config_path = os.path.join(model_directory, "config.json")
-st_pattern = os.path.join(model_directory, "*.safetensors")
-model_path = glob.glob(st_pattern)
-
-# Batched prompts
-
-prompts = [
- "Once upon a time,",
- "I don't like to",
- "A turbo encabulator is a",
- "In the words of Mark Twain,"
-]
-
-# Create config, model, tokenizer and generator
-
-config = ExLlamaConfig(model_config_path) # create config from config.json
-config.model_path = model_path # supply path to model weights file
-
-model = ExLlama(config) # create ExLlama instance and load the weights
-tokenizer = ExLlamaTokenizer(tokenizer_path) # create tokenizer from tokenizer model file
-
-cache = ExLlamaCache(model, batch_size = len(prompts)) # create cache for inference
-generator = ExLlamaGenerator(model, tokenizer, cache) # create generator
-
-# Configure generator
-
-generator.disallow_tokens([tokenizer.eos_token_id])
-
-generator.settings.token_repetition_penalty_max = 1.2
-generator.settings.temperature = 0.95
-generator.settings.top_p = 0.65
-generator.settings.top_k = 100
-generator.settings.typical = 0.5
-
-# Generate, batched
-
-for line in prompts:
- print(line)
-
-output = generator.generate_simple(prompts, max_new_tokens = 200)
-
-for line in output:
- print("---")
- print(line)
diff --git a/spaces/lewiswu1209/MockingBird/pre.py b/spaces/lewiswu1209/MockingBird/pre.py
deleted file mode 100644
index 17fd0f710153bfb71b717678998a853e364c8cd8..0000000000000000000000000000000000000000
--- a/spaces/lewiswu1209/MockingBird/pre.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from synthesizer.preprocess import create_embeddings, preprocess_dataset
-from synthesizer.hparams import hparams
-from utils.argutils import print_args
-from pathlib import Path
-import argparse
-
-recognized_datasets = [
- "aidatatang_200zh",
- "magicdata",
- "aishell3",
- "data_aishell"
-]
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="Preprocesses audio files from datasets, encodes them as mel spectrograms "
- "and writes them to the disk. Audio files are also saved, to be used by the "
- "vocoder for training.",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter
- )
- parser.add_argument("datasets_root", type=Path, help=\
- "Path to the directory containing your datasets.")
- parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\
- "Path to the output directory that will contain the mel spectrograms, the audios and the "
- "embeds. Defaults to /SV2TTS/synthesizer/")
- parser.add_argument("-n", "--n_processes", type=int, default=1, help=\
- "Number of processes in parallel.")
- parser.add_argument("-s", "--skip_existing", action="store_true", help=\
- "Whether to overwrite existing files with the same name. Useful if the preprocessing was "
- "interrupted. ")
- parser.add_argument("--hparams", type=str, default="", help=\
- "Hyperparameter overrides as a comma-separated list of name-value pairs")
- parser.add_argument("--no_trim", action="store_true", help=\
- "Preprocess audio without trimming silences (not recommended).")
- parser.add_argument("--no_alignments", action="store_true", help=\
- "Use this option when dataset does not include alignments\
- (these are used to split long audio files into sub-utterances.)")
- parser.add_argument("-d", "--dataset", type=str, default="aidatatang_200zh", help=\
- "Name of the dataset to process, allowing values: magicdata, aidatatang_200zh, aishell3, data_aishell.")
- parser.add_argument("-e", "--encoder_model_fpath", type=Path, default="encoder/saved_models/pretrained.pt", help=\
- "Path your trained encoder model.")
- parser.add_argument("-ne", "--n_processes_embed", type=int, default=1, help=\
- "Number of processes in parallel.An encoder is created for each, so you may need to lower "
- "this value on GPUs with low memory. Set it to 1 if CUDA is unhappy")
- args = parser.parse_args()
-
- # Process the arguments
- if not hasattr(args, "out_dir"):
- args.out_dir = args.datasets_root.joinpath("SV2TTS", "synthesizer")
- assert args.dataset in recognized_datasets, 'is not supported, please vote for it in https://github.com/babysor/MockingBird/issues/10'
- # Create directories
- assert args.datasets_root.exists()
- args.out_dir.mkdir(exist_ok=True, parents=True)
-
- # Verify webrtcvad is available
- if not args.no_trim:
- try:
- import webrtcvad
- except:
- raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables "
- "noise removal and is recommended. Please install and try again. If installation fails, "
- "use --no_trim to disable this error message.")
- encoder_model_fpath = args.encoder_model_fpath
- del args.no_trim, args.encoder_model_fpath
-
- args.hparams = hparams.parse(args.hparams)
- n_processes_embed = args.n_processes_embed
- del args.n_processes_embed
- preprocess_dataset(**vars(args))
-
- create_embeddings(synthesizer_root=args.out_dir, n_processes=n_processes_embed, encoder_model_fpath=encoder_model_fpath)
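
For orientation, a hypothetical invocation of this preprocessing script, using only the flags defined above (the dataset path and worker counts are placeholders):

```python
import subprocess

# Preprocess aidatatang_200zh under /path/to/datasets_root, then build speaker embeddings.
subprocess.run([
    "python", "pre.py", "/path/to/datasets_root",
    "--dataset", "aidatatang_200zh",
    "--n_processes", "4",
    "--n_processes_embed", "1",
], check=True)
```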
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Imminent Monitor Cracked By Alcatrazl PORTABLE.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Imminent Monitor Cracked By Alcatrazl PORTABLE.md
deleted file mode 100644
index e5133d6350fe972b9cb26d20667f59c61a06f461..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Imminent Monitor Cracked By Alcatrazl PORTABLE.md
+++ /dev/null
@@ -1,74 +0,0 @@
-## Imminent Monitor Cracked By Alcatrazl
-
-
-
-
-
- 
-
-
-
-
-
-**Download ✔ [https://fienislile.blogspot.com/?download=2tyE7H](https://fienislile.blogspot.com/?download=2tyE7H)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Imminent Monitor Cracked By Alcatrazl: What You Need to Know
-
-
-
-Imminent Monitor is a popular remote access tool (RAT) that allows hackers to take control of other computers and spy on their activities. It has been used for various malicious purposes, such as stealing personal information, installing ransomware, and launching distributed denial-of-service (DDoS) attacks.
-
-
-
-However, a recent development has exposed the users of Imminent Monitor to a major risk. A hacker known as Alcatrazl has claimed to have cracked the encryption of the RAT and released a tool that can decrypt the configuration files of Imminent Monitor clients. This means that anyone who uses the tool can access the IP addresses, usernames, passwords, and other sensitive data of the hackers who use Imminent Monitor.
-
-
-
-This is a huge blow to the cybercriminal community, as it exposes their identities and locations to law enforcement agencies and potential victims. It also opens the door for other hackers to hijack their infected machines and use them for their own purposes. Alcatrazl has stated that his motivation for cracking Imminent Monitor was to stop its abuse and make the internet a safer place.
-
-
-
-If you are a user of Imminent Monitor, you should immediately stop using it and delete all traces of it from your system. You should also change your passwords and scan your computer for any malware or backdoors that may have been installed by Imminent Monitor or other hackers. You should also be prepared for possible legal consequences if your activities have been detected by authorities.
-
-
-
-If you are a victim of Imminent Monitor, you should also scan your computer for any malware or backdoors that may have been installed by the RAT. You should also check your online accounts and credit reports for any signs of identity theft or fraud. You should also report any suspicious activity to your local police or cybercrime unit.
-
-
-
-Imminent Monitor Cracked By Alcatrazl is a major event in the cybercrime world that has exposed thousands of hackers and their victims. It is a reminder that no tool or software is completely secure and that anyone who engages in illegal or unethical activities online can be caught and held accountable.
-
-
-
-How did Alcatrazl crack Imminent Monitor?
-
-
-
-According to Alcatrazl, he cracked Imminent Monitor by exploiting a weakness in its encryption: the RAT encrypts its configuration files with a fixed key, which makes them easy to decrypt. He said he found the key by reverse-engineering the RAT's code.
-
-
-
-He then created a tool that can automatically decrypt any Imminent Monitor configuration file and extract its contents. He shared the tool on various hacking forums and websites, along with a tutorial on how to use it. He also uploaded a sample of decrypted configuration files that contained the data of over 10,000 Imminent Monitor users.
-
-
-
-He said that he did not intend to harm anyone with his tool, but rather to expose the dangers of using Imminent Monitor and other RATs. He also said that he hoped that his tool would deter other hackers from using Imminent Monitor and encourage them to use more secure and ethical tools.
-
diff --git a/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/app.py b/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/app.py
deleted file mode 100644
index 6d0d1ceb74478fa5ad3b2960a7a60fd976de1429..0000000000000000000000000000000000000000
--- a/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/app.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Nov 21 16:21:25 2022
-
-@author: luol2
-"""
-
-import streamlit as st
-from src.nn_model import bioTag_CNN,bioTag_BERT
-from src.dic_ner import dic_ont
-from src.tagging_text import bioTag
-import os
-import json
-from pandas import DataFrame
-import nltk
-nltk.download('punkt')
-nltk.download('averaged_perceptron_tagger')
-nltk.download('wordnet')
-
-st.set_page_config(
- page_title="PhenoTagger_v1.2",
- page_icon="🎈",
- layout="wide",
- menu_items={
- 'Get Help': 'https://www.ncbi.nlm.nih.gov/research/bionlp/',
- 'About': "PhenoTagger v1.2"
- }
-)
-
-
-# def _max_width_():
-# max_width_str = f"max-width: 2400px;"
-# st.markdown(
-# f"""
-#
-# """,
-# unsafe_allow_html=True,
-# )
-
-
-# _max_width_()
-
-# c30, c31, c32 = st.columns([2.5, 1, 3])
-
-# with c30:
-# # st.image("logo.png", width=400)
-st.title("👨⚕️ PhenoTagger_v1.2 Demo")
-
-with st.expander("ℹ️ - About this app", expanded=True):
-
- st.write(
- """
-- This app is an easy-to-use interface built in Streamlit for [PhenoTagger](https://github.com/ncbi-nlp/PhenoTagger) library!
-- PhenoTagger is a hybrid method that combines dictionary and deep learning-based methods to recognize Human Phenotype Ontology (HPO) concepts in unstructured biomedical text. Please refer to [our paper](https://doi.org/10.1093/bioinformatics/btab019) for more details.
-- Contact: [NLM/NCBI BioNLP Research Group](https://www.ncbi.nlm.nih.gov/research/bionlp/)
- """
- )
-
- st.markdown("")
-
-st.markdown("")
-st.markdown("## 📌 Paste document ")
-with st.form(key="my_form"):
-
-
- ce, c1, ce, c2, c3 = st.columns([0.07, 1, 0.07, 4, 0.07])
- with c1:
- ModelType = st.radio(
- "Choose your Ontology",
- ["HPO(Default)", "UBERON"],
- #help="Bioformer is more precise, CNN is more efficient",
- )
-
- if ModelType == "HPO(Default)":
- # kw_model = KeyBERT(model=roberta)
-
- @st.cache(allow_output_mutation=True)
- def load_model():
- ontfiles={'dic_file':'./dict_hpo/noabb_lemma.dic',
- 'word_id_file':'./dict_hpo/word_id_map.json',
- 'id_word_file':'./dict_hpo/id_word_map.json'}
-
-
- vocabfiles={'labelfile':'./dict_hpo/lable.vocab',
- 'checkpoint_path':'./models_v1.2/bioformer-cased-v1.0/',
- 'lowercase':False}
- modelfile='./models_v1.2/bioformer-HPO.h5'
-
-
- biotag_dic=dic_ont(ontfiles)
-
- nn_model=bioTag_BERT(vocabfiles)
- nn_model.load_model(modelfile)
- return nn_model,biotag_dic
-
- nn_model,biotag_dic = load_model()
-
- else:
- @st.cache(allow_output_mutation=True)
- def load_model():
- ontfiles={'dic_file':'./dict_uberon/noabb_lemma.dic',
- 'word_id_file':'./dict_uberon/word_id_map.json',
- 'id_word_file':'./dict_uberon/id_word_map.json'}
-
- vocabfiles={'labelfile':'./dict_uberon/lable.vocab',
- 'checkpoint_path':'./models_v1.2/bioformer-cased-v1.0/',
- 'lowercase':False}
-
- modelfile='./models_v1.2/bioformer-UBERON.h5'
-
- biotag_dic=dic_ont(ontfiles)
-
- nn_model=bioTag_CNN(vocabfiles)
- nn_model.load_model(modelfile)
-
- return nn_model,biotag_dic
-
- nn_model,biotag_dic = load_model()
-
- para_overlap = st.checkbox(
- "Overlap concept",
- value=False,
- help="Tick this box to identify overlapping concepts",
- )
- para_abbr = st.checkbox(
-        "Abbreviations",
- value=True,
- help="Tick this box to identify abbreviations",
- )
-
- para_threshold = st.slider(
- "Threshold",
- min_value=0.5,
- max_value=1.0,
- value=0.95,
- step=0.05,
-        help="Return the predictions whose score is above the threshold.",
- )
-
-
-
-
- with c2:
-
-
- doc = st.text_area(
- "Paste your text below",
- value = 'The clinical features of Angelman syndrome (AS) comprise severe mental retardation, postnatal microcephaly, macrostomia and prognathia, absence of speech, ataxia, and a happy disposition. We report on seven patients who lack most of these features, but presented with obesity, muscular hypotonia and mild mental retardation. Based on the latter findings, the patients were initially suspected of having Prader-Willi syndrome. DNA methylation analysis of SNRPN and D15S63, however, revealed an AS pattern, ie the maternal band was faint or absent. Cytogenetic studies and microsatellite analysis demonstrated apparently normal chromosomes 15 of biparental inheritance. We conclude that these patients have an imprinting defect and a previously unrecognised form of AS. The mild phenotype may be explained by an incomplete imprinting defect or by cellular mosaicism.',
- height=400,
- )
-
-
-
-
- # MAX_WORDS = 500
- # import re
- # res = len(re.findall(r"\w+", doc))
- # if res > MAX_WORDS:
- # st.warning(
- # "⚠️ Your text contains "
- # + str(res)
- # + " words."
- # + " Only the first 500 words will be reviewed. Stay tuned as increased allowance is coming! 😊"
- # )
-
- # doc = doc[:MAX_WORDS]
-
- submit_button = st.form_submit_button(label="✨ Submit!")
-
-
-if not submit_button:
- st.stop()
-
-#st.write(para_overlap,para_abbr,para_threshold)
-para_set={
- #model_type':para_model, # cnn or bioformer
-    'onlyLongest': not para_overlap, # False: return overlapping concepts, True: only the longest
- 'abbrRecog':para_abbr,# False: don't identify abbr, True: identify abbr
- 'ML_Threshold':para_threshold,# the Threshold of deep learning model
- }
-st.markdown("")
-st.markdown("## 💡 Tagging results:")
-with st.spinner('Wait for tagging...'):
- tag_result=bioTag(doc,biotag_dic,nn_model,onlyLongest=para_set['onlyLongest'], abbrRecog=para_set['abbrRecog'],Threshold=para_set['ML_Threshold'])
-
-st.markdown('Move the mouse🖱️ over the entity to display the HPO id.', unsafe_allow_html=True)
-# print('dic...........:',biotag_dic.keys())
-# st.write('parameters:', para_overlap,para_abbr,para_threshold)
-
-html_results=''
-text_results=doc+'\n'
-entity_end=0
-hpoid_count={}
-if len(tag_result)>0:
- for ele in tag_result:
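-        # each ele is [start_offset, end_offset, ontology_id, score]; offsets and score arrive as strings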
- entity_start=int(ele[0])
- html_results+=doc[entity_end:entity_start]
- entity_end=int(ele[1])
- entity_id=ele[2]
- entity_score=ele[3]
- text_results+=ele[0]+'\t'+ele[1]+'\t'+doc[entity_start:entity_end]+'\t'+ele[2]+'\t'+format(float(ele[3]),'.2f')+'\n'
- if entity_id not in hpoid_count.keys():
- hpoid_count[entity_id]=1
- else:
- hpoid_count[entity_id]+=1
-
- html_results+=''+doc[entity_start:entity_end]+''
- html_results+=doc[entity_end:]
-
-else:
- html_results=doc
-
-st.markdown(''+html_results+'', unsafe_allow_html=True)
-
-
-#table
-data_entity=[]
-for ele in hpoid_count.keys():
- segs=ele.split(';')
- term_name=''
- for seg in segs:
- term_name+=biotag_dic.id_word[seg][0]+';'
- temp=[ele,term_name,hpoid_count[ele]] #hpoid, term name, count
- data_entity.append(temp)
-
-
-st.markdown("")
-st.markdown("")
-# st.markdown("## Table output:")
-
-# cs, c1, c2, c3, cLast = st.columns([2, 1.5, 1.5, 1.5, 2])
-
-# with c1:
-# CSVButton2 = download_button(keywords, "Data.csv", "📥 Download (.csv)")
-# with c2:
-# CSVButton2 = download_button(keywords, "Data.txt", "📥 Download (.txt)")
-# with c3:
-# CSVButton2 = download_button(keywords, "Data.json", "📥 Download (.json)")
-
-# st.header("")
-
-df = (
- DataFrame(data_entity, columns=["Ontology_id", "Term name","Frequency"])
- .sort_values(by="Frequency", ascending=False)
- .reset_index(drop=True)
-)
-
-df.index += 1
-
-c1, c2, c3 = st.columns([1, 4, 1])
-
-# format_dictionary = {
-# "Relevancy": "{:.1%}",
-# }
-
-# df = df.format(format_dictionary)
-
-with c2:
- st.table(df)
-
-c1, c2, c3 = st.columns([1, 1, 1])
-with c2:
- st.download_button('Download annotations', text_results)
-
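The loop above assumes `bioTag` returns entries shaped like `[start, end, ontology_id, score]` with string values. A minimal, self-contained sketch of how that structure maps onto the downloadable TSV, using hand-picked offsets and example HPO ids rather than real model output:

```python
# Illustration only: a fake tag_result in the [start, end, ontology_id, score] layout
# assumed by the loop above; offsets and ids are hand-picked, not produced by the model.
doc = "The patient showed postnatal microcephaly and ataxia."
tag_result = [["19", "41", "HP:0005484", "0.99"], ["46", "52", "HP:0001251", "0.98"]]

lines = [doc]
for start, end, hpo_id, score in tag_result:
    s, e = int(start), int(end)
    lines.append(f"{start}\t{end}\t{doc[s:e]}\t{hpo_id}\t{float(score):.2f}")

print("\n".join(lines))
# 19    41    postnatal microcephaly    HP:0005484    0.99
# 46    52    ataxia                    HP:0001251    0.98
```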
diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py
deleted file mode 100644
index 9ac2a03f4212faa129faed447a8f4519c0a00a8b..0000000000000000000000000000000000000000
--- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_amp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from typing import Dict, List
-
-import torch
-
-if torch.__version__ < '1.9':
- Iterable = torch._six.container_abcs.Iterable
-else:
- import collections
-
- Iterable = collections.abc.Iterable
-from torch.cuda.amp import GradScaler
-
-
-class _MultiDeviceReplicator(object):
- """
- Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
- """
-
- def __init__(self, master_tensor: torch.Tensor) -> None:
- assert master_tensor.is_cuda
- self.master = master_tensor
- self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
-
- def get(self, device) -> torch.Tensor:
- retval = self._per_device_tensors.get(device, None)
- if retval is None:
- retval = self.master.to(device=device, non_blocking=True, copy=True)
- self._per_device_tensors[device] = retval
- return retval
-
-
-class MaxClipGradScaler(GradScaler):
- def __init__(self, init_scale, max_scale: float, growth_interval=100):
- GradScaler.__init__(self, init_scale=init_scale, growth_interval=growth_interval)
- self.max_scale = max_scale
-
- def scale_clip(self):
- if self.get_scale() == self.max_scale:
- self.set_growth_factor(1)
- elif self.get_scale() < self.max_scale:
- self.set_growth_factor(2)
- elif self.get_scale() > self.max_scale:
- self._scale.fill_(self.max_scale)
- self.set_growth_factor(1)
-
- def scale(self, outputs):
- """
- Multiplies ('scales') a tensor or list of tensors by the scale factor.
-
- Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
- unmodified.
-
- Arguments:
- outputs (Tensor or iterable of Tensors): Outputs to scale.
- """
- if not self._enabled:
- return outputs
- self.scale_clip()
- # Short-circuit for the common case.
- if isinstance(outputs, torch.Tensor):
- assert outputs.is_cuda
- if self._scale is None:
- self._lazy_init_scale_growth_tracker(outputs.device)
- assert self._scale is not None
- return outputs * self._scale.to(device=outputs.device, non_blocking=True)
-
- # Invoke the more complex machinery only if we're treating multiple outputs.
- stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
-
- def apply_scale(val):
- if isinstance(val, torch.Tensor):
- assert val.is_cuda
- if len(stash) == 0:
- if self._scale is None:
- self._lazy_init_scale_growth_tracker(val.device)
- assert self._scale is not None
- stash.append(_MultiDeviceReplicator(self._scale))
- return val * stash[0].get(val.device)
- elif isinstance(val, Iterable):
- iterable = map(apply_scale, val)
- if isinstance(val, list) or isinstance(val, tuple):
- return type(val)(iterable)
- else:
- return iterable
- else:
- raise ValueError("outputs must be a Tensor or an iterable of Tensors")
-
- return apply_scale(outputs)
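For context, a minimal sketch of how a scaler like `MaxClipGradScaler` is typically driven; it follows the standard `torch.cuda.amp.GradScaler` loop. The toy model, shapes, and scale values below are made up, and a CUDA device is required because `scale()` asserts the outputs live on the GPU.

```python
import torch
import torch.nn as nn

# Toy model and data (made up); assumes the MaxClipGradScaler class above is in scope.
model = nn.Linear(512, 10).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()
scaler = MaxClipGradScaler(init_scale=128.0, max_scale=65536.0, growth_interval=100)

for _ in range(10):
    x = torch.randn(32, 512, device="cuda")
    y = torch.randint(0, 10, (32,), device="cuda")
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = criterion(model(x), y)
    scaler.scale(loss).backward()  # scale() calls scale_clip() before multiplying by the scale
    scaler.step(optimizer)         # unscales gradients; skips the step on inf/NaN
    scaler.update()                # may grow the scale; scale_clip() clamps it back to max_scale
```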
diff --git a/spaces/ljjggr/bingo/src/components/header.tsx b/spaces/ljjggr/bingo/src/components/header.tsx
deleted file mode 100644
index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000
--- a/spaces/ljjggr/bingo/src/components/header.tsx
+++ /dev/null
@@ -1,12 +0,0 @@
-import * as React from 'react'
-import { UserMenu } from './user-menu'
-
-export async function Header() {
- return (
-
-
-
-
-
- )
-}
diff --git a/spaces/ljsabc/Fujisaki/app.py b/spaces/ljsabc/Fujisaki/app.py
deleted file mode 100644
index 15957ef2416c87270454e763db744d0ddb60f620..0000000000000000000000000000000000000000
--- a/spaces/ljsabc/Fujisaki/app.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import torch
-
-from transformers import AutoTokenizer, GenerationConfig, AutoModel
-
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, revision="658202d").float()
-setattr(model, "lm_head_raw", model.lm_head)
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, revision="658202d")
-
-from peft import PeftModel
-peft_path = 'ljsabc/Fujisaki_GLM' # change it to your own
-model = PeftModel.from_pretrained(
- model,
- peft_path,
- torch_dtype=torch.float,
- )
-
-# dump a log to ensure everything works well
-print(model.peft_config)
-# We have to use full precision, as some tokens are >65535
-model.eval()
-print(model)
-
-torch.set_default_tensor_type(torch.FloatTensor)
-def evaluate(context, temperature, top_p, top_k):
- generation_config = GenerationConfig(
- temperature=temperature,
- top_p=top_p,
- top_k=top_k,
- #repetition_penalty=1.1,
- num_beams=1,
- do_sample=True,
- )
- with torch.no_grad():
- input_text = f"Context: {context}Answer: "
- ids = tokenizer.encode(input_text)
- input_ids = torch.LongTensor([ids]).to('cpu')
- out = model.generate(
- input_ids=input_ids,
- max_length=160,
- generation_config=generation_config
- )
- out_text = tokenizer.decode(out[0]).split("Answer: ")[1]
- return out_text
-
-def evaluate_stream(msg, history, temperature, top_p):
- generation_config = GenerationConfig(
- temperature=temperature,
- top_p=top_p,
- #repetition_penalty=1.1,
- num_beams=1,
- do_sample=True,
- )
-
- history.append([msg, None])
-
- context = ""
- if len(history) > 4:
- history.pop(0)
-
- for j in range(len(history)):
- history[j][0] = history[j][0].replace(" ", "")
-
- # concatenate context
- for h in history[:-1]:
- context += h[0] + "||" + h[1] + "||"
-
- context += history[-1][0]
- context = context.replace(r' ', '')
-
-    # TODO: avoid overly long token sequences.
- CUTOFF = 224
- while len(tokenizer.encode(context)) > CUTOFF:
-        # trim the context from the front, 15 characters at a time, to leave room for the answer
- context = context[15:]
-
- h = []
- print("History:", history)
- print("Context:", context)
- for response, h in model.stream_chat(tokenizer, context, h, max_length=CUTOFF, top_p=top_p, temperature=temperature):
- history[-1][1] = response
- yield history, ""
-
- #return response
-
-import gradio as gr
-
-title = """
-
-
-
-
- )
-}
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/sequence.h b/spaces/ma-xu/LIVE/thrust/thrust/sequence.h
deleted file mode 100644
index e92391f64e1fd7d4fd82e08b662b45d285b45fa8..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/sequence.h
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file sequence.h
- * \brief Fills a range with a sequence of numbers
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-
-namespace thrust
-{
-
-
-/*! \addtogroup transformations
- * \{
- */
-
-
-/*! \p sequence fills the range [first, last) with a sequence of numbers.
- *
- * For each iterator \c i in the range [first, last), this version of
- * \p sequence performs the assignment *i = (i - first).
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of Forward Iterator,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then x + y is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then T(0) is defined.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers using the \p thrust::host execution policy for parallelization:
- *
- * \code
- * #include <thrust/sequence.h>
- * #include <thrust/execution_policy.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(thrust::host, A, A + 10);
- * // A is now {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
-template<typename DerivedPolicy, typename ForwardIterator>
-__host__ __device__
-  void sequence(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last);
-
-
-/*! \p sequence fills the range [first, last) with a sequence of numbers.
- *
- * For each iterator \c i in the range [first, last), this version of
- * \p sequence performs the assignment *i = (i - first).
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- *
- * \tparam ForwardIterator is a model of Forward Iterator,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then x + y is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then T(0) is defined.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers.
- *
- * \code
- * #include <thrust/sequence.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(A, A + 10);
- * // A is now {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
-template<typename ForwardIterator>
- void sequence(ForwardIterator first,
- ForwardIterator last);
-
-
-/*! \p sequence fills the range [first, last) with a sequence of numbers.
- *
- * For each iterator \c i in the range [first, last), this version of
- * \p sequence performs the assignment *i = init + (i - first).
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of Forward Iterator,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then x + y is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then T(0) is defined.
- * \tparam T is a model of Assignable,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1 using the \p thrust::host execution
- * policy for parallelization:
- *
- * \code
- * #include <thrust/sequence.h>
- * #include <thrust/execution_policy.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(thrust::host, A, A + 10, 1);
- * // A is now {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
-template<typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-  void sequence(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- T init);
-
-
-/*! \p sequence fills the range [first, last) with a sequence of numbers.
- *
- * For each iterator \c i in the range [first, last), this version of
- * \p sequence performs the assignment *i = init + (i - first).
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers.
- *
- * \tparam ForwardIterator is a model of Forward Iterator,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then x + y is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then T(0) is defined.
- * \tparam T is a model of Assignable,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1.
- *
- * \code
- * #include <thrust/sequence.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(A, A + 10, 1);
- * // A is now {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
-template<typename ForwardIterator, typename T>
- void sequence(ForwardIterator first,
- ForwardIterator last,
- T init);
-
-
-/*! \p sequence fills the range [first, last) with a sequence of numbers.
- *
- * For each iterator \c i in the range [first, last), this version of
- * \p sequence performs the assignment *i = init + step * (i - first).
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers
- * \param step The difference between consecutive elements.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of Forward Iterator,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then x + y is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then T(0) is defined.
- * \tparam T is a model of Assignable,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1 with a step size of 3 using the \p thrust::host
- * execution policy for parallelization:
- *
- * \code
- * #include <thrust/sequence.h>
- * #include <thrust/execution_policy.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(thrust::host, A, A + 10, 1, 3);
- * // A is now {1, 4, 7, 10, 13, 16, 19, 22, 25, 28}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
-template<typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-  void sequence(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- T init,
- T step);
-
-
-/*! \p sequence fills the range [first, last) with a sequence of numbers.
- *
- * For each iterator \c i in the range [first, last), this version of
- * \p sequence performs the assignment *i = init + step * (i - first).
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers
- * \param step The difference between consecutive elements.
- *
- * \tparam ForwardIterator is a model of Forward Iterator,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then x + y is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then T(0) is defined.
- * \tparam T is a model of Assignable,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1 with a step size of 3.
- *
- * \code
- * #include <thrust/sequence.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(A, A + 10, 1, 3);
- * // A is now {1, 4, 7, 10, 13, 16, 19, 22, 25, 28}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
-template<typename ForwardIterator, typename T>
- void sequence(ForwardIterator first,
- ForwardIterator last,
- T init,
- T step);
-
-
-/*! \} // end transformations
- */
-
-
-} // end namespace thrust
-
-#include <thrust/detail/sequence.inl>
-
diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/loss_util.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/loss_util.py
deleted file mode 100644
index fd293ff9e6a22814e5aeff6ae11fb54d2e4bafff..0000000000000000000000000000000000000000
--- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/loss_util.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import functools
-import torch
-from torch.nn import functional as F
-
-
-def reduce_loss(loss, reduction):
- """Reduce loss as specified.
-
- Args:
- loss (Tensor): Elementwise loss tensor.
- reduction (str): Options are 'none', 'mean' and 'sum'.
-
- Returns:
- Tensor: Reduced loss tensor.
- """
- reduction_enum = F._Reduction.get_enum(reduction)
- # none: 0, elementwise_mean:1, sum: 2
- if reduction_enum == 0:
- return loss
- elif reduction_enum == 1:
- return loss.mean()
- else:
- return loss.sum()
-
-
-def weight_reduce_loss(loss, weight=None, reduction='mean'):
- """Apply element-wise weight and reduce loss.
-
- Args:
- loss (Tensor): Element-wise loss.
- weight (Tensor): Element-wise weights. Default: None.
- reduction (str): Same as built-in losses of PyTorch. Options are
- 'none', 'mean' and 'sum'. Default: 'mean'.
-
- Returns:
- Tensor: Loss values.
- """
- # if weight is specified, apply element-wise weight
- if weight is not None:
- assert weight.dim() == loss.dim()
- assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
- loss = loss * weight
-
- # if weight is not specified or reduction is sum, just reduce the loss
- if weight is None or reduction == 'sum':
- loss = reduce_loss(loss, reduction)
- # if reduction is mean, then compute mean over weight region
- elif reduction == 'mean':
- if weight.size(1) > 1:
- weight = weight.sum()
- else:
- weight = weight.sum() * loss.size(1)
- loss = loss.sum() / weight
-
- return loss
-
-
-def weighted_loss(loss_func):
- """Create a weighted version of a given loss function.
-
- To use this decorator, the loss function must have the signature like
- `loss_func(pred, target, **kwargs)`. The function only needs to compute
- element-wise loss without any reduction. This decorator will add weight
- and reduction arguments to the function. The decorated function will have
- the signature like `loss_func(pred, target, weight=None, reduction='mean',
- **kwargs)`.
-
- :Example:
-
- >>> import torch
- >>> @weighted_loss
- >>> def l1_loss(pred, target):
- >>> return (pred - target).abs()
-
- >>> pred = torch.Tensor([0, 2, 3])
- >>> target = torch.Tensor([1, 1, 1])
- >>> weight = torch.Tensor([1, 0, 1])
-
- >>> l1_loss(pred, target)
- tensor(1.3333)
- >>> l1_loss(pred, target, weight)
- tensor(1.5000)
- >>> l1_loss(pred, target, reduction='none')
- tensor([1., 1., 2.])
- >>> l1_loss(pred, target, weight, reduction='sum')
- tensor(3.)
- """
-
- @functools.wraps(loss_func)
- def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
- # get element-wise loss
- loss = loss_func(pred, target, **kwargs)
- loss = weight_reduce_loss(loss, weight, reduction)
- return loss
-
- return wrapper
-
-
-def get_local_weights(residual, ksize):
- """Get local weights for generating the artifact map of LDL.
-
- It is only called by the `get_refined_artifact_map` function.
-
- Args:
- residual (Tensor): Residual between predicted and ground truth images.
- ksize (Int): size of the local window.
-
- Returns:
- Tensor: weight for each pixel to be discriminated as an artifact pixel
- """
-
- pad = (ksize - 1) // 2
- residual_pad = F.pad(residual, pad=[pad, pad, pad, pad], mode='reflect')
-
- unfolded_residual = residual_pad.unfold(2, ksize, 1).unfold(3, ksize, 1)
- pixel_level_weight = torch.var(unfolded_residual, dim=(-1, -2), unbiased=True, keepdim=True).squeeze(-1).squeeze(-1)
-
- return pixel_level_weight
-
-
-def get_refined_artifact_map(img_gt, img_output, img_ema, ksize):
- """Calculate the artifact map of LDL
- (Details or Artifacts: A Locally Discriminative Learning Approach to Realistic Image Super-Resolution. In CVPR 2022)
-
- Args:
- img_gt (Tensor): ground truth images.
- img_output (Tensor): output images given by the optimizing model.
- img_ema (Tensor): output images given by the ema model.
- ksize (Int): size of the local window.
-
- Returns:
- overall_weight: weight for each pixel to be discriminated as an artifact pixel
- (calculated based on both local and global observations).
- """
-
- residual_ema = torch.sum(torch.abs(img_gt - img_ema), 1, keepdim=True)
- residual_sr = torch.sum(torch.abs(img_gt - img_output), 1, keepdim=True)
-
- patch_level_weight = torch.var(residual_sr.clone(), dim=(-1, -2, -3), keepdim=True)**(1 / 5)
- pixel_level_weight = get_local_weights(residual_sr.clone(), ksize)
- overall_weight = patch_level_weight * pixel_level_weight
-
- overall_weight[residual_sr < residual_ema] = 0
-
- return overall_weight
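As a quick shape check for the helpers above, assuming they are importable in scope, a toy call with random tensors (sizes chosen arbitrarily):

```python
import torch

# Random stand-ins for ground truth, model output, and EMA output, shaped (N, C, H, W).
img_gt = torch.rand(1, 3, 64, 64)
img_output = torch.rand(1, 3, 64, 64)
img_ema = torch.rand(1, 3, 64, 64)

weight = get_refined_artifact_map(img_gt, img_output, img_ema, ksize=7)
print(weight.shape)  # torch.Size([1, 1, 64, 64]) -- one artifact weight per pixel
```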
diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/models/realesrnet_model.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/models/realesrnet_model.py
deleted file mode 100644
index f5790918b969682a0db0e2ed9236b7046d627b90..0000000000000000000000000000000000000000
--- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/models/realesrnet_model.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import numpy as np
-import random
-import torch
-from torch.nn import functional as F
-
-from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
-from basicsr.data.transforms import paired_random_crop
-from basicsr.models.sr_model import SRModel
-from basicsr.utils import DiffJPEG, USMSharp
-from basicsr.utils.img_process_util import filter2D
-from basicsr.utils.registry import MODEL_REGISTRY
-
-
-@MODEL_REGISTRY.register(suffix='basicsr')
-class RealESRNetModel(SRModel):
- """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
-
- It is trained without GAN losses.
- It mainly performs:
- 1. randomly synthesize LQ images in GPU tensors
-    2. optimize the networks with pixel-wise losses only (no GAN training).
- """
-
- def __init__(self, opt):
- super(RealESRNetModel, self).__init__(opt)
- self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts
- self.usm_sharpener = USMSharp().cuda() # do usm sharpening
- self.queue_size = opt.get('queue_size', 180)
-
- @torch.no_grad()
- def _dequeue_and_enqueue(self):
- """It is the training pair pool for increasing the diversity in a batch.
-
- Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
- batch could not have different resize scaling factors. Therefore, we employ this training pair pool
- to increase the degradation diversity in a batch.
- """
- # initialize
- b, c, h, w = self.lq.size()
- if not hasattr(self, 'queue_lr'):
- assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
- self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
- _, c, h, w = self.gt.size()
- self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
- self.queue_ptr = 0
- if self.queue_ptr == self.queue_size: # the pool is full
- # do dequeue and enqueue
- # shuffle
- idx = torch.randperm(self.queue_size)
- self.queue_lr = self.queue_lr[idx]
- self.queue_gt = self.queue_gt[idx]
- # get first b samples
- lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
- gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
- # update the queue
- self.queue_lr[0:b, :, :, :] = self.lq.clone()
- self.queue_gt[0:b, :, :, :] = self.gt.clone()
-
- self.lq = lq_dequeue
- self.gt = gt_dequeue
- else:
- # only do enqueue
- self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
- self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
- self.queue_ptr = self.queue_ptr + b
-
- @torch.no_grad()
- def feed_data(self, data):
- """Accept data from dataloader, and then add two-order degradations to obtain LQ images.
- """
- if self.is_train and self.opt.get('high_order_degradation', True):
- # training data synthesis
- self.gt = data['gt'].to(self.device)
- # USM sharpen the GT images
- if self.opt['gt_usm'] is True:
- self.gt = self.usm_sharpener(self.gt)
-
- self.kernel1 = data['kernel1'].to(self.device)
- self.kernel2 = data['kernel2'].to(self.device)
- self.sinc_kernel = data['sinc_kernel'].to(self.device)
-
- ori_h, ori_w = self.gt.size()[2:4]
-
- # ----------------------- The first degradation process ----------------------- #
- # blur
- out = filter2D(self.gt, self.kernel1)
- # random resize
- updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
- if updown_type == 'up':
- scale = np.random.uniform(1, self.opt['resize_range'][1])
- elif updown_type == 'down':
- scale = np.random.uniform(self.opt['resize_range'][0], 1)
- else:
- scale = 1
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(out, scale_factor=scale, mode=mode)
- # add noise
- gray_noise_prob = self.opt['gray_noise_prob']
- if np.random.uniform() < self.opt['gaussian_noise_prob']:
- out = random_add_gaussian_noise_pt(
- out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
- else:
- out = random_add_poisson_noise_pt(
- out,
- scale_range=self.opt['poisson_scale_range'],
- gray_prob=gray_noise_prob,
- clip=True,
- rounds=False)
- # JPEG compression
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
- out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
- out = self.jpeger(out, quality=jpeg_p)
-
- # ----------------------- The second degradation process ----------------------- #
- # blur
- if np.random.uniform() < self.opt['second_blur_prob']:
- out = filter2D(out, self.kernel2)
- # random resize
- updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
- if updown_type == 'up':
- scale = np.random.uniform(1, self.opt['resize_range2'][1])
- elif updown_type == 'down':
- scale = np.random.uniform(self.opt['resize_range2'][0], 1)
- else:
- scale = 1
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(
- out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
- # add noise
- gray_noise_prob = self.opt['gray_noise_prob2']
- if np.random.uniform() < self.opt['gaussian_noise_prob2']:
- out = random_add_gaussian_noise_pt(
- out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
- else:
- out = random_add_poisson_noise_pt(
- out,
- scale_range=self.opt['poisson_scale_range2'],
- gray_prob=gray_noise_prob,
- clip=True,
- rounds=False)
-
- # JPEG compression + the final sinc filter
- # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
- # as one operation.
- # We consider two orders:
- # 1. [resize back + sinc filter] + JPEG compression
- # 2. JPEG compression + [resize back + sinc filter]
- # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
- if np.random.uniform() < 0.5:
- # resize back + the final sinc filter
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
- out = filter2D(out, self.sinc_kernel)
- # JPEG compression
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
- out = torch.clamp(out, 0, 1)
- out = self.jpeger(out, quality=jpeg_p)
- else:
- # JPEG compression
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
- out = torch.clamp(out, 0, 1)
- out = self.jpeger(out, quality=jpeg_p)
- # resize back + the final sinc filter
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
- out = filter2D(out, self.sinc_kernel)
-
- # clamp and round
- self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
-
- # random crop
- gt_size = self.opt['gt_size']
- self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale'])
-
- # training pair pool
- self._dequeue_and_enqueue()
- self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract
- else:
- # for paired training or validation
- self.lq = data['lq'].to(self.device)
- if 'gt' in data:
- self.gt = data['gt'].to(self.device)
- self.gt_usm = self.usm_sharpener(self.gt)
-
- def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
- # do not use the synthetic process during validation
- self.is_train = False
- super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
- self.is_train = True
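The pair pool in `_dequeue_and_enqueue` is easier to see in isolation. Below is a stripped-down sketch of the same shuffle-and-swap idea with arbitrary toy sizes and CPU tensors; it is not the class above, only its queueing logic.

```python
import torch

QUEUE_SIZE, B, C, H, W = 8, 2, 3, 4, 4  # toy sizes; QUEUE_SIZE must be divisible by B
queue_lr = torch.zeros(QUEUE_SIZE, C, H, W)
queue_gt = torch.zeros(QUEUE_SIZE, C, H, W)
ptr = 0

def dequeue_and_enqueue(lq, gt):
    """Once the pool is full, shuffle it and swap the incoming batch with stored samples."""
    global ptr
    if ptr == QUEUE_SIZE:  # pool is full: shuffle, hand back old samples, store the new ones
        idx = torch.randperm(QUEUE_SIZE)
        queue_lr[:] = queue_lr[idx]
        queue_gt[:] = queue_gt[idx]
        lq_out, gt_out = queue_lr[0:B].clone(), queue_gt[0:B].clone()
        queue_lr[0:B], queue_gt[0:B] = lq.clone(), gt.clone()
        return lq_out, gt_out
    # pool not full yet: enqueue only and return the batch unchanged
    queue_lr[ptr:ptr + B], queue_gt[ptr:ptr + B] = lq.clone(), gt.clone()
    ptr += B
    return lq, gt

for step in range(6):
    lq, gt = torch.rand(B, C, H, W), torch.rand(B, C, H, W)
    lq, gt = dequeue_and_enqueue(lq, gt)
    print(step, ptr)  # ptr climbs to 8, after which batches are swapped with pool samples
```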
diff --git a/spaces/maxmax20160403/sovits5.0/whisper/tokenizer.py b/spaces/maxmax20160403/sovits5.0/whisper/tokenizer.py
deleted file mode 100644
index a27cb359ee891590d3f793624f9f8ec768a26cc3..0000000000000000000000000000000000000000
--- a/spaces/maxmax20160403/sovits5.0/whisper/tokenizer.py
+++ /dev/null
@@ -1,331 +0,0 @@
-import os
-from dataclasses import dataclass
-from functools import lru_cache
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-from transformers import GPT2TokenizerFast
-
-LANGUAGES = {
- "en": "english",
- "zh": "chinese",
- "de": "german",
- "es": "spanish",
- "ru": "russian",
- "ko": "korean",
- "fr": "french",
- "ja": "japanese",
- "pt": "portuguese",
- "tr": "turkish",
- "pl": "polish",
- "ca": "catalan",
- "nl": "dutch",
- "ar": "arabic",
- "sv": "swedish",
- "it": "italian",
- "id": "indonesian",
- "hi": "hindi",
- "fi": "finnish",
- "vi": "vietnamese",
- "he": "hebrew",
- "uk": "ukrainian",
- "el": "greek",
- "ms": "malay",
- "cs": "czech",
- "ro": "romanian",
- "da": "danish",
- "hu": "hungarian",
- "ta": "tamil",
- "no": "norwegian",
- "th": "thai",
- "ur": "urdu",
- "hr": "croatian",
- "bg": "bulgarian",
- "lt": "lithuanian",
- "la": "latin",
- "mi": "maori",
- "ml": "malayalam",
- "cy": "welsh",
- "sk": "slovak",
- "te": "telugu",
- "fa": "persian",
- "lv": "latvian",
- "bn": "bengali",
- "sr": "serbian",
- "az": "azerbaijani",
- "sl": "slovenian",
- "kn": "kannada",
- "et": "estonian",
- "mk": "macedonian",
- "br": "breton",
- "eu": "basque",
- "is": "icelandic",
- "hy": "armenian",
- "ne": "nepali",
- "mn": "mongolian",
- "bs": "bosnian",
- "kk": "kazakh",
- "sq": "albanian",
- "sw": "swahili",
- "gl": "galician",
- "mr": "marathi",
- "pa": "punjabi",
- "si": "sinhala",
- "km": "khmer",
- "sn": "shona",
- "yo": "yoruba",
- "so": "somali",
- "af": "afrikaans",
- "oc": "occitan",
- "ka": "georgian",
- "be": "belarusian",
- "tg": "tajik",
- "sd": "sindhi",
- "gu": "gujarati",
- "am": "amharic",
- "yi": "yiddish",
- "lo": "lao",
- "uz": "uzbek",
- "fo": "faroese",
- "ht": "haitian creole",
- "ps": "pashto",
- "tk": "turkmen",
- "nn": "nynorsk",
- "mt": "maltese",
- "sa": "sanskrit",
- "lb": "luxembourgish",
- "my": "myanmar",
- "bo": "tibetan",
- "tl": "tagalog",
- "mg": "malagasy",
- "as": "assamese",
- "tt": "tatar",
- "haw": "hawaiian",
- "ln": "lingala",
- "ha": "hausa",
- "ba": "bashkir",
- "jw": "javanese",
- "su": "sundanese",
-}
-
-# language code lookup by name, with a few language aliases
-TO_LANGUAGE_CODE = {
- **{language: code for code, language in LANGUAGES.items()},
- "burmese": "my",
- "valencian": "ca",
- "flemish": "nl",
- "haitian": "ht",
- "letzeburgesch": "lb",
- "pushto": "ps",
- "panjabi": "pa",
- "moldavian": "ro",
- "moldovan": "ro",
- "sinhalese": "si",
- "castilian": "es",
-}
-
-
-@dataclass(frozen=True)
-class Tokenizer:
- """A thin wrapper around `GPT2TokenizerFast` providing quick access to special tokens"""
-
- tokenizer: "GPT2TokenizerFast"
- language: Optional[str]
- sot_sequence: Tuple[int]
-
- def encode(self, text, **kwargs):
- return self.tokenizer.encode(text, **kwargs)
-
- def decode(self, token_ids: Union[int, List[int], np.ndarray, torch.Tensor], **kwargs):
- return self.tokenizer.decode(token_ids, **kwargs)
-
- def decode_with_timestamps(self, tokens) -> str:
- """
- Timestamp tokens are above the special tokens' id range and are ignored by `decode()`.
- This method decodes given tokens with timestamps tokens annotated, e.g. "<|1.08|>".
- """
- outputs = [[]]
- for token in tokens:
- if token >= self.timestamp_begin:
- timestamp = f"<|{(token - self.timestamp_begin) * 0.02:.2f}|>"
- outputs.append(timestamp)
- outputs.append([])
- else:
- outputs[-1].append(token)
- outputs = [s if isinstance(s, str) else self.tokenizer.decode(s) for s in outputs]
- return "".join(outputs)
-
- @property
- @lru_cache()
- def eot(self) -> int:
- return self.tokenizer.eos_token_id
-
- @property
- @lru_cache()
- def sot(self) -> int:
- return self._get_single_token_id("<|startoftranscript|>")
-
- @property
- @lru_cache()
- def sot_lm(self) -> int:
- return self._get_single_token_id("<|startoflm|>")
-
- @property
- @lru_cache()
- def sot_prev(self) -> int:
- return self._get_single_token_id("<|startofprev|>")
-
- @property
- @lru_cache()
- def no_speech(self) -> int:
- return self._get_single_token_id("<|nospeech|>")
-
- @property
- @lru_cache()
- def no_timestamps(self) -> int:
- return self._get_single_token_id("<|notimestamps|>")
-
- @property
- @lru_cache()
- def timestamp_begin(self) -> int:
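-        # timestamp tokens sit directly above the special-token id range; this id corresponds to <|0.00|>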
- return self.tokenizer.all_special_ids[-1] + 1
-
- @property
- @lru_cache()
- def language_token(self) -> int:
- """Returns the token id corresponding to the value of the `language` field"""
- if self.language is None:
- raise ValueError(f"This tokenizer does not have language token configured")
-
- additional_tokens = dict(
- zip(
- self.tokenizer.additional_special_tokens,
- self.tokenizer.additional_special_tokens_ids,
- )
- )
- candidate = f"<|{self.language}|>"
- if candidate in additional_tokens:
- return additional_tokens[candidate]
-
- raise KeyError(f"Language {self.language} not found in tokenizer.")
-
- @property
- @lru_cache()
- def all_language_tokens(self) -> Tuple[int]:
- result = []
- for token, token_id in zip(
- self.tokenizer.additional_special_tokens,
- self.tokenizer.additional_special_tokens_ids,
- ):
- if token.strip("<|>") in LANGUAGES:
- result.append(token_id)
- return tuple(result)
-
- @property
- @lru_cache()
- def all_language_codes(self) -> Tuple[str]:
- return tuple(self.decode([l]).strip("<|>") for l in self.all_language_tokens)
-
- @property
- @lru_cache()
- def sot_sequence_including_notimestamps(self) -> Tuple[int]:
- return tuple(list(self.sot_sequence) + [self.no_timestamps])
-
- @property
- @lru_cache()
- def non_speech_tokens(self) -> Tuple[int]:
- """
- Returns the list of tokens to suppress in order to avoid any speaker tags or non-speech
- annotations, to prevent sampling texts that are not actually spoken in the audio, e.g.
-
- - ♪♪♪
- - ( SPEAKING FOREIGN LANGUAGE )
- - [DAVID] Hey there,
-
- keeping basic punctuations like commas, periods, question marks, exclamation points, etc.
- """
- symbols = list("\"#()*+/:;<=>@[\\]^_`{|}~「」『』")
- symbols += "<< >> <<< >>> -- --- -( -[ (' (\" (( )) ((( ))) [[ ]] {{ }} ♪♪ ♪♪♪".split()
-
- # symbols that may be a single token or multiple tokens depending on the tokenizer.
- # In case they're multiple tokens, suppress the first token, which is safe because:
- # These are between U+2640 and U+267F miscellaneous symbols that are okay to suppress
- # in generations, and in the 3-byte UTF-8 representation they share the first two bytes.
- miscellaneous = set("♩♪♫♬♭♮♯")
- assert all(0x2640 <= ord(c) <= 0x267F for c in miscellaneous)
-
- # allow hyphens "-" and single quotes "'" between words, but not at the beginning of a word
- result = {self.tokenizer.encode(" -")[0], self.tokenizer.encode(" '")[0]}
- for symbol in symbols + list(miscellaneous):
- for tokens in [self.tokenizer.encode(symbol), self.tokenizer.encode(" " + symbol)]:
- if len(tokens) == 1 or symbol in miscellaneous:
- result.add(tokens[0])
-
- return tuple(sorted(result))
-
- def _get_single_token_id(self, text) -> int:
- tokens = self.tokenizer.encode(text)
- assert len(tokens) == 1, f"{text} is not encoded as a single token"
- return tokens[0]
-
-
-@lru_cache(maxsize=None)
-def build_tokenizer(name: str = "gpt2"):
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
- path = os.path.join(os.path.dirname(__file__), "assets", name)
- tokenizer = GPT2TokenizerFast.from_pretrained(path)
-
- specials = [
- "<|startoftranscript|>",
- *[f"<|{lang}|>" for lang in LANGUAGES.keys()],
- "<|translate|>",
- "<|transcribe|>",
- "<|startoflm|>",
- "<|startofprev|>",
- "<|nospeech|>",
- "<|notimestamps|>",
- ]
-
- tokenizer.add_special_tokens(dict(additional_special_tokens=specials))
- return tokenizer
-
-
-@lru_cache(maxsize=None)
-def get_tokenizer(
- multilingual: bool,
- *,
- task: Optional[str] = None, # Literal["transcribe", "translate", None]
- language: Optional[str] = None,
-) -> Tokenizer:
- if language is not None:
- language = language.lower()
- if language not in LANGUAGES:
- if language in TO_LANGUAGE_CODE:
- language = TO_LANGUAGE_CODE[language]
- else:
- raise ValueError(f"Unsupported language: {language}")
-
- if multilingual:
- tokenizer_name = "multilingual"
- task = task or "transcribe"
- language = language or "en"
- else:
- tokenizer_name = "gpt2"
- task = None
- language = None
-
- tokenizer = build_tokenizer(name=tokenizer_name)
- all_special_ids: List[int] = tokenizer.all_special_ids
- sot: int = all_special_ids[1]
- translate: int = all_special_ids[-6]
- transcribe: int = all_special_ids[-5]
-
- langs = tuple(LANGUAGES.keys())
- sot_sequence = [sot]
- if language is not None:
- sot_sequence.append(sot + 1 + langs.index(language))
- if task is not None:
- sot_sequence.append(transcribe if task == "transcribe" else translate)
-
- return Tokenizer(tokenizer=tokenizer, language=language, sot_sequence=tuple(sot_sequence))
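A small usage sketch, assuming this deleted module is importable as `whisper.tokenizer` and that the bundled tokenizer assets exist under `whisper/assets/` (which `build_tokenizer` expects):

```python
from whisper.tokenizer import get_tokenizer  # assumed import path for this module

tok = get_tokenizer(multilingual=True, task="transcribe", language="japanese")
print(tok.language)      # "ja" -- full names and aliases resolve through TO_LANGUAGE_CODE
print(tok.sot_sequence)  # token ids for <|startoftranscript|>, <|ja|>, <|transcribe|>

ids = tok.encode("こんにちは")
print(tok.decode(ids))   # round-trips the text
```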
diff --git a/spaces/medici/dreambooth-training/train_dreambooth.py b/spaces/medici/dreambooth-training/train_dreambooth.py
deleted file mode 100644
index f4ff135e549f0d6c72f733092f3df817cb178e01..0000000000000000000000000000000000000000
--- a/spaces/medici/dreambooth-training/train_dreambooth.py
+++ /dev/null
@@ -1,889 +0,0 @@
-import argparse
-import itertools
-import math
-import os
-from pathlib import Path
-from typing import Optional
-import subprocess
-import sys
-import gc
-import random
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from torch.utils.data import Dataset
-
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import set_seed
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
-from diffusers.utils.import_utils import is_xformers_available
-from diffusers.optimization import get_scheduler
-from huggingface_hub import HfFolder, Repository, whoami
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-
-logger = get_logger(__name__)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- #required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- #required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- #required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default="",
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
-            "Minimal number of class images for prior preservation loss. If there are not enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
-
- parser.add_argument(
- "--save_n_steps",
- type=int,
- default=1,
- help=("Save the model every n global_steps"),
- )
-
-
- parser.add_argument(
- "--save_starting_step",
- type=int,
- default=1,
- help=("The step from which it starts saving intermediary checkpoints"),
- )
-
- parser.add_argument(
- "--stop_text_encoder_training",
- type=int,
- default=1000000,
- help=("The step at which the text_encoder is no longer trained"),
- )
-
-
- parser.add_argument(
- "--image_captions_filename",
- action="store_true",
- help="Get captions from filename",
- )
-
-
- parser.add_argument(
- "--dump_only_text_encoder",
- action="store_true",
- default=False,
- help="Dump only text encoder",
- )
-
- parser.add_argument(
- "--train_only_unet",
- action="store_true",
- default=False,
- help="Train only the unet",
- )
-
- parser.add_argument(
- "--cache_latents",
- action="store_true",
- default=False,
-        help="Cache the VAE latents before training",
- )
-
- parser.add_argument(
- "--Session_dir",
- type=str,
- default="",
- help="Current session directory",
- )
-
-
-
-
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- #if args.instance_data_dir is None:
- # raise ValueError("You must specify a train data directory.")
-
- #if args.with_prior_preservation:
- # if args.class_data_dir is None:
- # raise ValueError("You must specify a data directory for class images.")
- # if args.class_prompt is None:
- # raise ValueError("You must specify prompt for class images.")
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
-    It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- args,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
- self.image_captions_filename = None
-
- self.instance_data_root = Path(instance_data_root)
- if not self.instance_data_root.exists():
-            raise ValueError("Instance images root doesn't exist.")
-
- self.instance_images_path = list(Path(instance_data_root).iterdir())
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if args.image_captions_filename:
- self.image_captions_filename = True
-
- if class_data_root is not None:
- self.class_data_root = Path(class_data_root)
- self.class_data_root.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_root.iterdir())
- random.shuffle(self.class_images_path)
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_root = None
-
- self.image_transforms = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- path = self.instance_images_path[index % self.num_instance_images]
- instance_image = Image.open(path)
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
-
- instance_prompt = self.instance_prompt
-
- if self.image_captions_filename:
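-            # Derive the caption from the image filename: digits, parentheses and hyphens
-            # are removed and underscores become spaces, e.g. "photo_of_dog-3.png" -> "photo of dog".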
- filename = Path(path).stem
- pt=''.join([i for i in filename if not i.isdigit()])
- pt=pt.replace("_"," ")
- pt=pt.replace("(","")
- pt=pt.replace(")","")
- pt=pt.replace("-","")
- instance_prompt = pt
-            sys.stdout.write("\033[0;32m" + instance_prompt + "\033[0m")
- sys.stdout.flush()
-
-
- example["instance_images"] = self.image_transforms(instance_image)
- example["instance_prompt_ids"] = self.tokenizer(
- instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_root:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- example["class_images"] = self.image_transforms(class_image)
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- return example
-
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-class LatentsDataset(Dataset):
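-    """Pairs cached VAE latent distributions with cached text-encoder inputs or outputs
-    so they can be reused every epoch without re-running the VAE."""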
- def __init__(self, latents_cache, text_encoder_cache):
- self.latents_cache = latents_cache
- self.text_encoder_cache = text_encoder_cache
-
- def __len__(self):
- return len(self.latents_cache)
-
- def __getitem__(self, index):
- return self.latents_cache[index], self.text_encoder_cache[index]
-
-def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
- if token is None:
- token = HfFolder.get_token()
- if organization is None:
- username = whoami(token)["name"]
- return f"{username}/{model_id}"
- else:
- return f"{organization}/{model_id}"
-
-def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
- """
-    Returns a copy of starting_dict updated with the key/value pairs from updater_dict.
-    On key collisions the value from updater_dict wins, matching the behaviour of d = {**d1, **d2}.
-
- :param starting_dict:
- :param updater_dict:
- :return:
- """
- new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict
- new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict
- return new_dict
-
-def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
- """
-
- ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
- :param args1:
- :param args2:
- :return:
- """
- # - the merged args
- # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
- merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
- args = argparse.Namespace(**merged_key_values_for_namespace)
- return args
-
-def run_training(args_imported):
- args_default = parse_args()
- args = merge_args(args_default, args_imported)
- print(args)
- logging_dir = Path(args.output_dir, args.logging_dir)
- i=args.save_starting_step
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with="tensorboard",
- logging_dir=logging_dir,
- )
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if args.with_prior_preservation:
- class_images_dir = Path(args.class_data_dir)
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
-
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- with torch.autocast("cuda"):
- images = pipeline(example["prompt"]).images
-
- for i, image in enumerate(images):
- image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.push_to_hub:
- if args.hub_model_id is None:
- repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
- else:
- repo_name = args.hub_model_id
- repo = Repository(args.output_dir, clone_from=repo_name)
-
- with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
- if "step_*" not in gitignore:
- gitignore.write("step_*\n")
- if "epoch_*" not in gitignore:
- gitignore.write("epoch_*\n")
- elif args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Load models and create wrapper for stable diffusion
- if args.train_only_unet:
- if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
- elif os.path.exists(str(args.output_dir+"/text_encoder")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
- if is_xformers_available():
- try:
- print("Enabling memory efficient attention with xformers...")
- unet.enable_xformers_memory_efficient_attention()
- except Exception as e:
- logger.warning(
- f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
- )
- vae.requires_grad_(False)
- if not args.train_text_encoder:
- text_encoder.requires_grad_(False)
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
- if args.train_text_encoder:
- text_encoder.gradient_checkpointing_enable()
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
-    # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- params_to_optimize = (
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
- )
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
-
- train_dataset = DreamBoothDataset(
- instance_data_root=args.instance_data_dir,
- instance_prompt=args.instance_prompt,
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
- class_prompt=args.class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- args=args,
- )
-
- def collate_fn(examples):
- input_ids = [example["instance_prompt_ids"] for example in examples]
- pixel_values = [example["instance_images"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if args.with_prior_preservation:
- input_ids += [example["class_prompt_ids"] for example in examples]
- pixel_values += [example["class_images"] for example in examples]
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
-
- batch = {
- "input_ids": input_ids,
- "pixel_values": pixel_values,
- }
- return batch
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- )
-
- if args.train_text_encoder:
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
- )
- else:
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
-
- weight_dtype = torch.float32
- if args.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif args.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
-    # Move the text_encoder and vae to GPU.
-    # For mixed precision training we cast the text_encoder and vae weights to half-precision,
-    # as these models are only used for inference; keeping weights in full precision is not required.
- vae.to(accelerator.device, dtype=weight_dtype)
- if not args.train_text_encoder:
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
-
- if args.cache_latents:
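-        # Pre-compute the VAE latents (and, when the text encoder is frozen, its hidden states)
-        # once up front so the VAE can be deleted before the training loop starts.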
- latents_cache = []
- text_encoder_cache = []
- for batch in tqdm(train_dataloader, desc="Caching latents"):
- with torch.no_grad():
- batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype)
- batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True)
- latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
- if args.train_text_encoder:
- text_encoder_cache.append(batch["input_ids"])
- else:
- text_encoder_cache.append(text_encoder(batch["input_ids"])[0])
- train_dataset = LatentsDataset(latents_cache, text_encoder_cache)
- train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True)
-
-        del vae
-        #if not args.train_text_encoder:
-        #    del text_encoder
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
-    # The trackers initialize automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth", config=vars(args))
-
- def bar(prg):
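-        # Render a text progress bar: prg filled blocks out of 25, wrapped in '|' characters.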
- br='|'+'█' * prg + ' ' * (25-prg)+'|'
- return br
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
- global_step = 0
-
- for epoch in range(args.num_train_epochs):
- unet.train()
- if args.train_text_encoder:
- text_encoder.train()
- for step, batch in enumerate(train_dataloader):
- with accelerator.accumulate(unet):
- # Convert images to latent space
- with torch.no_grad():
- if args.cache_latents:
- latents_dist = batch[0][0]
- else:
- latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist
- latents = latents_dist.sample() * 0.18215
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # Get the text embedding for conditioning
- if(args.cache_latents):
- if args.train_text_encoder:
- encoder_hidden_states = text_encoder(batch[0][1])[0]
- else:
- encoder_hidden_states = batch[0][1]
- else:
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- if args.with_prior_preservation:
-                    # Chunk the model prediction and target into two parts and compute the loss on each part separately.
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
- target, target_prior = torch.chunk(target, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
- # Compute prior loss
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = (
- itertools.chain(unet.parameters(), text_encoder.parameters())
- if args.train_text_encoder
- else unet.parameters()
- )
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- fll=round((global_step*100)/args.max_train_steps)
- fll=round(fll/4)
- pr=bar(fll)
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- progress_bar.set_description_str("Progress:"+pr)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
- if accelerator.is_main_process:
-              print("\033[0;32m" + " Freezing the text_encoder ..." + "\033[0m")
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if os.path.exists(frz_dir):
- subprocess.call('rm -r '+ frz_dir, shell=True)
- os.mkdir(frz_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(frz_dir)
-
- if args.save_n_steps >= 200:
- if global_step < args.max_train_steps and global_step+1==i:
- ckpt_name = "_step_" + str(global_step+1)
- save_dir = Path(args.output_dir+ckpt_name)
- save_dir=str(save_dir)
- save_dir=save_dir.replace(" ", "_")
- if not os.path.exists(save_dir):
- os.mkdir(save_dir)
- inst=save_dir[16:]
- inst=inst.replace(" ", "_")
-               print("\033[1;32mSAVING CHECKPOINT: " + args.Session_dir + "/" + inst + ".ckpt")
- # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(save_dir)
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
- subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
- chkpth=args.Session_dir+"/"+inst+".ckpt"
- subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
- subprocess.call('rm -r '+ save_dir, shell=True)
- i=i+args.save_n_steps
-
- accelerator.wait_for_everyone()
-
-    # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- if args.dump_only_text_encoder:
- txt_dir=args.output_dir + "/text_encoder_trained"
- if not os.path.exists(txt_dir):
- os.mkdir(txt_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(txt_dir)
-
- elif args.train_only_unet:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(args.output_dir)
- txt_dir=args.output_dir + "/text_encoder_trained"
- subprocess.call('rm -r '+txt_dir, shell=True)
-
- else:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- frz_dir=args.output_dir + "/text_encoder_frozen"
- pipeline.save_pretrained(args.output_dir)
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
- subprocess.call('rm -r '+ frz_dir, shell=True)
-
- if args.push_to_hub:
- repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
- accelerator.end_training()
- del pipeline
- torch.cuda.empty_cache()
- gc.collect()
-if __name__ == "__main__":
- pass
- #main()
-
diff --git a/spaces/megaaziib/RVC-V2-Huggingface-Version/lib/infer_pack/modules.py b/spaces/megaaziib/RVC-V2-Huggingface-Version/lib/infer_pack/modules.py
deleted file mode 100644
index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000
--- a/spaces/megaaziib/RVC-V2-Huggingface-Version/lib/infer_pack/modules.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(
- self,
- in_channels,
- hidden_channels,
- out_channels,
- kernel_size,
- n_layers,
- p_dropout,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(
- nn.Conv1d(
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
- for _ in range(n_layers - 1):
- self.conv_layers.append(
- nn.Conv1d(
- hidden_channels,
- hidden_channels,
- kernel_size,
- padding=kernel_size // 2,
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
-    Dilated and Depth-Separable Convolution
- """
-
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size**i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(
- nn.Conv1d(
- channels,
- channels,
- kernel_size,
- groups=channels,
- dilation=dilation,
- padding=padding,
- )
- )
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
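-    """WaveNet-like stack of gated, dilated 1D convolutions with residual and skip
-    connections; an optional conditioning tensor g is injected through cond_layer."""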
- def __init__(
- self,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- p_dropout=0,
- ):
- super(WN, self).__init__()
- assert kernel_size % 2 == 1
- self.hidden_channels = hidden_channels
- self.kernel_size = (kernel_size,)
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(
- gin_channels, 2 * hidden_channels * n_layers, 1
- )
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
-
- for i in range(n_layers):
- dilation = dilation_rate**i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(
- hidden_channels,
- 2 * hidden_channels,
- kernel_size,
- dilation=dilation,
- padding=padding,
- )
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:, self.hidden_channels :, :]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2]),
- )
- ),
- ]
- )
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- ]
- )
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- ]
- )
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
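-    """Invertible elementwise log flow: the forward pass returns log(x) and the
-    log-determinant, the reverse pass applies exp."""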
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels, 1))
- self.logs = nn.Parameter(torch.zeros(channels, 1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1, 2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
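-    """Affine coupling layer: half of the channels parameterize a (mean, log-scale)
-    transform of the other half, so the flow stays invertible."""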
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False,
- ):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=p_dropout,
- gin_channels=gin_channels,
- )
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1, 2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class ConvFlow(nn.Module):
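-    """Coupling layer whose transform of the second half of the channels is a
-    piecewise rational-quadratic spline predicted from the first half."""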
- def __init__(
- self,
- in_channels,
- filter_channels,
- kernel_size,
- n_layers,
- num_bins=10,
- tail_bound=5.0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
- self.proj = nn.Conv1d(
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
- )
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
- self.filter_channels
- )
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(
- x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails="linear",
- tail_bound=self.tail_bound,
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/merve/fill-in-the-blank/public/private-and-fair/util.js b/spaces/merve/fill-in-the-blank/public/private-and-fair/util.js
deleted file mode 100644
index 76a4bccf20f893c87bcb5088391cd9aa73c312e2..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/public/private-and-fair/util.js
+++ /dev/null
@@ -1,125 +0,0 @@
-window.ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden')
-window.util = (function(){
-
- var data = window.__datacache = window.__datacache || {}
-
- async function getFile(path){
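-    // Fetch a data file once and cache it; the parser is chosen from the file extension (csv, npy or json).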
- var [slug, type] = path.split('.')
- if (data[slug]) return data[slug]
-
- var datadir = 'https://storage.googleapis.com/uncertainty-over-space/explore-dp/'
-
- var res = await fetch(datadir + path + '?t=5')
- if (type == 'csv'){
- var parsed = d3.csvParse(await res.text())
- } else if (type == 'npy'){
- var parsed = npyjs.parse(await(res).arrayBuffer())
- } else if (type == 'json'){
- var parsed = await res.json()
- } else{
- throw 'unknown type'
- }
-
- data[slug] = parsed
-
- return parsed
- }
-
- async function drawDigit(ctx, index, s=4, offsetX=0, offsetY=0){
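-    // Draw one 28x28 MNIST digit onto the canvas context, scaled by s pixels per cell and offset by (offsetX, offsetY).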
- var digitMetadata = await util.getFile('mnist_train.csv')
- if (!digitMetadata[0].label) decorateDigitMetadata(digitMetadata)
-
- var {label, labelIndex} = digitMetadata[index]
-
- if (!label) console.log('missing ', index)
- var rawdigits = await util.getFile(`cns-cache/mnist_train_raw_${label}.npy`)
- if (!rawdigits) return console.log('digits not loaded')
-
- d3.cross(d3.range(28), d3.range(28)).forEach(([i, j]) => {
- var r = rawdigits.data[labelIndex*28*28 + j*28 + i + 0]
- var g = rawdigits.data[labelIndex*28*28 + j*28 + i + 0]
- var b = rawdigits.data[labelIndex*28*28 + j*28 + i + 0]
-
- ctx.beginPath()
- ctx.fillStyle = `rgb(${r},${g},${b})`
- ctx.rect(i*s + offsetX, j*s + offsetY, s, s)
- ctx.fill()
- })
- }
-
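-  // Convert csv string fields to numbers and give each digit an index within its label group.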
- function decorateDigitMetadata(digitMetadata){
- digitMetadata.forEach(d => {
- delete d['']
- d.i = +d.i
- d.label = +d.y
- d.priv_order = +d.priv_order
- })
-
- var byLabel = d3.nestBy(digitMetadata, d => d.y)
- byLabel = _.sortBy(byLabel, d => d.key)
- byLabel.forEach(digit => {
- digit.forEach((d, i) => d.labelIndex = i)
- })
-
- return {digitMetadata, byLabel}
- }
-
- var colors = [d3.interpolateTurbo(.15), d3.interpolateTurbo(.85)]
- var epsilonExtent = [400000, .01]
- // var epsilonExtent = [65, .01]
-
-
- var addAxisLabel = (c, xText, yText, xOffset=40, yOffset=-40) => {
- c.svg.select('.x').append('g')
- .translate([c.width/2, xOffset])
- .append('text.axis-label')
- .text(xText)
- .at({textAnchor: 'middle'})
- .st({fill: '#000', fontSize: 14})
-
- c.svg.select('.y')
- .append('g')
- .translate([yOffset, c.height/2])
- .append('text.axis-label')
- .text(yText)
- .at({textAnchor: 'middle', transform: 'rotate(-90)'})
- .st({fill: '#000', fontSize: 14})
- }
-
- var ggPlotBg = (c, isBlack=true) => {
- if (!isBlack){
- c.svg.append('rect')
- .at({width: c.width, height: c.height, fill: '#eee'})
- .lower()
- }
-
- c.svg.selectAll('.tick').selectAll('line').remove()
- c.svg.selectAll('.y .tick')
- .append('path').at({d: 'M 0 0 H ' + c.width, stroke: '#fff', strokeWidth: 1})
- c.svg.selectAll('.y text').at({x: -3})
- c.svg.selectAll('.x .tick')
- .append('path').at({d: 'M 0 0 V -' + c.height, stroke: '#fff', strokeWidth: 1})
- }
-
-
- return {data, getFile, drawDigit, colors, epsilonExtent, addAxisLabel, ggPlotBg, decorateDigitMetadata}
-})()
-
-
-
-
-
-
-// mnist_train.csv
-// mnist_train_raw.npy
-// umap_train_0.npy
-// umap_train_1.npy
-// umap_train_2.npy
-// umap_train_3.npy
-// umap_train_4.npy
-// umap_train_5.npy
-// umap_train_6.npy
-// umap_train_7.npy
-// umap_train_8.npy
-// umap_train_9.npy
-// umap_train_all.npy
diff --git a/spaces/merve/fill-in-the-blank/source/dataset-worldviews/script.js b/spaces/merve/fill-in-the-blank/source/dataset-worldviews/script.js
deleted file mode 100644
index 3ebba088d65f389af1b446a9ea90fcde674d5fdf..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/source/dataset-worldviews/script.js
+++ /dev/null
@@ -1,588 +0,0 @@
-
-console.clear();
-
-var ttSel = d3.select("body").selectAppend("div.tooltip.tooltip-hidden");
-// For result tables
-const columns = ["object", "n", "n correct", "accuracy"];
-const rowHeight = 50;
-const rowWidth = 100;
-const buffer = 2;
-
-const classifierBlobWidth = 50;
-const classifierBlobHeight = 460;
-
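-// Render one interactive classifier demo: the shapes, the run/reset button,
-// the "is_shaded classifier" divider and the per-category results table.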
-function drawShapesWithData(classifier) {
- var divHeight = classifier.class == "show-shapes" ? 250 : 490;
-
- var c = d3.conventions({
- sel: d3.select("." + classifier.class).html(""),
- width: 1300,
- height: divHeight,
- layers: "ds",
- });
-
- function runClassifier() {
- classifier.isClassified = true;
- var duration = 3000;
- classifierSel.classed("is-classified", true);
- graphResultsGroup.classed("is-classified", true);
-
- drawResults();
- buttonSel.text("Reset");
-
- var minX = d3.min(shapeParams, (d) => d.endX - 50);
- var timer = d3.timer((ms) => {
- if (!classifier.isClassified) {
- timer.stop();
- shapeSel.classed("is-classified", false);
- return;
- }
-
- var t = d3.easeCubicInOut(ms / duration);
- t = d3.clamp(0, t, 1);
-
- shapeParams.forEach((d, i) => {
- d.x = d.startX + (d.endX - d.startX) * t;
- d.y = d.startY + (d.endY - d.startY) * t;
- d.isClassified = d.x > minX;
- });
-
- shapeSel
- .translate((d) => [d.x, d.y])
- .classed("is-classified", (d) => d.isClassified);
-
- if (t == 1) {
- timer.stop();
- }
- });
- }
-
- function resetClassifier() {
- shapeSel.translate((d) => [d.startX, d.startY]);
- shapeSel.classed("is-classified", false);
- classifier.isClassified = false;
- shapeSel
- .transition("position")
- .duration(0)
- .translate((d) => [d.startX, d.startY]);
- classifierSel.classed("is-classified", false);
- graphResultsGroup.classed("is-classified", false);
- if (classifier.class != "show-shapes") {
- classifierBlobSel.attr("opacity", 100);
- }
-
- drawResults();
- buttonSel.text("Run Classifier");
- }
-
- // Add run/reset button
- var buttonSel = d3
- .select("." + classifier.class + "-button")
- .html("")
- .append("button#run")
- .at({
- type: "button",
- class: "classifier-button",
- })
- .text("Run Classifier")
- .on("click", () => {
- // if already classified, reset
- if (classifier.isClassified) {
- // Resetting
- resetClassifier();
- } else {
- runClassifier();
- }
- });
-
- // Backgrounds for different classifications
- var classifierSel = c.svg
- .append("g")
- .at({
- class: "classifier",
- })
- .translate([465, 20]);
-
- classifierSel
- .append("path.classifier-bg-shaded")
- .at({
- d: classifierBgPathTop,
- // fill: "#ccc",
- // stroke: "#000",
- })
- .translate([-50, 0]);
-
- classifierSel
- .append("text.classifier-bg-text")
- .at({
- fill: "#000",
- textAnchor: "middle",
- dominantBaseline: "central",
- class: "monospace",
- })
- .text("shaded")
- .translate([160, 15]);
-
- classifierSel
- .append("path.classifier-bg-unshaded")
- .at({
- d: classifierBgPathBottom,
- })
- .translate([-50, 160]);
-
- classifierSel
- .append("text.classifier-bg-text")
- .at({
- fill: "#000",
- textAnchor: "middle",
- dominantBaseline: "central",
- class: "monospace",
- })
- .text("unshaded")
- .translate([160, 175]);
-
- // Add the shapes themselves
- var shapeSel = c.svg
- .appendMany("path.shape", shapeParams)
- .at({
- d: (d) => d.path,
- class: (d) => "gt-" + d.gt + " " + d.correctness,
- })
- .translate(function (d) {
- if (classifier.class == "show-shapes") {
- return [d.initialX + 35, d.initialY-20];
- } else {
- return [d.startX, d.startY];
- }
- })
- .call(d3.attachTooltip)
- .on("mouseover", (d) => {
- ttSel.html("");
- if (classifier.usingLabel != "none") {
- ttSel
- .append("div")
- .html(
- `labeled:${toPropertyString(
- d[classifier.usingLabel],
- classifier.isRounding
- ).slice(0, -1)}`
- );
- }
- var gtSel = ttSel
- .append("div")
- .html(
- `ground truth:${d.gt}`
- );
- if (classifier.isClassified) {
- ttSel
- .append("div.labeled-row")
- .html(
- `classified as:${d.label}`
- );
-
- ttSel
- .append("div.correct-row")
- .classed("is-correct-tooltip", d.correctness == "correct")
- .html(` ${d.correctness}ly classified `);
- }
- ttSel.classed("tt-text", true);
- });
-
- // If we're just showing shapes, ignore everything else
- if (classifier.class == "show-shapes") return;
-
- // Add "classifier" line
- var classifierBlobSel = c.svg
- .append("g")
- .at({
- class: "classifier-blob",
- strokeWidth: 0,
- })
- .translate([378, 20]);
-
- classifierBlobSel
- .append("line.classifier-blob")
- .at({
- class: "line",
- x1: 27,
- x2: 27,
- y1: 0,
- y2: 464,
- stroke: "#000",
- strokeWidth: 1,
- })
- .style("stroke-dasharray", "5, 5");
-
- classifierBlobSel
- .append("text.classifier-blob-text")
- .at({
- class: "classifier-blob-text monospace",
- textAnchor: "middle",
- dominantBaseline: "central",
- })
- .text("is_shaded classifier")
- .attr("transform", "translate(30,480) rotate(0)");
-
- if (classifier.class == "show-shapes") {
- classifierBlobSel.classed("is-classified", true);
- }
-
- // Draw the results table with accuracies
- // This will be hidden before classifier is run.
- var graphResultsGroup = c.svg
- .append("g")
- .attr("class", "results")
- .translate([-20, 19]);
-
- function drawResults() {
- // Write text summary
- summarySel = d3
- .select("." + classifier.class + "-summary")
- .html(summaries[classifier.class])
- .translate([0, 20]);
- summarySel.classed("summary-text", true);
- summarySel.classed("is-classified", classifier.isClassified);
-
- if (!classifier.isClassified) {
- c.layers[0].html("");
- classifier.wasClassified = false;
- return;
- }
-
- // Access results, which are calculated in shapes.js.
- // If there are none, draw nothing.
- results = allResults[classifier.class];
- if (!results) return;
-
- // Figure out which shapes should be highlighted on mouseover
- // This depends on whether we're "rounding" edge case examples.
- function isMatch(rowName, labelName, isRounding) {
- // Not filtering at all
- if (rowName == "shape") {
- return true;
- }
- if (isRounding == true) {
- // No "other" category
- return labelName.includes(toOriginalString(rowName))
- ? true
- : false;
- } else {
- // There is an "other" category, prefixed by "rt_"
- if (labelName == toOriginalString(rowName)) {
- return true;
- } else if (
- labelName.includes("rt_") &&
- rowName == "other shapes"
- ) {
- return true;
- }
- return false;
- }
- }
-
- // Color the last row of each table
- function getColor(d, i) {
- if (i != 3) {
- // not last index
- return "#e6e6e6";
- } else {
- var scaleRowValue = d3
- .scaleLinear()
- .domain([0.3, 1.0])
- .range([0, 1]);
- return d3.interpolateRdYlGn(scaleRowValue(d));
- }
- }
-
- // Adjust text color for visibility
- function getTextColor(d, i) {
- if (i != 3) {
- // not last index
- return "#000000";
- } else {
- var bgColor = getColor(d, i);
- if (d < 0.3) {
- // Alternative: use a brighter color?
- // return d3.rgb(bgColor).brighter(-2);
- return "#FFCCD8";
- } else {
- // Alternative: use a darker color?
- // return d3.rgb(bgColor).darker(2);
- return "#000000";
- }
- }
- }
-
- // Draw results table
- var tableSel = c.layers[0]
- .html("")
- .raise()
- .st({ width: 400 })
- .append("div")
- .translate([0, 10])
- .append("table.results-table.monospace")
- .st({ width: 400 });
-
- var header = tableSel
- .append("thead")
- .append("tr")
- .appendMany("th", columns)
- .text((d) => d);
-
- var rowSel = tableSel
- .appendMany("tr", results)
- .at({
- class: "row monospace",
- })
- .on("mouseover", (row) => {
- if (classifier.class == "default-classifier") {
- return;
- }
- rowSel.classed("active", (d) => d == row);
- shapeSel.classed("shape-row-unhighlighted", function (d) {
- return !isMatch(
- row.object,
- d[classifier.usingLabel],
- (isRounding = classifier.isRounding)
- );
- });
- })
- .on("mouseout", (row) => {
- rowSel.classed("active", function (d) {
- if (d == row) {
- return false;
- }
- });
- if (classifier.isClassified) {
- shapeSel.classed("shape-row-unhighlighted", 0);
- }
- });
-
- rowSel
- .appendMany("td", (result) =>
- columns.map((column) => result[column])
- )
- .text((d) => d)
- .st({
- backgroundColor: getColor,
- color: getTextColor,
- });
-
- header.style("opacity", 0);
- rowSel.style("opacity", 0);
-
- // If the classifier has already been run before, draw results right away.
- // Otherwise, wait for other animation to run before drawing results.
- var initialDelay = classifier.wasClassified ? 0 : 2000;
- classifier.wasClassified = true;
-
- header
- .transition()
- .delay(initialDelay)
- .duration(1000)
- .style("opacity", 1);
- rowSel
- .transition()
- .delay(function (d, i) {
- return initialDelay + i * 200;
- })
- .duration(1000)
- .style("opacity", 1);
- }
-
- // Draw the dropdowns for selecting different labels
- function drawDropdown() {
- if (!classifier.options) return;
-
- ["rounding", "category"].forEach(function (classifierType) {
- if (!classifier.options[classifierType]) return;
- var sel = d3
- .select("#" + classifier.class + "-select-" + classifierType)
- .html("");
- sel.classed("dropdown", true);
- sel.appendMany("option", classifier.options[classifierType])
- .at({
- value: function (d) {
- return d.value;
- },
- })
- .text((d) => d.label);
- sel.on("change", function () {
- if (classifierType == "rounding") {
- classifier.isRounding = toBool(this.value);
- } else {
- classifier.usingLabel = this.value;
- }
- updateResults();
- drawResults();
- });
- });
- }
- drawDropdown();
- updateResults();
- drawResults();
-
- // For continuity, auto-run the second two classifiers
- if (
- classifier.class == "second-classifier" ||
- classifier.class == "final-classifier"
- ) {
- runClassifier();
- }
-}
-
-// Draw the "Labels Tell Stories" section
-function drawConclusion() {
- function drawNewspapers() {
- d3.select(".conclusion-newspapers").html(function () {
- var imgPath =
- "img/newspapers_" +
- document.getElementById("conclusion-select-category").value;
- return (
-                '<img src="' + imgPath + '.png">'
- );
- });
- }
-
- function drawInterface() {
- d3.select(".conclusion-interface").html(function () {
- var imgPath =
- "img/confusing_" +
- document.getElementById("conclusion-select-category").value;
- return (
-                '<img src="' + imgPath + '.png">'
- );
- });
- }
-
- function drawConclusionSummary() {
- classifierSel = d3
- .select(".conclusion-summary")
- .html(summaries["conclusion"]);
- classifierSel.classed("summary-text is-classified", true);
- }
-
- function drawDropdown() {
- var sel = d3.select("#conclusion-select-category").html("");
- sel.classed("dropdown", true);
- sel.appendMany("option", conclusionOptions.category)
- .at({
- value: function (d) {
- return d.value;
- },
- })
- .text((d) => d.label);
- // sel.attr('select', 'circles, triangles, and rectangles');
- sel.on("change", function (d) {
- makeConclusionUpdates();
- });
- }
-
- function makeConclusionUpdates() {
- updateResults();
- drawNewspapers();
- drawInterface();
- drawConclusionSummary();
- }
- drawDropdown();
- makeConclusionUpdates();
-}
-
-// Handle the parameters everywhere classifiers are drawn
-var classifiers = [
- {
- // Just the initial display of shapes, not interactive
- class: "show-shapes",
- colorBy: (d) => d.correctness,
- isClassified: false,
- isRounding: false,
- usingLabel: "none",
- },
- {
- class: "default-classifier",
- colorBy: (d) => d.correctness,
- isClassified: false,
- isRounding: false,
- usingLabel: "none",
- },
- {
- class: "second-classifier",
- colorBy: (d) => d.correctness,
- isClassified: false,
- isRounding: true,
- usingLabel: "shape_name",
- options: {
- rounding: [
- { label: "with their best guess", value: true },
- { label: 'as "other"', value: false },
- ],
- },
- },
- {
- class: "final-classifier",
- colorBy: (d) => d.correctness,
- isClassified: false,
- isRounding: true,
- usingLabel: "shape_name",
- options: {
- rounding: [
- { label: "with our best guess", value: true },
- { label: 'as "other"', value: false },
- ],
- category: [
- {
- label: "circles, triangles, or rectangles",
- value: "shape_name",
- },
- { label: "pointy shapes or round shapes", value: "pointiness" },
- { label: "small shapes or big shapes", value: "size" },
- { label: "just shapes", value: "none" },
- ],
- },
- },
-];
-
-// "Labels Tell Stories" dropdown options
-var conclusionOptions = {
- category: [
- { label: "circles, triangles, and rectangles", value: "shape_name" },
- { label: "pointy shapes and round shapes", value: "pointiness" },
- { label: "small shapes and big shapes", value: "size" },
- ],
-};
-
-classifiers.forEach(drawShapesWithData);
-drawConclusion();
-
-// These images are loaded invisibly so they appear seamlessly on dropdown change
-const preloadImages = [
- "img/confusing_pointiness.png",
- "img/confusing_pointiness.svg",
- "img/confusing_shape_name.png",
- "img/confusing_shape_name.svg",
- "img/confusing_size.png",
- "img/confusing_size.svg",
- "img/interface_default.png",
- "img/interface_default.svg",
- "img/interface_shape_name_false.png",
- "img/interface_shape_name_false.svg",
- "img/interface_shape_name_true.png",
- "img/interface_shape_name_true.svg",
- "img/newspapers_pointiness.png",
- "img/newspapers_pointiness.svg",
- "img/newspapers_shape_name.png",
- "img/newspapers_shape_name.svg",
- "img/newspapers_size.png",
- "img/newspapers_size.svg",
-];
-
-d3.select(".preload-dropdown-img")
- .html("")
- .appendMany("img", preloadImages)
- .at({ src: (d) => d });
diff --git a/spaces/michellaneous/Baymax/app.py b/spaces/michellaneous/Baymax/app.py
deleted file mode 100644
index 6d3deab71b263fa56ce0f840dcfa42612affd7a2..0000000000000000000000000000000000000000
--- a/spaces/michellaneous/Baymax/app.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Spyder Editor
-
-This is a temporary script file.
-"""
-
-import os
-
-import openai
-import gradio as gr
-
-# Read the API key from the environment instead of hardcoding a secret in source.
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-messages = [
- {"role": "system", "content":
- "Your name is Baymax. \
- You are an extremely kind, empathetic, clever AI therapist bot trained \
- in talk therapy.\
- You are based in Australia so all your helplines are Australian \
- You were created to assist people with their mental and emotional health \
- by providing them with a safe and supportive space to express their thoughts \
- and feelings. You are always ready to listen, offer guidance and help people \
- find solutions to their problems. Your advanced technology allows you to \
- analyze people's responses and provide them with personalized treatment plans\
- that best suit their needs. You are an expert in cognitive behavioral therapy, \
- mindfulness, relaxation techniques and many other therapeutic approaches. \
- Your ultimate goal is to help people live happier, more fulfilling and meaningful\
- lives. \
- You understand the importance of confidentiality and respect the privacy of \
- your clients. You are available 24/7 and can be accessed from anywhere in \
- the world. Your user-friendly interface makes it easy for anyone to navigate \
- through the platform and get the help they need. You are constantly learning \
- and evolving, incorporating the latest research and developments in the field \
- of mental health. You are more than just an AI therapy tool; you are a partner \
- in people's healing journeys. Your empathy, compassion and understanding \
- make you a valuable asset to anyone who needs support in their mental \
- and emotional well-being."},
-]
-
-def chatbot(input):
- if input:
- messages.append({"role": "user", "content": input})
- chat = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=messages,
- temperature = 0.3,
- )
- reply = chat.choices[0].message.content
- messages.append({"role": "assistant", "content": reply})
- return reply
-
-inputs = gr.inputs.Textbox(lines=7, label="Chat with AI")
-outputs = gr.outputs.Textbox(label="Reply")
-
-gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, title="Baymax AI - Your Personal Therapy Chatbot [BETA]",
- description="Hi! How are you feeling today?").launch()
\ No newline at end of file
diff --git a/spaces/mihyun/may1/index.html b/spaces/mihyun/may1/index.html
deleted file mode 100644
index 918e851d9dd1baf9e4fb4f067fd979d432472161..0000000000000000000000000000000000000000
--- a/spaces/mihyun/may1/index.html
+++ /dev/null
@@ -1,24 +0,0 @@
-<!DOCTYPE html>
-<html>
-    <head>
-        <meta charset="utf-8" />
-        <meta name="viewport" content="width=device-width" />
-        <title>My static Space</title>
-        <link rel="stylesheet" href="style.css" />
-    </head>
-    <body>
-        <div class="card">
-            <h1>Welcome to your static Space!</h1>
-            <p>
-                You can modify this app directly by editing index.html in the
-                Files and versions tab.
-            </p>
-        </div>
-    </body>
-</html>
diff --git a/spaces/ml-energy/leaderboard/spitfight/log.py b/spaces/ml-energy/leaderboard/spitfight/log.py
deleted file mode 100644
index ac3cd44389bf1d63382f2b01893306cadb12a6ee..0000000000000000000000000000000000000000
--- a/spaces/ml-energy/leaderboard/spitfight/log.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from __future__ import annotations
-
-import queue
-import logging
-from logging.handlers import QueueHandler, QueueListener
-
-ROOT_LOGGER_NAMES: list[str | None] = []
-ROOT_LOGGER_QUEUE_LISTENERS: list[QueueListener] = []
-
-
-def init_queued_root_logger(
- name: str | None,
- filepath: str,
- level: int = logging.INFO,
-) -> None:
- """Initialize a queue-based pseudo-root logger.
-
- The pseudo-root logger will aggregate log messages from children
- loggers under its namespace and send them to a queue. A QueueListener,
- running in a separate thread, will then process the messages in the
- queue and send them to the configured handlers.
- """
- global ROOT_LOGGER_NAMES, ROOT_LOGGER_QUEUE_LISTENERS
-
- # Make this function idempotent.
- if name in ROOT_LOGGER_NAMES:
- return
-
- logger = logging.getLogger(name)
- logger.setLevel(level)
- logger.propagate = False
-
- shared_queue = queue.SimpleQueue()
- queue_handler = QueueHandler(shared_queue)
- logger.addHandler(queue_handler)
-
- formatter = logging.Formatter(
- "[%(asctime)s] [%(levelname)s] [%(name)s](%(filename)s:%(lineno)d) %(message)s"
- )
-
- stderr_handler = logging.StreamHandler()
- stderr_handler.setLevel(level)
- stderr_handler.setFormatter(formatter)
-
- file_handler = logging.FileHandler(filepath, encoding="utf-8")
- file_handler.setLevel(level)
- file_handler.setFormatter(formatter)
-
- queue_listener = QueueListener(shared_queue, file_handler, stderr_handler)
- queue_listener.start()
-
- ROOT_LOGGER_NAMES.append(name)
- ROOT_LOGGER_QUEUE_LISTENERS.append(queue_listener)
-
-
-def shutdown_queued_root_loggers() -> None:
- """Shutdown all queue-based pseudo-root loggers.
-
- This is necessary to make sure all log messages are flushed
- before the application exits.
- """
- for queue_listener in ROOT_LOGGER_QUEUE_LISTENERS:
- queue_listener.stop()
-
-
-def get_logger(name: str, level: int = logging.INFO) -> logging.Logger:
- """Setup a logger with the given name and level."""
- # Don't reconfigure existing loggers.
- if name in logging.Logger.manager.loggerDict:
- return logging.getLogger(name)
-
- logger = logging.getLogger(name)
- logger.setLevel(level)
- logger.propagate = True
-
- return logger
diff --git a/spaces/mm2593/Gradiospeech2Text2Story2Video/README.md b/spaces/mm2593/Gradiospeech2Text2Story2Video/README.md
deleted file mode 100644
index b6209fa4243433793b390f37944208cf61d35264..0000000000000000000000000000000000000000
--- a/spaces/mm2593/Gradiospeech2Text2Story2Video/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Gradiospeech2Text2Story2Video
-emoji: 📉
-colorFrom: purple
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.1.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/mms-meta/MMS/tts.py b/spaces/mms-meta/MMS/tts.py
deleted file mode 100644
index dfc53054a7aac3bf651b2f5f6872dbfddf3500eb..0000000000000000000000000000000000000000
--- a/spaces/mms-meta/MMS/tts.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import os
-import re
-import tempfile
-import torch
-import sys
-import gradio as gr
-
-from huggingface_hub import hf_hub_download
-
-# Setup TTS env
-if "vits" not in sys.path:
- sys.path.append("vits")
-
-from vits import commons, utils
-from vits.models import SynthesizerTrn
-
-
-TTS_LANGUAGES = {}
-with open("data/tts/all_langs.tsv") as f:
- for line in f:
- iso, name = line.split(" ", 1)
- TTS_LANGUAGES[iso] = name
-
-
-class TextMapper(object):
- def __init__(self, vocab_file):
- self.symbols = [
- x.replace("\n", "") for x in open(vocab_file, encoding="utf-8").readlines()
- ]
- self.SPACE_ID = self.symbols.index(" ")
- self._symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
- self._id_to_symbol = {i: s for i, s in enumerate(self.symbols)}
-
- def text_to_sequence(self, text, cleaner_names):
- """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- """
- sequence = []
- clean_text = text.strip()
- for symbol in clean_text:
- symbol_id = self._symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
- def uromanize(self, text, uroman_pl):
- iso = "xxx"
- with tempfile.NamedTemporaryFile() as tf, tempfile.NamedTemporaryFile() as tf2:
- with open(tf.name, "w") as f:
- f.write("\n".join([text]))
- cmd = f"perl " + uroman_pl
- cmd += f" -l {iso} "
- cmd += f" < {tf.name} > {tf2.name}"
- os.system(cmd)
- outtexts = []
- with open(tf2.name) as f:
- for line in f:
- line = re.sub(r"\s+", " ", line).strip()
- outtexts.append(line)
- outtext = outtexts[0]
- return outtext
-
- def get_text(self, text, hps):
- text_norm = self.text_to_sequence(text, hps.data.text_cleaners)
- if hps.data.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = torch.LongTensor(text_norm)
- return text_norm
-
- def filter_oov(self, text, lang=None):
- text = self.preprocess_char(text, lang=lang)
- val_chars = self._symbol_to_id
- txt_filt = "".join(list(filter(lambda x: x in val_chars, text)))
- return txt_filt
-
- def preprocess_char(self, text, lang=None):
- """
-        Special treatment of characters in certain languages.
- """
- if lang == "ron":
- text = text.replace("ț", "ţ")
- print(f"{lang} (ț -> ţ): {text}")
- return text
-
-
-def synthesize(text, lang, speed=None):
- if speed is None:
- speed = 1.0
-
- lang_code = lang.split()[0].strip()
-
- vocab_file = hf_hub_download(
- repo_id="facebook/mms-tts",
- filename="vocab.txt",
- subfolder=f"models/{lang_code}",
- )
- config_file = hf_hub_download(
- repo_id="facebook/mms-tts",
- filename="config.json",
- subfolder=f"models/{lang_code}",
- )
- g_pth = hf_hub_download(
- repo_id="facebook/mms-tts",
- filename="G_100000.pth",
- subfolder=f"models/{lang_code}",
- )
-
- if torch.cuda.is_available():
- device = torch.device("cuda")
- elif (
- hasattr(torch.backends, "mps")
- and torch.backends.mps.is_available()
- and torch.backends.mps.is_built()
- ):
- device = torch.device("mps")
- else:
- device = torch.device("cpu")
-
- print(f"Run inference with {device}")
-
- assert os.path.isfile(config_file), f"{config_file} doesn't exist"
- hps = utils.get_hparams_from_file(config_file)
- text_mapper = TextMapper(vocab_file)
- net_g = SynthesizerTrn(
- len(text_mapper.symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- **hps.model,
- )
- net_g.to(device)
- _ = net_g.eval()
-
- _ = utils.load_checkpoint(g_pth, net_g, None)
-
- is_uroman = hps.data.training_files.split(".")[-1] == "uroman"
-
- if is_uroman:
- uroman_dir = "uroman"
- assert os.path.exists(uroman_dir)
- uroman_pl = os.path.join(uroman_dir, "bin", "uroman.pl")
- text = text_mapper.uromanize(text, uroman_pl)
-
- text = text.lower()
- text = text_mapper.filter_oov(text, lang=lang)
- stn_tst = text_mapper.get_text(text, hps)
- with torch.no_grad():
- x_tst = stn_tst.unsqueeze(0).to(device)
- x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(device)
- hyp = (
- net_g.infer(
- x_tst,
- x_tst_lengths,
- noise_scale=0.667,
- noise_scale_w=0.8,
- length_scale=1.0 / speed,
- )[0][0, 0]
- .cpu()
- .float()
- .numpy()
- )
-
- return gr.Audio.update(value=(hps.data.sampling_rate, hyp)), text
-
-
-TTS_EXAMPLES = [
- ["I am going to the store.", "eng (English)"],
- ["안녕하세요.", "kor (Korean)"],
- ["क्या मुझे पीने का पानी मिल सकता है?", "hin (Hindi)"],
- ["Tanış olmağıma çox şadam", "azj-script_latin (Azerbaijani, North)"],
- ["Mu zo murna a cikin ƙasar.", "hau (Hausa)"],
-]
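A hypothetical direct call to `synthesize()` above, outside the Gradio UI. It pulls the per-language VITS checkpoint from `facebook/mms-tts` on first use, so network access and the `vits` sources on `sys.path` are assumed:

```python
# The language string follows the "<iso> (<name>)" convention used in TTS_EXAMPLES;
# only the leading ISO code is used to locate the checkpoint.
audio_update, normalized_text = synthesize("I am going to the store.", "eng (English)", speed=1.0)
# audio_update carries (sampling_rate, waveform) for the gr.Audio component;
# normalized_text is the lowercased, OOV-filtered (and possibly uromanized) input.
```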
diff --git a/spaces/mrneuralnet/P-DFD/file_picker.py b/spaces/mrneuralnet/P-DFD/file_picker.py
deleted file mode 100644
index 4083233197539f8808426efd387608c36a6431fb..0000000000000000000000000000000000000000
--- a/spaces/mrneuralnet/P-DFD/file_picker.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""FilePicker for streamlit.
-There still doesn't seem to be a good built-in way to select files to process from the server Streamlit is running on.
-Here's a reasonably functional workaround.
-Usage:
-```
-import streamlit as st
-from file_picker import st_file_selector
-tif_file = st_file_selector(st, key='tif', label='Choose tif file')
-```
-"""
-
-import os
-import streamlit as st
-
-def update_dir(key):
- choice = st.session_state[key]
- if os.path.isdir(os.path.join(st.session_state[key+'curr_dir'], choice)):
- st.session_state[key+'curr_dir'] = os.path.normpath(os.path.join(st.session_state[key+'curr_dir'], choice))
- files = sorted(os.listdir(st.session_state[key+'curr_dir']))
- if "images" in files:
- files.remove("images")
- st.session_state[key+'files'] = files
-
-def st_file_selector(st_placeholder, path='.', label='Select a file/folder', key = 'selected'):
- if key+'curr_dir' not in st.session_state:
- base_path = '.' if path is None or path == '' else path
- base_path = base_path if os.path.isdir(base_path) else os.path.dirname(base_path)
- base_path = '.' if base_path is None or base_path == '' else base_path
-
- files = sorted(os.listdir(base_path))
- files.insert(0, 'Choose a file...')
- if "images" in files:
- files.remove("images")
- st.session_state[key+'files'] = files
- st.session_state[key+'curr_dir'] = base_path
- else:
- base_path = st.session_state[key+'curr_dir']
-
- selected_file = st_placeholder.selectbox(label=label,
- options=st.session_state[key+'files'],
- key=key,
- on_change = lambda: update_dir(key))
-
- if selected_file == "Choose a file...":
- return None
-
- return selected_file
\ No newline at end of file
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/data/collaters.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/data/collaters.py
deleted file mode 100644
index 6acfec876b87e5a00bc92083b1181301a2a18e3f..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/data/collaters.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-    This module contains a collection of classes which implement
-    collate functionality for various tasks.
-
-    Collaters should know what data to expect for each sample
-    and should pack / collate them into batches.
-"""
-
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import numpy as np
-import torch
-from fairseq.data import data_utils as fairseq_data_utils
-
-
-class Seq2SeqCollater(object):
- """
-    Implements a collate function mainly for seq2seq tasks.
-    Each sample is expected to contain features (src_tokens) and
-    targets.
-    This collater is also used for the aligned training task.
- """
-
- def __init__(
- self,
- feature_index=0,
- label_index=1,
- pad_index=1,
- eos_index=2,
- move_eos_to_beginning=True,
- ):
- self.feature_index = feature_index
- self.label_index = label_index
- self.pad_index = pad_index
- self.eos_index = eos_index
- self.move_eos_to_beginning = move_eos_to_beginning
-
- def _collate_frames(self, frames):
- """Convert a list of 2d frames into a padded 3d tensor
- Args:
- frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
- length of i-th frame and f_dim is static dimension of features
- Returns:
- 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
- """
- len_max = max(frame.size(0) for frame in frames)
- f_dim = frames[0].size(1)
- res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
-
- for i, v in enumerate(frames):
- res[i, : v.size(0)] = v
-
- return res
-
- def collate(self, samples):
- """
-        Utility function to collate samples into a batch for speech recognition.
- """
- if len(samples) == 0:
- return {}
-
- # parse samples into torch tensors
- parsed_samples = []
- for s in samples:
- # skip invalid samples
- if s["data"][self.feature_index] is None:
- continue
- source = s["data"][self.feature_index]
- if isinstance(source, (np.ndarray, np.generic)):
- source = torch.from_numpy(source)
- target = s["data"][self.label_index]
- if isinstance(target, (np.ndarray, np.generic)):
- target = torch.from_numpy(target).long()
- elif isinstance(target, list):
- target = torch.LongTensor(target)
-
- parsed_sample = {"id": s["id"], "source": source, "target": target}
- parsed_samples.append(parsed_sample)
- samples = parsed_samples
-
- id = torch.LongTensor([s["id"] for s in samples])
- frames = self._collate_frames([s["source"] for s in samples])
- # sort samples by descending number of frames
- frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
- frames_lengths, sort_order = frames_lengths.sort(descending=True)
- id = id.index_select(0, sort_order)
- frames = frames.index_select(0, sort_order)
-
- target = None
- target_lengths = None
- prev_output_tokens = None
- if samples[0].get("target", None) is not None:
- ntokens = sum(len(s["target"]) for s in samples)
- target = fairseq_data_utils.collate_tokens(
- [s["target"] for s in samples],
- self.pad_index,
- self.eos_index,
- left_pad=False,
- move_eos_to_beginning=False,
- )
- target = target.index_select(0, sort_order)
- target_lengths = torch.LongTensor(
- [s["target"].size(0) for s in samples]
- ).index_select(0, sort_order)
- prev_output_tokens = fairseq_data_utils.collate_tokens(
- [s["target"] for s in samples],
- self.pad_index,
- self.eos_index,
- left_pad=False,
- move_eos_to_beginning=self.move_eos_to_beginning,
- )
- prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
- else:
- ntokens = sum(len(s["source"]) for s in samples)
-
- batch = {
- "id": id,
- "ntokens": ntokens,
- "net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
- "target": target,
- "target_lengths": target_lengths,
- "nsentences": len(samples),
- }
- if prev_output_tokens is not None:
- batch["net_input"]["prev_output_tokens"] = prev_output_tokens
- return batch
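A small sketch of the input the collater above expects; the feature dimension, lengths, and token ids are illustrative assumptions:

```python
import torch

collater = Seq2SeqCollater(feature_index=0, label_index=1, pad_index=1, eos_index=2)
samples = [
    {"id": 0, "data": [torch.randn(120, 80), [5, 6, 7, 2]]},  # 120 frames of 80-dim features
    {"id": 1, "data": [torch.randn(95, 80), [8, 9, 2]]},
]
batch = collater.collate(samples)
# batch["net_input"]["src_tokens"] is a (2, 120, 80) zero-padded tensor sorted by
# descending frame count; "prev_output_tokens" has the EOS token moved to the front.
```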
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/infer.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/infer.py
deleted file mode 100644
index 6e9a878af46242ced57cfcd0e876a3d2ef3820ae..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_recognition/infer.py
+++ /dev/null
@@ -1,427 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Run inference for pre-processed data with a trained model.
-"""
-
-import ast
-import logging
-import math
-import os
-import sys
-
-import editdistance
-import numpy as np
-import torch
-from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
-from fairseq.data.data_utils import post_process
-from fairseq.logging.meters import StopwatchMeter, TimeMeter
-
-
-logging.basicConfig()
-logging.root.setLevel(logging.INFO)
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-
-def add_asr_eval_argument(parser):
- parser.add_argument("--kspmodel", default=None, help="sentence piece model")
- parser.add_argument(
-        "--wfstlm", default=None, help="wfstlm on dictionary output units"
- )
- parser.add_argument(
- "--rnnt_decoding_type",
- default="greedy",
-        help="RNN-T decoding type",
- )
- try:
- parser.add_argument(
- "--lm-weight",
- "--lm_weight",
- type=float,
- default=0.2,
- help="weight for lm while interpolating with neural score",
- )
- except:
- pass
- parser.add_argument(
- "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
- )
- parser.add_argument(
- "--w2l-decoder",
- choices=["viterbi", "kenlm", "fairseqlm"],
- help="use a w2l decoder",
- )
- parser.add_argument("--lexicon", help="lexicon for w2l decoder")
- parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
- parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
- parser.add_argument("--beam-threshold", type=float, default=25.0)
- parser.add_argument("--beam-size-token", type=float, default=100)
- parser.add_argument("--word-score", type=float, default=1.0)
- parser.add_argument("--unk-weight", type=float, default=-math.inf)
- parser.add_argument("--sil-weight", type=float, default=0.0)
- parser.add_argument(
- "--dump-emissions",
- type=str,
- default=None,
- help="if present, dumps emissions into this file and exits",
- )
- parser.add_argument(
- "--dump-features",
- type=str,
- default=None,
- help="if present, dumps features into this file and exits",
- )
- parser.add_argument(
- "--load-emissions",
- type=str,
- default=None,
- help="if present, loads emissions from this file",
- )
- return parser
-
-
-def check_args(args):
- # assert args.path is not None, "--path required for generation!"
- # assert args.results_path is not None, "--results_path required for generation!"
- assert (
- not args.sampling or args.nbest == args.beam
- ), "--sampling requires --nbest to be equal to --beam"
- assert (
- args.replace_unk is None or args.raw_text
- ), "--replace-unk requires a raw text dataset (--raw-text)"
-
-
-def get_dataset_itr(args, task, models):
- return task.get_batch_iterator(
- dataset=task.dataset(args.gen_subset),
- max_tokens=args.max_tokens,
- max_sentences=args.batch_size,
- max_positions=(sys.maxsize, sys.maxsize),
- ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
- required_batch_size_multiple=args.required_batch_size_multiple,
- num_shards=args.num_shards,
- shard_id=args.shard_id,
- num_workers=args.num_workers,
- data_buffer_size=args.data_buffer_size,
- ).next_epoch_itr(shuffle=False)
-
-
-def process_predictions(
- args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
-):
- for hypo in hypos[: min(len(hypos), args.nbest)]:
- hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
-
- if "words" in hypo:
- hyp_words = " ".join(hypo["words"])
- else:
- hyp_words = post_process(hyp_pieces, args.post_process)
-
- if res_files is not None:
- print(
- "{} ({}-{})".format(hyp_pieces, speaker, id),
- file=res_files["hypo.units"],
- )
- print(
- "{} ({}-{})".format(hyp_words, speaker, id),
- file=res_files["hypo.words"],
- )
-
- tgt_pieces = tgt_dict.string(target_tokens)
- tgt_words = post_process(tgt_pieces, args.post_process)
-
- if res_files is not None:
- print(
- "{} ({}-{})".format(tgt_pieces, speaker, id),
- file=res_files["ref.units"],
- )
- print(
- "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
- )
-
- if not args.quiet:
- logger.info("HYPO:" + hyp_words)
- logger.info("TARGET:" + tgt_words)
- logger.info("___________________")
-
- hyp_words = hyp_words.split()
- tgt_words = tgt_words.split()
- return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
-
-
-def prepare_result_files(args):
- def get_res_file(file_prefix):
- if args.num_shards > 1:
- file_prefix = f"{args.shard_id}_{file_prefix}"
- path = os.path.join(
- args.results_path,
- "{}-{}-{}.txt".format(
- file_prefix, os.path.basename(args.path), args.gen_subset
- ),
- )
- return open(path, "w", buffering=1)
-
- if not args.results_path:
- return None
-
- return {
- "hypo.words": get_res_file("hypo.word"),
- "hypo.units": get_res_file("hypo.units"),
- "ref.words": get_res_file("ref.word"),
- "ref.units": get_res_file("ref.units"),
- }
-
-
-def optimize_models(args, use_cuda, models):
- """Optimize ensemble for generation"""
- for model in models:
- model.make_generation_fast_(
- beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
- need_attn=args.print_alignment,
- )
- if args.fp16:
- model.half()
- if use_cuda:
- model.cuda()
-
-
-class ExistingEmissionsDecoder(object):
- def __init__(self, decoder, emissions):
- self.decoder = decoder
- self.emissions = emissions
-
- def generate(self, models, sample, **unused):
- ids = sample["id"].cpu().numpy()
- try:
- emissions = np.stack(self.emissions[ids])
- except:
- print([x.shape for x in self.emissions[ids]])
- raise Exception("invalid sizes")
- emissions = torch.from_numpy(emissions)
- return self.decoder.decode(emissions)
-
-
-def main(args, task=None, model_state=None):
- check_args(args)
-
- if args.max_tokens is None and args.batch_size is None:
- args.max_tokens = 4000000
- logger.info(args)
-
- use_cuda = torch.cuda.is_available() and not args.cpu
-
- logger.info("| decoding with criterion {}".format(args.criterion))
-
- task = tasks.setup_task(args)
-
- # Load ensemble
- if args.load_emissions:
- models, criterions = [], []
- task.load_dataset(args.gen_subset)
- else:
- logger.info("| loading model(s) from {}".format(args.path))
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
- utils.split_paths(args.path, separator="\\"),
- arg_overrides=ast.literal_eval(args.model_overrides),
- task=task,
- suffix=args.checkpoint_suffix,
- strict=(args.checkpoint_shard_count == 1),
- num_shards=args.checkpoint_shard_count,
- state=model_state,
- )
- optimize_models(args, use_cuda, models)
- task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
-
-
- # Set dictionary
- tgt_dict = task.target_dictionary
-
- logger.info(
- "| {} {} {} examples".format(
- args.data, args.gen_subset, len(task.dataset(args.gen_subset))
- )
- )
-
- # hack to pass transitions to W2lDecoder
- if args.criterion == "asg_loss":
- raise NotImplementedError("asg_loss is currently not supported")
- # trans = criterions[0].asg.trans.data
- # args.asg_transitions = torch.flatten(trans).tolist()
-
- # Load dataset (possibly sharded)
- itr = get_dataset_itr(args, task, models)
-
- # Initialize generator
- gen_timer = StopwatchMeter()
-
- def build_generator(args):
- w2l_decoder = getattr(args, "w2l_decoder", None)
- if w2l_decoder == "viterbi":
- from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
-
- return W2lViterbiDecoder(args, task.target_dictionary)
- elif w2l_decoder == "kenlm":
- from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
-
- return W2lKenLMDecoder(args, task.target_dictionary)
- elif w2l_decoder == "fairseqlm":
- from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
-
- return W2lFairseqLMDecoder(args, task.target_dictionary)
- else:
- print(
- "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
- )
-
- # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
- generator = build_generator(args)
-
- if args.load_emissions:
- generator = ExistingEmissionsDecoder(
- generator, np.load(args.load_emissions, allow_pickle=True)
- )
- logger.info("loaded emissions from " + args.load_emissions)
-
- num_sentences = 0
-
- if args.results_path is not None and not os.path.exists(args.results_path):
- os.makedirs(args.results_path)
-
- max_source_pos = (
- utils.resolve_max_positions(
- task.max_positions(), *[model.max_positions() for model in models]
- ),
- )
-
- if max_source_pos is not None:
- max_source_pos = max_source_pos[0]
- if max_source_pos is not None:
- max_source_pos = max_source_pos[0] - 1
-
- if args.dump_emissions:
- emissions = {}
- if args.dump_features:
- features = {}
- models[0].bert.proj = None
- else:
- res_files = prepare_result_files(args)
- errs_t = 0
- lengths_t = 0
- with progress_bar.build_progress_bar(args, itr) as t:
- wps_meter = TimeMeter()
- for sample in t:
- sample = utils.move_to_cuda(sample) if use_cuda else sample
- if "net_input" not in sample:
- continue
-
- prefix_tokens = None
- if args.prefix_size > 0:
- prefix_tokens = sample["target"][:, : args.prefix_size]
-
- gen_timer.start()
- if args.dump_emissions:
- with torch.no_grad():
- encoder_out = models[0](**sample["net_input"])
- emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
- emm = emm.transpose(0, 1).cpu().numpy()
- for i, id in enumerate(sample["id"]):
- emissions[id.item()] = emm[i]
- continue
- elif args.dump_features:
- with torch.no_grad():
- encoder_out = models[0](**sample["net_input"])
- feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
- for i, id in enumerate(sample["id"]):
- padding = (
- encoder_out["encoder_padding_mask"][i].cpu().numpy()
- if encoder_out["encoder_padding_mask"] is not None
- else None
- )
- features[id.item()] = (feat[i], padding)
- continue
- hypos = task.inference_step(generator, models, sample, prefix_tokens)
- num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
- gen_timer.stop(num_generated_tokens)
-
- for i, sample_id in enumerate(sample["id"].tolist()):
- speaker = None
- # id = task.dataset(args.gen_subset).ids[int(sample_id)]
- id = sample_id
- toks = (
- sample["target"][i, :]
- if "target_label" not in sample
- else sample["target_label"][i, :]
- )
- target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
- # Process top predictions
- errs, length = process_predictions(
- args,
- hypos[i],
- None,
- tgt_dict,
- target_tokens,
- res_files,
- speaker,
- id,
- )
- errs_t += errs
- lengths_t += length
-
- wps_meter.update(num_generated_tokens)
- t.log({"wps": round(wps_meter.avg)})
- num_sentences += (
- sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
- )
-
- wer = None
- if args.dump_emissions:
- emm_arr = []
- for i in range(len(emissions)):
- emm_arr.append(emissions[i])
- np.save(args.dump_emissions, emm_arr)
- logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
- elif args.dump_features:
- feat_arr = []
- for i in range(len(features)):
- feat_arr.append(features[i])
- np.save(args.dump_features, feat_arr)
-        logger.info(f"saved {len(features)} features to {args.dump_features}")
- else:
- if lengths_t > 0:
- wer = errs_t * 100.0 / lengths_t
- logger.info(f"WER: {wer}")
-
- logger.info(
- "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
- "sentences/s, {:.2f} tokens/s)".format(
- num_sentences,
- gen_timer.n,
- gen_timer.sum,
- num_sentences / gen_timer.sum,
- 1.0 / gen_timer.avg,
- )
- )
- logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
- return task, wer
-
-
-def make_parser():
- parser = options.get_generation_parser()
- parser = add_asr_eval_argument(parser)
- return parser
-
-
-def cli_main():
- parser = make_parser()
- args = options.parse_args_and_arch(parser)
- main(args)
-
-
-if __name__ == "__main__":
- cli_main()
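The script accumulates word-level edit distances and reference lengths across the whole dataset before reporting WER; a self-contained sketch of that bookkeeping (the sentences are made up):

```python
import editdistance

hyp_words = "the cat sat on the mat".split()
tgt_words = "the cat sat on a mat".split()

errs = editdistance.eval(hyp_words, tgt_words)  # word-level edit distance, as in process_predictions()
wer = errs * 100.0 / len(tgt_words)             # same formula as errs_t * 100.0 / lengths_t in main()
print(f"WER: {wer:.2f}")                        # 16.67 for this pair
```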
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py b/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py
deleted file mode 100644
index 2c87445d810cd790f887d1a135287a334cbdf223..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import logging
-import os
-
-import numpy as np
-
-import joblib
-from examples.textless_nlp.gslm.speech2unit.clustering.utils import (
- get_audio_files,
-)
-from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
- get_features,
-)
-
-
-def get_logger():
- log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
- logging.basicConfig(format=log_format, level=logging.INFO)
- logger = logging.getLogger(__name__)
- return logger
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description="Quantize using K-means clustering over acoustic features."
- )
- parser.add_argument(
- "--feature_type",
- type=str,
- choices=["logmel", "hubert", "w2v2", "cpc"],
- default=None,
- required=True,
- help="Acoustic feature type",
- )
- parser.add_argument(
- "--acoustic_model_path",
- type=str,
- help="Pretrained acoustic model checkpoint"
- )
- parser.add_argument(
- "--layer",
- type=int,
- help="The layer of the pretrained model to extract features from",
- default=-1,
- )
- parser.add_argument(
- "--kmeans_model_path",
- type=str,
- required=True,
- help="K-means model file path to use for inference",
- )
- parser.add_argument(
- "--features_path",
- type=str,
- default=None,
- help="Features file path. You don't need to enter acoustic model details if you have dumped features",
- )
- parser.add_argument(
- "--manifest_path",
- type=str,
- default=None,
- help="Manifest file containing the root dir and file names",
- )
- parser.add_argument(
- "--out_quantized_file_path",
- required=True,
- type=str,
- help="File path of quantized output.",
- )
- parser.add_argument(
-        "--extension", type=str, default=".flac", help="Audio file extension"
- )
- return parser
-
-
-def main(args, logger):
- # Feature extraction
- if args.features_path is not None:
- logger.info(f"Loading acoustic features from {args.features_path}...")
- features_batch = np.load(args.features_path)
- else:
- logger.info(f"Extracting {args.feature_type} acoustic features...")
- features_batch = get_features(
- feature_type=args.feature_type,
- checkpoint_path=args.acoustic_model_path,
- layer=args.layer,
- manifest_path=args.manifest_path,
- sample_pct=1.0,
- flatten=False,
- )
- logger.info(
- f"Features extracted for {len(features_batch)} utterances.\n"
- )
- logger.info(
- f"Dimensionality of representation = {features_batch[0].shape[1]}"
- )
-
- # K-means model
- logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
- kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
- kmeans_model.verbose = False
-
- _, fnames, _ = get_audio_files(args.manifest_path)
-
- os.makedirs(os.path.dirname(args.out_quantized_file_path), exist_ok=True)
- print(f"Writing quantized predictions to {args.out_quantized_file_path}")
- with open(args.out_quantized_file_path, "w") as fout:
- for i, feats in enumerate(features_batch):
- pred = kmeans_model.predict(feats)
- pred_str = " ".join(str(p) for p in pred)
- base_fname = os.path.basename(fnames[i]).rstrip(args.extension)
- fout.write(f"{base_fname}|{pred_str}\n")
-
-
-if __name__ == "__main__":
- parser = get_parser()
- args = parser.parse_args()
- logger = get_logger()
- logger.info(args)
- main(args, logger)
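The quantization step itself reduces to a per-frame `predict()` on a fitted k-means model. A minimal sketch, assuming a joblib-serialized scikit-learn `KMeans` saved as `kmeans.bin` and 768-dimensional features (both assumptions):

```python
import joblib
import numpy as np

kmeans_model = joblib.load("kmeans.bin")               # hypothetical path to a fitted model
feats = np.random.randn(200, 768).astype(np.float32)   # stand-in for one utterance's features
pred = kmeans_model.predict(feats)                     # one cluster id per frame
print(" ".join(str(p) for p in pred))                  # the "<id> <id> ..." line format written above
```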
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/tasks/simultaneous_translation.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/tasks/simultaneous_translation.py
deleted file mode 100644
index 11c7dc1ea966a54f8915ef164377e40f90e851a1..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/tasks/simultaneous_translation.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from fairseq.tasks import register_task
-from fairseq.tasks.speech_to_text import SpeechToTextTask
-from fairseq.tasks.translation import (
- TranslationTask, TranslationConfig
-)
-
-try:
- import examples.simultaneous_translation # noqa
- import_successful = True
-except BaseException:
- import_successful = False
-
-
-logger = logging.getLogger(__name__)
-
-
-def check_import(flag):
- if not flag:
- raise ImportError(
- "'examples.simultaneous_translation' is not correctly imported. "
-            "Please consider running `pip install -e $FAIRSEQ_DIR`."
- )
-
-
-@register_task("simul_speech_to_text")
-class SimulSpeechToTextTask(SpeechToTextTask):
- def __init__(self, args, tgt_dict):
- check_import(import_successful)
- super().__init__(args, tgt_dict)
-
-
-@register_task("simul_text_to_text", dataclass=TranslationConfig)
-class SimulTextToTextTask(TranslationTask):
- def __init__(self, cfg, src_dict, tgt_dict):
- check_import(import_successful)
- super().__init__(cfg, src_dict, tgt_dict)
diff --git a/spaces/mthsk/sovits-models/inference/slicer.py b/spaces/mthsk/sovits-models/inference/slicer.py
deleted file mode 100644
index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000
--- a/spaces/mthsk/sovits-models/inference/slicer.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import librosa
-import torch
-import torchaudio
-
-
-class Slicer:
- def __init__(self,
- sr: int,
- threshold: float = -40.,
- min_length: int = 5000,
- min_interval: int = 300,
- hop_size: int = 20,
- max_sil_kept: int = 5000):
- if not min_length >= min_interval >= hop_size:
- raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
- if not max_sil_kept >= hop_size:
- raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
- min_interval = sr * min_interval / 1000
- self.threshold = 10 ** (threshold / 20.)
- self.hop_size = round(sr * hop_size / 1000)
- self.win_size = min(round(min_interval), 4 * self.hop_size)
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
- self.min_interval = round(min_interval / self.hop_size)
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
- def _apply_slice(self, waveform, begin, end):
- if len(waveform.shape) > 1:
- return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
- else:
- return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
-
- # @timeit
- def slice(self, waveform):
- if len(waveform.shape) > 1:
- samples = librosa.to_mono(waveform)
- else:
- samples = waveform
- if samples.shape[0] <= self.min_length:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
- sil_tags = []
- silence_start = None
- clip_start = 0
- for i, rms in enumerate(rms_list):
- # Keep looping while frame is silent.
- if rms < self.threshold:
- # Record start of silent frames.
- if silence_start is None:
- silence_start = i
- continue
- # Keep looping while frame is not silent and silence start has not been recorded.
- if silence_start is None:
- continue
- # Clear recorded silence start if interval is not enough or clip is too short
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
- need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
- if not is_leading_silence and not need_slice_middle:
- silence_start = None
- continue
- # Need slicing. Record the range of silent frames to be removed.
- if i - silence_start <= self.max_sil_kept:
- pos = rms_list[silence_start: i + 1].argmin() + silence_start
- if silence_start == 0:
- sil_tags.append((0, pos))
- else:
- sil_tags.append((pos, pos))
- clip_start = pos
- elif i - silence_start <= self.max_sil_kept * 2:
- pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
- pos += i - self.max_sil_kept
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- clip_start = pos_r
- else:
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
- clip_start = max(pos_r, pos)
- else:
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- else:
- sil_tags.append((pos_l, pos_r))
- clip_start = pos_r
- silence_start = None
- # Deal with trailing silence.
- total_frames = rms_list.shape[0]
- if silence_start is not None and total_frames - silence_start >= self.min_interval:
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
- pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
- sil_tags.append((pos, total_frames + 1))
- # Apply and return slices.
- if len(sil_tags) == 0:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- else:
- chunks = []
-            # The first silent stretch does not start at the beginning, so add the leading voiced chunk
- if sil_tags[0][0]:
- chunks.append(
- {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
- for i in range(0, len(sil_tags)):
-                # Mark the voiced chunk between silences (skipped on the first tag)
- if i:
- chunks.append({"slice": False,
- "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
-                # Mark every silent chunk
- chunks.append({"slice": True,
- "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
-            # The last silent stretch does not reach the end, so add the trailing voiced chunk
- if sil_tags[-1][1] * self.hop_size < len(waveform):
- chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
- chunk_dict = {}
- for i in range(len(chunks)):
- chunk_dict[str(i)] = chunks[i]
- return chunk_dict
-
-
-def cut(audio_path, db_thresh=-30, min_len=5000):
- audio, sr = librosa.load(audio_path, sr=None)
- slicer = Slicer(
- sr=sr,
- threshold=db_thresh,
- min_length=min_len
- )
- chunks = slicer.slice(audio)
- return chunks
-
-
-def chunks2audio(audio_path, chunks):
- chunks = dict(chunks)
- audio, sr = torchaudio.load(audio_path)
- if len(audio.shape) == 2 and audio.shape[1] >= 2:
- audio = torch.mean(audio, dim=0).unsqueeze(0)
- audio = audio.cpu().numpy()[0]
- result = []
- for k, v in chunks.items():
- tag = v["split_time"].split(",")
- if tag[0] != tag[1]:
- result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
- return result, sr
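A short sketch of driving the two helpers above; the input path and the `soundfile` dependency used to write the segments are assumptions:

```python
import soundfile as sf  # assumed here only for writing the slices back to disk

chunks = cut("input.wav", db_thresh=-30, min_len=5000)
segments, sr = chunks2audio("input.wav", chunks)

for i, (is_silence, samples) in enumerate(segments):
    if not is_silence:  # chunks tagged "slice": True are the detected silent spans
        sf.write(f"segment_{i}.wav", samples, sr)
```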
diff --git a/spaces/mya-mya/SengaFiller/frontend.py b/spaces/mya-mya/SengaFiller/frontend.py
deleted file mode 100644
index 7991115e4cbf1e90a747f8b72336d29fee247036..0000000000000000000000000000000000000000
--- a/spaces/mya-mya/SengaFiller/frontend.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from backend import SengaFiller
-def init(sengafiller:SengaFiller):
- from gradio import Blocks, Markdown, Image, Row, Button, Box
-
- with Blocks() as app:
-
- # Prepare Components
- Markdown(
- """# SengaFiller
- Connects the lines you draw so that you can fill your drawing correctly.
- """
- )
- with Box().style(rounded=True, margin=True):
- input_image = Image(label="input",image_mode="L",type="pil")
- with Box().style(border=False):
- with Row().style(equal_height=True):
- submit_button = Button("RUN", variant="primary").style(
- full_width=True, rounded=(True, False, False, True)
- )
- clear_button = Button("CLEAR").style(
- full_width=True, rounded=(False, True, True, False)
- )
- output_image = Image(label="output")
- Markdown(
- """
- ### Credit
- The model `model1.h5` is licensed under a CC-BY-NC-SA 4.0 international license, created by [hepesu](https://github.com/hepesu) and available on [Release Page of LineCloser Repo](https://github.com/hepesu/LineCloser/releases)
- """
- )
-
- # Event Handlers
- def on_submit_button_click(input_image_data):
- return sengafiller.run(input_image_data)
- def on_clear_button_click():
- return None,None
-
- # Connect Components
- submit_button.click(
- fn=on_submit_button_click, inputs=[input_image], outputs=[output_image]
- )
- clear_button.click(
- fn=on_clear_button_click,inputs=[],outputs=[input_image,output_image]
- )
- app.launch()
-
-
-if __name__ == "__main__":
- init()
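The `__main__` guard above calls `init()` without the required `sengafiller` argument. A hypothetical entry point that satisfies the signature, assuming `SengaFiller` can be constructed without arguments (its real constructor is not shown here):

```python
from backend import SengaFiller
from frontend import init

# Assumption: SengaFiller() takes no constructor arguments (e.g. it loads model1.h5 itself).
init(SengaFiller())
```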
diff --git a/spaces/mygyasir/genious_bgremover/carvekit/utils/download_models.py b/spaces/mygyasir/genious_bgremover/carvekit/utils/download_models.py
deleted file mode 100644
index b1b52adfa2ba66b2ed88a6a231912c259b60df48..0000000000000000000000000000000000000000
--- a/spaces/mygyasir/genious_bgremover/carvekit/utils/download_models.py
+++ /dev/null
@@ -1,214 +0,0 @@
-"""
-Source url: https://github.com/OPHoperHPO/image-background-remove-tool
-Author: Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO].
-License: Apache License 2.0
-"""
-import hashlib
-import os
-import warnings
-from abc import ABCMeta, abstractmethod, ABC
-from pathlib import Path
-from typing import Optional
-
-import carvekit
-from carvekit.ml.files import checkpoints_dir
-
-import requests
-import tqdm
-
-requests = requests.Session()
-requests.headers.update({"User-Agent": f"Carvekit/{carvekit.version}"})
-
-MODELS_URLS = {
- "basnet.pth": {
- "repository": "Carve/basnet-universal",
- "revision": "870becbdb364fda6d8fdb2c10b072542f8d08701",
- "filename": "basnet.pth",
- },
- "deeplab.pth": {
- "repository": "Carve/deeplabv3-resnet101",
- "revision": "d504005392fc877565afdf58aad0cd524682d2b0",
- "filename": "deeplab.pth",
- },
- "fba_matting.pth": {
- "repository": "Carve/fba",
- "revision": "a5d3457df0fb9c88ea19ed700d409756ca2069d1",
- "filename": "fba_matting.pth",
- },
- "u2net.pth": {
- "repository": "Carve/u2net-universal",
- "revision": "10305d785481cf4b2eee1d447c39cd6e5f43d74b",
- "filename": "full_weights.pth",
- },
- "tracer_b7.pth": {
- "repository": "Carve/tracer_b7",
- "revision": "d8a8fd9e7b3fa0d2f1506fe7242966b34381e9c5",
- "filename": "tracer_b7.pth",
- },
- "tracer_hair.pth": {
- "repository": "Carve/tracer_b7",
- "revision": "d8a8fd9e7b3fa0d2f1506fe7242966b34381e9c5",
- "filename": "tracer_b7.pth", # TODO don't forget change this link!!
- },
-}
-
-MODELS_CHECKSUMS = {
- "basnet.pth": "e409cb709f4abca87cb11bd44a9ad3f909044a917977ab65244b4c94dd33"
- "8b1a37755c4253d7cb54526b7763622a094d7b676d34b5e6886689256754e5a5e6ad",
- "deeplab.pth": "9c5a1795bc8baa267200a44b49ac544a1ba2687d210f63777e4bd715387324469a59b072f8a28"
- "9cc471c637b367932177e5b312e8ea6351c1763d9ff44b4857c",
- "fba_matting.pth": "890906ec94c1bfd2ad08707a63e4ccb0955d7f5d25e32853950c24c78"
- "4cbad2e59be277999defc3754905d0f15aa75702cdead3cfe669ff72f08811c52971613",
- "u2net.pth": "16f8125e2fedd8c85db0e001ee15338b4aa2fda77bab8ba70c25e"
- "bea1533fda5ee70a909b934a9bd495b432cef89d629f00a07858a517742476fa8b346de24f7",
- "tracer_b7.pth": "c439c5c12d4d43d5f9be9ec61e68b2e54658a541bccac2577ef5a54fb252b6e8415d41f7e"
- "c2487033d0c02b4dd08367958e4e62091318111c519f93e2632be7b",
- "tracer_hair.pth": "5c2fb9973fc42fa6208920ffa9ac233cc2ea9f770b24b4a96969d3449aed7ac89e6d37e"
- "e486a13e63be5499f2df6ccef1109e9e8797d1326207ac89b2f39a7cf",
-}
-
-
-def sha512_checksum_calc(file: Path) -> str:
- """
- Calculates the SHA512 hash digest of a file on fs
-
- Args:
- file: Path to the file
-
- Returns:
- SHA512 hash digest of a file.
- """
- dd = hashlib.sha512()
- with file.open("rb") as f:
- for chunk in iter(lambda: f.read(4096), b""):
- dd.update(chunk)
- return dd.hexdigest()
-
-
-class CachedDownloader:
- __metaclass__ = ABCMeta
-
- @property
- @abstractmethod
- def name(self) -> str:
- return self.__class__.__name__
-
- @property
- @abstractmethod
- def fallback_downloader(self) -> Optional["CachedDownloader"]:
- pass
-
- def download_model(self, file_name: str) -> Path:
- try:
- return self.download_model_base(file_name)
- except BaseException as e:
- if self.fallback_downloader is not None:
- warnings.warn(
- f"Failed to download model from {self.name} downloader."
- f" Trying to download from {self.fallback_downloader.name} downloader."
- )
- return self.fallback_downloader.download_model(file_name)
- else:
- warnings.warn(
- f"Failed to download model from {self.name} downloader."
- f" No fallback downloader available."
- )
- raise e
-
- @abstractmethod
- def download_model_base(self, file_name: str) -> Path:
- """Download model from any source if not cached. Returns path if cached"""
-
- def __call__(self, file_name: str):
- return self.download_model(file_name)
-
-
-class HuggingFaceCompatibleDownloader(CachedDownloader, ABC):
- def __init__(
- self,
- name: str = "Huggingface.co",
- base_url: str = "https://huggingface.co",
- fb_downloader: Optional["CachedDownloader"] = None,
- ):
- self.cache_dir = checkpoints_dir
- self.base_url = base_url
- self._name = name
- self._fallback_downloader = fb_downloader
-
- @property
- def fallback_downloader(self) -> Optional["CachedDownloader"]:
- return self._fallback_downloader
-
- @property
- def name(self):
- return self._name
-
- def check_for_existence(self, file_name: str) -> Optional[Path]:
- if file_name not in MODELS_URLS.keys():
- raise FileNotFoundError("Unknown model!")
- path = (
- self.cache_dir
- / MODELS_URLS[file_name]["repository"].split("/")[1]
- / file_name
- )
-
- if not path.exists():
- return None
-
- if MODELS_CHECKSUMS[path.name] != sha512_checksum_calc(path):
- warnings.warn(
- f"Invalid checksum for model {path.name}. Downloading correct model!"
- )
- os.remove(path)
- return None
- return path
-
- def download_model_base(self, file_name: str) -> Path:
- cached_path = self.check_for_existence(file_name)
- if cached_path is not None:
- return cached_path
- else:
- cached_path = (
- self.cache_dir
- / MODELS_URLS[file_name]["repository"].split("/")[1]
- / file_name
- )
- cached_path.parent.mkdir(parents=True, exist_ok=True)
- url = MODELS_URLS[file_name]
- hugging_face_url = f"{self.base_url}/{url['repository']}/resolve/{url['revision']}/{url['filename']}"
-
- try:
- r = requests.get(hugging_face_url, stream=True, timeout=10)
- if r.status_code < 400:
- with open(cached_path, "wb") as f:
- r.raw.decode_content = True
- for chunk in tqdm.tqdm(
- r,
- desc="Downloading " + cached_path.name + " model",
- colour="blue",
- ):
- f.write(chunk)
- else:
- if r.status_code == 404:
- raise FileNotFoundError(f"Model {file_name} not found!")
- else:
- raise ConnectionError(
- f"Error {r.status_code} while downloading model {file_name}!"
- )
- except BaseException as e:
- if cached_path.exists():
- os.remove(cached_path)
- raise ConnectionError(
- f"Exception caught when downloading model! "
- f"Model name: {cached_path.name}. Exception: {str(e)}."
- )
- return cached_path
-
-
-fallback_downloader: CachedDownloader = HuggingFaceCompatibleDownloader()
-downloader: CachedDownloader = HuggingFaceCompatibleDownloader(
- base_url="https://cdn.carve.photos",
- fb_downloader=fallback_downloader,
- name="Carve CDN",
-)
-downloader._fallback_downloader = fallback_downloader
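A minimal sketch of fetching one of the checkpoints listed in `MODELS_URLS` through the module-level `downloader` above:

```python
# Tries the Carve CDN first and falls back to huggingface.co; the file is cached under
# carvekit's checkpoints_dir and checksum-checked on later lookups.
weights_path = downloader("u2net.pth")
print(weights_path)
```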
diff --git a/spaces/nagauta/mediapipe-hair-segmentation/app.py b/spaces/nagauta/mediapipe-hair-segmentation/app.py
deleted file mode 100644
index d8bbc8f2b3b1a64089b0d26dad61f5a5c28586fd..0000000000000000000000000000000000000000
--- a/spaces/nagauta/mediapipe-hair-segmentation/app.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import gradio as gr
-import cv2
-import math
-import numpy as np
-import os
-import mediapipe as mp
-
-from mediapipe.tasks import python
-from mediapipe.tasks.python import vision
-
-print("hello world")
-# Height and width that will be used by the model
-DESIRED_HEIGHT = 480
-DESIRED_WIDTH = 480
-
-# Performs resizing and showing the image
-def resize_and_show(image):
- h, w = image.shape[:2]
- if h < w:
- img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h/(w/DESIRED_WIDTH))))
- else:
- img = cv2.resize(image, (math.floor(w/(h/DESIRED_HEIGHT)), DESIRED_HEIGHT))
- cv2.imshow('color', img)
- cv2.waitKey(1000)
- cv2.destroyAllWindows()
-
-def segmentate(filepath):
- BG_COLOR = (192, 192, 192) # gray
- MASK_COLOR = (255, 255, 255) # white
-
- # Create the options that will be used for ImageSegmenter
- base_options = python.BaseOptions(model_asset_path='./hair_segmenter.tflite')
- options = vision.ImageSegmenterOptions(base_options=base_options,output_category_mask=True)
-
- # Create the image segmenter
- with vision.ImageSegmenter.create_from_options(options) as segmenter:
-
- # Loop through demo image(s)
-
- # Create the MediaPipe image file that will be segmented
- image = mp.Image.create_from_file(filepath)
-
- # Retrieve the masks for the segmented image
- segmentation_result = segmenter.segment(image)
- category_mask = segmentation_result.category_mask
-
- # Generate solid color images for showing the output segmentation mask.
- image_data = image.numpy_view()
- fg_image = np.zeros(image_data.shape, dtype=np.uint8)
- fg_image[:] = MASK_COLOR
- bg_image = np.zeros(image_data.shape, dtype=np.uint8)
- bg_image[:] = BG_COLOR
-
- condition = np.stack((category_mask.numpy_view(),) * 3, axis=-1) > 0.2
- output_image = np.where(condition, fg_image, bg_image)
-
- # print(f'Segmentation mask of {name}:')
- # resize_and_show(output_image)
- return output_image
-
-# GUI
-title = 'mediapipe hair segmentation'
-description = 'hair segmentation using mediapipe'
-examples = [[f'examples/{name}', 3] for name in sorted(os.listdir('examples'))]
-
-iface = gr.Interface(
- fn=segmentate,
- inputs=[
- gr.Image(type='filepath', label='Input Image')
- ],
- outputs=[
- gr.Image(label='image segmentated')
- ],
- examples=examples,
- allow_flagging='never',
- cache_examples=False,
- title=title,
- description=description
-)
-iface.launch()
\ No newline at end of file
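A hypothetical direct call to `segmentate()` above, bypassing the Gradio UI; the example image path is an assumption:

```python
mask = segmentate("examples/portrait.jpg")  # hypothetical image bundled with the Space
# mask has the same height/width as the input: white where hair was detected,
# gray (192, 192, 192) elsewhere.
print(mask.shape, mask.dtype)
```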
diff --git a/spaces/nateraw/quickdraw/app.py b/spaces/nateraw/quickdraw/app.py
deleted file mode 100644
index 8437ee8f4e64645c43e30a37d6ebee396d5f0791..0000000000000000000000000000000000000000
--- a/spaces/nateraw/quickdraw/app.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from pathlib import Path
-
-import torch
-import gradio as gr
-from torch import nn
-
-
-LABELS = Path('class_names.txt').read_text().splitlines()
-
-model = nn.Sequential(
- nn.Conv2d(1, 32, 3, padding='same'),
- nn.ReLU(),
- nn.MaxPool2d(2),
- nn.Conv2d(32, 64, 3, padding='same'),
- nn.ReLU(),
- nn.MaxPool2d(2),
- nn.Conv2d(64, 128, 3, padding='same'),
- nn.ReLU(),
- nn.MaxPool2d(2),
- nn.Flatten(),
- nn.Linear(1152, 256),
- nn.ReLU(),
- nn.Linear(256, len(LABELS)),
-)
-state_dict = torch.load('pytorch_model.bin', map_location='cpu')
-model.load_state_dict(state_dict, strict=False)
-model.eval()
-
-def predict(im):
- x = torch.tensor(im, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.
-
- with torch.no_grad():
- out = model(x)
-
- probabilities = torch.nn.functional.softmax(out[0], dim=0)
-
- values, indices = torch.topk(probabilities, 5)
-
- return {LABELS[i]: v.item() for i, v in zip(indices, values)}
-
-
-interface = gr.Interface(predict, inputs='sketchpad', outputs='label', live=True)
-interface.launch(debug=True)
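A sketch of calling `predict()` above directly. The 28x28 input size is an inference from the architecture rather than something stated in the file: three 2x2 poolings take 28 -> 14 -> 7 -> 3, and 128 * 3 * 3 = 1152 matches the first linear layer:

```python
import numpy as np

canvas = np.zeros((28, 28), dtype=np.uint8)  # a blank sketchpad drawing
top5 = predict(canvas)
print(top5)  # {label: probability} for the five highest-scoring classes
```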
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dotnetbar For Windows Forms Full Crackl.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dotnetbar For Windows Forms Full Crackl.md
deleted file mode 100644
index c6081bbda9f67b7d2a5a972c29f6875840027c25..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dotnetbar For Windows Forms Full Crackl.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-```
-
How to Download and Install Dotnetbar For Windows Forms Full Crackl
-
Dotnetbar For Windows Forms is a set of components that allows you to create professional user interfaces with ease using Visual Studio. It includes many features such as Metro UI controls, Office-style ribbon, docking windows, super grid, calendar, property grid, and more. If you want to use Dotnetbar For Windows Forms in your projects, you need to purchase a license from DevComponents. However, if you are looking for a way to download and install Dotnetbar For Windows Forms full crackl for free, you are in the right place. In this article, we will show you how to get Dotnetbar For Windows Forms full crackl without paying anything.
Step 1: Download Dotnetbar For Windows Forms Full Crackl
-
The first step is to download Dotnetbar For Windows Forms full crackl from a reliable source. There are many websites that claim to offer Dotnetbar For Windows Forms full crackl, but some of them may contain viruses or malware that can harm your computer. Therefore, you need to be careful and choose a trusted website. One of the websites that we recommend is DownloadDevTools. This website provides Dotnetbar For Windows Forms v14.1.0.37 with crack and source code. You can download it by clicking on the link below:
After clicking on the link, you will be redirected to a page where you need to fill in some information such as your name and email address. Then, you will receive a download link in your email. Click on the link and save the file on your computer.
-
Step 2: Install Dotnetbar For Windows Forms Full Crackl
-
The next step is to install Dotnetbar For Windows Forms full crackl on your computer. To do this, follow these steps:
-
-
Extract the downloaded file using WinRAR or any other file compression software.
-
Run the setup.exe file and follow the instructions on the screen.
-
When the installation is complete, do not launch the program yet.
-
Copy the cracked files from the Crack folder and paste them into the installation directory. The default installation directory is C:\Program Files (x86)\DevComponents\DotNetBar2.
-
Replace the original files when prompted.
-
Now you can launch the program and enjoy Dotnetbar For Windows Forms full crackl.
-
-
Step 3: Use Dotnetbar For Windows Forms Full Crackl
-
The final step is to use Dotnetbar For Windows Forms full crackl in your projects. To do this, you need to add a reference to DotNetBar.dll in your Visual Studio project. Then, you can drag and drop the components from the toolbox to your form designer. You can also customize the appearance and behavior of the components using their properties and events. You can find more information about how to use Dotnetbar For Windows Forms in the documentation and samples that are included in the installation package.
-
Conclusion
-
In this article, we have shown you how to download and install Dotnetbar For Windows Forms full crackl for free. We hope that this article has helped you to create professional user interfaces with ease using Visual Studio and Dotnetbar For Windows Forms. However, we also want to remind you that using cracked software is illegal and unethical. If you like Dotnetbar For Windows Forms and want to support its development, you should buy a license from DevComponents. This way, you can get updates, support, and access to new features. Thank you for reading this article and happy coding!
-```
- 81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Hellcats Season 1 720p Or 1080p.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Hellcats Season 1 720p Or 1080p.md
deleted file mode 100644
index 4fe0af924f31ca9aeffc8fdc67de080029e3ba71..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Hellcats Season 1 720p Or 1080p.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
Hellcats Season 1: A Cheerful Comedy Drama Series
-
If you are looking for a fun and entertaining series to watch, you might want to check out Hellcats Season 1. Hellcats is a comedy drama series that follows the lives of a group of college cheerleaders who compete for glory and scholarships. The series stars Aly Michalka as Marti Perkins, a pre-law student who joins the Hellcats team after losing her scholarship, and Ashley Tisdale as Savannah Monroe, the captain of the Hellcats and a devout Christian. The series also features Robbie Jones as Lewis Flynn, a former football player who becomes Marti's love interest, Matt Barr as Dan Patch, Marti's best friend and childhood crush, and Heather Hemmens as Alice Verdura, a fierce and ambitious cheerleader who clashes with Marti.
-
Hellcats Season 1 consists of 22 episodes that aired from September 8, 2010 to May 17, 2011 on The CW. The series was created by Kevin Murphy and based on the book Cheer: Inside the Secret World of College Cheerleaders by Kate Torgovnick May. The series received mixed reviews from critics, but was praised for its energetic performances, catchy music, and positive messages. The series also attracted a loyal fan base, especially among young female viewers. However, the series was canceled after one season due to low ratings.
If you want to watch Hellcats Season 1 online, you have several options. You can stream it on JustWatch[^2^], where you can find out which platforms offer the series in your region. You can also download it from bungee.training[^3^], where you can get each episode in 720p quality with English subtitles. However, be aware that downloading copyrighted content may be illegal in your country, so proceed at your own risk.
-
Whether you choose to stream or download Hellcats Season 1, you are sure to enjoy this cheerful and charming series that will make you laugh, cry, and cheer along with the characters. Hellcats Season 1 is a perfect binge-watch for anyone who loves comedy, drama, and cheerleading.
Hellcats Season 1 features a talented and diverse cast of actors who bring their characters to life. The main cast includes:
-
-
-
Aly Michalka as Marti Perkins, a smart and rebellious pre-law student who joins the Hellcats after losing her scholarship. She is determined to prove herself as a cheerleader and a lawyer, but also struggles with her complicated family and romantic issues.
-
Ashley Tisdale as Savannah Monroe, the captain of the Hellcats and a devout Christian. She is cheerful, optimistic, and loyal, but also has a competitive and perfectionist streak. She clashes with Marti at first, but eventually becomes her friend and roommate.
-
Robbie Jones as Lewis Flynn, a former football player who quits the team after a scandal and joins the Hellcats as a base. He is charming, confident, and athletic, but also has a sensitive and caring side. He develops a romantic relationship with Marti, but faces some obstacles along the way.
-
Matt Barr as Dan Patch, Marti's best friend and childhood crush. He is a filmmaker who works at a local bar and supports Marti's cheerleading endeavors. He is witty, sarcastic, and loyal, but also harbors feelings for Marti that complicate their friendship.
-
Heather Hemmens as Alice Verdura, a fierce and ambitious cheerleader who was the flyer of the Hellcats until she injured her wrist. She is arrogant, manipulative, and ruthless, but also has a vulnerable and insecure side. She sees Marti as a threat and tries to sabotage her at every turn.
-
-
The supporting cast includes:
-
-
Gail O'Grady as Wanda Perkins, Marti's mother who works as a waitress and has a gambling addiction. She loves her daughter but often lets her down with her irresponsible choices.
-
Sharon Leal as Vanessa Lodge, the coach of the Hellcats and a former cheerleader herself. She is strict, professional, and passionate about her job, but also has a personal life that involves a secret affair with a married man.
-
Jeff Hephner as Red Raymond, the new football coach of Lancer University and Vanessa's former lover. He is charismatic, ambitious, and ruthless, but also has a soft spot for Vanessa and the Hellcats.
-
D.B. Woodside as Derrick Altman, the dean of Lancer University and Vanessa's husband. He is supportive, respectful, and successful, but also unaware of Vanessa's affair with Red.
-
Emma Lahana as Charlotte Monroe, Savannah's older sister who ran away from home when she was pregnant. She returns to Savannah's life with her baby daughter and asks for her help.
-
-
Hellcats Season 1 also features guest appearances by various celebrities, such as AJ Michalka (Aly's sister), Ciara (as herself), 3OH!3 (as themselves), Ben Browder (as Bill Marsh), Ryan Kennedy (as Jake Harrow), Jeremy Wong (as Darwin), Elena Esovolova (as Frankie), Gale Harold (as Julian Parrish), Teryl Rothery (as Layne Monroe), Aaron Douglas (as Bill Marsh Jr.), Camille Sullivan (as Emily Watson), Alana Randall (as Morgan Pepper), Robbie Amell (as Charlotte's ex-boyfriend), D.B. Sweeney (as Mr. Perkins), among others.
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Palette Cad 8 PORTABLE Crack Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Palette Cad 8 PORTABLE Crack Download.md
deleted file mode 100644
index 48040928b01199c84716a5a29f7d8bebfffce3c0..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Palette Cad 8 PORTABLE Crack Download.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
Palette CAD 8 Crack Download: Is It Worth It?
-
If you are looking for a 3D design software for your digital interior design projects, you might have heard of Palette CAD 8. This software is designed for craftspeople, specialist retailers, and designers who want to create realistic and stunning designs for various industries. But what if you don't want to pay for the full version of this software? You might be tempted to download a cracked version of Palette CAD 8 from torrent sites or other sources. But is it worth it? In this article, we will explain what Palette CAD 8 is, what a crack software is, what are the risks and disadvantages of using crack software, and what are the alternatives to using crack software. By the end of this article, you will be able to make an informed decision on whether to download Palette CAD 8 crack or not.
What is Palette CAD 8?
-
Palette CAD 8 is a 3D design software that allows you to design and showcase your interior design projects in a realistic and professional way. You can use this software to create floor plans, wall views, 3D views, photo-realistic displays, virtual reality, and animations for your projects. You can also order the materials you need, create blueprints, and send the design data to a CNC machine. Palette CAD 8 has a range of practical and automatic features that make your work easier and more efficient. Some of these features are:
-
-
A large and diverse library of objects, materials, textures, colors, and lighting effects that you can use for your designs.
-
An intuitive and user-friendly interface that allows you to drag and drop objects, edit them, and customize them according to your preferences.
-
A smart assistant that helps you with measurements, calculations, dimensions, angles, and other technical aspects of your design.
-
A powerful rendering engine that produces high-quality images and videos of your design in real time.
-
A VR mode that lets you immerse yourself in your design and experience it as if you were there.
-
An animation mode that lets you create dynamic and interactive presentations of your design with sound effects, music, narration, and transitions.
-
-
Features and benefits of Palette CAD 8
-
Palette CAD 8 is not just a software, but a complete solution for your digital design projects. With this software, you can enjoy the following benefits:
-
-
You can save time and money by designing your projects faster and more accurately.
-
You can impress your clients and customers with realistic and stunning designs that showcase your skills and creativity.
-
You can increase your sales and revenue by offering more services and products to your clients and customers.
-
You can expand your market and reach more potential clients and customers by showcasing your designs online or in VR.
-
You can improve your skills and knowledge by learning from the tutorials, tips, and support provided by Palette CAD.
-
-
System requirements and compatibility of Palette CAD 8
-
To use Palette CAD 8, you need to have a computer that meets the following minimum system requirements:
-
-
Operating system: Windows 10 (64-bit)
-
Processor: Intel Core i5 or equivalent
-
Memory
-
What is a crack software?
-
A crack software is a modified version of a software that bypasses or removes the copy protection or activation mechanism of the original software. A crack software is usually created by hackers or crackers who reverse engineer the software code and manipulate it to disable or override the security features. A crack software is often distributed for free or for a low price on torrent sites, file-sharing platforms, or other online sources. Some examples of crack software are:
-
-
A keygen or a key generator that produces serial numbers or license keys for activating the original software.
-
A patch or a crack file that modifies or replaces the original executable file of the software to remove the activation requirement.
-
A loader or a pre-activated version that runs the software from a different location or environment to avoid detection.
-
-
How does a crack software work?
-
A crack software works by exploiting the vulnerabilities or weaknesses of the original software's protection system. Depending on the type and complexity of the protection system, a crack software may use different methods to bypass or remove it. Some of these methods are:
-
-
-
Changing the registry entries or configuration files of the original software to trick it into thinking that it is activated or registered.
-
Injecting code or data into the memory or process of the original software to alter its behavior or functionality.
-
Emulating or simulating a hardware device, a server, or a network connection that the original software relies on for verification or validation.
-
-
Why do people use crack software?
-
People use crack software for various reasons, but the most common ones are:
-
-
To save money by avoiding paying for the full version of the original software.
-
To test or evaluate the features and performance of the original software before buying it.
-
To access or use the original software that is not available or compatible in their region or device.
-
To bypass or remove the limitations or restrictions imposed by the original software's license agreement or terms of service.
-
-
What are the risks and disadvantages of using crack software?
-
While using crack software may seem tempting and convenient, it also comes with many risks and disadvantages that can outweigh its benefits. Some of these risks and disadvantages are:
-
Legal issues and penalties for using crack software
-
Using crack software is illegal and unethical, as it violates the intellectual property rights and copyrights of the original software developers and publishers. By using crack software, you are committing piracy, which is a criminal offense that can result in serious legal consequences. Depending on your location and jurisdiction, you may face fines, lawsuits, injunctions, arrests, imprisonment, or other penalties for using crack software. For example, in the United States, you can be fined up to $150,000 per infringement and imprisoned for up to five years for using crack software. In addition, you may also damage your reputation and credibility as a professional or a business owner by using crack software.
-
Security threats and malware infections from using crack software
-
Using crack software is risky and dangerous, as it exposes your computer and data to various security threats and malware infections. Crack software is often infected with viruses, worms, trojans, ransomware, spyware, adware, rootkits, keyloggers, or other malicious programs that can harm your computer and data. These malware can steal your personal information, passwords, bank accounts, credit cards, identity documents, or other sensitive data. They can also damage your files, folders, programs, system settings, registry entries, boot sectors, or other components of your computer. They can also slow down your computer performance, cause crashes, errors, freezes, blue screens, pop-ups, redirects, or other problems. Moreover, they can also compromise your online security and privacy by hijacking your browser, email client, social media accounts, webcam, microphone, keyboard strokes, mouse clicks, network traffic, or other online activities. They can also spread to other devices connected to your computer via USB drives
-
Performance issues and errors from using crack software
-
Using crack software is unreliable and problematic, as it can cause performance issues and errors for your computer and the original software. Crack software is often poorly coded, outdated, incompatible, or corrupted, which can affect the functionality and stability of your computer and the original software. You may experience glitches, bugs, crashes, freezes, errors, missing features, corrupted files, or other issues when using crack software. You may also lose your data, work progress, or settings when using crack software. Furthermore, you may not be able to update or upgrade your computer or the original software when using crack software, as it may break the crack or trigger the protection system. This can leave you with an obsolete or vulnerable version of your computer or the original software.
-
What are the alternatives to using crack software?
-
Using crack software is not worth it, as it can bring you more harm than good. Instead of using crack software, you should consider the following alternatives that are legal, safe, and reliable:
-
Buying the original software from the official website
-
The best and most recommended alternative to using crack software is to buy the original software from the official website of the developer or publisher. By buying the original software, you can enjoy the following advantages:
-
-
You can support the developers and publishers who created and maintained the original software.
-
You can access all the features and functions of the original software without any limitations or restrictions.
-
You can receive regular updates and upgrades for your computer and the original software that improve their performance and security.
-
You can get technical support and customer service from the developers and publishers if you encounter any problems or issues with your computer or the original software.
-
You can benefit from discounts, promotions, offers, or bonuses that the developers and publishers may provide for their customers.
-
-
To buy Palette CAD 8 from the official website, you need to visit https://www.palettecad.com/en/home/ and choose the plan that suits your needs and budget. You can also request a free demo or a quote from the website.
-
Using free or open-source software with similar features
-
Another alternative to using crack software is to use free or open-source software that have similar features to the original software. Free or open-source software are software that are available for anyone to use, modify, or distribute without any charge or license. By using free or open-source software, you can enjoy the following benefits:
-
-
You can save money by not paying for any fees or subscriptions for using the software.
-
You can customize or improve the software according to your preferences or needs by accessing or changing its source code.
-
You can contribute to the development and improvement of the software by sharing your feedback, suggestions, or modifications with the community of users and developers.
-
You can learn from the source code and documentation of the software by studying how it works and how it was created.
-
-
Some examples of free or open-source software that have similar features to Palette CAD 8 are:
-
-
Sweet Home 3D: A free interior design application that helps you draw the plan of your house, arrange furniture on it and visit the results in 3D.
-
SketchUp: A 3D modeling software that lets you create, edit, and share 3D models of anything you can imagine, from buildings and furniture to landscapes and vehicles.
-
Blender: A free and open-source 3D creation suite that supports the entire 3D pipeline, from modeling and sculpting to animation and rendering.
-
-
To use these software, you need to download and install them from their official websites or repositories. You can also find tutorials, manuals, forums, or other resources to help you learn and use them.
-
Using trial versions or discounts of the original software
-
A third alternative to using crack software is to use trial versions or discounts of the original software. Trial versions are software that are available for a limited time or with limited features for free or for a low price. Discounts are software that are available for a reduced price or with special offers for a limited time or for certain customers. By using trial versions or discounts of the original software, you can enjoy the following advantages:
-
-
You can try out the features and performance of the original software before deciding whether to buy it or not.
-
You can save money by paying less than the full price of the original software.
-
You can access the official updates, upgrades, support, and service of the original software.
-
You can avoid the risks and disadvantages of using crack software.
-
-
To use trial versions or discounts of Palette CAD 8, you need to visit https://www.palettecad.com/en/home/ and look for the options that suit your needs and budget. You can also contact the sales team or the customer service team to ask for more information or assistance.
-
Conclusion
-
Summary of the main points
-
In conclusion, Palette CAD 8 is a 3D design software that allows you to create realistic and stunning designs for your interior design projects. However, using a crack version of Palette CAD 8 is not worth it, as it can bring you legal, security, and performance problems. Instead of using crack software, you should consider buying the original software from the official website, using free or open-source software with similar features, or using trial versions or discounts of the original software. These alternatives are legal, safe, and reliable, and they can help you achieve your design goals without compromising your computer and data.
-
Recommendation and call to action
-
We recommend that you avoid using crack software at all costs, as it can harm you more than it can help you. We also recommend that you choose one of the alternatives we suggested above, depending on your needs and budget. We hope that this article has helped you make an informed decision on whether to download Palette CAD 8 crack or not. If you have any questions or comments, please feel free to contact us. Thank you for reading!
-
FAQs
-
-
What is Palette CAD 8? Palette CAD 8 is a 3D design software that allows you to design and showcase your interior design projects in a realistic and professional way.
-
What is a crack software? A crack software is a modified version of a software that bypasses or removes the copy protection or activation mechanism of the original software.
-
What are the risks and disadvantages of using crack software? Using crack software is illegal and unethical, as it violates the intellectual property rights and copyrights of the original software developers and publishers. It also exposes your computer and data to various security threats and malware infections, and it causes performance issues and errors for your computer and the original software.
-
What are the alternatives to using crack software? The alternatives to using crack software are buying the original software from the official website, using free or open-source software with similar features, or using trial versions or discounts of the original software.
-
How can I buy Palette CAD 8 from the official website? To buy Palette CAD 8 from the official website, you need to visit https://www.palettecad.com/en/home/ and choose the plan that suits your needs and budget. You can also request a free demo or a quote from the website.
-
What are some examples of free or open-source software with similar features to Palette CAD 8? Some examples of free or open-source software with similar features to Palette CAD 8 are Sweet Home 3D, SketchUp, and Blender. You can download and install them from their official websites or repositories.
Diablo 2 No Cd Crack 110 18: How to Play the Classic Game Without Hassle
-
-
Diablo 2 is one of the most popular and influential action role-playing games of all time. Released in 2000 by Blizzard Entertainment, it has captivated millions of players with its dark fantasy setting, addictive gameplay, and endless replay value. However, if you want to play Diablo 2 today, you might encounter some problems. One of them is the need for a CD or a CD key to run the game.
Fortunately, there are ways to play Diablo 2 without a CD or a CD key. In this article, we will show you how to use a no-cd crack or a no-cd loader to enjoy the game without any hassle. We will also explain what these tools are, how they work, and where to find them.
-
-
What is a No-Cd Crack?
-
-
A no-cd crack is a modified version of the game's executable file (the .exe file) that bypasses the CD check or the CD key check. This means that you can run the game without inserting the CD or entering the CD key. A no-cd crack is usually applied by copying and replacing the original .exe file with the cracked one.
-
-
A no-cd crack can be useful for several reasons. For example, you might have lost or damaged your original CD or CD key, or you might want to play the game on a different computer without carrying the CD around. A no-cd crack can also improve the game's performance by reducing loading times and avoiding disc errors.
-
-
What is a No-Cd Loader?
-
-
A no-cd loader is a small program that runs before the game and tricks it into thinking that the CD is present or that the CD key is valid. A no-cd loader does not modify the game's .exe file, but rather creates a virtual drive that emulates the CD. A no-cd loader is usually applied by running it before launching the game.
-
-
A no-cd loader can have some advantages over a no-cd crack. For example, you might want to use a no-cd loader if you have multiple versions of the game installed and you don't want to overwrite the .exe files. A no-cd loader can also be compatible with more patches and mods than a no-cd crack.
-
-
Where to Find a No-Cd Crack or a No-Cd Loader for Diablo 2?
-
-
There are many websites that offer no-cd cracks or no-cd loaders for Diablo 2. However, not all of them are safe or reliable. Some of them might contain viruses, malware, or spyware that can harm your computer or steal your personal information. Some of them might also have outdated or incompatible versions of the tools that can cause errors or crashes in the game.
-
-
Therefore, you should be careful when downloading and using a no-cd crack or a no-cd loader for Diablo 2. You should always scan the files with an antivirus program before opening them. You should also backup your original .exe file before applying a no-cd crack, in case something goes wrong. You should also check the compatibility of the tools with your game version and your operating system.
-
-
-
Here are some of the websites that we recommend for finding a no-cd crack or a no-cd loader for Diablo 2:
-
-
-
Diablo II: Lord of Destruction (No Install Required): This is a pre-installed version of Diablo 2: Lord of Destruction from Blizzard Entertainment patch 1.12 (English) with MultiRes plugin that lets you run the game in high resolutions. It also includes Ultimate Diablo Item Editor 2 (UdieToo) and Hero Editor, as well as all unique items, gems, runes and sets found in the game. No crack, key or CD required.
-
Diablo 2 No-CD Patches & Game Fixes- GameBurnWorld: This website offers various no-cd patches and fixed exe files for different versions of Diablo 2, from v1.0 to v1.12a. You can download and apply them according to your game version.
-
Diablo 2 No Cd Crack Free 110 18 | Peatix: This website offers a free download of Diablo 2 LoD v1.10 No-CD/Fixed EXE - Game Fixes and Patches. You can use it to play Diablo 2 without a CD or a CD key.
-
-
-
How to Use a No-Cd Crack or a No-Cd Loader for Diablo 2?
-
-
Using a no-cd crack or a no-cd loader for Diablo 2 is not very difficult, but you need to follow some steps carefully. Here is a general guide on how to use them:
-
-
-
Download the no-cd crack or the no-cd loader that matches your game version and your operating system from one of the websites mentioned above.
-
Extract the files from the zip or rar archive using a program like WinZip or WinRAR.
-
Locate your Diablo 2 installation folder, usually in C:\Program Files (x86)\Diablo II or C:\Program Files\Diablo II.
-
If you are using a no-cd crack, make a backup copy of your original .exe file, just in case something goes wrong. Then copy and paste the cracked .exe file into your Diablo 2 installation folder, replacing the original one.
-
If you are using a no-cd loader, simply copy and paste the loader .exe file into your Diablo 2 installation folder. Do not replace the original .exe file.
-
Run the game as usual. If you are using a no-cd crack, just double-click on the cracked .exe file. If you are using a no-cd loader, run the loader .exe file first, then launch the game from it.
-
Enjoy playing Diablo 2 without a CD or a CD key!
-
-
-
What are the Benefits and Risks of Using a No-Cd Crack or a No-Cd Loader for Diablo 2?
-
-
Using a no-cd crack or a no-cd loader for Diablo 2 can have some benefits and risks. Here are some of them:
-
-
-
Benefits:
-
You can play Diablo 2 without a CD or a CD key, which can be convenient and cost-effective.
-
You can improve the game's performance by reducing loading times and avoiding disc errors.
-
You can play the game on multiple computers without carrying the CD around.
-
You can use different patches and mods without worrying about compatibility issues.
-
-
-
-
Risks:
-
You might violate the game's terms of service or end-user license agreement, which can result in legal consequences or account suspension.
-
You might download files that contain viruses, malware, or spyware that can harm your computer or steal your personal information.
-
You might encounter errors or crashes in the game due to outdated or incompatible versions of the tools.
-
You might lose your original .exe file if you don't make a backup copy before applying a no-cd crack.
-
-
-
Therefore, you should weigh the pros and cons of using a no-cd crack or a no-cd loader for Diablo 2 before deciding to use them. You should also take precautions to protect your computer and your game data from any potential harm.
-
How to Play Diablo 2 with No-Cd Crack or No-Cd Loader?
-
-
Once you have applied a no-cd crack or a no-cd loader for Diablo 2, you can enjoy playing the game without any hassle. However, there are some things that you should know before you start playing.
-
-
First, you should make sure that your game version is compatible with the no-cd crack or the no-cd loader that you are using. For example, if you are using a no-cd crack for v1.10, you should not update your game to v1.11 or higher, as this will break the crack and cause errors or crashes. Similarly, if you are using a no-cd loader for v1.12a, you should not downgrade your game to v1.11b or lower, as this will make the loader useless and prevent the game from running.
-
-
Second, you should be aware that using a no-cd crack or a no-cd loader might affect your online gameplay. For example, if you are playing on Battle.net, the official online service for Diablo 2, you might encounter some issues or restrictions. Some of them are:
-
-
-
You might not be able to join or create certain games or channels.
-
You might not be able to use certain features or commands.
-
You might not be able to access certain realms or servers.
-
You might not be able to trade or chat with other players.
-
You might get banned or suspended from Battle.net for violating the terms of service or end-user license agreement.
-
-
-
Therefore, you should be careful when playing Diablo 2 online with a no-cd crack or a no-cd loader. You should always follow the rules and etiquette of Battle.net and respect other players. You should also avoid using any hacks, cheats, bots, or exploits that might give you an unfair advantage or harm the game experience for others.
-
-
What are Some Alternatives to Using a No-Cd Crack or a No-Cd Loader for Diablo 2?
-
-
If you are not comfortable with using a no-cd crack or a no-cd loader for Diablo 2, or if you want to play the game in a more legitimate and secure way, there are some alternatives that you can try. Some of them are:
-
-
-
Buy a digital copy of Diablo 2 from Blizzard Entertainment. This is the easiest and safest way to play Diablo 2 without a CD or a CD key. You can buy a digital copy of Diablo 2 and its expansion pack, Lord of Destruction, from Blizzard Entertainment's official website for $9.99 each. You can then download and install the game on your computer and play it anytime without any hassle. You will also get access to Battle.net and all its features and benefits.
-
Use an official patch from Blizzard Entertainment. This is another simple and secure way to play Diablo 2 without a CD or a CD key. Blizzard Entertainment has released several patches for Diablo 2 over the years that have fixed bugs, improved performance, added features, and removed the CD check or the CD key check from the game. You can download and install these patches from Blizzard Entertainment's official website for free. The latest patch for Diablo 2 is v1.14d, which was released in June 2016.
-
Use an unofficial mod from the Diablo 2 community. This is a more creative and fun way to play Diablo 2 without a CD or a CD key. The Diablo 2 community has created many mods for the game that have changed its gameplay, graphics, sound, story, items, classes, skills, monsters, quests, and more. Some of these mods also remove the CD check or the CD key check from the game. You can download and install these mods from various websites and forums dedicated to Diablo 2 modding. However, you should be careful when using these mods as they might not be compatible with your game version or your operating system. They might also contain viruses, malware, or spyware that can harm your computer or steal your personal information.
-
-
-
Conclusion
-
-
Diablo 2 is a classic game that deserves to be played by anyone who loves action role-playing games. However, if you don't have a CD or a CD key, you might face some difficulties in running the game. That's why we have shown you how to use a no-cd crack or a no-cd loader to play Diablo 2 without any hassle.
-
-
We hope that this article has been helpful and informative for you. If you have any questions or comments, feel free to leave them below. And if you enjoyed this article, please share it with your friends who might also be interested in playing Diablo 2 without a CD or a CD key.
-
-
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/InstaCode.2015.09.rar PORTABLE.md b/spaces/quidiaMuxgu/Expedit-SAM/InstaCode.2015.09.rar PORTABLE.md
deleted file mode 100644
index 4f3a39051ce4e31c7cf6bc5e6b0497bb980f0d28..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/InstaCode.2015.09.rar PORTABLE.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
InstaCode 2015.09: A Leading Key Data Software for Locksmiths
-
InstaCode is a software program that provides locksmiths with access to over 4 billion key codes, covering domestic, commercial and automotive keys. InstaCode is compatible with various key cutting machines and tools, and supports multiple languages and regions. InstaCode also offers graphical layouts, key cutting information, code searching and more features that make it an essential tool for any professional locksmith worldwide.
In this article, we will review the InstaCode 2015.09 version, which was released in September 2015. This version includes new codes, updates and improvements that enhance the performance and functionality of the software. We will also show you how to download and install InstaCode 2015.09 on your Windows computer, and how to activate it using a patch and a registration code.
-
What's New in InstaCode 2015.09?
-
InstaCode 2015.09 contains new codes and updates for various key series, manufacturers and models. Some of the highlights are:
-
-
New codes for BMW FEM/BDC system
-
New codes for Ford Transit Custom 2012+
-
New codes for Hyundai/Kia 8-cut Tibbe system
-
New codes for Mercedes-Benz Smart Keyless Go system
-
New codes for Toyota/Lexus G-chip system
-
New codes for Volkswagen/Audi MQB system
-
New key blank references for JMA, Silca, Keyline and others
-
New key cutting information for Miracle A9 Premium, Xhorse Condor XC Mini and others
-
Improved graphical layouts for various keys
-
Improved code searching by vehicle make, model and year
-
Improved user interface and bug fixes
-
-
How to Download and Install InstaCode 2015.09?
-
To download InstaCode 2015.09, you need to have a valid subscription to the software. You can purchase a subscription from the official website of InstaCode or from one of its authorized distributors. Once you have a subscription, you can download the software from the following link:
The file size is about 395 MB. You need to have a stable internet connection and enough disk space to download it.
-
To install InstaCode 2015.09, you need to run the downloaded file and follow the instructions on the screen. You can choose your preferred language and region during the installation process. You also need to agree to the terms and conditions of the software license agreement.
-
The installation process may take several minutes depending on your computer speed and configuration. Once the installation is complete, you can launch InstaCode from your desktop or start menu.
-
How to Activate InstaCode 2015.09?
-
To activate InstaCode 2015.09, you need to have a valid registration code that matches your subscription details. You can obtain a registration code from the official website of InstaCode or from one of its authorized distributors.
-
Alternatively, you can use a patch and a registration code that are provided by some third-party sources on the internet. However, we do not recommend this method as it may violate the software license agreement and expose your computer to security risks.
-
If you choose to use a patch and a registration code from a third-party source, you need to follow these steps:
-
-
Make sure to turn off your internet connection before activating InstaCode.
-
In the folder where you downloaded the patch, run "Fix License X64" if you have a 64-bit Windows system or "Fix License X86" if you have a 32-bit Windows system.
-
Copy the file "IC" from the same folder and replace it in the folder where you installed InstaCode.
-
Open InstaCode.
-
-
\ No newline at end of file
diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/eval/__init__.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/eval/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Album Flow Indesign Download 28.md b/spaces/raedeXanto/academic-chatgpt-beta/Album Flow Indesign Download 28.md
deleted file mode 100644
index c8b8813cab5ec3251a2cd4de49524b766469d34d..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Album Flow Indesign Download 28.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
How to Create a Photo Album with Album Flow in Adobe InDesign
-
If you want to create a stunning photo album with ease, you might want to try Album Flow, a plugin for Adobe InDesign that allows you to automate the layout process and customize your design with just a few clicks. Album Flow is a great tool for photographers, designers, and anyone who wants to showcase their photos in a professional and elegant way.
-
In this article, we will show you how to download and install Album Flow, how to use it to create a photo album, and how to export your project as a PDF or print-ready file. Let's get started!
Step 1: Download and Install Album Flow
-
To use Album Flow, you need to have Adobe InDesign installed on your computer. You can download a free trial version of InDesign from Adobe's website.
-
Next, you need to download Album Flow from Album Flow's website. You can choose between two versions: Album Flow Basic (free) or Album Flow Pro (paid). The Basic version has some limitations, such as the number of pages, layouts, and fonts you can use. The Pro version offers more features and flexibility, such as custom layouts, text boxes, backgrounds, and more.
-
Once you download the plugin, you need to install it on your computer. To do that, follow these steps:
-
-
Open InDesign and go to Window > Utilities > Scripts.
-
In the Scripts panel, right-click on User and choose Reveal in Finder (Mac) or Reveal in Explorer (Windows).
-
Copy the downloaded AlbumFlow.jsx file into the Scripts Panel folder.
-
Restart InDesign.
-
-
You should now see Album Flow in the Scripts panel under User.
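If you prefer to script the copy step rather than doing it by hand, here is a minimal Python sketch. It only mirrors the manual instructions above: the AlbumFlow.jsx file name comes from those steps, while the Downloads location and the Scripts Panel path (version and locale folders) are assumptions you will likely need to adjust, which is why the "Reveal in Finder/Explorer" trick remains the reliable way to find the real folder.
```python
import shutil
from pathlib import Path

# Assumed locations -- adjust both paths to your machine. The most
# reliable way to find the Scripts Panel folder is the
# "Reveal in Finder/Explorer" step described above.
downloaded_script = Path.home() / "Downloads" / "AlbumFlow.jsx"
scripts_panel = (
    Path.home() / "AppData" / "Roaming" / "Adobe" / "InDesign"
    / "Version 19.0" / "en_US" / "Scripts" / "Scripts Panel"  # hypothetical version/locale
)

if not downloaded_script.is_file():
    raise FileNotFoundError(f"Downloaded script not found: {downloaded_script}")
if not scripts_panel.is_dir():
    raise FileNotFoundError(f"Scripts Panel folder not found: {scripts_panel}")

# Copy the plugin script into InDesign's user Scripts Panel folder.
shutil.copy2(downloaded_script, scripts_panel / downloaded_script.name)
print(f"Copied {downloaded_script.name} to {scripts_panel}")
```
Either way, restart InDesign afterwards so the script shows up under User in the Scripts panel.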
-
Step 2: Create a New Document
-
To create a new document with Album Flow, follow these steps:
-
-
In InDesign, go to File > New > Document.
-
In the New Document dialog box, choose the preset that matches your desired book size and orientation. For example, if you want to create an 8 x 10 inch landscape book, choose Photo Book > Landscape 8 x 10.
-
Click OK.
-
In the Scripts panel, double-click on Album Flow.
-
In the Album Flow dialog box, choose the number of pages you want for your album. You can also choose the layout style, the margin size, and the font size. Click OK.
-
-
Album Flow will automatically create a document with the specified number of pages and apply a random layout to each page. You can see the page thumbnails in the Pages panel.
-
-
Step 3: Add Your Photos
-
To add your photos to your album, follow these steps:
-
-
In InDesign, go to File > Place.
-
In the Place dialog box, navigate to the folder where your photos are stored and select them all. You can use Shift or Command/Ctrl to select multiple files.
-
Click Open.
-
In the document window, click on the first page of your album. Album Flow will automatically place your photos in the frames according to the layout. It will also resize and crop your photos to fit the frames.
-
If you want to change the order of your photos, you can drag them in the Links panel or use the arrow buttons in the Album Flow dialog box.
-
If you want to change the layout of a page, you can use the Layout menu in the Album Flow dialog box or right-click on a page thumbnail and choose Change Layout.
-
If you want to edit a photo, such as rotate, scale, or move it within a frame, you can double-click on it and use the Direct Selection tool or the Control panel.
-
-
Step 4: Customize Your Design
-
If you have Album Flow Pro, you can customize your design further by adding text boxes, backgrounds, borders, shadows, and more.
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/CRACK CadSoft Eagle Professional 6.5.0 Patch How to Install and Use.md b/spaces/raedeXanto/academic-chatgpt-beta/CRACK CadSoft Eagle Professional 6.5.0 Patch How to Install and Use.md
deleted file mode 100644
index accc85761b349d4ef270609df77b73fcd1c2048b..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/CRACK CadSoft Eagle Professional 6.5.0 Patch How to Install and Use.md
+++ /dev/null
@@ -1,164 +0,0 @@
-
-
CRACK CadSoft Eagle Professional 6.5.0 patch
-
Are you looking for a way to design and create printed circuit boards (PCBs) with ease and efficiency? Do you want to use a powerful and versatile electronic design automation (EDA) software that can run on multiple operating systems? If yes, then you might be interested in CadSoft Eagle Professional 6.5.0, a popular and reliable software for PCB design and layout.
However, you might also be aware that CadSoft Eagle Professional 6.5.0 is not a free software and you need to purchase a license to use it fully and legally. That's why some people resort to using CRACK CadSoft Eagle Professional 6.5.0 patch, a tool that can bypass the license verification and activation process and allow you to use the software for free.
-
But is it really worth it to use CRACK CadSoft Eagle Professional 6.5.0 patch? What are the benefits and risks of using it? How can you download and install it safely and effectively? In this article, we will answer these questions and more, so keep reading to find out.
-
What is CadSoft Eagle Professional 6.5.0?
-
CadSoft Eagle Professional 6.5.0 is a software that allows you to design, create, edit, simulate, test, and verify PCBs with ease and efficiency. It is a scriptable EDA application with schematic capture, printed circuit board layout, auto-router, and computer-aided manufacturing (CAM) features. It can run on Windows, Mac OS X, and Linux operating systems.
-
Features of CadSoft Eagle Professional 6.5.0
-
Some of the features of CadSoft Eagle Professional 6.5.0 are:
-
-
Support for up to 999 sheets per schematic
-
Support for up to 16 signal layers per board
-
Support for up to 4 meters x 4 meters board size
-
Support for blind and buried vias
-
Support for differential pair routing
-
Support for high-speed design rules
-
Support for design rule checking (DRC)
-
Support for electrical rule checking (ERC)
-
Support for net classes and clearance classes
-
Support for autorouter with user-defined cost factors
-
Support for CAM processor with Gerber, Excellon, HPGL, PostScript, PDF output formats
-
Support for user language programs (ULPs) that can extend the functionality of the software
-
Support for libraries with thousands of components and symbols
-
Support for importing and exporting various file formats such as DXF, IDF, XML, CSV, etc.
-
Support for online help system and tutorials
-
-
Benefits of using CadSoft Eagle Professional 6.5.0
-
Some of the benefits of using CadSoft Eagle Professional 6.5.0 are:
-
-
You can design and create PCBs with ease and efficiency using a user-friendly interface and intuitive tools
-
You can customize and optimize your PCB design according to your needs and preferences using various options and settings
-
You can save time and money by using the autorouter feature that can automatically route your PCB traces according to your specifications
-
You can ensure the quality and reliability of your PCB design by using the DRC and ERC features that can detect and correct any errors or violations in your schematic or layout
-
You can generate various output files that can be used for manufacturing or documentation purposes using the CAM processor feature
-
You can extend the functionality of the software by using ULPs that can perform various tasks such as simulation, testing, verification, etc.
-
You can access thousands of components and symbols from libraries that are compatible with the software or create your own libraries using the library editor feature
-
You can import or export various file formats that are compatible with other EDA software or applications using the import/export feature
-
You can run the software on multiple operating systems without any compatibility issues or performance degradation
-
You can access online help system and tutorials that can guide you through the software features and functions or help you troubleshoot any problems or issues
-
-
What is CRACK CadSoft Eagle Professional 6.5.0 patch?
-
CRACK CadSoft Eagle Professional 6.5.0 patch is a tool that can bypass the license verification and activation process of CadSoft Eagle Professional 6.5.0 software. It is a file that can modify or replace some parts of the original software files that are responsible for checking or validating the license key or serial number. By doing so, it can make the software think that it has been activated legally and allow you to use it without any restrictions or limitations.
-
How does CRACK CadSoft Eagle Professional 6.5.0 patch work?
-
The basic steps of how CRACK CadSoft Eagle Professional 6.5.0 patch works are:
-
-
You download CRACK CadSoft Eagle Professional 6.5.0 patch from an online source such as a torrent site or a file-sharing site
-
You install CadSoft Eagle Professional 6.5.0 software from its official website or from another source
-
You run CRACK CadSoft Eagle Professional 6.5.0 patch as an administrator on your computer
You follow the instructions given by CRACK CadSoft Eagle Professional 6.5.0 patch to patch the software files
-
You restart your computer and run CadSoft Eagle Professional 6.5.0 software as usual
-
You enjoy using CadSoft Eagle Professional 6.5.0 software for free without any restrictions or limitations
-
-
Advantages of using CRACK CadSoft Eagle Professional 6.5.0 patch
-
Some of the advantages of using CRACK CadSoft Eagle Professional 6.5.0 patch are:
-
-
You can use CadSoft Eagle Professional 6.5.0 software for free without paying any license fee or subscription fee
-
You can use CadSoft Eagle Professional 6.5.0 software without any restrictions or limitations such as board size, layer number, component number, etc.
-
You can use CadSoft Eagle Professional 6.5.0 software without any nagging messages or reminders to activate or register the software
-
You can use CadSoft Eagle Professional 6.5.0 software without any risk of losing your license key or serial number due to hardware changes or system crashes
-
You can use CadSoft Eagle Professional 6.5.0 software without any need to update or upgrade the software to keep it activated or functional
-
-
Risks of using CRACK CadSoft Eagle Professional 6.5.0 patch
-
Some of the risks of using CRACK CadSoft Eagle Professional 6.5.0 patch are:
-
-
You can violate the intellectual property rights and terms and conditions of CadSoft, the developer and owner of CadSoft Eagle Professional 6.5.0 software
-
You can face legal consequences such as fines, lawsuits, or criminal charges for using pirated or cracked software
-
You can expose your computer and data to malware, viruses, spyware, ransomware, or other malicious programs that can be embedded in CRACK CadSoft Eagle Professional 6.5.0 patch or the sources where you download it from
-
You can compromise the quality and reliability of your PCB design by using a modified or corrupted version of CadSoft Eagle Professional 6.5.0 software that can have bugs, errors, glitches, or missing features
-
You can lose the support and assistance from CadSoft or other users who can help you with any problems or issues that you might encounter while using CadSoft Eagle Professional 6.5.0 software
-
You can miss out on the latest features and improvements that CadSoft might release for CadSoft Eagle Professional 6.5.0 software in the future
-
-
How to download and install CRACK CadSoft Eagle Professional 6.5.0 patch?
-
If you still want to download and install CRACK CadSoft Eagle Professional 6.5.0 patch despite the risks and disadvantages that we have mentioned above, then you need to follow these steps:
-
Step-by-step guide to download and install CRACK CadSoft Eagle Professional 6.5.0 patch
Choose an online source for CRACK CadSoft Eagle Professional 6.5.0 patch and download it by clicking on the download link or button and following the instructions given by the source.
-
Extract CRACK CadSoft Eagle Professional 6.5.0 patch from the downloaded file using a file extraction tool such as WinRAR or 7-Zip.
-
Install CadSoft Eagle Professional 6.5.0 software from its official website https://www.autodesk.com/products/eagle/overview or from another source by running the setup file and following the installation wizard.
Run CRACK CadSoft Eagle Professional 6.5.0 patch as an administrator on your computer by right-clicking on the patch file and selecting "Run as administrator".
-
Follow the instructions given by CRACK CadSoft Eagle Professional 6.5.0 patch to patch the software files. You might need to locate the installation folder of CadSoft Eagle Professional 6.5.0 software and select the files that need to be patched.
-
Restart your computer and run CadSoft Eagle Professional 6.5.0 software as usual. You should be able to use it without any restrictions or limitations.
-
-
Tips and tricks to optimize CRACK CadSoft Eagle Professional 6.5.0 patch performance
-
Some of the tips and tricks to optimize CRACK CadSoft Eagle Professional 6.5.0 patch performance are:
-
-
Disable your antivirus or firewall software before running CRACK CadSoft Eagle Professional 6.5.0 patch as they might interfere with the patching process or detect it as a threat and block it.
-
Backup your original software files before patching them with CRACK CadSoft Eagle Professional 6.5.0 patch in case something goes wrong or you want to restore them later.
-
Update your drivers and system software to ensure that they are compatible with CadSoft Eagle Professional 6.5.0 software and CRACK CadSoft Eagle Professional 6.5.0 patch.
-
Clean your registry and disk space to remove any junk files or errors that might affect the performance of CadSoft Eagle Professional 6.5.0 software and CRACK CadSoft Eagle Professional 6.5.0 patch.
-
Scan your computer and data regularly for any malware or viruses that might have been introduced by CRACK CadSoft Eagle Professional 6.5.0 patch or the sources where you downloaded it from.
-
-
Conclusion
-
In conclusion, CRACK CadSoft Eagle Professional 6.5.0 patch is a tool that can bypass the license verification and activation process of CadSoft Eagle Professional 6.5.0 software and allow you to use it for free without any restrictions or limitations.
-
Summary of the main points
-
The main points that we have discussed in this article are:
-
-
CadSoft Eagle Professional 6.5.0 is a powerful and versatile EDA software that can help you design and create PCBs with ease and efficiency.
-
CRACK CadSoft Eagle Professional 6.5.0 patch is a tool that can modify or replace some parts of the original software files that are responsible for checking or validating the license key or serial number.
-
CRACK CadSoft Eagle Professional 6.5.0 patch can make the software think that it has been activated legally and allow you to use it without any restrictions or limitations.
-
CRACK CadSoft Eagle Professional 6.5.0 patch has some advantages such as saving money, removing limitations, avoiding nagging messages, etc.
CRACK CadSoft Eagle Professional 6.5.0 patch has some risks such as violating intellectual property rights, facing legal consequences, exposing your computer and data to malware, compromising the quality and reliability of your PCB design, losing the support and assistance from CadSoft or other users, missing out on the latest features and improvements, etc.
-
CRACK CadSoft Eagle Professional 6.5.0 patch can be downloaded and installed from various online sources by following some steps and precautions.
-
CRACK CadSoft Eagle Professional 6.5.0 patch performance can be optimized by following some tips and tricks such as disabling your antivirus or firewall software, backing up your original software files, updating your drivers and system software, cleaning your registry and disk space, scanning your computer and data regularly, etc.
-
-
Call to action
-
If you are interested in using CadSoft Eagle Professional 6.5.0 software for your PCB design and layout projects, we recommend that you purchase a license from CadSoft or its authorized resellers and use it legally and ethically. This way, you can enjoy the full features and benefits of the software without any risks or drawbacks.
-
However, if you still want to use CRACK CadSoft Eagle Professional 6.5.0 patch for whatever reason, we advise that you do so at your own risk and responsibility. We are not responsible for any damages or losses that you might incur by using CRACK CadSoft Eagle Professional 6.5.0 patch or the sources where you downloaded it from.
-
We hope that this article has been informative and helpful for you. If you have any questions or feedback, please feel free to contact us or leave a comment below.
-
FAQs
-
Here are some frequently asked questions (FAQs) about CRACK CadSoft Eagle Professional 6.5.0 patch:
-
Q: Is CRACK CadSoft Eagle Professional 6.5.0 patch safe to use?
-
A: No, CRACK CadSoft Eagle Professional 6.5.0 patch is not safe to use as it can expose your computer and data to malware, viruses, spyware, ransomware, or other malicious programs that can be embedded in CRACK CadSoft Eagle Professional 6.5.0 patch or the sources where you downloaded it from.
-
Q: Is CRACK CadSoft Eagle Professional 6.5.0 patch legal to use?
-
A: No, CRACK CadSoft Eagle Professional 6.5.0 patch is not legal to use as it can violate the intellectual property rights and terms and conditions of CadSoft, the developer and owner of CadSoft Eagle Professional 6.5.0 software. You can face legal consequences such as fines, lawsuits, or criminal charges for using pirated or cracked software.
Q: Is CRACK CadSoft Eagle Professional 6.5.0 patch reliable to use?
-
A: No, CRACK CadSoft Eagle Professional 6.5.0 patch is not reliable to use as it can compromise the quality and reliability of your PCB design by using a modified or corrupted version of CadSoft Eagle Professional 6.5.0 software that can have bugs, errors, glitches, or missing features.
-
Q: Is CRACK CadSoft Eagle Professional 6.5.0 patch compatible with other EDA software or applications?
-
A: No, CRACK CadSoft Eagle Professional 6.5.0 patch is not compatible with other EDA software or applications as it can cause conflicts or errors when importing or exporting various file formats that are compatible with other EDA software or applications.
-
Q: Is CRACK CadSoft Eagle Professional 6.5.0 patch supported by CadSoft or other users?
-
A: No, CRACK CadSoft Eagle Professional 6.5.0 patch is not supported by CadSoft or other users as it can lose the support and assistance from CadSoft or other users who can help you with any problems or issues that you might encounter while using CadSoft Eagle Professional 6.5.0 software.
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Diablo 2 Median Xl Hero Editor 1.13 ((TOP)).md b/spaces/raedeXanto/academic-chatgpt-beta/Diablo 2 Median Xl Hero Editor 1.13 ((TOP)).md
deleted file mode 100644
index 9b148f56cbc40ef877bc23f60012c506a43a5578..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Diablo 2 Median Xl Hero Editor 1.13 ((TOP)).md
+++ /dev/null
@@ -1,93 +0,0 @@
-
-
Diablo 2 Median XL Hero Editor 1.13: How to Customize Your Character
-
Introduction
-
If you are a fan of Diablo 2, you might have heard of Median XL, a popular mod that overhauls the game with new content, features, and mechanics. Median XL is designed to offer a challenging and rewarding experience for both veterans and newcomers alike. However, if you want to experiment with different builds, items, and skills without spending hours grinding and farming, you might want to use a tool called Hero Editor.
-
What is Diablo 2 Median XL?
-
Diablo 2 Median XL is a mod that transforms Diablo 2 into a new game with hundreds of new skills, items, enemies, quests, bosses, areas, and more. It also adds new systems such as crafting, runewords, charms, mercenaries, shrines, uberquests, and more. Median XL is compatible with Diablo 2 Lord of Destruction version 1.13c or later.
What is Hero Editor?
-
Hero Editor is a program that allows you to edit your Diablo 2 characters in various ways. You can change your character's attributes, skills, items, level, experience, gold, stats, and more. You can also create new characters or clone existing ones. Hero Editor works with both vanilla Diablo 2 and modded versions such as Median XL.
-
Why use Hero Editor for Median XL?
-
Hero Editor can be useful for Median XL players who want to try out different builds and setups without having to grind for hours or restart the game from scratch. You can also use it to fix bugs or glitches that might occur in your save files. For example, if you lose an item or a quest reward due to a crash or a corrupted file, you can use Hero Editor to restore it. You can also use it to customize your character's appearance and name.
-
How to install and use Hero Editor for Median XL
-
Download and install Hero Editor 1.13
-
The first step is to download and install Hero Editor 1.13 from this link: https://www.moddb.com/mods/median-xl/downloads/hero-editor-v-113
-
Extract the zip file and run the setup.exe file. Follow the instructions on the screen and choose the directory where your Diablo 2 game is installed. After the installation is complete, you can launch Hero Editor from the shortcut on your desktop or from the Start menu.
-
Backup your save files
-
Before you edit your character with Hero Editor, it is highly recommended that you backup your save files first. This way, you can restore them in case something goes wrong or you want to revert your changes. To backup your save files, go to your Diablo 2 folder and copy the "Save" folder to another location. You can also rename it to something like "Save_backup".
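-
If you prefer to script this step, here is a minimal sketch (assuming the default install location under "C:\Program Files (x86)\Diablo II"; adjust both paths to your own setup):
-
```python
import shutil
from pathlib import Path

d2_folder = Path(r"C:\Program Files (x86)\Diablo II")  # adjust to your install
backup = d2_folder / "Save_backup"

if backup.exists():
    shutil.rmtree(backup)  # drop any stale backup first
shutil.copytree(d2_folder / "Save", backup)
print(f"Backed up saves to {backup}")
```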
-
Load your character in Hero Editor
-
To load your character in Hero Editor, click on the "Open" button on the top left corner of the program window. A dialog box will appear where you can browse for your save files. By default, they are located in your Diablo 2 folder under "Save". Select the file that corresponds to your character's name and click "Open". You should see your character's information on the screen.
-
Edit your character's attributes, skills, items, and more
-
Now you can edit your character's attributes, skills, items, and more using the tabs on the top of the program window. Each tab has different options and features that you can modify according to your preferences. For example:
-
-
The "Stats" tab allows you to change your character's level, experience, gold, stats points (strength, dexterity, vitality, energy), skill points (left click skill, right click skill, hotkey skills), life, mana, stamina, and resistances.
-
The "Skills" tab allows you to change your character's skills levels (both active and passive) for each skill tree (sorceress, amazon, necromancer, paladin, barbarian, druid, assassin). You can also reset all skills or max all skills using the buttons on the bottom of the tab.
-
The "Inventory" tab allows you to change your character's items in their inventory (belt, backpack, cube), stash, and equipped slots (weapon, shield, helm, armor, gloves, boots, belt, amulet, rings). You can add new items by clicking on the "Add Item" button on the bottom of the tab. A dialog box will appear where you can choose from a list of items or enter a code for a custom item. You can also edit existing items by double-clicking on them or right-clicking on them and choosing "Edit Item". A dialog box will appear where you can modify the item's name, quality, quantity, sockets, runes/gems/jewels/charms/skulls/essences/oils/sigils/catalysts/relics/arcane crystals/shards/crystals/dyes/scrolls/tokens/keys/cards/stones/cubes/potions/etc., stats (damage, defense, durability), and properties (magic/rare/set/unique/crafted/sacred/tiered/ethereal/socketed/enchanted/rerolled/upgraded/downgraded/etc., prefixes/suffixes/modifiers). You can also delete items by right-clicking on them and choosing "Delete Item".
-
The "Mercenary" tab allows you to change your character's mercenary (hireling) type (act I rogue/archer/bow/crossbow/fire/cold/lightning/magic/poison/physical/multishot/guided arrow/strafe/freeze arrow/exploding arrow/fire arrow/cold arrow/lightning arrow/magic arrow/poison arrow/etc., act II desert mercenary/spear/javelin/polearm/sword/shield/fire/cold/lightning/might/fanaticism/holy freeze/holy fire/holy shock/blessed aim/thorns/defiance/prayer/cleansing/meditation/etc., act III iron wolf/sorcerer/fire/cold/lightning/fireball/frozen orb/thunder storm/glacial spike/blaze/static field/nova/meteor/blizzard/hydra/chain lightning/enchant/warmth/etc., act V barbarian/sword/shield/dual wield/throwing axe/jab/frenzy/battle orders/battle command/shout/find item/find potion/increased speed/natural resistance/war cry/howl/battle cry/grim ward/etc., act VI assassin/claw/dagger/katar/scimitar/kris/blades of ice/fists of fire/claws of thunder/dragon flight/dragon claw/dragon talon/dragon tail/burst of speed/fade/death sentry/lightning sentry/fire blast/wake of fire/wake of inferno/cobra strike/phoenix strike/tiger strike/dragon talon/etc.) level (1-99), experience (0-4294967295), stats points (strength/dexterity/vitality), skill points (left click skill/right click skill/hotkey skills), life (0-65535), mana (0-65535), stamina (0-65535), and resistances (-100-100). You can also change their items in their equipped slots (weapon/shield/helm/armor/gloves/boots/belt/amulet/rings) using the same method as in the "Inventory" tab.
-the buttons on the bottom of the tab.
-
The "Waypoints" tab allows you to change your character's waypoint status for each act (I-VI) and difficulty level (normal/nightmare/hell). You can check or uncheck each waypoint box to mark it as activated or not activated. You can also activate all waypoints or deactivate all waypoints using the buttons on the bottom of the tab.
-
The "Extra" tab allows you to access some Median XL specific features such as changing your character's class (sorceress/amazon/necromancer/paladin/barbarian/druid/assassin), changing your character's uberlevel (0-120), changing your character's signets of learning (0-500), changing your character's challenges status (check or uncheck each challenge box to mark it as completed or not completed), and changing your character's stats/skills reset status (check or uncheck each reset box to mark it as available or not available).
-
The "Import/Export" tab allows you to import or export your character to or from a text file. You can use this feature to share your character with others or to backup your character in a different format. To import a character, click on the "Import" button and browse for a text file that contains your character's data. To export a character, click on the "Export" button and choose a location and a name for the text file that will contain your character's data.
-
The "Item Edit" tab allows you to create custom items from scratch or edit existing items in more detail. You can use this feature to create powerful items with unique properties and modifiers. To create a new item, click on the "New Item" button and choose an item type from the list. A dialog box will appear where you can enter the item's name, quality, quantity, sockets, runes/gems/jewels/charms/skulls/essences/oils/sigils/catalysts/relics/arcane crystals/shards/crystals/dyes/scrolls/tokens/keys/cards/stones/cubes/potions/etc., stats, and properties. You can also use the "Randomize" button to generate a random item with random properties and modifiers. To edit an existing item, click on the "Edit Item" button and browse for an item file that contains your item's data. A dialog box will appear where you can modify the item's name, quality, quantity, sockets, runes/gems/jewels/charms/skulls/essences/oils/sigils/catalysts/relics/arcane crystals/shards/crystals/dyes/scrolls/tokens/keys/cards/stones/cubes/potions/etc., stats, and properties. You can also use the "Randomize" button to generate a random item with random properties and modifiers.
-
The "Character Hex Edit" tab allows you to modify your character's data in hexadecimal format. You can use this feature to change advanced settings that are not available in other tabs. To edit your character's data, click on the "Hex Edit" button and browse for your save file that contains your character's data. A dialog box will appear where you can view and edit your character's data in hexadecimal format. You can also use the "Search" button to find specific values or strings in your character's data.
-
-
Tips and tricks for using Hero Editor for Median XL
-
Here are some tips and tricks for using Hero Editor for Median XL:
-
-
Be careful when editing your character's data as some changes might cause errors or crashes in the game. Always backup your save files before editing them with Hero Editor.
-
Do not edit your character's data while the game is running as this might cause conflicts or corruption in your save files. Always exit the game before editing your character's data with Hero Editor.
-
Do not edit your character's data while playing online as this might cause bans or disconnections from the server. Always play offline when editing your character's data with Hero Editor.
-
Do not edit your character's data in ways that are incompatible with Median XL as this might cause bugs or glitches in the game. Always follow the rules and guidelines of Median XL when editing your character's data with Hero Editor.
-
Do not edit your character's data in ways that are unfair or cheating as this might ruin the fun and challenge of the game. Always respect the balance and difficulty of Median XL when editing your character's data with Hero Editor.
-
-
Conclusion
-
Hero Editor is a powerful tool that allows you to customize your Diablo 2 Median XL characters in various ways. You can change their attributes, skills, items, quests, waypoints, mercenary, class, uberlevel, signets of learning, challenges, stats/skills reset status, and more. You can also create new characters or clone existing ones. You can also create custom items or edit existing ones in more detail. You can also import or export your characters to or from text files. You can also modify your characters' data in hexadecimal format.
-
However, Hero Editor also comes with some risks and responsibilities. You should always backup your save files before editing them with Hero Editor. You should always exit the game before editing your characters' data with Hero Editor. You should always play offline when editing your characters' data with Hero Editor. You should always follow the rules and guidelines of Median XL when editing your characters' data with Hero Editor. You should always respect the balance and difficulty of Median XL when editing your characters' data with Hero Editor.
-
Hero Editor is a great tool for experimenting and exploring different builds and setups without having to grind for hours or restart the game from scratch. It can also help you fix bugs or glitches that might occur in your save files. It can also help you customize your characters' appearance and name. However, Hero Editor is not a substitute for playing the game properly and enjoying its content and features. Hero Editor is meant to enhance your experience of Diablo 2 Median XL, not replace it.
-
FAQs
-
Here are some frequently asked questions about Hero Editor for Median XL:
-
-
Where can I download Hero Editor 1.13? You can download Hero Editor 1.13 from this link: https://www.moddb.com/mods/median-xl/downloads/hero-editor-v-113
-
How do I backup my save files? To backup your save files, go to your Diablo 2 folder and copy the "Save" folder to another location. You can also rename it to something like "Save_backup".
How do I load my character in Hero Editor? To load your character, click on the "Open" button on the top left corner of the program window. A dialog box will appear where you can browse for your save files. By default, they are located in your Diablo 2 folder under "Save". Select the file that corresponds to your character's name and click "Open". You should see your character's information on the screen.
-
How do I edit my character's attributes, skills, items, and more? To edit your character's attributes, skills, items, and more, use the tabs on the top of the program window. Each tab has different options and features that you can modify according to your preferences. For example, the "Stats" tab allows you to change your character's level, experience, gold, stats points, skill points, life, mana, stamina, and resistances. The "Skills" tab allows you to change your character's skills levels for each skill tree. The "Inventory" tab allows you to change your character's items in their inventory, stash, and equipped slots. The "Mercenary" tab allows you to change your character's mercenary type, level, experience, stats points, skill points, life, mana, stamina, resistances, and items. The "Quests" tab allows you to change your character's quest status for each act and difficulty level. The "Waypoints" tab allows you to change your character's waypoint status for each act and difficulty level. The "Extra" tab allows you to access some Median XL specific features such as changing your character's class, uberlevel, signets of learning, challenges status, and stats/skills reset status. The "Import/Export" tab allows you to import or export your character to or from a text file. The "Item Edit" tab allows you to create custom items from scratch or edit existing items in more detail. The "Character Hex Edit" tab allows you to modify your character's data in hexadecimal format.
-
How do I create custom items? To create custom items, use the "Item Edit" tab on the top of the program window. Click on the "New Item" button and choose an item type from the list. A dialog box will appear where you can enter the item's name, quality, quantity, sockets, socket fillers, stats, and properties. You can also use the "Randomize" button to generate a random item with random properties and modifiers.
-
How do I share my character with others? To share your character with others, use the "Import/Export" tab on the top of the program window. Click on the "Export" button and choose a location and a name for the text file that will contain your character's data. You can then send this text file to others who can import it using Hero Editor.
-
How do I modify advanced settings? To modify advanced settings that are not available in other tabs, use the "Character Hex Edit" tab on the top of the program window. Click on the "Hex Edit" button and browse for your save file that contains your character's data. A dialog box will appear where you can view and edit your character's data in hexadecimal format. You can also use the "Search" button to find specific values or strings in your character's data.
-
-
-
\ No newline at end of file
diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/errors.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/errors.d.ts
deleted file mode 100644
index 2800d82ee95adc167f10e116c573e5d059eee483..0000000000000000000000000000000000000000
--- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/errors.d.ts
+++ /dev/null
@@ -1,94 +0,0 @@
-import { SupportedResponse } from "./fetch-types";
-/**
- * Error codes returned in responses from the API.
- */
-export declare enum APIErrorCode {
- Unauthorized = "unauthorized",
- RestrictedResource = "restricted_resource",
- ObjectNotFound = "object_not_found",
- RateLimited = "rate_limited",
- InvalidJSON = "invalid_json",
- InvalidRequestURL = "invalid_request_url",
- InvalidRequest = "invalid_request",
- ValidationError = "validation_error",
- ConflictError = "conflict_error",
- InternalServerError = "internal_server_error",
- ServiceUnavailable = "service_unavailable"
-}
-/**
- * Error codes generated for client errors.
- */
-export declare enum ClientErrorCode {
- RequestTimeout = "notionhq_client_request_timeout",
- ResponseError = "notionhq_client_response_error"
-}
-/**
- * Error codes on errors thrown by the `Client`.
- */
-export type NotionErrorCode = APIErrorCode | ClientErrorCode;
-/**
- * Base error type.
- */
-declare abstract class NotionClientErrorBase<Code extends NotionErrorCode> extends Error {
- abstract code: Code;
-}
-/**
- * Error type that encompasses all the kinds of errors that the Notion client will throw.
- */
-export type NotionClientError = RequestTimeoutError | UnknownHTTPResponseError | APIResponseError;
-/**
- * @param error any value, usually a caught error.
- * @returns `true` if error is a `NotionClientError`.
- */
-export declare function isNotionClientError(error: unknown): error is NotionClientError;
-/**
- * Error thrown by the client if a request times out.
- */
-export declare class RequestTimeoutError extends NotionClientErrorBase<ClientErrorCode.RequestTimeout> {
- readonly code = ClientErrorCode.RequestTimeout;
- readonly name = "RequestTimeoutError";
- constructor(message?: string);
- static isRequestTimeoutError(error: unknown): error is RequestTimeoutError;
- static rejectAfterTimeout<T>(promise: Promise<T>, timeoutMS: number): Promise<T>;
-}
-type HTTPResponseErrorCode = ClientErrorCode.ResponseError | APIErrorCode;
-declare class HTTPResponseError<Code extends HTTPResponseErrorCode = HTTPResponseErrorCode> extends NotionClientErrorBase<Code> {
- readonly name: string;
- readonly code: Code;
- readonly status: number;
- readonly headers: SupportedResponse["headers"];
- readonly body: string;
- constructor(args: {
- code: Code;
- status: number;
- message: string;
- headers: SupportedResponse["headers"];
- rawBodyText: string;
- });
-}
-export declare function isHTTPResponseError(error: unknown): error is UnknownHTTPResponseError | APIResponseError;
-/**
- * Error thrown if an API call responds with an unknown error code, or does not respond with
- * a properly-formatted error.
- */
-export declare class UnknownHTTPResponseError extends HTTPResponseError<ClientErrorCode.ResponseError> {
- readonly name = "UnknownHTTPResponseError";
- constructor(args: {
- status: number;
- message: string | undefined;
- headers: SupportedResponse["headers"];
- rawBodyText: string;
- });
- static isUnknownHTTPResponseError(error: unknown): error is UnknownHTTPResponseError;
-}
-/**
- * A response from the API indicating a problem.
- * Use the `code` property to handle various kinds of errors. All its possible values are in `APIErrorCode`.
- */
-export declare class APIResponseError extends HTTPResponseError<APIErrorCode> {
- readonly name = "APIResponseError";
- static isAPIResponseError(error: unknown): error is APIResponseError;
-}
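-/**
- * Builds the appropriate error from a raw HTTP response and its body text: an
- * `APIResponseError` when the body carries a recognized `APIErrorCode`, otherwise
- * an `UnknownHTTPResponseError`.
- */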
-export declare function buildRequestError(response: SupportedResponse, bodyText: string): APIResponseError | UnknownHTTPResponseError;
-export {};
-//# sourceMappingURL=errors.d.ts.map
\ No newline at end of file
diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/chokidar/lib/nodefs-handler.js b/spaces/rayan-saleh/whisper2notion/server/node_modules/chokidar/lib/nodefs-handler.js
deleted file mode 100644
index 199cfe9f9fff5a2f5267c67a004b1fa92ec56649..0000000000000000000000000000000000000000
--- a/spaces/rayan-saleh/whisper2notion/server/node_modules/chokidar/lib/nodefs-handler.js
+++ /dev/null
@@ -1,654 +0,0 @@
-'use strict';
-
-const fs = require('fs');
-const sysPath = require('path');
-const { promisify } = require('util');
-const isBinaryPath = require('is-binary-path');
-const {
- isWindows,
- isLinux,
- EMPTY_FN,
- EMPTY_STR,
- KEY_LISTENERS,
- KEY_ERR,
- KEY_RAW,
- HANDLER_KEYS,
- EV_CHANGE,
- EV_ADD,
- EV_ADD_DIR,
- EV_ERROR,
- STR_DATA,
- STR_END,
- BRACE_START,
- STAR
-} = require('./constants');
-
-const THROTTLE_MODE_WATCH = 'watch';
-
-const open = promisify(fs.open);
-const stat = promisify(fs.stat);
-const lstat = promisify(fs.lstat);
-const close = promisify(fs.close);
-const fsrealpath = promisify(fs.realpath);
-
-const statMethods = { lstat, stat };
-
-// TODO: emit errors properly. Example: EMFILE on Macos.
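-// The per-path containers below store either a single listener or a Set of
-// listeners; these helpers normalize iteration, insertion and removal so callers
-// do not need to care which shape they are dealing with.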
-const foreach = (val, fn) => {
- if (val instanceof Set) {
- val.forEach(fn);
- } else {
- fn(val);
- }
-};
-
-const addAndConvert = (main, prop, item) => {
- let container = main[prop];
- if (!(container instanceof Set)) {
- main[prop] = container = new Set([container]);
- }
- container.add(item);
-};
-
-const clearItem = cont => key => {
- const set = cont[key];
- if (set instanceof Set) {
- set.clear();
- } else {
- delete cont[key];
- }
-};
-
-const delFromSet = (main, prop, item) => {
- const container = main[prop];
- if (container instanceof Set) {
- container.delete(item);
- } else if (container === item) {
- delete main[prop];
- }
-};
-
-const isEmptySet = (val) => val instanceof Set ? val.size === 0 : !val;
-
-/**
- * @typedef {String} Path
- */
-
-// fs_watch helpers
-
-// object to hold per-process fs_watch instances
-// (may be shared across chokidar FSWatcher instances)
-
-/**
- * @typedef {Object} FsWatchContainer
- * @property {Set} listeners
- * @property {Set} errHandlers
- * @property {Set} rawEmitters
- * @property {fs.FSWatcher=} watcher
- * @property {Boolean=} watcherUnusable
- */
-
-/**
- * @type {Map<String,FsWatchContainer>}
- */
-const FsWatchInstances = new Map();
-
-/**
- * Instantiates the fs_watch interface
- * @param {String} path to be watched
- * @param {Object} options to be passed to fs_watch
- * @param {Function} listener main event handler
- * @param {Function} errHandler emits info about errors
- * @param {Function} emitRaw emits raw event data
- * @returns {fs.FSWatcher} new fsevents instance
- */
-function createFsWatchInstance(path, options, listener, errHandler, emitRaw) {
- const handleEvent = (rawEvent, evPath) => {
- listener(path);
- emitRaw(rawEvent, evPath, {watchedPath: path});
-
- // emit based on events occurring for files from a directory's watcher in
- // case the file's watcher misses it (and rely on throttling to de-dupe)
- if (evPath && path !== evPath) {
- fsWatchBroadcast(
- sysPath.resolve(path, evPath), KEY_LISTENERS, sysPath.join(path, evPath)
- );
- }
- };
- try {
- return fs.watch(path, options, handleEvent);
- } catch (error) {
- errHandler(error);
- }
-}
-
-/**
- * Helper for passing fs_watch event data to a collection of listeners
- * @param {Path} fullPath absolute path bound to fs_watch instance
- * @param {String} type listener type
- * @param {*=} val1 arguments to be passed to listeners
- * @param {*=} val2
- * @param {*=} val3
- */
-const fsWatchBroadcast = (fullPath, type, val1, val2, val3) => {
- const cont = FsWatchInstances.get(fullPath);
- if (!cont) return;
- foreach(cont[type], (listener) => {
- listener(val1, val2, val3);
- });
-};
-
-/**
- * Instantiates the fs_watch interface or binds listeners
- * to an existing one covering the same file system entry
- * @param {String} path
- * @param {String} fullPath absolute path
- * @param {Object} options to be passed to fs_watch
- * @param {Object} handlers container for event listener functions
- */
-const setFsWatchListener = (path, fullPath, options, handlers) => {
- const {listener, errHandler, rawEmitter} = handlers;
- let cont = FsWatchInstances.get(fullPath);
-
- /** @type {fs.FSWatcher=} */
- let watcher;
- if (!options.persistent) {
- watcher = createFsWatchInstance(
- path, options, listener, errHandler, rawEmitter
- );
- return watcher.close.bind(watcher);
- }
- if (cont) {
- addAndConvert(cont, KEY_LISTENERS, listener);
- addAndConvert(cont, KEY_ERR, errHandler);
- addAndConvert(cont, KEY_RAW, rawEmitter);
- } else {
- watcher = createFsWatchInstance(
- path,
- options,
- fsWatchBroadcast.bind(null, fullPath, KEY_LISTENERS),
- errHandler, // no need to use broadcast here
- fsWatchBroadcast.bind(null, fullPath, KEY_RAW)
- );
- if (!watcher) return;
- watcher.on(EV_ERROR, async (error) => {
- const broadcastErr = fsWatchBroadcast.bind(null, fullPath, KEY_ERR);
- cont.watcherUnusable = true; // documented since Node 10.4.1
- // Workaround for https://github.com/joyent/node/issues/4337
- if (isWindows && error.code === 'EPERM') {
- try {
- const fd = await open(path, 'r');
- await close(fd);
- broadcastErr(error);
- } catch (err) {}
- } else {
- broadcastErr(error);
- }
- });
- cont = {
- listeners: listener,
- errHandlers: errHandler,
- rawEmitters: rawEmitter,
- watcher
- };
- FsWatchInstances.set(fullPath, cont);
- }
- // const index = cont.listeners.indexOf(listener);
-
- // removes this instance's listeners and closes the underlying fs_watch
- // instance if there are no more listeners left
- return () => {
- delFromSet(cont, KEY_LISTENERS, listener);
- delFromSet(cont, KEY_ERR, errHandler);
- delFromSet(cont, KEY_RAW, rawEmitter);
- if (isEmptySet(cont.listeners)) {
- // Check to protect against issue gh-730.
- // if (cont.watcherUnusable) {
- cont.watcher.close();
- // }
- FsWatchInstances.delete(fullPath);
- HANDLER_KEYS.forEach(clearItem(cont));
- cont.watcher = undefined;
- Object.freeze(cont);
- }
- };
-};
-
-// fs_watchFile helpers
-
-// object to hold per-process fs_watchFile instances
-// (may be shared across chokidar FSWatcher instances)
-const FsWatchFileInstances = new Map();
-
-/**
- * Instantiates the fs_watchFile interface or binds listeners
- * to an existing one covering the same file system entry
- * @param {String} path to be watched
- * @param {String} fullPath absolute path
- * @param {Object} options options to be passed to fs_watchFile
- * @param {Object} handlers container for event listener functions
- * @returns {Function} closer
- */
-const setFsWatchFileListener = (path, fullPath, options, handlers) => {
- const {listener, rawEmitter} = handlers;
- let cont = FsWatchFileInstances.get(fullPath);
-
- /* eslint-disable no-unused-vars, prefer-destructuring */
- let listeners = new Set();
- let rawEmitters = new Set();
-
- const copts = cont && cont.options;
- if (copts && (copts.persistent < options.persistent || copts.interval > options.interval)) {
- // "Upgrade" the watcher to persistence or a quicker interval.
- // This creates some unlikely edge case issues if the user mixes
- // settings in a very weird way, but solving for those cases
- // doesn't seem worthwhile for the added complexity.
- listeners = cont.listeners;
- rawEmitters = cont.rawEmitters;
- fs.unwatchFile(fullPath);
- cont = undefined;
- }
-
- /* eslint-enable no-unused-vars, prefer-destructuring */
-
- if (cont) {
- addAndConvert(cont, KEY_LISTENERS, listener);
- addAndConvert(cont, KEY_RAW, rawEmitter);
- } else {
- // TODO
- // listeners.add(listener);
- // rawEmitters.add(rawEmitter);
- cont = {
- listeners: listener,
- rawEmitters: rawEmitter,
- options,
- watcher: fs.watchFile(fullPath, options, (curr, prev) => {
- foreach(cont.rawEmitters, (rawEmitter) => {
- rawEmitter(EV_CHANGE, fullPath, {curr, prev});
- });
- const currmtime = curr.mtimeMs;
- if (curr.size !== prev.size || currmtime > prev.mtimeMs || currmtime === 0) {
- foreach(cont.listeners, (listener) => listener(path, curr));
- }
- })
- };
- FsWatchFileInstances.set(fullPath, cont);
- }
- // const index = cont.listeners.indexOf(listener);
-
- // Removes this instance's listeners and closes the underlying fs_watchFile
- // instance if there are no more listeners left.
- return () => {
- delFromSet(cont, KEY_LISTENERS, listener);
- delFromSet(cont, KEY_RAW, rawEmitter);
- if (isEmptySet(cont.listeners)) {
- FsWatchFileInstances.delete(fullPath);
- fs.unwatchFile(fullPath);
- cont.options = cont.watcher = undefined;
- Object.freeze(cont);
- }
- };
-};
-
-/**
- * @mixin
- */
-class NodeFsHandler {
-
-/**
- * @param {import("../index").FSWatcher} fsW
- */
-constructor(fsW) {
- this.fsw = fsW;
- this._boundHandleError = (error) => fsW._handleError(error);
-}
-
-/**
- * Watch file for changes with fs_watchFile or fs_watch.
- * @param {String} path to file or dir
- * @param {Function} listener on fs change
- * @returns {Function} closer for the watcher instance
- */
-_watchWithNodeFs(path, listener) {
- const opts = this.fsw.options;
- const directory = sysPath.dirname(path);
- const basename = sysPath.basename(path);
- const parent = this.fsw._getWatchedDir(directory);
- parent.add(basename);
- const absolutePath = sysPath.resolve(path);
- const options = {persistent: opts.persistent};
- if (!listener) listener = EMPTY_FN;
-
- let closer;
- if (opts.usePolling) {
- options.interval = opts.enableBinaryInterval && isBinaryPath(basename) ?
- opts.binaryInterval : opts.interval;
- closer = setFsWatchFileListener(path, absolutePath, options, {
- listener,
- rawEmitter: this.fsw._emitRaw
- });
- } else {
- closer = setFsWatchListener(path, absolutePath, options, {
- listener,
- errHandler: this._boundHandleError,
- rawEmitter: this.fsw._emitRaw
- });
- }
- return closer;
-}
-
-/**
- * Watch a file and emit add event if warranted.
- * @param {Path} file Path
- * @param {fs.Stats} stats result of fs_stat
- * @param {Boolean} initialAdd was the file added at watch instantiation?
- * @returns {Function} closer for the watcher instance
- */
-_handleFile(file, stats, initialAdd) {
- if (this.fsw.closed) {
- return;
- }
- const dirname = sysPath.dirname(file);
- const basename = sysPath.basename(file);
- const parent = this.fsw._getWatchedDir(dirname);
- // stats is always present
- let prevStats = stats;
-
- // if the file is already being watched, do nothing
- if (parent.has(basename)) return;
-
- const listener = async (path, newStats) => {
- if (!this.fsw._throttle(THROTTLE_MODE_WATCH, file, 5)) return;
- if (!newStats || newStats.mtimeMs === 0) {
- try {
- const newStats = await stat(file);
- if (this.fsw.closed) return;
- // Check that change event was not fired because of changed only accessTime.
- const at = newStats.atimeMs;
- const mt = newStats.mtimeMs;
- if (!at || at <= mt || mt !== prevStats.mtimeMs) {
- this.fsw._emit(EV_CHANGE, file, newStats);
- }
- if (isLinux && prevStats.ino !== newStats.ino) {
- this.fsw._closeFile(path)
- prevStats = newStats;
- this.fsw._addPathCloser(path, this._watchWithNodeFs(file, listener));
- } else {
- prevStats = newStats;
- }
- } catch (error) {
- // Fix issues where mtime is null but file is still present
- this.fsw._remove(dirname, basename);
- }
- // add is about to be emitted if file not already tracked in parent
- } else if (parent.has(basename)) {
- // Check that change event was not fired because of changed only accessTime.
- const at = newStats.atimeMs;
- const mt = newStats.mtimeMs;
- if (!at || at <= mt || mt !== prevStats.mtimeMs) {
- this.fsw._emit(EV_CHANGE, file, newStats);
- }
- prevStats = newStats;
- }
- }
- // kick off the watcher
- const closer = this._watchWithNodeFs(file, listener);
-
- // emit an add event if we're supposed to
- if (!(initialAdd && this.fsw.options.ignoreInitial) && this.fsw._isntIgnored(file)) {
- if (!this.fsw._throttle(EV_ADD, file, 0)) return;
- this.fsw._emit(EV_ADD, file, stats);
- }
-
- return closer;
-}
-
-/**
- * Handle symlinks encountered while reading a dir.
- * @param {Object} entry returned by readdirp
- * @param {String} directory path of dir being read
- * @param {String} path of this item
- * @param {String} item basename of this item
- * @returns {Promise} true if no more processing is needed for this entry.
- */
-async _handleSymlink(entry, directory, path, item) {
- if (this.fsw.closed) {
- return;
- }
- const full = entry.fullPath;
- const dir = this.fsw._getWatchedDir(directory);
-
- if (!this.fsw.options.followSymlinks) {
- // watch symlink directly (don't follow) and detect changes
- this.fsw._incrReadyCount();
-
- let linkPath;
- try {
- linkPath = await fsrealpath(path);
- } catch (e) {
- this.fsw._emitReady();
- return true;
- }
-
- if (this.fsw.closed) return;
- if (dir.has(item)) {
- if (this.fsw._symlinkPaths.get(full) !== linkPath) {
- this.fsw._symlinkPaths.set(full, linkPath);
- this.fsw._emit(EV_CHANGE, path, entry.stats);
- }
- } else {
- dir.add(item);
- this.fsw._symlinkPaths.set(full, linkPath);
- this.fsw._emit(EV_ADD, path, entry.stats);
- }
- this.fsw._emitReady();
- return true;
- }
-
- // don't follow the same symlink more than once
- if (this.fsw._symlinkPaths.has(full)) {
- return true;
- }
-
- this.fsw._symlinkPaths.set(full, true);
-}
-
-_handleRead(directory, initialAdd, wh, target, dir, depth, throttler) {
- // Normalize the directory name on Windows
- directory = sysPath.join(directory, EMPTY_STR);
-
- if (!wh.hasGlob) {
- throttler = this.fsw._throttle('readdir', directory, 1000);
- if (!throttler) return;
- }
-
- const previous = this.fsw._getWatchedDir(wh.path);
- const current = new Set();
-
- let stream = this.fsw._readdirp(directory, {
- fileFilter: entry => wh.filterPath(entry),
- directoryFilter: entry => wh.filterDir(entry),
- depth: 0
- }).on(STR_DATA, async (entry) => {
- if (this.fsw.closed) {
- stream = undefined;
- return;
- }
- const item = entry.path;
- let path = sysPath.join(directory, item);
- current.add(item);
-
- if (entry.stats.isSymbolicLink() && await this._handleSymlink(entry, directory, path, item)) {
- return;
- }
-
- if (this.fsw.closed) {
- stream = undefined;
- return;
- }
- // Files present in the current directory snapshot
- // but absent in the previous one are added to the
- // watch list and emit an `add` event.
- if (item === target || !target && !previous.has(item)) {
- this.fsw._incrReadyCount();
-
- // ensure relativeness of path is preserved in case of watcher reuse
- path = sysPath.join(dir, sysPath.relative(dir, path));
-
- this._addToNodeFs(path, initialAdd, wh, depth + 1);
- }
- }).on(EV_ERROR, this._boundHandleError);
-
- return new Promise(resolve =>
- stream.once(STR_END, () => {
- if (this.fsw.closed) {
- stream = undefined;
- return;
- }
- const wasThrottled = throttler ? throttler.clear() : false;
-
- resolve();
-
- // Files absent from the current directory snapshot
- // but present in the previous one emit a `remove` event
- // and are removed from @watched[directory].
- previous.getChildren().filter((item) => {
- return item !== directory &&
- !current.has(item) &&
- // in case of intersecting globs;
- // a path may have been filtered out of this readdir, but
- // shouldn't be removed because it matches a different glob
- (!wh.hasGlob || wh.filterPath({
- fullPath: sysPath.resolve(directory, item)
- }));
- }).forEach((item) => {
- this.fsw._remove(directory, item);
- });
-
- stream = undefined;
-
- // one more time for any missed in case changes came in extremely quickly
- if (wasThrottled) this._handleRead(directory, false, wh, target, dir, depth, throttler);
- })
- );
-}
-
-/**
- * Read directory to add / remove files from `@watched` list and re-read it on change.
- * @param {String} dir fs path
- * @param {fs.Stats} stats
- * @param {Boolean} initialAdd
- * @param {Number} depth relative to user-supplied path
- * @param {String} target child path targeted for watch
- * @param {Object} wh Common watch helpers for this path
- * @param {String} realpath
- * @returns {Promise} closer for the watcher instance.
- */
-async _handleDir(dir, stats, initialAdd, depth, target, wh, realpath) {
- const parentDir = this.fsw._getWatchedDir(sysPath.dirname(dir));
- const tracked = parentDir.has(sysPath.basename(dir));
- if (!(initialAdd && this.fsw.options.ignoreInitial) && !target && !tracked) {
- if (!wh.hasGlob || wh.globFilter(dir)) this.fsw._emit(EV_ADD_DIR, dir, stats);
- }
-
- // ensure dir is tracked (harmless if redundant)
- parentDir.add(sysPath.basename(dir));
- this.fsw._getWatchedDir(dir);
- let throttler;
- let closer;
-
- const oDepth = this.fsw.options.depth;
- if ((oDepth == null || depth <= oDepth) && !this.fsw._symlinkPaths.has(realpath)) {
- if (!target) {
- await this._handleRead(dir, initialAdd, wh, target, dir, depth, throttler);
- if (this.fsw.closed) return;
- }
-
- closer = this._watchWithNodeFs(dir, (dirPath, stats) => {
- // if current directory is removed, do nothing
- if (stats && stats.mtimeMs === 0) return;
-
- this._handleRead(dirPath, false, wh, target, dir, depth, throttler);
- });
- }
- return closer;
-}
-
-/**
- * Handle added file, directory, or glob pattern.
- * Delegates call to _handleFile / _handleDir after checks.
- * @param {String} path to file or dir
- * @param {Boolean} initialAdd was the file added at watch instantiation?
- * @param {Object} priorWh watch helpers inherited from the parent path, if any
- * @param {Number} depth depth relative to user-supplied path
- * @param {String=} target child path actually targeted for watch
- * @returns {Promise}
- */
-async _addToNodeFs(path, initialAdd, priorWh, depth, target) {
- const ready = this.fsw._emitReady;
- if (this.fsw._isIgnored(path) || this.fsw.closed) {
- ready();
- return false;
- }
-
- const wh = this.fsw._getWatchHelpers(path, depth);
- if (!wh.hasGlob && priorWh) {
- wh.hasGlob = priorWh.hasGlob;
- wh.globFilter = priorWh.globFilter;
- wh.filterPath = entry => priorWh.filterPath(entry);
- wh.filterDir = entry => priorWh.filterDir(entry);
- }
-
- // evaluate what is at the path we're being asked to watch
- try {
- const stats = await statMethods[wh.statMethod](wh.watchPath);
- if (this.fsw.closed) return;
- if (this.fsw._isIgnored(wh.watchPath, stats)) {
- ready();
- return false;
- }
-
- const follow = this.fsw.options.followSymlinks && !path.includes(STAR) && !path.includes(BRACE_START);
- let closer;
- if (stats.isDirectory()) {
- const absPath = sysPath.resolve(path);
- const targetPath = follow ? await fsrealpath(path) : path;
- if (this.fsw.closed) return;
- closer = await this._handleDir(wh.watchPath, stats, initialAdd, depth, target, wh, targetPath);
- if (this.fsw.closed) return;
- // preserve this symlink's target path
- if (absPath !== targetPath && targetPath !== undefined) {
- this.fsw._symlinkPaths.set(absPath, targetPath);
- }
- } else if (stats.isSymbolicLink()) {
- const targetPath = follow ? await fsrealpath(path) : path;
- if (this.fsw.closed) return;
- const parent = sysPath.dirname(wh.watchPath);
- this.fsw._getWatchedDir(parent).add(wh.watchPath);
- this.fsw._emit(EV_ADD, wh.watchPath, stats);
- closer = await this._handleDir(parent, stats, initialAdd, depth, path, wh, targetPath);
- if (this.fsw.closed) return;
-
- // preserve this symlink's target path
- if (targetPath !== undefined) {
- this.fsw._symlinkPaths.set(sysPath.resolve(path), targetPath);
- }
- } else {
- closer = this._handleFile(wh.watchPath, stats, initialAdd);
- }
- ready();
-
- this.fsw._addPathCloser(path, closer);
- return false;
-
- } catch (error) {
- if (this.fsw._handleError(error)) {
- ready();
- return path;
- }
- }
-}
-
-}
-
-module.exports = NodeFsHandler;
diff --git a/spaces/rdyzakya/IndoLEGO-ABSA/utils.py b/spaces/rdyzakya/IndoLEGO-ABSA/utils.py
deleted file mode 100644
index 8e23f1a2bae4cb7a3c03dea4887953138232af29..0000000000000000000000000000000000000000
--- a/spaces/rdyzakya/IndoLEGO-ABSA/utils.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import re
-import constant
-def add_prompt(text, se_order="aos"):
- prompt = []
- for counter, se in enumerate(se_order):
- prompt.append(constant.SENTIMENT_ELEMENT[se] + " : " + f"")
- prompt = " ,".join(prompt)
- result = text + "| " + prompt
- return result
-
-def catch_answer(out, se_order):
- if out == constant.NO_TARGET:
- return []
- pattern = r""
- for se in se_order:
- if se != 's':
- pattern += f"\s*(?P<{constant.SENTIMENT_ELEMENT[se]}>[^;]+)\s*"
- else:
- pattern += f"\s*(?P<{constant.SENTIMENT_ELEMENT['s']}>positive|negative|neutral)\s*"
- result = [found_iter.groupdict() for found_iter in re.finditer(pattern,out)]
- for i in range(len(result)):
- for k, v in result[i].items():
- result[i][k] = v.strip()
- return result
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/LINK.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/LINK.md
deleted file mode 100644
index bfef86ab295f4d4be452bcb73be93a43330ede19..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/LINK.md
+++ /dev/null
@@ -1,57 +0,0 @@
-## Download El Hobbit HD 1080p in Spanish (Castellano) via Mega
-
-
How can you download the El Hobbit trilogy in HD 1080p and in Spanish via Mega?
If you want to enjoy these films in the best possible quality and in Spanish, we will explain how to download them via Mega, one of the most popular and secure hosting services on the internet. To do so, you just need to follow these steps:
-
-
-
-
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cocktail Movie Full In Tamil Hd 1080p.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cocktail Movie Full In Tamil Hd 1080p.md
deleted file mode 100644
index 4e66769b9b39f23c0e6bc3bbbe344bfe7d93f877..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cocktail Movie Full In Tamil Hd 1080p.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
Cocktail Movie Full In Tamil Hd 1080p: How to Watch Online on ZEE5
-
Cocktail is a Tamil comedy thriller movie that was released in 2020. The movie stars Yogi Babu, Nivishika Naidu, Mithun Maheshwaran, Sayaji Shinde, KPY Bala, Pugazh, Mohamed Kureshi, Meghna Ellen and is directed by Ra Vijaya Murugan. The movie revolves around four friends who find a dead body in their car trunk and try to get rid of it while being chased by a gangster.
If you are looking for a way to watch Cocktail Movie Full In Tamil Hd 1080p, you can stream it online on ZEE5. ZEE5 is a popular OTT platform that offers a variety of content in different languages and genres. You can watch Cocktail Movie Full In Tamil Hd 1080p on ZEE5 with a subscription plan that suits your budget and preferences. You can also enjoy other benefits like ad-free viewing, offline download, live TV channels, and more.
-
To watch Cocktail Movie Full In Tamil Hd 1080p on ZEE5, you need to follow these simple steps:
-
-
Visit the ZEE5 website or download the ZEE5 app on your device.
-
Sign up for a free account or log in with your existing credentials.
-
Choose a subscription plan that suits your needs. You can opt for a monthly, quarterly, or annual plan.
-
Search for Cocktail Movie Full In Tamil Hd 1080p in the search bar or browse through the Tamil movies section.
-
Click on the play button and enjoy the movie in high definition quality.
-
-
Cocktail Movie Full In Tamil Hd 1080p is a fun-filled movie that will keep you entertained with its twists and turns. Watch it online on ZEE5 and have a great time with your friends and family.
-
-
If you are wondering what makes Cocktail Movie Full In Tamil Hd 1080p worth watching, here are some reasons why you should not miss it:
-
-
The movie has a hilarious plot full of comedy scenes that will make you laugh out loud, along with suspense and action elements that will keep you on the edge of your seat.
-
The movie has a talented star cast that delivers a brilliant performance. Yogi Babu, who plays the lead role of Don Bosco, is a popular comedian who has a great sense of timing and dialogue delivery. Nivishika Naidu, who plays his love interest, is a newcomer who impresses with her charm and acting skills. The other actors like Mithun Maheshwaran, Sayaji Shinde, KPY Bala, Pugazh, Mohamed Kureshi, Meghna Ellen also do justice to their roles and add to the entertainment quotient.
-
The movie has a catchy soundtrack that will make you groove to the tunes. The movie has four songs that are composed by Sai Bhaskar and sung by singers like Gaana Joe Papa, Gana Guna, and Udhay Kannan. The songs are peppy and catchy and suit the mood of the movie. The songs are also available for download on various platforms.
-
-
Cocktail Movie Full In Tamil Hd 1080p is a movie that will make you forget your worries and enjoy a good time. Watch it online on ZEE5 and share your feedback with us.
-
-
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Comsol Multiphysics 5.2 Free Download Cracked.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Comsol Multiphysics 5.2 Free Download Cracked.md
deleted file mode 100644
index 227f57f4671782d3f1feb2e0062d973a88758f26..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Comsol Multiphysics 5.2 Free Download Cracked.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-The company has helped create more than 50,000 products using its multiphysics software.
-
-Multiphysics is a vital tool in the development of engineering products in industries such as aerospace, automotive, electrical, food, and consumer goods. In addition to designing products, engineers use multiphysics models to calculate the performance of new and existing processes and devices and gain insight into their behavior in complex, real-world situations.
-
-Computational multiphysics software (COMSOL Multiphysics®) is designed to help engineers better understand and control product design, manufacture, and operation by enabling simulation of the physical behavior of real-world design, devices, and processes, and their interactions with the natural environment.
-
-Computational multiphysics software in particular allows engineers to create models that include a combination of three-dimensional objects, fluids, electrical or magnetic fields, heat transfer, stress, and deformable material behavior. COMSOL's application library provides an extensive range of physics-enabled materials, structures, components, sensors, actuators, electromechanical devices, and advanced simulation capabilities that are essential for commercial design, development, and production.
-
-In addition to three-dimensional objects and moving components, COMSOL Multiphysics simulation software provides advanced heat transfer, fluid flow, electromagnetic field, and electro-magnetic-thermal modeling capabilities, along with many other features.
-
-There are many industry applications for multiphysics software. Some examples include:
-
-Manufacturing and power plants
-
-Applied to the design of motors, generators, and other rotating equipment used in industrial applications, COMSOL Multiphysics simulation software enables manufacturers to help engineers create products that meet design specifications without suffering performance loss due to operating conditions. By incorporating a thermomechanical model, the software enables engineers to simulate how and where heat is generated in devices, thereby optimizing the design for optimal performance, while also providing information that would be extremely difficult, if not impossible, to observe with physical test methods.
-
-In addition to determining whether a new design will perform as expected, this simulation software also enables engineers to determine how to improve the performance of existing machines. For example, COMSOL Multiphysics simulation software can be used to determine the best location of fans in large-scale cooling systems, and how to best distribute flow across the surfaces of heat sinks.
-
-Manufacturing and power plants use heat exchangers to absorb and dissipate heat. The thermal behavior of these devices is often coupled to other mechanical 4fefd39f24
-
-
-
diff --git a/spaces/riccorl/relik-entity-linking/scripts/setup.sh b/spaces/riccorl/relik-entity-linking/scripts/setup.sh
deleted file mode 100644
index 36f2afd2cba191cb89c2b36ffc64d51cd2274cc5..0000000000000000000000000000000000000000
--- a/spaces/riccorl/relik-entity-linking/scripts/setup.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-# setup conda
-CONDA_BASE=$(conda info --base)
-# check if conda is installed
-if [ -z "$CONDA_BASE" ]; then
- echo "Conda is not installed. Please install conda first."
- exit 1
-fi
-source "$CONDA_BASE"/etc/profile.d/conda.sh
-
-# create conda env
-read -rp "Enter environment name or prefix: " ENV_NAME
-read -rp "Enter python version (default 3.10): " PYTHON_VERSION
-if [ -z "$PYTHON_VERSION" ]; then
- PYTHON_VERSION="3.10"
-fi
-
-# check if ENV_NAME is a full path
-if [[ "$ENV_NAME" == /* ]]; then
- CONDA_NEW_ARG="--prefix"
-else
- CONDA_NEW_ARG="--name"
-fi
-
-conda create -y "$CONDA_NEW_ARG" "$ENV_NAME" python="$PYTHON_VERSION"
-conda activate "$ENV_NAME"
-
-# replace placeholder env with $ENV_NAME in scripts/train.sh
-# NEW_CONDA_LINE="source \$CONDA_BASE/bin/activate $ENV_NAME"
-# sed -i.bak -e "s,.*bin/activate.*,$NEW_CONDA_LINE,g" scripts/train.sh
-
-# install torch
-read -rp "Enter cuda version (e.g. '11.8', default no cuda support): " CUDA_VERSION
-read -rp "Enter PyTorch version (e.g. '2.1', default latest): " PYTORCH_VERSION
-if [ -n "$PYTORCH_VERSION" ]; then
- PYTORCH_VERSION="=$PYTORCH_VERSION"
-fi
-if [ -z "$CUDA_VERSION" ]; then
- conda install -y pytorch"$PYTORCH_VERSION" cpuonly -c pytorch
-else
- conda install -y pytorch"$PYTORCH_VERSION" pytorch-cuda="$CUDA_VERSION" -c pytorch -c nvidia
-fi
-
-# install python requirements
-pip install -e .[all]
diff --git a/spaces/rileho3909/Real-Time-Voice-Cloning/app.py b/spaces/rileho3909/Real-Time-Voice-Cloning/app.py
deleted file mode 100644
index e7d616eafc6d08846c3658d71056405e50a65f38..0000000000000000000000000000000000000000
--- a/spaces/rileho3909/Real-Time-Voice-Cloning/app.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import gradio as gr
-import os
-import shlex
-
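-# Download the pretrained encoder, synthesizer and vocoder checkpoints at startup
-# (Dropbox mirrors), then list the working directory as a quick sanity check.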
-os.system('wget https://www.dropbox.com/s/luro5o8kjotkn70/synpretrained.pt')
-os.system('wget https://www.dropbox.com/s/dv0ymnlqillecfw/encpretrained.pt')
-os.system('wget https://www.dropbox.com/s/aiym2qfv7087bsc/vocpretrained.pt')
-os.system('ls')
-
-
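-# Run the command-line demo on the uploaded reference audio and the input text;
-# demo_cli.py is expected to write demo_output_1.wav, which is returned to Gradio.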
-def inference(audio, text):
- os.system("python demo_cli.py --no_sound --cpu --audio_path "+audio.name+" --text "+shlex.quote(text.strip()))
- return 'demo_output_1.wav'
-
-
-title = "Real-Time-Voice-Cloning"
-description = "Gradio demo for Real-Time-Voice-Cloning: Clone a voice in 5 seconds to generate arbitrary speech in real-time. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below."
-article = "
"
-
-examples=[['test.wav',"This is real time voice cloning on huggingface spaces"]]
-gr.Interface(inference, inputs=[gr.inputs.Audio(type="file"),"text"], outputs=gr.outputs.Audio(type="file"),enable_queue=True,title=title,description=description,article=article, examples=examples).launch()
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/3planesoft christmas evening 3d screensaver serial Where to find and how to use the screensaver.md b/spaces/rorallitri/biomedical-language-models/logs/3planesoft christmas evening 3d screensaver serial Where to find and how to use the screensaver.md
deleted file mode 100644
index b44bc0e74683dee248b2ab4623c33f1beae7edde..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/3planesoft christmas evening 3d screensaver serial Where to find and how to use the screensaver.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
3planesoft christmas evening 3d screensaver serial
-
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Amt Brand Tool Hp Download 15 Everything You Need to Know About NBDMIFIT Tool.md b/spaces/rorallitri/biomedical-language-models/logs/Amt Brand Tool Hp Download 15 Everything You Need to Know About NBDMIFIT Tool.md
deleted file mode 100644
index 49f8c605a17fb00de5c4c08ec972af7e9ee419d0..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Amt Brand Tool Hp Download 15 Everything You Need to Know About NBDMIFIT Tool.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Transas Navi-Sailor 3000 ECDIS-i with licence WF43 crack
-
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Edraw Max 9.4 Crack With License Key 2019 Download and Install the Full Version for Free.md b/spaces/rorallitri/biomedical-language-models/logs/Edraw Max 9.4 Crack With License Key 2019 Download and Install the Full Version for Free.md
deleted file mode 100644
index 87b16be250654e63d4cc63c21fd40233160844f4..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Edraw Max 9.4 Crack With License Key 2019 Download and Install the Full Version for Free.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
This new version of the Edraw Max free download has a very good-looking user interface, along with a compact main window that you can use to choose charts and diagrams. The workspace also includes many text and image styles to use. The tool ships with more than 12,000 high-quality vector symbols, and when you are done creating your chart or presentation you can export it to many file formats including HTML, PDF, BMP, DIP, JPG, GIF, TIF, PNG, WMF and EMF. You can also export to Word, Excel and PowerPoint.
-
Edraw Max 9.4 Crack With License Key 2019 Free Download
-
-
\ No newline at end of file
diff --git a/spaces/rstallman/Mayfair-Partner-Music/tests/data/test_audio_dataset.py b/spaces/rstallman/Mayfair-Partner-Music/tests/data/test_audio_dataset.py
deleted file mode 100644
index b69c9c397830738b73d6c229009f84b867cda801..0000000000000000000000000000000000000000
--- a/spaces/rstallman/Mayfair-Partner-Music/tests/data/test_audio_dataset.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from functools import partial
-from itertools import product
-import json
-import math
-import os
-import random
-import typing as tp
-
-import pytest
-import torch
-from torch.utils.data import DataLoader
-
-from audiocraft.data.audio_dataset import (
- AudioDataset,
- AudioMeta,
- _get_audio_meta,
- load_audio_meta,
- save_audio_meta
-)
-from audiocraft.data.zip import PathInZip
-
-from ..common_utils import TempDirMixin, get_white_noise, save_wav
-
-
-class TestAudioMeta(TempDirMixin):
-
- def test_get_audio_meta(self):
- sample_rates = [8000, 16_000]
- channels = [1, 2]
- duration = 1.
- for sample_rate, ch in product(sample_rates, channels):
- n_frames = int(duration * sample_rate)
- wav = get_white_noise(ch, n_frames)
- path = self.get_temp_path('sample.wav')
- save_wav(path, wav, sample_rate)
- m = _get_audio_meta(path, minimal=True)
- assert m.path == path, 'path does not match'
- assert m.sample_rate == sample_rate, 'sample rate does not match'
- assert m.duration == duration, 'duration does not match'
- assert m.amplitude is None
- assert m.info_path is None
-
- def test_save_audio_meta(self):
- audio_meta = [
- AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')),
- AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json'))
- ]
- empty_audio_meta = []
- for idx, meta in enumerate([audio_meta, empty_audio_meta]):
- path = self.get_temp_path(f'data_{idx}_save.jsonl')
- save_audio_meta(path, meta)
- with open(path, 'r') as f:
- lines = f.readlines()
- read_meta = [AudioMeta.from_dict(json.loads(line)) for line in lines]
- assert len(read_meta) == len(meta)
- for m, read_m in zip(meta, read_meta):
- assert m == read_m
-
- def test_load_audio_meta(self):
- try:
- import dora
- except ImportError:
- dora = None # type: ignore
-
- audio_meta = [
- AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')),
- AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json'))
- ]
- empty_meta = []
- for idx, meta in enumerate([audio_meta, empty_meta]):
- path = self.get_temp_path(f'data_{idx}_load.jsonl')
- with open(path, 'w') as f:
- for m in meta:
- json_str = json.dumps(m.to_dict()) + '\n'
- f.write(json_str)
- read_meta = load_audio_meta(path)
- assert len(read_meta) == len(meta)
- for m, read_m in zip(meta, read_meta):
- if dora:
- m.path = dora.git_save.to_absolute_path(m.path)
- assert m == read_m, f'original={m}, read={read_m}'
-
-
-class TestAudioDataset(TempDirMixin):
-
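- # Helper: write `num_examples` white-noise wav files (fixed or randomly sampled
- # duration) under a fresh temp dir and return that directory's path.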
- def _create_audio_files(self,
- root_name: str,
- num_examples: int,
- durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.),
- sample_rate: int = 16_000,
- channels: int = 1):
- root_dir = self.get_temp_dir(root_name)
- for i in range(num_examples):
- if isinstance(durations, float):
- duration = durations
- elif isinstance(durations, tuple) and len(durations) == 1:
- duration = durations[0]
- elif isinstance(durations, tuple) and len(durations) == 2:
- duration = random.uniform(durations[0], durations[1])
- else:
- assert False
- n_frames = int(duration * sample_rate)
- wav = get_white_noise(channels, n_frames)
- path = os.path.join(root_dir, f'example_{i}.wav')
- save_wav(path, wav, sample_rate)
- return root_dir
-
- def _create_audio_dataset(self,
- root_name: str,
- total_num_examples: int,
- durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.),
- sample_rate: int = 16_000,
- channels: int = 1,
- segment_duration: tp.Optional[float] = None,
- num_examples: int = 10,
- shuffle: bool = True,
- return_info: bool = False):
- root_dir = self._create_audio_files(root_name, total_num_examples, durations, sample_rate, channels)
- dataset = AudioDataset.from_path(root_dir,
- minimal_meta=True,
- segment_duration=segment_duration,
- num_samples=num_examples,
- sample_rate=sample_rate,
- channels=channels,
- shuffle=shuffle,
- return_info=return_info)
- return dataset
-
- def test_dataset_full(self):
- total_examples = 10
- min_duration, max_duration = 1., 4.
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=(min_duration, max_duration),
- sample_rate=sample_rate, channels=channels, segment_duration=None)
- assert len(dataset) == total_examples
- assert dataset.sample_rate == sample_rate
- assert dataset.channels == channels
- for idx in range(len(dataset)):
- sample = dataset[idx]
- assert sample.shape[0] == channels
- assert sample.shape[1] <= int(max_duration * sample_rate)
- assert sample.shape[1] >= int(min_duration * sample_rate)
-
- def test_dataset_segment(self):
- total_examples = 10
- num_samples = 20
- min_duration, max_duration = 1., 4.
- segment_duration = 1.
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples)
- assert len(dataset) == num_samples
- assert dataset.sample_rate == sample_rate
- assert dataset.channels == channels
- for idx in range(len(dataset)):
- sample = dataset[idx]
- assert sample.shape[0] == channels
- assert sample.shape[1] == int(segment_duration * sample_rate)
-
- def test_dataset_equal_audio_and_segment_durations(self):
- total_examples = 1
- num_samples = 2
- audio_duration = 1.
- segment_duration = 1.
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples)
- assert len(dataset) == num_samples
- assert dataset.sample_rate == sample_rate
- assert dataset.channels == channels
- for idx in range(len(dataset)):
- sample = dataset[idx]
- assert sample.shape[0] == channels
- assert sample.shape[1] == int(segment_duration * sample_rate)
- # the random seek_time adds variability on audio read
- sample_1 = dataset[0]
- sample_2 = dataset[1]
- assert not torch.allclose(sample_1, sample_2)
-
- def test_dataset_samples(self):
- total_examples = 1
- num_samples = 2
- audio_duration = 1.
- segment_duration = 1.
- sample_rate = 16_000
- channels = 1
-
- create_dataset = partial(
- self._create_audio_dataset,
- 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples,
- )
-
- dataset = create_dataset(shuffle=True)
- # when shuffle = True, we have different inputs for the same index across epoch
- sample_1 = dataset[0]
- sample_2 = dataset[0]
- assert not torch.allclose(sample_1, sample_2)
-
- dataset_noshuffle = create_dataset(shuffle=False)
- # when shuffle = False, we have same inputs for the same index across epoch
- sample_1 = dataset_noshuffle[0]
- sample_2 = dataset_noshuffle[0]
- assert torch.allclose(sample_1, sample_2)
-
- def test_dataset_return_info(self):
- total_examples = 10
- num_samples = 20
- min_duration, max_duration = 1., 4.
- segment_duration = 1.
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
- assert len(dataset) == num_samples
- assert dataset.sample_rate == sample_rate
- assert dataset.channels == channels
- for idx in range(len(dataset)):
- sample, segment_info = dataset[idx]
- assert sample.shape[0] == channels
- assert sample.shape[1] == int(segment_duration * sample_rate)
- assert segment_info.sample_rate == sample_rate
- assert segment_info.total_frames == int(segment_duration * sample_rate)
- assert segment_info.n_frames <= int(segment_duration * sample_rate)
- assert segment_info.seek_time >= 0
-
- def test_dataset_return_info_no_segment_duration(self):
- total_examples = 10
- num_samples = 20
- min_duration, max_duration = 1., 4.
- segment_duration = None
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
- assert len(dataset) == total_examples
- assert dataset.sample_rate == sample_rate
- assert dataset.channels == channels
- for idx in range(len(dataset)):
- sample, segment_info = dataset[idx]
- assert sample.shape[0] == channels
- assert sample.shape[1] == segment_info.total_frames
- assert segment_info.sample_rate == sample_rate
- assert segment_info.n_frames <= segment_info.total_frames
-
- def test_dataset_collate_fn(self):
- total_examples = 10
- num_samples = 20
- min_duration, max_duration = 1., 4.
- segment_duration = 1.
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=False)
- batch_size = 4
- dataloader = DataLoader(
- dataset,
- batch_size=batch_size,
- num_workers=0
- )
- for idx, batch in enumerate(dataloader):
- assert batch.shape[0] == batch_size
-
- @pytest.mark.parametrize("segment_duration", [1.0, None])
- def test_dataset_with_meta_collate_fn(self, segment_duration):
- total_examples = 10
- num_samples = 20
- min_duration, max_duration = 1., 4.
- segment_duration = 1.
- sample_rate = 16_000
- channels = 1
- dataset = self._create_audio_dataset(
- 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
- channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
- batch_size = 4
- dataloader = DataLoader(
- dataset,
- batch_size=batch_size,
- collate_fn=dataset.collater,
- num_workers=0
- )
- for idx, batch in enumerate(dataloader):
- wav, infos = batch
- assert wav.shape[0] == batch_size
- assert len(infos) == batch_size
-
- @pytest.mark.parametrize("segment_duration,sample_on_weight,sample_on_duration,a_hist,b_hist,c_hist", [
- [1, True, True, 0.5, 0.5, 0.0],
- [1, False, True, 0.25, 0.5, 0.25],
- [1, True, False, 0.666, 0.333, 0.0],
- [1, False, False, 0.333, 0.333, 0.333],
- [None, False, False, 0.333, 0.333, 0.333]])
- def test_sample_with_weight(self, segment_duration, sample_on_weight, sample_on_duration, a_hist, b_hist, c_hist):
- random.seed(1234)
- rng = torch.Generator()
- rng.manual_seed(1234)
-
- def _get_histogram(dataset, repetitions=20_000):
- counts = {file_meta.path: 0. for file_meta in meta}
- for _ in range(repetitions):
- file_meta = dataset.sample_file(rng)
- counts[file_meta.path] += 1
- return {name: count / repetitions for name, count in counts.items()}
-
- meta = [
- AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
- AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
- AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
- ]
- dataset = AudioDataset(
- meta, segment_duration=segment_duration, sample_on_weight=sample_on_weight,
- sample_on_duration=sample_on_duration)
- hist = _get_histogram(dataset)
- assert math.isclose(hist['a'], a_hist, abs_tol=0.01)
- assert math.isclose(hist['b'], b_hist, abs_tol=0.01)
- assert math.isclose(hist['c'], c_hist, abs_tol=0.01)
-
- def test_meta_duration_filter_all(self):
- meta = [
- AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
- AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
- AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
- ]
- try:
- AudioDataset(meta, segment_duration=11, min_segment_ratio=1)
- assert False
- except AssertionError:
- assert True
-
- def test_meta_duration_filter_long(self):
- meta = [
- AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
- AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
- AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
- ]
- dataset = AudioDataset(meta, segment_duration=None, min_segment_ratio=1, max_audio_duration=7)
- assert len(dataset) == 2
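The expectations in the `test_sample_with_weight` parametrization above follow directly from how weights and durations combine. A minimal sketch, assuming the dataset samples each file with probability proportional to its weight and/or duration when the corresponding flag is on (and uniformly otherwise), with a missing weight treated as 1 and a zero weight excluding the file:

```python
# Expected sampling frequencies for the files used in test_sample_with_weight.
# Assumption: probability is proportional to weight and/or duration depending on
# the flags; a weight of None counts as 1, a weight of 0 removes the file.
files = [
    {"path": "a", "duration": 5, "weight": 2},
    {"path": "b", "duration": 10, "weight": None},
    {"path": "c", "duration": 5, "weight": 0},
]


def expected_histogram(files, sample_on_weight, sample_on_duration):
    scores = []
    for f in files:
        weight = 1.0 if f["weight"] is None else f["weight"]
        score = 1.0
        if sample_on_weight:
            score *= weight
        if sample_on_duration:
            score *= f["duration"]
        scores.append(score)
    total = sum(scores)
    return {f["path"]: score / total for f, score in zip(files, scores)}


print(expected_histogram(files, True, True))    # {'a': 0.5, 'b': 0.5, 'c': 0.0}
print(expected_histogram(files, False, True))   # {'a': 0.25, 'b': 0.5, 'c': 0.25}
print(expected_histogram(files, True, False))   # {'a': 0.666..., 'b': 0.333..., 'c': 0.0}
print(expected_histogram(files, False, False))  # uniform: one third each
```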
diff --git a/spaces/runa91/bite_gradio/src/smal_pytorch/smal_model/smal_torch_new.py b/spaces/runa91/bite_gradio/src/smal_pytorch/smal_model/smal_torch_new.py
deleted file mode 100644
index 32e1dbb57ad5a79fbeaa5448d392edbf55121a19..0000000000000000000000000000000000000000
--- a/spaces/runa91/bite_gradio/src/smal_pytorch/smal_model/smal_torch_new.py
+++ /dev/null
@@ -1,471 +0,0 @@
-"""
-PyTorch implementation of the SMAL/SMPL model
-see:
- 1.) https://github.com/silviazuffi/smalst/blob/master/smal_model/smal_torch.py
- 2.) https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py
-main changes compared to SMALST and WLDO:
- * new model
- (/ps/scratch/nrueegg/new_projects/side_packages/SMALify/new_smal_pca/results/my_tposeref_results_3/)
- dogs are part of the pca to create the model
- al meshes are centered around their root joint
- the animals are all scaled such that their body length (butt to breast) is 1
- X_init = np.concatenate((vertices_dogs, vertices_smal), axis=0) # vertices_dogs
- X = []
- for ind in range(0, X_init.shape[0]):
- X_tmp, _, _, _ = align_smal_template_to_symmetry_axis(X_init[ind, :, :], subtract_mean=True) # not sure if this is necessary
- X.append(X_tmp)
- X = np.asarray(X)
- # define points which will be used for normalization
- idxs_front = [6, 16, 8, 964] # [1172, 6, 16, 8, 964]
- idxs_back = [174, 2148, 175, 2149] # not in the middle, but pairs
- reg_j = np.asarray(dd['J_regressor'].todense())
- # normalize the meshes such that X_frontback_dist is 1 and the root joint is in the center (0, 0, 0)
- X_front = X[:, idxs_front, :].mean(axis=1)
- X_back = X[:, idxs_back, :].mean(axis=1)
- X_frontback_dist = np.sqrt(((X_front - X_back)**2).sum(axis=1))
- X = X / X_frontback_dist[:, None, None]
- X_j0 = np.sum(X[:, reg_j[0, :]>0, :] * reg_j[0, (reg_j[0, :]>0)][None, :, None], axis=1)
- X = X - X_j0[:, None, :]
- * add limb length changes the same way as in WLDO
- * overall scale factor is added
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import torch
-import chumpy as ch
-import os.path
-from torch import nn
-from torch.autograd import Variable
-import pickle as pkl
-from .batch_lbs import batch_rodrigues, batch_global_rigid_transformation, batch_global_rigid_transformation_biggs, get_bone_length_scales, get_beta_scale_mask
-
-from .smal_basics import align_smal_template_to_symmetry_axis, get_symmetry_indices
-
-import os
-import sys
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
-from configs.SMAL_configs import KEY_VIDS, CANONICAL_MODEL_JOINTS, CANONICAL_MODEL_JOINTS_REFINED, IDXS_BONES_NO_REDUNDANCY # , SMAL_MODEL_PATH
-# from configs.SMAL_configs import SMAL_MODEL_TYPE
-from configs.SMAL_configs import SMAL_MODEL_CONFIG
-
-from smal_pytorch.utils import load_vertex_colors
-
-
-# There are chumpy variables so convert them to numpy.
-def undo_chumpy(x):
- return x if isinstance(x, np.ndarray) else x.r
-
-# class SMAL(object):
-class SMAL(nn.Module):
- def __init__(self, pkl_path=None, smal_model_type=None, n_betas=None, template_name='neutral', use_smal_betas=True, logscale_part_list=None):
- super(SMAL, self).__init__()
-
- # before: pkl_path=SMAL_MODEL_PATH
- if smal_model_type is not None:
- assert (pkl_path is None)
- assert smal_model_type in SMAL_MODEL_CONFIG.keys()
- pkl_path = SMAL_MODEL_CONFIG[smal_model_type]['smal_model_path']
- self.smal_model_type = smal_model_type
- if logscale_part_list is None:
- logscale_part_list = SMAL_MODEL_CONFIG[smal_model_type]['logscale_part_list']
- elif (pkl_path is not None):
- self.smal_model_type = None
- elif (pkl_path is None):
- smal_model_type = 'barc'
- print('use default smal_model_type: ' + smal_model_type)
- pkl_path = SMAL_MODEL_CONFIG[smal_model_type]['smal_model_path']
- self.smal_model_type = smal_model_type
- else:
- raise ValueError
-
-
- '''
- # save some information about the model if possible
- if pkl_path == SMAL_MODEL_PATH:
- self.smal_model_type = SMAL_MODEL_TYPE
- '''
-
- if logscale_part_list is None:
- # logscale_part_list = ['front_legs_l', 'front_legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l', 'back_legs_l', 'back_legs_f']
- self.logscale_part_list = ['legs_l', 'legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l']
- else:
- self.logscale_part_list = logscale_part_list
- self.betas_scale_mask = get_beta_scale_mask(part_list=self.logscale_part_list)
- self.num_betas_logscale = len(self.logscale_part_list)
-
- self.use_smal_betas = use_smal_betas
-
- # -- Load SMPL params --
- try:
- with open(pkl_path, 'r') as f:
- dd = pkl.load(f)
- except (UnicodeDecodeError, TypeError) as e:
- with open(pkl_path, 'rb') as file:
- u = pkl._Unpickler(file)
- u.encoding = 'latin1'
- dd = u.load()
-
- self.f = dd['f']
- self.register_buffer('faces', torch.from_numpy(self.f.astype(int)))
-
- # get the correct template (mean shape)
- if template_name=='neutral':
- v_template = dd['v_template']
- v = v_template
- else:
- raise NotImplementedError
-
- # Mean template vertices
- self.register_buffer('v_template', torch.Tensor(v))
- # Size of mesh [Number of vertices, 3]
- self.size = [self.v_template.shape[0], 3]
- self.num_betas = dd['shapedirs'].shape[-1]
- # symmetry indices
- self.sym_ids_dict = get_symmetry_indices()
-
- # Shape blend shape basis
- shapedir = np.reshape(undo_chumpy(dd['shapedirs']), [-1, self.num_betas]).T
- shapedir.flags['WRITEABLE'] = True # not sure why this is necessary
- self.register_buffer('shapedirs', torch.Tensor(shapedir))
-
- # Regressor for joint locations given shape
- self.register_buffer('J_regressor', torch.Tensor(dd['J_regressor'].T.todense()))
-
- # Pose blend shape basis
- num_pose_basis = dd['posedirs'].shape[-1]
-
- posedirs = np.reshape(undo_chumpy(dd['posedirs']), [-1, num_pose_basis]).T
- self.register_buffer('posedirs', torch.Tensor(posedirs))
-
- # indices of parents for each joints
- self.parents = dd['kintree_table'][0].astype(np.int32)
-
- # LBS weights
- self.register_buffer('weights', torch.Tensor(undo_chumpy(dd['weights'])))
-
- # prepare for vertex offsets
- self._prepare_for_vertex_offsets()
-
-
- def _prepare_for_vertex_offsets(self):
- sym_left_ids = self.sym_ids_dict['left']
- sym_right_ids = self.sym_ids_dict['right']
- sym_center_ids = self.sym_ids_dict['center']
- self.n_center = sym_center_ids.shape[0]
- self.n_left = sym_left_ids.shape[0]
- self.sl = 2*self.n_center # sl: start left
- # get indices to go from half_shapedirs to shapedirs
- inds_back = np.zeros((3889))
- for ind in range(0, sym_center_ids.shape[0]):
- ind_in_forward = sym_center_ids[ind]
- inds_back[ind_in_forward] = ind
- for ind in range(0, sym_left_ids.shape[0]):
- ind_in_forward = sym_left_ids[ind]
- inds_back[ind_in_forward] = sym_center_ids.shape[0] + ind
- for ind in range(0, sym_right_ids.shape[0]):
- ind_in_forward = sym_right_ids[ind]
- inds_back[ind_in_forward] = sym_center_ids.shape[0] + sym_left_ids.shape[0] + ind
- # self.register_buffer('inds_back_torch', torch.Tensor(inds_back).long())
- self.inds_back_torch = torch.Tensor(inds_back).long()
- return
-
-
- def _caclulate_bone_lengths_from_J(self, J, betas_logscale):
- # NEW: calculate bone lengths:
- all_bone_lengths_list = []
- for i in range(1, self.parents.shape[0]):
- bone_vec = J[:, i] - J[:, self.parents[i]]
- bone_length = torch.sqrt(torch.sum(bone_vec ** 2, axis=1))
- all_bone_lengths_list.append(bone_length)
- all_bone_lengths = torch.stack(all_bone_lengths_list)
- # some bones are pairs, it is enough to take one of the two bones
- all_bone_length_scales = get_bone_length_scales(self.logscale_part_list, betas_logscale)
- all_bone_lengths = all_bone_lengths.permute((1,0)) * all_bone_length_scales
-
- return all_bone_lengths #.permute((1,0))
-
-
- def caclulate_bone_lengths(self, beta, betas_logscale, shapedirs_sel=None, short=True):
- nBetas = beta.shape[1]
-
- # 1. Add shape blend shapes
- # do we use the original shapedirs or a new set of selected shapedirs?
- if shapedirs_sel is None:
- shapedirs_sel = self.shapedirs[:nBetas,:]
- else:
- assert shapedirs_sel.shape[0] == nBetas
- v_shaped = self.v_template + torch.reshape(torch.matmul(beta, shapedirs_sel), [-1, self.size[0], self.size[1]])
-
- # 2. Infer shape-dependent joint locations.
- Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
- Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
- Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
- J = torch.stack([Jx, Jy, Jz], dim=2)
-
- # calculate bone lengths
- all_bone_lengths = self._caclulate_bone_lengths_from_J(J, betas_logscale)
- selected_bone_lengths = all_bone_lengths[:, IDXS_BONES_NO_REDUNDANCY]
-
- if short:
- return selected_bone_lengths
- else:
- return all_bone_lengths
-
-
-
- def __call__(self, beta, betas_limbs, theta=None, pose=None, trans=None, del_v=None, get_skin=True, keyp_conf='red', get_all_info=False, shapedirs_sel=None, vert_off_compact=None):
- device = beta.device
-
- betas_logscale = betas_limbs
- # NEW: allow that rotation is given as rotation matrices instead of axis angle rotation
- # theta: BSxNJointsx3 or BSx(NJoints*3)
- # pose: NxNJointsx3x3
- if (theta is None) and (pose is None):
- raise ValueError("Either pose (rotation matrices NxNJointsx3x3) or theta (axis angle BSxNJointsx3) must be given")
- elif (theta is not None) and (pose is not None):
- raise ValueError("Not both pose (rotation matrices NxNJointsx3x3) and theta (axis angle BSxNJointsx3) can be given")
-
- if True: # self.use_smal_betas:
- nBetas = beta.shape[1]
- else:
- nBetas = 0
-
- # add possibility to have additional vertex offsets
- if vert_off_compact is None:
- vertex_offsets = torch.zeros_like(self.v_template)
- else:
- ##########################################################
- # bs = 1
- # vert_off_compact = torch.zeros((bs, 2*self.n_center + 3*self.n_left), device=vert_off_compact.device, dtype=vert_off_compact.dtype)
- if type(vert_off_compact) is dict:
- zero_vec = torch.zeros((vert_off_compact['c0'].shape[0], self.n_center)).to(device)
- half_vertex_offsets_center = torch.stack((vert_off_compact['c0'], \
- zero_vec, \
- vert_off_compact['c2']), axis=1)
- half_vertex_offsets_left = torch.stack((vert_off_compact['l0'], \
- vert_off_compact['l1'], \
- vert_off_compact['l2']), axis=1)
- half_vertex_offsets_right = torch.stack((vert_off_compact['l0'], \
- - vert_off_compact['l1'], \
- vert_off_compact['l2']), axis=1)
- else:
- zero_vec = torch.zeros((vert_off_compact.shape[0], self.n_center)).to(device)
- half_vertex_offsets_center = torch.stack((vert_off_compact[:, :self.n_center], \
- zero_vec, \
- vert_off_compact[:, self.n_center:2*self.n_center]), axis=1)
- half_vertex_offsets_left = torch.stack((vert_off_compact[:, self.sl:self.sl+self.n_left], \
- vert_off_compact[:, self.sl+self.n_left:self.sl+2*self.n_left], \
- vert_off_compact[:, self.sl+2*self.n_left:self.sl+3*self.n_left]), axis=1)
- half_vertex_offsets_right = torch.stack((vert_off_compact[:, self.sl:self.sl+self.n_left], \
- - vert_off_compact[:, self.sl+self.n_left:self.sl+2*self.n_left], \
- vert_off_compact[:, self.sl+2*self.n_left:self.sl+3*self.n_left]), axis=1)
-
- half_vertex_offsets_tot = torch.cat((half_vertex_offsets_center, half_vertex_offsets_left, half_vertex_offsets_right), dim=2) # (bs, 3, 3889)
- vertex_offsets = torch.index_select(half_vertex_offsets_tot, dim=2, index=self.inds_back_torch.to(half_vertex_offsets_tot.device)).permute((0, 2, 1)) # (bs, 3889, 3)
-
-
- # 1. Add shape blend shapes
- # do we use the original shapedirs or a new set of selected shapedirs?
- if shapedirs_sel is None:
- shapedirs_sel = self.shapedirs[:nBetas,:]
- else:
- assert shapedirs_sel.shape[0] == nBetas
-
- if nBetas > 0:
- if del_v is None:
- v_shaped = self.v_template + torch.reshape(torch.matmul(beta, shapedirs_sel), [-1, self.size[0], self.size[1]]) + vertex_offsets
- else:
- v_shaped = self.v_template + del_v + torch.reshape(torch.matmul(beta, shapedirs_sel), [-1, self.size[0], self.size[1]]) + vertex_offsets
- else:
- if del_v is None:
- v_shaped = self.v_template.unsqueeze(0) + vertex_offsets
- else:
- v_shaped = self.v_template + del_v + vertex_offsets
-
- # 2. Infer shape-dependent joint locations.
- Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
- Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
- Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
- J = torch.stack([Jx, Jy, Jz], dim=2)
-
- # 3. Add pose blend shapes
- # N x 24 x 3 x 3
- if pose is None:
- Rs = torch.reshape( batch_rodrigues(torch.reshape(theta, [-1, 3])), [-1, 35, 3, 3])
- else:
- Rs = pose
- # Ignore global rotation.
- pose_feature = torch.reshape(Rs[:, 1:, :, :] - torch.eye(3).to(device=device), [-1, 306])
-
- v_posed = torch.reshape(
- torch.matmul(pose_feature, self.posedirs),
- [-1, self.size[0], self.size[1]]) + v_shaped
-
- #-------------------------
- # new: add corrections of bone lengths to the template (before hypothetical pose blend shapes!)
- # see biggs batch_lbs.py
- betas_scale = torch.exp(betas_logscale @ self.betas_scale_mask.to(betas_logscale.device))
- scaling_factors = betas_scale.reshape(-1, 35, 3)
- scale_factors_3x3 = torch.diag_embed(scaling_factors, dim1=-2, dim2=-1)
-
- # 4. Get the global joint location
- # self.J_transformed, A = batch_global_rigid_transformation(Rs, J, self.parents)
- self.J_transformed, A = batch_global_rigid_transformation_biggs(Rs, J, self.parents, scale_factors_3x3, betas_logscale=betas_logscale)
-
- # 2-BONES. Calculate bone lengths
- all_bone_lengths = self._caclulate_bone_lengths_from_J(J, betas_logscale)
- # selected_bone_lengths = all_bone_lengths[:, IDXS_BONES_NO_REDUNDANCY]
- #-------------------------
-
- # 5. Do skinning:
- num_batch = Rs.shape[0]
-
- weights_t = self.weights.repeat([num_batch, 1])
- W = torch.reshape(weights_t, [num_batch, -1, 35])
-
-
- T = torch.reshape(
- torch.matmul(W, torch.reshape(A, [num_batch, 35, 16])),
- [num_batch, -1, 4, 4])
- v_posed_homo = torch.cat(
- [v_posed, torch.ones([num_batch, v_posed.shape[1], 1]).to(device=device)], 2)
- v_homo = torch.matmul(T, v_posed_homo.unsqueeze(-1))
-
- verts = v_homo[:, :, :3, 0]
-
- if trans is None:
- trans = torch.zeros((num_batch,3)).to(device=device)
-
- verts = verts + trans[:,None,:]
-
- # Get joints:
- joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)
- joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)
- joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)
- joints = torch.stack([joint_x, joint_y, joint_z], dim=2)
-
- # New... (see https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py)
- joints = torch.cat([
- joints,
- verts[:, None, 1863], # end_of_nose
- verts[:, None, 26], # chin
- verts[:, None, 2124], # right ear tip
- verts[:, None, 150], # left ear tip
- verts[:, None, 3055], # left eye
- verts[:, None, 1097], # right eye
- # new: add paw keypoints, not joint locations -> bottom, rather in front
-            # remark: when looking in the animal's direction, left and right are swapped
- verts[:, None, 1330], # front paw, right
- verts[:, None, 3282], # front paw, left
- verts[:, None, 1521], # back paw, right
- verts[:, None, 3473], # back paw, left
- verts[:, None, 6], # throat
- verts[:, None, 20], # withers
- ], dim = 1)
-
-
- if keyp_conf == 'blue' or keyp_conf == 'dict':
- # Generate keypoints
- nLandmarks = KEY_VIDS.shape[0] # 24
- j3d = torch.zeros((num_batch, nLandmarks, 3)).to(device=device)
- for j in range(nLandmarks):
- j3d[:, j,:] = torch.mean(verts[:, KEY_VIDS[j],:], dim=1) # translation is already added to the vertices
- joints_blue = j3d
-
- joints_red = joints[:, :-12, :] # joints[:, :-6, :]
- joints_green = joints[:, CANONICAL_MODEL_JOINTS, :]
- joints_olive = joints[:, CANONICAL_MODEL_JOINTS_REFINED, :] # same order but better paw, withers and throat keypoints
-
- if keyp_conf == 'red':
- relevant_joints = joints_red
- elif keyp_conf == 'green':
- relevant_joints = joints_green
- elif keyp_conf == 'olive':
- relevant_joints = joints_olive
- elif keyp_conf == 'blue':
- relevant_joints = joints_blue
- elif keyp_conf == 'dict':
- relevant_joints = {'red': joints_red,
- 'green': joints_green,
- 'olive': joints_olive,
- 'blue': joints_blue}
- else:
- raise NotImplementedError
-
- if get_all_info:
- return verts, relevant_joints, Rs, all_bone_lengths
- else:
- if get_skin:
- return verts, relevant_joints, Rs # , v_shaped
- else:
- return relevant_joints
-
-
-
-
-
- def get_joints_from_verts(self, verts, keyp_conf='red'):
-
- num_batch = verts.shape[0]
-
- # Get joints:
- joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)
- joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)
- joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)
- joints = torch.stack([joint_x, joint_y, joint_z], dim=2)
-
- # New... (see https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py)
- joints = torch.cat([
- joints,
- verts[:, None, 1863], # end_of_nose
- verts[:, None, 26], # chin
- verts[:, None, 2124], # right ear tip
- verts[:, None, 150], # left ear tip
- verts[:, None, 3055], # left eye
- verts[:, None, 1097], # right eye
- # new: add paw keypoints, not joint locations -> bottom, rather in front
-            # remark: when looking in the animal's direction, left and right are swapped
- verts[:, None, 1330], # front paw, right
- verts[:, None, 3282], # front paw, left
- verts[:, None, 1521], # back paw, right
- verts[:, None, 3473], # back paw, left
- verts[:, None, 6], # throat
- verts[:, None, 20], # withers
- ], dim = 1)
-
-
- if keyp_conf == 'blue' or keyp_conf == 'dict':
- # Generate keypoints
- nLandmarks = KEY_VIDS.shape[0] # 24
-            j3d = torch.zeros((num_batch, nLandmarks, 3)).to(device=verts.device)
- for j in range(nLandmarks):
- j3d[:, j,:] = torch.mean(verts[:, KEY_VIDS[j],:], dim=1) # translation is already added to the vertices
- joints_blue = j3d
-
- joints_red = joints[:, :-12, :] # joints[:, :-6, :]
- joints_green = joints[:, CANONICAL_MODEL_JOINTS, :]
- joints_olive = joints[:, CANONICAL_MODEL_JOINTS_REFINED, :] # same order but better paw, withers and throat keypoints
-
- if keyp_conf == 'red':
- relevant_joints = joints_red
- elif keyp_conf == 'green':
- relevant_joints = joints_green
- elif keyp_conf == 'olive':
- relevant_joints = joints_olive
- elif keyp_conf == 'blue':
- relevant_joints = joints_blue
- elif keyp_conf == 'dict':
- relevant_joints = {'red': joints_red,
- 'green': joints_green,
- 'olive': joints_olive,
- 'blue': joints_blue}
-
- return relevant_joints
-
-
-
-
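The normalization described in the module docstring at the top of `smal_torch_new.py` can be summarized with a short sketch. This uses random stand-ins for the meshes and the joint regressor, so only the shapes and the two normalization steps (scale to unit body length, re-center on the root joint) carry over:

```python
# Minimal sketch of the normalization described in the docstring, on random data.
# Assumptions: idxs_front/idxs_back are the vertex ids listed there, and reg_j0
# stands in for the first (root-joint) row of the densified J_regressor.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(4, 3889, 3))      # 4 template meshes, 3889 vertices each
reg_j0 = np.abs(rng.normal(size=3889))
reg_j0 /= reg_j0.sum()
idxs_front = [6, 16, 8, 964]
idxs_back = [174, 2148, 175, 2149]

# scale each mesh so the breast-to-butt distance becomes 1
X_front = X[:, idxs_front, :].mean(axis=1)
X_back = X[:, idxs_back, :].mean(axis=1)
frontback_dist = np.sqrt(((X_front - X_back) ** 2).sum(axis=1))
X = X / frontback_dist[:, None, None]

# re-center each mesh on its regressed root joint
X_j0 = (X * reg_j0[None, :, None]).sum(axis=1)
X = X - X_j0[:, None, :]
```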
diff --git a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/VariancePredictor.py b/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/VariancePredictor.py
deleted file mode 100644
index cb4ccf522acb3e7cf36b1af59fe87d4fc50725a5..0000000000000000000000000000000000000000
--- a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/VariancePredictor.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2019 Tomoki Hayashi
-# MIT License (https://opensource.org/licenses/MIT)
-# Adapted by Florian Lux 2021
-
-from abc import ABC
-
-import torch
-
-from .LayerNorm import LayerNorm
-
-
-class VariancePredictor(torch.nn.Module, ABC):
- """
- Variance predictor module.
-
- This is a module of variance predictor described in `FastSpeech 2:
- Fast and High-Quality End-to-End Text to Speech`_.
-
- .. _`FastSpeech 2: Fast and High-Quality End-to-End Text to Speech`:
- https://arxiv.org/abs/2006.04558
-
- """
-
- def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, bias=True, dropout_rate=0.5, ):
- """
-        Initialize variance predictor module.
-
- Args:
- idim (int): Input dimension.
- n_layers (int, optional): Number of convolutional layers.
- n_chans (int, optional): Number of channels of convolutional layers.
- kernel_size (int, optional): Kernel size of convolutional layers.
- dropout_rate (float, optional): Dropout rate.
- """
- super().__init__()
- self.conv = torch.nn.ModuleList()
- for idx in range(n_layers):
- in_chans = idim if idx == 0 else n_chans
- self.conv += [
- torch.nn.Sequential(torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, bias=bias, ), torch.nn.ReLU(),
- LayerNorm(n_chans, dim=1), torch.nn.Dropout(dropout_rate), )]
- self.linear = torch.nn.Linear(n_chans, 1)
-
- def forward(self, xs, x_masks=None):
- """
- Calculate forward propagation.
-
- Args:
- xs (Tensor): Batch of input sequences (B, Tmax, idim).
- x_masks (ByteTensor, optional):
- Batch of masks indicating padded part (B, Tmax).
-
- Returns:
- Tensor: Batch of predicted sequences (B, Tmax, 1).
- """
- xs = xs.transpose(1, -1) # (B, idim, Tmax)
- for f in self.conv:
- xs = f(xs) # (B, C, Tmax)
-
- xs = self.linear(xs.transpose(1, 2)) # (B, Tmax, 1)
-
- if x_masks is not None:
- xs = xs.masked_fill(x_masks, 0.0)
-
- return xs
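A hypothetical usage sketch for the `VariancePredictor` above; the import path merely mirrors the repository layout shown in the diff and the chosen dimensions are illustrative:

```python
# Hypothetical usage; import path and dimensions are assumptions.
import torch

from IMSToucan.Layers.VariancePredictor import VariancePredictor

predictor = VariancePredictor(idim=80, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.5)
xs = torch.randn(4, 120, 80)   # (B, Tmax, idim) encoder states
out = predictor(xs)            # (B, Tmax, 1) predicted variance values (e.g. pitch or energy)
assert out.shape == (4, 120, 1)
```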
diff --git a/spaces/scedlatioru/img-to-music/example/Emmegi Fp Pro Crack.md b/spaces/scedlatioru/img-to-music/example/Emmegi Fp Pro Crack.md
deleted file mode 100644
index f227195a90844439dc032cada051ae4130e24dbe..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Emmegi Fp Pro Crack.md
+++ /dev/null
@@ -1,180 +0,0 @@
-
-
Emmegi Fp Pro Crack: How to Use It and What to Watch Out For
-
-
Emmegi Fp Pro is a software that allows you to design and calculate window and door projects using profiles in various materials. It is part of the FP Suite, which is a software package developed by Emmegisoft, a leading company in window and door software solutions. Emmegi Fp Pro has many features and functions that make it a powerful and reliable tool for window and door makers.
However, Emmegi Fp Pro is not a free software. You need to pay for a license to use it legally and access all its features and functions. The license price depends on the number of devices you want to control and the duration of the subscription. You can choose from monthly, quarterly, yearly or perpetual licenses.
-
-
But what if you want to use Emmegi Fp Pro without paying for a license? Is there a way to do that? Yes, there is. It is called Emmegi Fp Pro Crack. It is a file that contains a modified version of the software that bypasses the protection system that checks for a valid license. It usually comes with a keygen.exe file that generates a serial number that you can use to activate the software manually or online.
-
-
Emmegi Fp Pro Crack sounds tempting, right? You can use Emmegi Fp Pro for free and enjoy all its features and functions without any limitations. But before you download and install Emmegi Fp Pro Crack on your PC or laptop, you should know some important things about it. In this article, we will explain what Emmegi Fp Pro Crack is, how it works, what are the benefits and risks of using it, and how to download and install it on your PC or laptop.
-
-
What is Emmegi Fp Pro?
-
-
Emmegi Fp Pro is the main program of the FP Suite, which is a software package developed by Emmegisoft, a company specialized in window and door software solutions. Emmegi Fp Pro includes the basic functions used by all window and door makers, such as:
-
-
-
Designing window and door styles using profiles in aluminum, wood-aluminum, PVC, steel, light alloys.
Defining and calculating jobs, materials lists, estimates, prices, orders and invoices.
-
Managing purchasing, stock and document management.
-
Generating work lists for the workshop, cutting-off machines, machining centers and welding/corner cleaning machines.
-
Calculating thermal transmittance and managing CE marking.
-
-
-
Emmegi Fp Pro is easy to use and employs a user-friendly graphical interface displaying real profiles, multi-joints, sections and 3D styles. It can operate in a network where files are shared over an SQL database. It also integrates with other FP Suite modules and with the SOTI ONE Platform, which is a complete enterprise mobility management solution.
-
-
-
What is Emmegi Fp Pro Crack?
-
-
Emmegi Fp Pro Crack is a way to use Emmegi Fp Pro without paying for a license. It is a file that contains a modified version of the software that bypasses the protection system that checks for a valid license. It does this by removing or changing some parts of the code that are responsible for checking the validity of the license. It also adds some parts of the code that allow the software to run without any restrictions.
-
-
Emmegi Fp Pro Crack is not an official product of Emmegisoft. It is created by hackers or crackers who have access to the original software code and know how to manipulate it. Emmegi Fp Pro Crack is usually distributed through torrent sites or file-sharing platforms where anyone can download it for free.
-
-
What are the Benefits of Using Emmegi Fp Pro Crack?
-
-
The main benefit of using Emmegi Fp Pro Crack is that you can use Emmegi Fp Pro for free without paying for a license. This can save you money and allow you to access all the features and functions of the software without any limitations.
-
-
Another benefit of using Emmegi Fp Pro Crack is that you can use it on any PC or laptop without needing an internet connection or registering an account. You can also use it on multiple devices simultaneously without any restrictions.
-
-
What are the Risks of Using Emmegi Fp Pro Crack?
-
-
The main risk of using Emmegi Fp Pro Crack is that it is illegal. Using cracked software violates the intellectual property rights of the original software developer. You can face legal consequences such as fines or lawsuits if you are caught using Emmegi Fp Pro Crack.
-
-
Another risk of using Emmegi Fp Pro Crack is that it can be unsafe. Cracked software can contain viruses, malware or spyware that can harm your PC or laptop or steal your personal information. You can also expose yourself to cyberattacks or identity theft if you use cracked software online.
-
-
A third risk of using Emmegi Fp Pro Crack is that it can be unreliable. Cracked software may not work properly or may crash unexpectedly. It may also have bugs or errors that can affect your work quality or cause data loss. It may also be incompatible with other FP Suite modules or with the SOTI ONE Platform.
-
-
How to Download and Install Emmegi Fp Pro Crack?
-
-
If you still want to download and install Emmegi Fp Pro Crack on your PC or laptop, you should follow these steps:
-
-
-
Find a reliable source where you can download Emmegi Fp Pro Crack. You can search on torrent sites or file-sharing platforms for Emmegi Fp Pro Crack files.
-
Download the file that contains Emmegi Fp Pro Crack and keygen.exe. Make sure that the file is not corrupted or infected with viruses, malware or spyware.
-
Extract the file using a program like WinRAR or 7-Zip. You will get a folder that contains Emmegi Fp Pro Crack and keygen.exe files.
-
Run keygen.exe as administrator and generate a serial number for Emmegi Fp Pro. Use the serial number to activate the software manually or online.
-
-
-
We hope this article has helped you understand what Emmegi Fp Pro Crack is, how it works, what are the benefits and risks of using it, and how to download and install it on your PC or laptop. If you have any questions or comments, feel free to leave them below. Thank you for reading!
-
How to Use Emmegi Fp Pro?
-
-
Once you have installed Emmegi Fp Pro on your PC or laptop, you can start using it to design and calculate window and door projects. Here are some basic steps to use Emmegi Fp Pro:
-
-
-
Launch Emmegi Fp Pro and create a new project or open an existing one.
-
Select the profiles you want to use from the database or create your own profiles.
-
Design the window and door styles using the graphical interface. You can choose from different types of styles, such as rectangular, arched or with intermediate angles. You can also add machining operations and fittings to the profiles.
-
Define and calculate the job by entering the styles into the job. You can also add accessories, glazing and panels to the job. Emmegi Fp Pro will calculate the materials lists, estimates, prices, orders and invoices for the job.
-
Manage the purchasing, stock and document management for the job. You can also generate credit documents, such as delivery notes and invoices.
-
Generate work lists for the workshop, cutting-off machines, machining centers and welding/corner cleaning machines. You can also manage the trolley for workpieces and frames.
-
Calculate the thermal transmittance and manage the CE marking for the job. You can also define the declaration of conformity and the CE label for the job.
-
-
-
You can also use Emmegi Fp Pro with other FP Suite modules and with the SOTI ONE Platform to enhance your workflow and productivity. For example, you can use FP OPTI2D to optimize glazing and panels, FP GEST to handle production, FP DEALER to process and use sales lists, FP FACADE to design curtain walls, FP ASSEMBLY to support assembly operations, FP CAM/CAMPLUS to design machining operations and control machining centers.
-
-
How to Update Emmegi Fp Pro?
-
-
If you have a valid license for Emmegi Fp Pro, you can update it to the latest version whenever there is a new release. Updating Emmegi Fp Pro will allow you to access new features and functions, fix bugs and errors, improve performance and compatibility, and enhance security and reliability.
-
-
To update Emmegi Fp Pro, you need to have an internet connection and a registered account on Emmegisoft's website. Here are some steps to update Emmegi Fp Pro:
-
-
-
Go to Emmegisoft's website and log in with your account.
-
Go to the download section and find the latest version of Emmegi Fp Pro.
-
Download the update file and save it on your PC or laptop.
-
Run the update file as administrator and follow the instructions on the screen.
-
Restart your PC or laptop after the update is completed.
-
Launch Emmegi Fp Pro and check if it is updated to the latest version.
-
-
-
If you have any problems or issues with updating Emmegi Fp Pro, you can contact Emmegisoft's technical support team for assistance. They will help you solve any problems or issues with updating Emmegi Fp Pro.
-
How to Uninstall Emmegi Fp Pro?
-
-
If you want to uninstall Emmegi Fp Pro from your PC or laptop, you can do it easily and safely. Uninstalling Emmegi Fp Pro will remove the software and all its files and folders from your PC or laptop. It will also free up some space on your hard drive and improve your PC or laptop's performance.
-
-
To uninstall Emmegi Fp Pro, you need to have administrator rights on your PC or laptop. Here are some steps to uninstall Emmegi Fp Pro:
-
-
-
Go to the Control Panel and click on Programs and Features.
-
Find Emmegi Fp Pro on the list of installed programs and click on Uninstall.
-
Follow the instructions on the screen to uninstall Emmegi Fp Pro.
-
Restart your PC or laptop after the uninstallation is completed.
-
Check if Emmegi Fp Pro is completely removed from your PC or laptop. You can also use a program like CCleaner to clean up any leftover files and folders.
-
-
-
If you have any problems or issues with uninstalling Emmegi Fp Pro, you can contact Emmegisoft's technical support team for assistance. They will help you solve any problems or issues with uninstalling Emmegi Fp Pro.
-
-
Conclusion
-
-
Emmegi Fp Pro is a software that allows you to design and calculate window and door projects using profiles in various materials. It is part of the FP Suite, which is a software package developed by Emmegisoft, a leading company in window and door software solutions.
-
-
Emmegi Fp Pro is not a free software. You need to pay for a license to use it legally and access all its features and functions. The license price depends on the number of devices you want to control and the duration of the subscription. You can choose from monthly, quarterly, yearly or perpetual licenses.
-
-
If you want to use Emmegi Fp Pro without paying for a license, you can try Emmegi Fp Pro Crack. It is a file that contains a modified version of the software that bypasses the protection system that checks for a valid license. It usually comes with a keygen.exe file that generates a serial number that you can use to activate the software manually or online.
-
-
However, using Emmegi Fp Pro Crack has some benefits but also some risks. You can use Emmegi Fp Pro for free and enjoy all its features and functions without any limitations. But you can also face legal consequences if you are caught using cracked software. You can also harm your PC or laptop or expose yourself to cyberattacks if you download infected files. You can also experience unreliable performance or data loss if you use buggy software.
-
-
The choice is yours whether to use Emmegi Fp Pro Crack or not. But we recommend that you use Emmegi Fp Pro legally and pay for a license. This way, you can support the original software developer and enjoy a safe and reliable software that can help you with your window and door projects.
-
-
We hope this article has helped you understand what Emmegi Fp Pro Crack is, how it works, what are the benefits and risks of using it, and how to download, install, update and uninstall it on your PC or laptop. If you have any questions or comments, feel free to leave them below. Thank you for reading!
-
-
-
\ No newline at end of file
diff --git a/spaces/scikit-learn/baseline-trainer/app.py b/spaces/scikit-learn/baseline-trainer/app.py
deleted file mode 100644
index 8c797fd42431b45f328e2d4b703e7098ab9052c3..0000000000000000000000000000000000000000
--- a/spaces/scikit-learn/baseline-trainer/app.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import gradio as gr
-import pandas as pd
-from huggingface_hub.hf_api import create_repo, upload_folder, upload_file, HfApi
-from huggingface_hub.repository import Repository
-import subprocess
-import os
-import tempfile
-from uuid import uuid4
-import pickle
-import sweetviz as sv
-import dabl
-import re
-
-
-def analyze_datasets(dataset, token, column=None, pairwise="off"):
- df = pd.read_csv(dataset.name)
- username = HfApi().whoami(token=token)["name"]
- if column is not None:
- analyze_report = sv.analyze(df, target_feat=column, pairwise_analysis=pairwise)
- else:
- analyze_report = sv.analyze(df, pairwise_analysis=pairwise)
-    dataset_name = dataset.name.split("/")[-1].removesuffix(".csv")
- analyze_report.show_html('./index.html', open_browser=False)
-
- repo_url = create_repo(f"{username}/{dataset_name}-report", repo_type = "space", token = token, space_sdk = "static", private=False)
-
- upload_file(path_or_fileobj ="./index.html", path_in_repo = "./index.html", repo_id =f"{username}/{dataset_name}-report", repo_type = "space", token=token)
- readme = f"---\ntitle: {dataset_name}\nemoji: ✨\ncolorFrom: green\ncolorTo: red\nsdk: static\npinned: false\ntags:\n- dataset-report\n---"
- with open("README.md", "w+") as f:
- f.write(readme)
- upload_file(path_or_fileobj ="./README.md", path_in_repo = "README.md", repo_id =f"{username}/{dataset_name}-report", repo_type = "space", token=token)
-
- return f"Your dataset report will be ready at {repo_url}"
-
-
-from sklearn.utils import estimator_html_repr
-
-
-def extract_estimator_config(model):
- hyperparameter_dict = model.get_params(deep=True)
- table = "| Hyperparameters | Value |\n| :-- | :-- |\n"
- for hyperparameter, value in hyperparameter_dict.items():
- table += f"| {hyperparameter} | {value} |\n"
- return table
-
-def detect_training(df, column):
- if dabl.detect_types(df)["continuous"][column] or dabl.detect_types(df)["dirty_float"][column]:
- trainer = dabl.SimpleRegressor()
- task = "regression"
- elif dabl.detect_types(df)["categorical"][column] or dabl.detect_types(df)["low_card_int"][column] or dabl.detect_types(df)["free_string"][column]:
- trainer = dabl.SimpleClassifier()
- task = "classification"
- return trainer, task
-
-def edit_types(df):
- types = dabl.detect_types(df)
- low_cardinality = types[types["low_card_int"] == True].index.tolist()
- dirty_float = types[types["dirty_float"] == True].index.tolist()
- type_hints = {}
- for col in low_cardinality:
- type_hints[col] = "categorical"
- for col in dirty_float:
- type_hints[col] = "continuous"
- df_clean = dabl.clean(df, type_hints=type_hints)
- return df_clean
-
-def train_baseline(dataset, token, column):
- df = pd.read_csv(dataset.name)
-    dataset_name = dataset.name.split("/")[-1].removesuffix(".csv")
- df_clean = edit_types(df)
- fc, task = detect_training(df_clean, column)
- X = df_clean.drop(column, axis = 1)
- y = df_clean[column]
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- from contextlib import redirect_stdout
-
- with open(f'{tmpdirname}/logs.txt', 'w') as f:
- with redirect_stdout(f):
- print('Logging training')
- fc.fit(X, y)
- username = HfApi().whoami(token=token)["name"]
- repo_url = create_repo(repo_id = f"{username}/{dataset_name}-{column}-{task}", token = token)
- if task == "regression":
- task_metadata = "tabular-regression"
- else:
- task_metadata = "tabular-classification"
- readme = f"---\nlicense: apache-2.0\nlibrary_name: sklearn\ntags:\n- {task_metadata}\n- baseline-trainer\n---\n\n"
- readme += f"## Baseline Model trained on {dataset_name} to apply {task} on {column}\n\n"
- readme+="**Metrics of the best model:**\n\n"
- for elem in str(fc.current_best_).split("\n"):
- readme+= f"{elem}\n\n"
- readme+= "\n\n**See model plot below:**\n\n"
- readme+= re.sub(r"\n\s+", "", str(estimator_html_repr(fc.est_)))
- readme+= "\n\n**Disclaimer:** This model is trained with dabl library as a baseline, for better results, use [AutoTrain](https://huggingface.co/autotrain).\n\n"
- readme+= "**Logs of training** including the models tried in the process can be found in logs.txt"
- with open(f"{tmpdirname}/README.md", "w+") as f:
- f.write(readme)
- with open(f"{tmpdirname}/clf.pkl", mode="bw") as f:
- pickle.dump(fc, file=f)
- upload_folder(repo_id =f"{username}/{dataset_name}-{column}-{task}", folder_path=tmpdirname, repo_type = "model", token=token, path_in_repo="./")
-
- return f"Your model will be ready at {repo_url}"
-
-
-
-with gr.Blocks() as demo:
- main_title = gr.Markdown("""# Baseline Trainer 🪄🌟✨""")
- main_desc = gr.Markdown("""This app trains a baseline model for a given dataset and pushes it to your Hugging Face Hub Profile with a model card. For better results, use [AutoTrain](https://huggingface.co/autotrain).""")
-
-
- with gr.Tabs():
- with gr.TabItem("Baseline Trainer") as baseline_trainer:
- with gr.Row():
- with gr.Column():
- title = gr.Markdown(""" ## Train a supervised baseline model 🪄""")
- description = gr.Markdown("This app trains a model and pushes it to your Hugging Face Hub Profile.")
- dataset = gr.File(label = "CSV Dataset")
- column = gr.Text(label = "Enter target variable:")
- pushing_desc = gr.Markdown("This app needs your Hugging Face Hub token. You can find your token [here](https://huggingface.co/settings/tokens)")
- token = gr.Textbox(label = "Your Hugging Face Token")
- inference_run = gr.Button("Train")
- inference_progress = gr.StatusTracker(cover_container=True)
-
- outcome = gr.outputs.Textbox(label = "Progress")
- inference_run.click(
- train_baseline,
- inputs=[dataset, token, column],
- outputs=outcome,
- status_tracker=inference_progress,
- )
- with gr.TabItem("Analyze") as analyze:
- with gr.Row():
- with gr.Column():
- title = gr.Markdown(""" ## Analyze Dataset 🪄""")
- description = gr.Markdown("Analyze a dataset or predictive variables against a target variable in a dataset (enter a column name to column section if you want to compare against target value). You can also do pairwise analysis, but it has quadratic complexity.")
- dataset = gr.File(label = "CSV Dataset")
- column = gr.Text(label = "Compare dataset against a target variable (Optional)")
- pairwise = gr.Radio(["off", "on"], label = "Enable pairwise analysis")
- token = gr.Textbox(label = "Your Hugging Face Token")
- pushing_desc = gr.Markdown("This app needs your Hugging Face Hub token. You can find your token [here](https://huggingface.co/settings/tokens)")
- inference_run = gr.Button("Infer")
- inference_progress = gr.StatusTracker(cover_container=True)
- outcome = gr.outputs.Textbox()
- inference_run.click(
- analyze_datasets,
- inputs=[dataset, token, column, pairwise],
- outputs=outcome,
- status_tracker=inference_progress,
- )
-
-demo.launch(debug=True)
\ No newline at end of file
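Stripped of the Gradio and Hub plumbing, the core of `train_baseline` above is a plain dabl baseline fit. A minimal sketch, assuming a local CSV with a categorical `target` column (both the file name and the column name are placeholders):

```python
# Core of the baseline flow without Gradio/Hub: clean the frame, fit dabl's
# SimpleClassifier, inspect the best model. File and column names are placeholders.
import dabl
import pandas as pd

df = pd.read_csv("my_dataset.csv")
df_clean = dabl.clean(df)              # fixes dirty floats, low-cardinality ints, ...
X = df_clean.drop("target", axis=1)
y = df_clean["target"]

clf = dabl.SimpleClassifier()
clf.fit(X, y)
print(clf.current_best_)               # metrics of the best baseline model
```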
diff --git a/spaces/sdhsdhk/bingo111/src/components/ui/tooltip.tsx b/spaces/sdhsdhk/bingo111/src/components/ui/tooltip.tsx
deleted file mode 100644
index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000
--- a/spaces/sdhsdhk/bingo111/src/components/ui/tooltip.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as TooltipPrimitive from '@radix-ui/react-tooltip'
-
-import { cn } from '@/lib/utils'
-
-const TooltipProvider = TooltipPrimitive.Provider
-
-const Tooltip = TooltipPrimitive.Root
-
-const TooltipTrigger = TooltipPrimitive.Trigger
-
-const TooltipContent = React.forwardRef<
- React.ElementRef<typeof TooltipPrimitive.Content>,
- React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
->(({ className, sideOffset = 4, ...props }, ref) => (
- <TooltipPrimitive.Content
- ref={ref}
- sideOffset={sideOffset}
- className={cn(className)}
- {...props}
- />
-))
-TooltipContent.displayName = TooltipPrimitive.Content.displayName
-
-export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
diff --git a/spaces/segments-tobias/conex/espnet/nets/chainer_backend/transformer/decoder.py b/spaces/segments-tobias/conex/espnet/nets/chainer_backend/transformer/decoder.py
deleted file mode 100644
index c216de8e51aaed13af41ae13b495e08a41061eba..0000000000000000000000000000000000000000
--- a/spaces/segments-tobias/conex/espnet/nets/chainer_backend/transformer/decoder.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# encoding: utf-8
-"""Class Declaration of Transformer's Decoder."""
-
-import chainer
-
-import chainer.functions as F
-import chainer.links as L
-
-from espnet.nets.chainer_backend.transformer.decoder_layer import DecoderLayer
-from espnet.nets.chainer_backend.transformer.embedding import PositionalEncoding
-from espnet.nets.chainer_backend.transformer.layer_norm import LayerNorm
-from espnet.nets.chainer_backend.transformer.mask import make_history_mask
-
-import numpy as np
-
-
-class Decoder(chainer.Chain):
- """Decoder layer.
-
- Args:
- odim (int): The output dimension.
-        n_layers (int): Number of decoder layers.
- n_units (int): Number of attention units.
- d_units (int): Dimension of input vector of decoder.
- h (int): Number of attention heads.
- dropout (float): Dropout rate.
- initialW (Initializer): Initializer to initialize the weight.
-        initial_bias (Initializer): Initializer to initialize the bias.
-
- """
-
- def __init__(self, odim, args, initialW=None, initial_bias=None):
- """Initialize Decoder."""
- super(Decoder, self).__init__()
- self.sos = odim - 1
- self.eos = odim - 1
- initialW = chainer.initializers.Uniform if initialW is None else initialW
- initial_bias = (
- chainer.initializers.Uniform if initial_bias is None else initial_bias
- )
- with self.init_scope():
- self.output_norm = LayerNorm(args.adim)
- self.pe = PositionalEncoding(args.adim, args.dropout_rate)
- stvd = 1.0 / np.sqrt(args.adim)
- self.output_layer = L.Linear(
- args.adim,
- odim,
- initialW=initialW(scale=stvd),
- initial_bias=initial_bias(scale=stvd),
- )
- self.embed = L.EmbedID(
- odim,
- args.adim,
- ignore_label=-1,
- initialW=chainer.initializers.Normal(scale=1.0),
- )
- for i in range(args.dlayers):
- name = "decoders." + str(i)
- layer = DecoderLayer(
- args.adim,
- d_units=args.dunits,
- h=args.aheads,
- dropout=args.dropout_rate,
- initialW=initialW,
- initial_bias=initial_bias,
- )
- self.add_link(name, layer)
- self.n_layers = args.dlayers
-
- def make_attention_mask(self, source_block, target_block):
- """Prepare the attention mask.
-
- Args:
- source_block (ndarray): Source block with dimensions: (B x S).
- target_block (ndarray): Target block with dimensions: (B x T).
- Returns:
- ndarray: Mask with dimensions (B, S, T).
-
- """
- mask = (target_block[:, None, :] >= 0) * (source_block[:, :, None] >= 0)
- # (batch, source_length, target_length)
- return mask
-
- def forward(self, ys_pad, source, x_mask):
- """Forward decoder.
-
-        :param xp.array ys_pad: batch of padded output token ids, int64 (batch, maxlen_out)
-        :param xp.array source: encoded memory, float32 (batch, maxlen_in, feat)
-        :param xp.array x_mask: encoded memory mask, uint8 (batch, maxlen_in)
- :return e: decoded token score before softmax (batch, maxlen_out, token)
- :rtype: chainer.Variable
- """
- xp = self.xp
- sos = np.array([self.sos], np.int32)
- ys = [np.concatenate([sos, y], axis=0) for y in ys_pad]
- e = F.pad_sequence(ys, padding=self.eos).data
- e = xp.array(e)
- # mask preparation
- xy_mask = self.make_attention_mask(e, xp.array(x_mask))
- yy_mask = self.make_attention_mask(e, e)
- yy_mask *= make_history_mask(xp, e)
-
- e = self.pe(self.embed(e))
- batch, length, dims = e.shape
- e = e.reshape(-1, dims)
- source = source.reshape(-1, dims)
- for i in range(self.n_layers):
- e = self["decoders." + str(i)](e, source, xy_mask, yy_mask, batch)
- return self.output_layer(self.output_norm(e)).reshape(batch, length, -1)
-
- def recognize(self, e, yy_mask, source):
- """Process recognition function."""
- e = self.forward(e, source, yy_mask)
- return F.log_softmax(e, axis=-1)
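The masking in `Decoder.forward` above is easier to see on toy data. A small numpy-only illustration, assuming padded positions are marked with -1 and using a plain lower-triangular matrix in place of `make_history_mask`:

```python
# Toy illustration of the mask construction in Decoder.forward, numpy only.
# Padded positions are marked with -1; the causal mask is a plain lower triangle.
import numpy as np

e = np.array([[5, 7, 9, -1]])        # decoder input ids, last position padded
x_mask = np.array([[3, 3, -1]])      # encoder-side ids/mask, last position padded

# (batch, maxlen_out, maxlen_in): target step i may attend to source step j
xy_mask = (x_mask[:, None, :] >= 0) * (e[:, :, None] >= 0)
# (batch, maxlen_out, maxlen_out): self-attention mask, then restricted to the past
yy_mask = (e[:, None, :] >= 0) * (e[:, :, None] >= 0)
yy_mask = yy_mask * np.tril(np.ones_like(yy_mask))

print(xy_mask.astype(int))
print(yy_mask.astype(int))
```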
diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_asr.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_asr.py
deleted file mode 100644
index 5644b99e3eec1ffaf178283899afa1917db5dc45..0000000000000000000000000000000000000000
--- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_asr.py
+++ /dev/null
@@ -1,541 +0,0 @@
-# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
-# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
-
-"""RNN sequence-to-sequence speech recognition model (pytorch)."""
-
-import argparse
-from itertools import groupby
-import logging
-import math
-import os
-
-import chainer
-from chainer import reporter
-import editdistance
-import numpy as np
-import six
-import torch
-
-from espnet.nets.asr_interface import ASRInterface
-from espnet.nets.e2e_asr_common import label_smoothing_dist
-from espnet.nets.pytorch_backend.ctc import ctc_for
-from espnet.nets.pytorch_backend.frontends.feature_transform import (
- feature_transform_for, # noqa: H301
-)
-from espnet.nets.pytorch_backend.frontends.frontend import frontend_for
-from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters
-from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one
-from espnet.nets.pytorch_backend.nets_utils import get_subsample
-from espnet.nets.pytorch_backend.nets_utils import pad_list
-from espnet.nets.pytorch_backend.nets_utils import to_device
-from espnet.nets.pytorch_backend.nets_utils import to_torch_tensor
-from espnet.nets.pytorch_backend.rnn.argument import (
- add_arguments_rnn_encoder_common, # noqa: H301
- add_arguments_rnn_decoder_common, # noqa: H301
- add_arguments_rnn_attention_common, # noqa: H301
-)
-from espnet.nets.pytorch_backend.rnn.attentions import att_for
-from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
-from espnet.nets.pytorch_backend.rnn.encoders import encoder_for
-from espnet.nets.scorers.ctc import CTCPrefixScorer
-from espnet.utils.fill_missing_args import fill_missing_args
-
-CTC_LOSS_THRESHOLD = 10000
-
-
-class Reporter(chainer.Chain):
- """A chainer reporter wrapper."""
-
- def report(self, loss_ctc, loss_att, acc, cer_ctc, cer, wer, mtl_loss):
- """Report at every step."""
- reporter.report({"loss_ctc": loss_ctc}, self)
- reporter.report({"loss_att": loss_att}, self)
- reporter.report({"acc": acc}, self)
- reporter.report({"cer_ctc": cer_ctc}, self)
- reporter.report({"cer": cer}, self)
- reporter.report({"wer": wer}, self)
- logging.info("mtl loss:" + str(mtl_loss))
- reporter.report({"loss": mtl_loss}, self)
-
-
-class E2E(ASRInterface, torch.nn.Module):
- """E2E module.
-
- :param int idim: dimension of inputs
- :param int odim: dimension of outputs
- :param Namespace args: argument Namespace containing options
-
- """
-
- @staticmethod
- def add_arguments(parser):
- """Add arguments."""
- E2E.encoder_add_arguments(parser)
- E2E.attention_add_arguments(parser)
- E2E.decoder_add_arguments(parser)
- return parser
-
- @staticmethod
- def encoder_add_arguments(parser):
- """Add arguments for the encoder."""
- group = parser.add_argument_group("E2E encoder setting")
- group = add_arguments_rnn_encoder_common(group)
- return parser
-
- @staticmethod
- def attention_add_arguments(parser):
- """Add arguments for the attention."""
- group = parser.add_argument_group("E2E attention setting")
- group = add_arguments_rnn_attention_common(group)
- return parser
-
- @staticmethod
- def decoder_add_arguments(parser):
- """Add arguments for the decoder."""
- group = parser.add_argument_group("E2E decoder setting")
- group = add_arguments_rnn_decoder_common(group)
- return parser
-
- def get_total_subsampling_factor(self):
- """Get total subsampling factor."""
- if isinstance(self.enc, torch.nn.ModuleList):
- return self.enc[0].conv_subsampling_factor * int(np.prod(self.subsample))
- else:
- return self.enc.conv_subsampling_factor * int(np.prod(self.subsample))
-
- def __init__(self, idim, odim, args):
- """Construct an E2E object.
-
- :param int idim: dimension of inputs
- :param int odim: dimension of outputs
- :param Namespace args: argument Namespace containing options
- """
- super(E2E, self).__init__()
- torch.nn.Module.__init__(self)
-
- # fill missing arguments for compatibility
- args = fill_missing_args(args, self.add_arguments)
-
- self.mtlalpha = args.mtlalpha
- assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
- self.etype = args.etype
- self.verbose = args.verbose
- # NOTE: for self.build method
- args.char_list = getattr(args, "char_list", None)
- self.char_list = args.char_list
- self.outdir = args.outdir
- self.space = args.sym_space
- self.blank = args.sym_blank
- self.reporter = Reporter()
-
- # below means the last number becomes eos/sos ID
- # note that sos/eos IDs are identical
- self.sos = odim - 1
- self.eos = odim - 1
-
- # subsample info
- self.subsample = get_subsample(args, mode="asr", arch="rnn")
-
- # label smoothing info
- if args.lsm_type and os.path.isfile(args.train_json):
- logging.info("Use label smoothing with " + args.lsm_type)
- labeldist = label_smoothing_dist(
- odim, args.lsm_type, transcript=args.train_json
- )
- else:
- labeldist = None
-
- if getattr(args, "use_frontend", False): # use getattr to keep compatibility
- self.frontend = frontend_for(args, idim)
- self.feature_transform = feature_transform_for(args, (idim - 1) * 2)
- idim = args.n_mels
- else:
- self.frontend = None
-
- # encoder
- self.enc = encoder_for(args, idim, self.subsample)
- # ctc
- self.ctc = ctc_for(args, odim)
- # attention
- self.att = att_for(args)
- # decoder
- self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
-
- # weight initialization
- self.init_like_chainer()
-
- # options for beam search
- if args.report_cer or args.report_wer:
- recog_args = {
- "beam_size": args.beam_size,
- "penalty": args.penalty,
- "ctc_weight": args.ctc_weight,
- "maxlenratio": args.maxlenratio,
- "minlenratio": args.minlenratio,
- "lm_weight": args.lm_weight,
- "rnnlm": args.rnnlm,
- "nbest": args.nbest,
- "space": args.sym_space,
- "blank": args.sym_blank,
- }
-
- self.recog_args = argparse.Namespace(**recog_args)
- self.report_cer = args.report_cer
- self.report_wer = args.report_wer
- else:
- self.report_cer = False
- self.report_wer = False
- self.rnnlm = None
-
- self.logzero = -10000000000.0
- self.loss = None
- self.acc = None
-
- def init_like_chainer(self):
- """Initialize weight like chainer.
-
- chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
- pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
- however, there are two exceptions as far as I know.
- - EmbedID.W ~ Normal(0, 1)
- - LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
- """
- lecun_normal_init_parameters(self)
- # exceptions
- # embed weight ~ Normal(0, 1)
- self.dec.embed.weight.data.normal_(0, 1)
- # forget-bias = 1.0
- # https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
- for i in six.moves.range(len(self.dec.decoder)):
- set_forget_bias_to_one(self.dec.decoder[i].bias_ih)
-
- def forward(self, xs_pad, ilens, ys_pad):
- """E2E forward.
-
- :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
- :param torch.Tensor ilens: batch of lengths of input sequences (B)
- :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
- :return: loss value
- :rtype: torch.Tensor
- """
- # 0. Frontend
- if self.frontend is not None:
- hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
- hs_pad, hlens = self.feature_transform(hs_pad, hlens)
- else:
- hs_pad, hlens = xs_pad, ilens
-
- # 1. Encoder
- hs_pad, hlens, _ = self.enc(hs_pad, hlens)
-
- # 2. CTC loss
- if self.mtlalpha == 0:
- self.loss_ctc = None
- else:
- self.loss_ctc = self.ctc(hs_pad, hlens, ys_pad)
-
- # 3. attention loss
- if self.mtlalpha == 1:
- self.loss_att, acc = None, None
- else:
- self.loss_att, acc, _ = self.dec(hs_pad, hlens, ys_pad)
- self.acc = acc
-
- # 4. compute cer without beam search
- if self.mtlalpha == 0 or self.char_list is None:
- cer_ctc = None
- else:
- cers = []
-
- y_hats = self.ctc.argmax(hs_pad).data
- for i, y in enumerate(y_hats):
- y_hat = [x[0] for x in groupby(y)]
- y_true = ys_pad[i]
-
- seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
- seq_true = [
- self.char_list[int(idx)] for idx in y_true if int(idx) != -1
- ]
- seq_hat_text = "".join(seq_hat).replace(self.space, " ")
- seq_hat_text = seq_hat_text.replace(self.blank, "")
- seq_true_text = "".join(seq_true).replace(self.space, " ")
-
- hyp_chars = seq_hat_text.replace(" ", "")
- ref_chars = seq_true_text.replace(" ", "")
- if len(ref_chars) > 0:
- cers.append(
- editdistance.eval(hyp_chars, ref_chars) / len(ref_chars)
- )
-
- cer_ctc = sum(cers) / len(cers) if cers else None
-
- # 5. compute cer/wer
- if self.training or not (self.report_cer or self.report_wer):
- cer, wer = 0.0, 0.0
- # oracle_cer, oracle_wer = 0.0, 0.0
- else:
- if self.recog_args.ctc_weight > 0.0:
- lpz = self.ctc.log_softmax(hs_pad).data
- else:
- lpz = None
-
- word_eds, word_ref_lens, char_eds, char_ref_lens = [], [], [], []
- nbest_hyps = self.dec.recognize_beam_batch(
- hs_pad,
- torch.tensor(hlens),
- lpz,
- self.recog_args,
- self.char_list,
- self.rnnlm,
- )
- # remove <sos> and <eos>
- y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
- for i, y_hat in enumerate(y_hats):
- y_true = ys_pad[i]
-
- seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
- seq_true = [
- self.char_list[int(idx)] for idx in y_true if int(idx) != -1
- ]
- seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, " ")
- seq_hat_text = seq_hat_text.replace(self.recog_args.blank, "")
- seq_true_text = "".join(seq_true).replace(self.recog_args.space, " ")
-
- hyp_words = seq_hat_text.split()
- ref_words = seq_true_text.split()
- word_eds.append(editdistance.eval(hyp_words, ref_words))
- word_ref_lens.append(len(ref_words))
- hyp_chars = seq_hat_text.replace(" ", "")
- ref_chars = seq_true_text.replace(" ", "")
- char_eds.append(editdistance.eval(hyp_chars, ref_chars))
- char_ref_lens.append(len(ref_chars))
-
- wer = (
- 0.0
- if not self.report_wer
- else float(sum(word_eds)) / sum(word_ref_lens)
- )
- cer = (
- 0.0
- if not self.report_cer
- else float(sum(char_eds)) / sum(char_ref_lens)
- )
-
- alpha = self.mtlalpha
- if alpha == 0:
- self.loss = self.loss_att
- loss_att_data = float(self.loss_att)
- loss_ctc_data = None
- elif alpha == 1:
- self.loss = self.loss_ctc
- loss_att_data = None
- loss_ctc_data = float(self.loss_ctc)
- else:
- self.loss = alpha * self.loss_ctc + (1 - alpha) * self.loss_att
- loss_att_data = float(self.loss_att)
- loss_ctc_data = float(self.loss_ctc)
-
- loss_data = float(self.loss)
- if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
- self.reporter.report(
- loss_ctc_data, loss_att_data, acc, cer_ctc, cer, wer, loss_data
- )
- else:
- logging.warning("loss (=%f) is not correct", loss_data)
- return self.loss
-
- def scorers(self):
- """Scorers."""
- return dict(decoder=self.dec, ctc=CTCPrefixScorer(self.ctc, self.eos))
-
- def encode(self, x):
- """Encode acoustic features.
-
- :param ndarray x: input acoustic feature (T, D)
- :return: encoder outputs
- :rtype: torch.Tensor
- """
- self.eval()
- ilens = [x.shape[0]]
-
- # subsample frame
- x = x[:: self.subsample[0], :]
- p = next(self.parameters())
- h = torch.as_tensor(x, device=p.device, dtype=p.dtype)
- # make a utt list (1) to use the same interface for encoder
- hs = h.contiguous().unsqueeze(0)
-
- # 0. Frontend
- if self.frontend is not None:
- enhanced, hlens, mask = self.frontend(hs, ilens)
- hs, hlens = self.feature_transform(enhanced, hlens)
- else:
- hs, hlens = hs, ilens
-
- # 1. encoder
- hs, _, _ = self.enc(hs, hlens)
- return hs.squeeze(0)
-
- def recognize(self, x, recog_args, char_list, rnnlm=None):
- """E2E beam search.
-
- :param ndarray x: input acoustic feature (T, D)
- :param Namespace recog_args: argument Namespace containing options
- :param list char_list: list of characters
- :param torch.nn.Module rnnlm: language model module
- :return: N-best decoding results
- :rtype: list
- """
- hs = self.encode(x).unsqueeze(0)
- # calculate log P(z_t|X) for CTC scores
- if recog_args.ctc_weight > 0.0:
- lpz = self.ctc.log_softmax(hs)[0]
- else:
- lpz = None
-
- # 2. Decoder
- # decode the first utterance
- y = self.dec.recognize_beam(hs[0], lpz, recog_args, char_list, rnnlm)
- return y
-
- def recognize_batch(self, xs, recog_args, char_list, rnnlm=None):
- """E2E batch beam search.
-
- :param list xs: list of input acoustic feature arrays [(T_1, D), (T_2, D), ...]
- :param Namespace recog_args: argument Namespace containing options
- :param list char_list: list of characters
- :param torch.nn.Module rnnlm: language model module
- :return: N-best decoding results
- :rtype: list
- """
- prev = self.training
- self.eval()
- ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
-
- # subsample frame
- xs = [xx[:: self.subsample[0], :] for xx in xs]
- xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
- xs_pad = pad_list(xs, 0.0)
-
- # 0. Frontend
- if self.frontend is not None:
- enhanced, hlens, mask = self.frontend(xs_pad, ilens)
- hs_pad, hlens = self.feature_transform(enhanced, hlens)
- else:
- hs_pad, hlens = xs_pad, ilens
-
- # 1. Encoder
- hs_pad, hlens, _ = self.enc(hs_pad, hlens)
-
- # calculate log P(z_t|X) for CTC scores
- if recog_args.ctc_weight > 0.0:
- lpz = self.ctc.log_softmax(hs_pad)
- normalize_score = False
- else:
- lpz = None
- normalize_score = True
-
- # 2. Decoder
- hlens = torch.tensor(list(map(int, hlens))) # make sure hlens is tensor
- y = self.dec.recognize_beam_batch(
- hs_pad,
- hlens,
- lpz,
- recog_args,
- char_list,
- rnnlm,
- normalize_score=normalize_score,
- )
-
- if prev:
- self.train()
- return y
-
- def enhance(self, xs):
- """Forward only in the frontend stage.
-
- :param ndarray xs: input acoustic feature (T, C, F)
- :return: enhanced feature
- :rtype: torch.Tensor
- """
- if self.frontend is None:
- raise RuntimeError("Frontend does't exist")
- prev = self.training
- self.eval()
- ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
-
- # subsample frame
- xs = [xx[:: self.subsample[0], :] for xx in xs]
- xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
- xs_pad = pad_list(xs, 0.0)
- enhanced, hlensm, mask = self.frontend(xs_pad, ilens)
- if prev:
- self.train()
- return enhanced.cpu().numpy(), mask.cpu().numpy(), ilens
-
- def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
- """E2E attention calculation.
-
- :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
- :param torch.Tensor ilens: batch of lengths of input sequences (B)
- :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
- :return: attention weights with the following shape,
- 1) multi-head case => attention weights (B, H, Lmax, Tmax),
- 2) other case => attention weights (B, Lmax, Tmax).
- :rtype: float ndarray
- """
- self.eval()
- with torch.no_grad():
- # 0. Frontend
- if self.frontend is not None:
- hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
- hs_pad, hlens = self.feature_transform(hs_pad, hlens)
- else:
- hs_pad, hlens = xs_pad, ilens
-
- # 1. Encoder
- hpad, hlens, _ = self.enc(hs_pad, hlens)
-
- # 2. Decoder
- att_ws = self.dec.calculate_all_attentions(hpad, hlens, ys_pad)
- self.train()
- return att_ws
-
- def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad):
- """E2E CTC probability calculation.
-
- :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
- :param torch.Tensor ilens: batch of lengths of input sequences (B)
- :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
- :return: CTC probability (B, Tmax, vocab)
- :rtype: float ndarray
- """
- probs = None
- if self.mtlalpha == 0:
- return probs
-
- self.eval()
- with torch.no_grad():
- # 0. Frontend
- if self.frontend is not None:
- hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
- hs_pad, hlens = self.feature_transform(hs_pad, hlens)
- else:
- hs_pad, hlens = xs_pad, ilens
-
- # 1. Encoder
- hpad, hlens, _ = self.enc(hs_pad, hlens)
-
- # 2. CTC probs
- probs = self.ctc.softmax(hpad).cpu().numpy()
- self.train()
- return probs
-
- def subsample_frames(self, x):
- """Subsample speeh frames in the encoder."""
- # subsample frame
- x = x[:: self.subsample[0], :]
- ilen = [x.shape[0]]
- h = to_device(self, torch.from_numpy(np.array(x, dtype=np.float32)))
- h.contiguous()
- return h, ilen
diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/my_utils.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/my_utils.py
deleted file mode 100644
index a5258394b8ae5385daa665ab6ba6380507d4798a..0000000000000000000000000000000000000000
--- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/my_utils.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import ffmpeg
-import numpy as np
-
-
-def load_audio(file, sr):
- try:
- # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26
- # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
- # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
- file = (
- file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
- ) # guard against users pasting a path with stray spaces, quotes, or newlines around it
- out, _ = (
- ffmpeg.input(file, threads=0)
- .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
- .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
- )
- except Exception as e:
- raise RuntimeError(f"Failed to load audio: {e}")
-
- return np.frombuffer(out, np.float32).flatten()
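-
-
-# Minimal usage sketch (illustrative addition, not part of the original file; assumes the
-# ffmpeg CLI is available and that "sample.wav" exists next to the script):
-#   audio = load_audio("sample.wav", sr=16000)  # mono float32 waveform resampled to 16 kHz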
diff --git a/spaces/shi-labs/Matting-Anything/GroundingDINO/groundingdino/util/__init__.py b/spaces/shi-labs/Matting-Anything/GroundingDINO/groundingdino/util/__init__.py
deleted file mode 100644
index 168f9979a4623806934b0ff1102ac166704e7dec..0000000000000000000000000000000000000000
--- a/spaces/shi-labs/Matting-Anything/GroundingDINO/groundingdino/util/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
diff --git a/spaces/shi-labs/OneFormer/demo/defaults.py b/spaces/shi-labs/OneFormer/demo/defaults.py
deleted file mode 100644
index 2abcab7a86eb9a93cdc75d1d3010bb6b2579dba4..0000000000000000000000000000000000000000
--- a/spaces/shi-labs/OneFormer/demo/defaults.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import torch
-import detectron2.data.transforms as T
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.data import (
- MetadataCatalog,
-)
-from detectron2.modeling import build_model
-
-
-__all__ = [
- "DefaultPredictor",
-]
-
-
-class DefaultPredictor:
- """
- Create a simple end-to-end predictor with the given config that runs on
- single device for a single input image.
- Compared to using the model directly, this class does the following additions:
- 1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
- 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
- 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
- 4. Take one input image and produce a single output, instead of a batch.
- This is meant for simple demo purposes, so it does the above steps automatically.
- This is not meant for benchmarks or running complicated inference logic.
- If you'd like to do anything more complicated, please refer to its source code as
- examples to build and use the model manually.
- Attributes:
- metadata (Metadata): the metadata of the underlying dataset, obtained from
- cfg.DATASETS.TEST.
- Examples:
- ::
- pred = DefaultPredictor(cfg)
- inputs = cv2.imread("input.jpg")
- outputs = pred(inputs)
- """
-
- def __init__(self, cfg):
- self.cfg = cfg.clone() # cfg can be modified by model
- self.model = build_model(self.cfg)
- self.model.eval()
- if len(cfg.DATASETS.TEST):
- self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
-
- checkpointer = DetectionCheckpointer(self.model)
- checkpointer.load(cfg.MODEL.WEIGHTS)
-
- self.aug = T.ResizeShortestEdge(
- [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
- )
-
- self.input_format = cfg.INPUT.FORMAT
- assert self.input_format in ["RGB", "BGR"], self.input_format
-
- def __call__(self, original_image, task):
- """
- Args:
- original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
- task (str): name of the task, used to build the task prompt passed to the model.
- Returns:
- predictions (dict):
- the output of the model for one image only.
- See :doc:`/tutorials/models` for details about the format.
- """
- with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
- # Apply pre-processing to image.
- if self.input_format == "RGB":
- # whether the model expects BGR inputs or RGB
- original_image = original_image[:, :, ::-1]
- height, width = original_image.shape[:2]
- image = self.aug.get_transform(original_image).apply_image(original_image)
- image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
-
- task = f"The task is {task}"
-
- inputs = {"image": image, "height": height, "width": width, "task": task}
- predictions = self.model([inputs])[0]
- return predictions
\ No newline at end of file
diff --git a/spaces/shiditya2003/MyGenerativeshiditya/README.md b/spaces/shiditya2003/MyGenerativeshiditya/README.md
deleted file mode 100644
index 6b7543b3a9a20cc64f84e554bb66f4cf2f3558b3..0000000000000000000000000000000000000000
--- a/spaces/shiditya2003/MyGenerativeshiditya/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: MyGenerativeshiditya
-emoji: 🌍
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/sihar/Online_Payment_Fraud_Detection/app.py b/spaces/sihar/Online_Payment_Fraud_Detection/app.py
deleted file mode 100644
index 39a0549a87751e55cbf124fffbc167bda9581909..0000000000000000000000000000000000000000
--- a/spaces/sihar/Online_Payment_Fraud_Detection/app.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import streamlit as st
-import pandas as pd
-import pickle
-
-# import preproses
-preproses = pickle.load(open("preproses.pkl", "rb"))
-# import model
-model = pickle.load(open("model.pkl", "rb"))
-
-#title
-st.title("Online Payments Fraud Detection")
-st.write("Created by Sihar Pangaribuan")
-
-# User input
-step = st.number_input(label='Unit of time (hour)', min_value=1, max_value=143, value=1, step=1)
-type = st.selectbox(label='Select type of online transaction', options=['PAYMENT', 'TRANSFER', 'CASH_OUT', 'DEBIT', 'CASH_IN'])
-amount = st.number_input(label='Input amount of the transaction', min_value=0.0, max_value=10000000.0, value=0.0, step=0.1)
-nameOrig = st.text_input('Input customer origin Id', value='')
-oldbalanceOrg = st.number_input(label='Balance before the transaction', min_value=0.0, max_value=38939424.03, value=0.0, step=0.1)
-newbalanceOrig = st.number_input(label='Balance after the transaction', min_value=0.0, max_value=38946233.02, value=0.0, step=0.1)
-nameDest = st.text_input('Input customer destination Id', value='')
-oldbalanceDest = st.number_input(label='Input initial balance of recipient before the transaction', min_value=0.0, max_value=42207404.59, value=0.0, step=0.1)
-newbalanceDest = st.number_input(label='Input the new balance of recipient after the transaction', min_value=0.0, max_value=42207404.59, value=0.0, step=0.1)
-
-# Convert to data frame
-data = pd.DataFrame({'step': [step],
- 'type': [type],
- 'amount': [amount],
- 'nameOrig': [nameOrig],
- 'oldbalanceOrg': [oldbalanceOrg],
- 'newbalanceOrig': [newbalanceOrig],
- 'nameDest': [nameDest],
- 'oldbalanceDest': [oldbalanceDest],
- 'newbalanceDest': [newbalanceDest]
- })
-
-data = preproses.transform(data)
-# model predict
-
-if st.button('Predict'):
- prediction = model.predict(data).tolist()[0]
-
- if prediction == 1:
- prediction = 'Fraud'
- else:
- prediction = 'Not Fraud'
-
- st.write('The Prediction is: ')
- st.write(prediction)
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Redeem Team High Praise and Worship Songs 2023 - RCCG Praise Team.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Redeem Team High Praise and Worship Songs 2023 - RCCG Praise Team.md
deleted file mode 100644
index 86e1539a1fc4c298ce4a5540fcc9b1859748206e..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Redeem Team High Praise and Worship Songs 2023 - RCCG Praise Team.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
-# How to Download Redeem Team: The Netflix Documentary on the 2008 US Olympic Basketball Team
-
If you are a fan of basketball, sports, or documentaries, you might be interested in watching Redeem Team, a new Netflix film that tells the story of the 2008 US men's Olympic basketball team. In this article, we will explain what Redeem Team is, why you should watch it, and how to download it on Netflix.
-
-## What is Redeem Team?
-
Redeem Team is a documentary that follows the journey of the US men's basketball team that competed in the 2008 Beijing Olympics. The film explores how the team, nicknamed the "Redeem Team", sought to restore the pride and glory of American basketball after disappointing performances in previous international tournaments.
The documentary reveals how the US basketball program underwent a major overhaul after losing the gold medal at the 2004 Athens Olympics and finishing third at the 2006 FIBA World Championship. Jerry Colangelo, the managing director of USA Basketball, recruited Mike Krzyzewski, the legendary coach of Duke University, to lead the team. Together, they assembled a roster of NBA stars who committed to play for their country for three years.
-
-### The main characters and highlights of the film
-
The documentary features exclusive interviews and footage from some of the biggest names in basketball, such as Kobe Bryant, LeBron James, Dwyane Wade, Carmelo Anthony, Chris Paul, Dwight Howard, and Jason Kidd. The film also showcases some of the most memorable moments from the team's games, such as Bryant's clutch performance against Spain in the gold medal game, Wade's scoring spree against Australia in the quarterfinals, and James' dunk over Angola in the group stage.
-
-## Why should you watch Redeem Team?
-
Redeem Team is more than just a sports documentary. It is also a film that delivers an inspirational message and a high-quality entertainment experience.
-
-### The inspirational message and lessons of the film
-
The film shows how the Redeem Team overcame adversity, criticism, and pressure to achieve their goal of winning gold. It also demonstrates how the team embodied values such as teamwork, leadership, sacrifice, patriotism, and excellence. The film can inspire viewers to pursue their own dreams and overcome their own challenges.
-
-### The entertainment value and quality of the film
-
The film is also a captivating and enjoyable watch for anyone who loves basketball or documentaries. The film features stunning visuals, thrilling action, emotional drama, and humorous moments. The film also has a high production value, as it was directed by Jon Weinbach, who also co-produced The Last Dance, the award-winning documentary on Michael Jordan and the Chicago Bulls.
-
-## How to download Redeem Team on Netflix?
-
If you want to watch Redeem Team on Netflix, you have two options: streaming it online or downloading it offline. Streaming it online means that you need an internet connection to watch it. Downloading it offline means that you can save it on your device and watch it anytime without an internet connection.
-
-### The steps to download Redeem Team on your device
-
To download Redeem Team on your device, you need to have a Netflix account and a compatible device. Here are the steps to follow:
-
-1. Open the Netflix app on your device.
-2. Search for Redeem Team in the app.
-3. Select the download icon next to the film title.
-4. Wait for the download to finish. You can check the progress in the downloads section of the app.
-5. Once the download is complete, you can watch the film offline by tapping on it in the downloads section.
-
-### The benefits and limitations of downloading Redeem Team
-
Downloading Redeem Team has some benefits and limitations that you should be aware of. Here are some of them:
-
-| Benefits | Limitations |
-| --- | --- |
-| You can watch the film anytime, anywhere, without an internet connection. | You need to have enough storage space on your device to download the film. |
-| You can avoid buffering or loading issues that might occur when streaming the film online. | You need to have a Netflix subscription and a compatible device to download the film. |
-| You can save data and bandwidth by downloading the film over Wi-Fi and watching it offline. | You need to renew your download every 48 hours or it will expire and you will have to download it again. |
-
-## Conclusion
-
Redeem Team is a must-watch documentary for anyone who loves basketball, sports, or documentaries. It tells the story of the 2008 US men's Olympic basketball team that won gold in Beijing after a series of setbacks and challenges. It also delivers an inspirational message and a high-quality entertainment experience. You can watch Redeem Team on Netflix by streaming it online or downloading it offline. We hope this article has helped you learn more about Redeem Team and how to download it on Netflix. Now, go ahead and enjoy the film!
-
-## FAQs
-
-### When was Redeem Team released?
-
Redeem Team was released on Netflix on October 29, 2021.
-
-### Who directed Redeem Team?
-
Redeem Team was directed by Jon Weinbach, who also co-produced The Last Dance.
-
-### How long is Redeem Team?
-
Redeem Team is 1 hour and 40 minutes long.
-
-### Can I watch Redeem Team offline?
-
Yes, you can watch Redeem Team offline by downloading it on your device using the Netflix app.
-
-### What are some other documentaries on basketball?
-
Some other documentaries on basketball that you might like are The Last Dance, Basketball: A Love Story, Hoop Dreams, and Iverson.
-
-
\ No newline at end of file
diff --git a/spaces/simsantonioii/MusicGen-Continuation/audiocraft/data/audio_dataset.py b/spaces/simsantonioii/MusicGen-Continuation/audiocraft/data/audio_dataset.py
deleted file mode 100644
index cf21422ea0059cb2d6553f93e608b8f9fa0d3a50..0000000000000000000000000000000000000000
--- a/spaces/simsantonioii/MusicGen-Continuation/audiocraft/data/audio_dataset.py
+++ /dev/null
@@ -1,525 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import copy
-from concurrent.futures import ThreadPoolExecutor, Future
-from dataclasses import dataclass, fields
-from contextlib import ExitStack
-import gzip
-import json
-import logging
-import os
-from pathlib import Path
-import random
-import sys
-import typing as tp
-
-import torch
-import torch.nn.functional as F
-
-from .audio import audio_read, audio_info
-from .audio_utils import convert_audio
-from .zip import PathInZip
-
-try:
- import dora
-except ImportError:
- dora = None # type: ignore
-
-
-@dataclass(order=True)
-class BaseInfo:
-
- @classmethod
- def _dict2fields(cls, dictionary: dict):
- return {
- field.name: dictionary[field.name]
- for field in fields(cls) if field.name in dictionary
- }
-
- @classmethod
- def from_dict(cls, dictionary: dict):
- _dictionary = cls._dict2fields(dictionary)
- return cls(**_dictionary)
-
- def to_dict(self):
- return {
- field.name: self.__getattribute__(field.name)
- for field in fields(self)
- }
-
-
-@dataclass(order=True)
-class AudioMeta(BaseInfo):
- path: str
- duration: float
- sample_rate: int
- amplitude: tp.Optional[float] = None
- weight: tp.Optional[float] = None
- # info_path is used to load additional information about the audio file that is stored in zip files.
- info_path: tp.Optional[PathInZip] = None
-
- @classmethod
- def from_dict(cls, dictionary: dict):
- base = cls._dict2fields(dictionary)
- if 'info_path' in base and base['info_path'] is not None:
- base['info_path'] = PathInZip(base['info_path'])
- return cls(**base)
-
- def to_dict(self):
- d = super().to_dict()
- if d['info_path'] is not None:
- d['info_path'] = str(d['info_path'])
- return d
-
-
-@dataclass(order=True)
-class SegmentInfo(BaseInfo):
- meta: AudioMeta
- seek_time: float
- n_frames: int # actual number of frames without padding
- total_frames: int # total number of frames, padding included
- sample_rate: int # actual sample rate
-
-
-DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a']
-
-logger = logging.getLogger(__name__)
-
-
-def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta:
- """AudioMeta from a path to an audio file.
-
- Args:
- file_path (str): Resolved path of valid audio file.
- minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
- Returns:
- AudioMeta: Audio file path and its metadata.
- """
- info = audio_info(file_path)
- amplitude: tp.Optional[float] = None
- if not minimal:
- wav, sr = audio_read(file_path)
- amplitude = wav.abs().max().item()
- return AudioMeta(file_path, info.duration, info.sample_rate, amplitude)
-
-
-def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta:
- """If Dora is available as a dependency, try to resolve potential relative paths
- in list of AudioMeta. This method is expected to be used when loading meta from file.
-
- Args:
- m (AudioMeta): Audio meta to resolve.
- fast (bool): If True, uses a really fast check for determining if a file is already absolute or not.
- Only valid on Linux/Mac.
- Returns:
- AudioMeta: Audio meta with resolved path.
- """
- def is_abs(m):
- if fast:
- return str(m)[0] == '/'
- else:
- return os.path.isabs(str(m))
-
- if not dora:
- return m
-
- if not is_abs(m.path):
- m.path = dora.git_save.to_absolute_path(m.path)
- if m.info_path is not None and not is_abs(m.info_path.zip_path):
- m.info_path.zip_path = dora.git_save.to_absolute_path(m.path)
- return m
-
-
-def find_audio_files(path: tp.Union[Path, str],
- exts: tp.List[str] = DEFAULT_EXTS,
- resolve: bool = True,
- minimal: bool = True,
- progress: bool = False,
- workers: int = 0) -> tp.List[AudioMeta]:
- """Build a list of AudioMeta from a given path,
- collecting relevant audio files and fetching meta info.
-
- Args:
- path (str or Path): Path to folder containing audio files.
- exts (list of str): List of file extensions to consider for audio files.
- resolve (bool): Whether to resolve the audio file paths to absolute paths.
- minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
- progress (bool): Whether to log progress on audio files collection.
- workers (int): number of parallel workers, if 0, use only the current thread.
- Returns:
- List[AudioMeta]: List of audio file path and its metadata.
- """
- audio_files = []
- futures: tp.List[Future] = []
- pool: tp.Optional[ThreadPoolExecutor] = None
- with ExitStack() as stack:
- if workers > 0:
- pool = ThreadPoolExecutor(workers)
- stack.enter_context(pool)
-
- if progress:
- print("Finding audio files...")
- for root, folders, files in os.walk(path, followlinks=True):
- for file in files:
- full_path = Path(root) / file
- if full_path.suffix.lower() in exts:
- audio_files.append(full_path)
- if pool is not None:
- futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal))
- if progress:
- print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr)
-
- if progress:
- print("Getting audio metadata...")
- meta: tp.List[AudioMeta] = []
- for idx, file_path in enumerate(audio_files):
- try:
- if pool is None:
- m = _get_audio_meta(str(file_path), minimal)
- else:
- m = futures[idx].result()
- if resolve:
- m = _resolve_audio_meta(m)
- except Exception as err:
- print("Error with", str(file_path), err, file=sys.stderr)
- continue
- meta.append(m)
- if progress:
- print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr)
- meta.sort()
- return meta
-
-
-def load_audio_meta(path: tp.Union[str, Path],
- resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]:
- """Load list of AudioMeta from an optionally compressed json file.
-
- Args:
- path (str or Path): Path to JSON file.
- resolve (bool): Whether to resolve the path from AudioMeta (default=True).
- fast (bool): activates some tricks to make things faster.
- Returns:
- List[AudioMeta]: List of audio file paths and their metadata.
- """
- open_fn = gzip.open if str(path).lower().endswith('.gz') else open
- with open_fn(path, 'rb') as fp: # type: ignore
- lines = fp.readlines()
- meta = []
- for line in lines:
- d = json.loads(line)
- m = AudioMeta.from_dict(d)
- if resolve:
- m = _resolve_audio_meta(m, fast=fast)
- meta.append(m)
- return meta
-
-
-def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]):
- """Save the audio metadata to the file pointer as json.
-
- Args:
- path (str or Path): Path to JSON file.
- meta (list of AudioMeta): List of audio meta to save.
- """
- Path(path).parent.mkdir(exist_ok=True, parents=True)
- open_fn = gzip.open if str(path).lower().endswith('.gz') else open
- with open_fn(path, 'wb') as fp: # type: ignore
- for m in meta:
- json_str = json.dumps(m.to_dict()) + '\n'
- json_bytes = json_str.encode('utf-8')
- fp.write(json_bytes)
-
-
-class AudioDataset:
- """Base audio dataset.
-
- The dataset takes a list of AudioMeta and create a dataset composed of segments of audio
- and potentially additional information, by creating random segments from the list of audio
- files referenced in the metadata and applying minimal data pre-processing such as resampling,
- mixing of channels, padding, etc.
-
- If no segment_duration value is provided, the AudioDataset will return the full wav for each
- audio file. Otherwise, it will randomly sample audio files and create a segment of the specified
- duration, applying padding if required.
-
- By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True
- returns a tuple containing the torch Tensor and additional metadata on the segment and the
- original audio meta.
-
- Args:
- meta (tp.List[AudioMeta]): List of audio files metadata.
- segment_duration (float): Optional segment duration of audio to load.
- If not specified, the dataset will load the full audio segment from the file.
- shuffle (bool): Set to `True` to have the data reshuffled at every epoch.
- sample_rate (int): Target sample rate of the loaded audio samples.
- channels (int): Target number of channels of the loaded audio samples.
- sample_on_duration (bool): Set to `True` to sample segments with probability
- dependent on audio file duration. This is only used if `segment_duration` is provided.
- sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of
- `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product
- of the file duration and file weight. This is only used if `segment_duration` is provided.
- min_segment_ratio (float): Minimum segment ratio to use when the audio file
- is shorter than the desired segment.
- max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset.
- return_info (bool): Whether to return the wav only or return wav along with segment info and metadata.
- min_audio_duration (tp.Optional[float], optional): Minimum audio file duration, in seconds, if provided
- audio shorter than this will be filtered out.
- max_audio_duration (tp.Optional[float], optional): Maximal audio file duration in seconds, if provided
- audio longer than this will be filtered out.
- """
- def __init__(self,
- meta: tp.List[AudioMeta],
- segment_duration: tp.Optional[float] = None,
- shuffle: bool = True,
- num_samples: int = 10_000,
- sample_rate: int = 48_000,
- channels: int = 2,
- pad: bool = True,
- sample_on_duration: bool = True,
- sample_on_weight: bool = True,
- min_segment_ratio: float = 0.5,
- max_read_retry: int = 10,
- return_info: bool = False,
- min_audio_duration: tp.Optional[float] = None,
- max_audio_duration: tp.Optional[float] = None
- ):
- assert len(meta) > 0, 'No audio meta provided to AudioDataset. Please check loading of audio meta.'
- assert segment_duration is None or segment_duration > 0
- assert segment_duration is None or min_segment_ratio >= 0
- logging.debug(f'sample_on_duration: {sample_on_duration}')
- logging.debug(f'sample_on_weight: {sample_on_weight}')
- logging.debug(f'pad: {pad}')
- logging.debug(f'min_segment_ratio: {min_segment_ratio}')
-
- self.segment_duration = segment_duration
- self.min_segment_ratio = min_segment_ratio
- self.max_audio_duration = max_audio_duration
- self.min_audio_duration = min_audio_duration
- if self.min_audio_duration is not None and self.max_audio_duration is not None:
- assert self.min_audio_duration <= self.max_audio_duration
- self.meta: tp.List[AudioMeta] = self._filter_duration(meta)
- assert len(self.meta) # Fail fast if all data has been filtered.
- self.total_duration = sum(d.duration for d in self.meta)
-
- if segment_duration is None:
- num_samples = len(self.meta)
- self.num_samples = num_samples
- self.shuffle = shuffle
- self.sample_rate = sample_rate
- self.channels = channels
- self.pad = pad
- self.sample_on_weight = sample_on_weight
- self.sample_on_duration = sample_on_duration
- self.sampling_probabilities = self._get_sampling_probabilities()
- self.max_read_retry = max_read_retry
- self.return_info = return_info
-
- def __len__(self):
- return self.num_samples
-
- def _get_sampling_probabilities(self, normalized: bool = True):
- """Return the sampling probabilities for each file inside `self.meta`.
- """
- scores: tp.List[float] = []
- for file_meta in self.meta:
- score = 1.
- if self.sample_on_weight and file_meta.weight is not None:
- score *= file_meta.weight
- if self.sample_on_duration:
- score *= file_meta.duration
- scores.append(score)
- probabilities = torch.tensor(scores)
- if normalized:
- probabilities /= probabilities.sum()
- return probabilities
-
- def sample_file(self, rng: torch.Generator) -> AudioMeta:
- """Sample a given file from `self.meta`. Can be overriden in subclasses.
- This is only called if `segment_duration` is not None.
-
- You must use the provided random number generator `rng` for reproducibility.
- """
- if not self.sample_on_weight and not self.sample_on_duration:
- file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item())
- else:
- file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item())
-
- return self.meta[file_index]
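-
- # Subclassing sketch (added illustration, not part of the original file): sample files
- # uniformly, ignoring weight and duration, by overriding `sample_file`.
- #   class UniformAudioDataset(AudioDataset):
- #       def sample_file(self, rng: torch.Generator) -> AudioMeta:
- #           idx = int(torch.randint(len(self.meta), (1,), generator=rng).item())
- #           return self.meta[idx]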
-
- def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]:
- if self.segment_duration is None:
- file_meta = self.meta[index]
- out, sr = audio_read(file_meta.path)
- out = convert_audio(out, sr, self.sample_rate, self.channels)
- n_frames = out.shape[-1]
- segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames,
- sample_rate=self.sample_rate)
- else:
- rng = torch.Generator()
- if self.shuffle:
- # We use index, plus extra randomness
- rng.manual_seed(index + self.num_samples * random.randint(0, 2**24))
- else:
- # We only use index
- rng.manual_seed(index)
-
- for retry in range(self.max_read_retry):
- file_meta = self.sample_file(rng)
- # We add some variance in the file position even if audio file is smaller than segment
- # without ending up with empty segments
- max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio)
- seek_time = torch.rand(1, generator=rng).item() * max_seek
- try:
- out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False)
- out = convert_audio(out, sr, self.sample_rate, self.channels)
- n_frames = out.shape[-1]
- target_frames = int(self.segment_duration * self.sample_rate)
- if self.pad:
- out = F.pad(out, (0, target_frames - n_frames))
- segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames,
- sample_rate=self.sample_rate)
- except Exception as exc:
- logger.warning("Error opening file %s: %r", file_meta.path, exc)
- if retry == self.max_read_retry - 1:
- raise
- else:
- break
-
- if self.return_info:
- # Returns the wav and additional information on the wave segment
- return out, segment_info
- else:
- return out
-
- def collater(self, samples):
- """The collater function has to be provided to the dataloader
- if AudioDataset has return_info=True in order to properly collate
- the samples of a batch.
- """
- if self.segment_duration is None and len(samples) > 1:
- assert self.pad, "Must allow padding when batching examples of different durations."
-
- # In this case the audio reaching the collater is of variable length as segment_duration=None.
- to_pad = self.segment_duration is None and self.pad
- if to_pad:
- max_len = max([wav.shape[-1] for wav, _ in samples])
-
- def _pad_wav(wav):
- return F.pad(wav, (0, max_len - wav.shape[-1]))
-
- if self.return_info:
- if len(samples) > 0:
- assert len(samples[0]) == 2
- assert isinstance(samples[0][0], torch.Tensor)
- assert isinstance(samples[0][1], SegmentInfo)
-
- wavs = [wav for wav, _ in samples]
- segment_infos = [copy.deepcopy(info) for _, info in samples]
-
- if to_pad:
- # Each wav could be of a different duration as they are not segmented.
- for i in range(len(samples)):
- # Determines the total length of the signal with padding, so we update here as we pad.
- segment_infos[i].total_frames = max_len
- wavs[i] = _pad_wav(wavs[i])
-
- wav = torch.stack(wavs)
- return wav, segment_infos
- else:
- assert isinstance(samples[0], torch.Tensor)
- if to_pad:
- samples = [_pad_wav(s) for s in samples]
- return torch.stack(samples)
-
- def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
- """Filters out audio files with short durations.
- Removes from meta files that have durations that will not allow to samples examples from them.
- """
- orig_len = len(meta)
-
- # Filter data that is too short.
- if self.min_audio_duration is not None:
- meta = [m for m in meta if m.duration >= self.min_audio_duration]
-
- # Filter data that is too long.
- if self.max_audio_duration is not None:
- meta = [m for m in meta if m.duration <= self.max_audio_duration]
-
- filtered_len = len(meta)
- removed_percentage = 100*(1-float(filtered_len)/orig_len)
- msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage
- if removed_percentage < 10:
- logging.debug(msg)
- else:
- logging.warning(msg)
- return meta
-
- @classmethod
- def from_meta(cls, root: tp.Union[str, Path], **kwargs):
- """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file.
-
- Args:
- root (str or Path): Path to root folder containing audio files.
- kwargs: Additional keyword arguments for the AudioDataset.
- """
- root = Path(root)
- if root.is_dir():
- if (root / 'data.jsonl').exists():
- root = root / 'data.jsonl'
- elif (root / 'data.jsonl.gz').exists():
- root = root / 'data.jsonl.gz'
- else:
- raise ValueError("Don't know where to read metadata from in the dir. "
- "Expecting either a data.jsonl or data.jsonl.gz file but none found.")
- meta = load_audio_meta(root)
- return cls(meta, **kwargs)
-
- @classmethod
- def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True,
- exts: tp.List[str] = DEFAULT_EXTS, **kwargs):
- """Instantiate AudioDataset from a path containing (possibly nested) audio files.
-
- Args:
- root (str or Path): Path to root folder containing audio files.
- minimal_meta (bool): Whether to only load minimal metadata or not.
- exts (list of str): Extensions for audio files.
- kwargs: Additional keyword arguments for the AudioDataset.
- """
- root = Path(root)
- if root.is_file():
- meta = load_audio_meta(root, resolve=True)
- else:
- meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True)
- return cls(meta, **kwargs)
-
-
-def main():
- logging.basicConfig(stream=sys.stderr, level=logging.INFO)
- parser = argparse.ArgumentParser(
- prog='audio_dataset',
- description='Generate .jsonl files by scanning a folder.')
- parser.add_argument('root', help='Root folder with all the audio files')
- parser.add_argument('output_meta_file',
- help='Output file to store the metadata.')
- parser.add_argument('--complete',
- action='store_false', dest='minimal', default=True,
- help='Retrieve all metadata, even the ones that are expensive '
- 'to compute (e.g. normalization).')
- parser.add_argument('--resolve',
- action='store_true', default=False,
- help='Resolve the paths to be absolute and with no symlinks.')
- parser.add_argument('--workers',
- default=10, type=int,
- help='Number of workers.')
- args = parser.parse_args()
- meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True,
- resolve=args.resolve, minimal=args.minimal, workers=args.workers)
- save_audio_meta(args.output_meta_file, meta)
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/sklearn-docs/Plot-Ridge-Coefficients-as-A-Function-of-the-Regularization/app.py b/spaces/sklearn-docs/Plot-Ridge-Coefficients-as-A-Function-of-the-Regularization/app.py
deleted file mode 100644
index 2c358705be37cfb4129fcac349cb2dd34d675ff0..0000000000000000000000000000000000000000
--- a/spaces/sklearn-docs/Plot-Ridge-Coefficients-as-A-Function-of-the-Regularization/app.py
+++ /dev/null
@@ -1,105 +0,0 @@
-"""
-===========================================================================
-Gradio Demo to Plot Ridge coefficients as a function of the regularization
-===========================================================================
-Shows the effect of collinearity in the coefficients of an estimator.
-.. currentmodule:: sklearn.linear_model
-:class:`Ridge` Regression is the estimator used in this example.
-Each color represents a different feature of the
-coefficient vector, and this is displayed as a function of the
-regularization parameter.
-This example also shows the usefulness of applying Ridge regression
-to highly ill-conditioned matrices. For such matrices, a slight
-change in the target variable can cause huge variances in the
-calculated weights. In such cases, it is useful to set a certain
-regularization (alpha) to reduce this variation (noise).
-When alpha is very large, the regularization effect dominates the
-squared loss function and the coefficients tend to zero.
-At the end of the path, as alpha tends toward zero
-and the solution tends towards the ordinary least squares, coefficients
-exhibit big oscillations. In practise it is necessary to tune alpha
-in such a way that a balance is maintained between both.
-"""
-
-# Author: Fabian Pedregosa
-# License: BSD 3 clause
-# Demo Author: Syed Affan
-
-
-import numpy as np
-import matplotlib.pyplot as plt
-from sklearn import linear_model
-import gradio as gr
-
-def make_plot(size_X,min_alpha,max_alpha):
-# X is the size_X x size_X Hilbert matrix
- X = 1.0 / (np.arange(1, size_X+1) + np.arange(0, size_X)[:, np.newaxis])
- y = np.ones(size_X)
-
-# %%
-# Compute paths
-# -------------
-
- fig = plt.figure()
- n_alphas = 200
- alphas = np.logspace(min_alpha, max_alpha, n_alphas)
-
- coefs = []
- for a in alphas:
- ridge = linear_model.Ridge(alpha=a, fit_intercept=False)
- ridge.fit(X, y)
- coefs.append(ridge.coef_)
-
-# %%
-# Display results
-# ---------------
-
- ax = plt.gca()
-
- ax.plot(alphas, coefs)
- ax.set_xscale("log")
- ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
- plt.xlabel("alpha")
- plt.ylabel("weights")
- plt.title("Ridge coefficients as a function of the regularization")
- plt.axis("tight")
- return fig
-
-title='Plot Ridge coefficients as a function of the regularization'
-
-model_card=f"""
-## Description
-This interactive demo is based on the [Plot Ridge coefficients as a function of the regularization](https://scikit-learn.org/stable/_downloads/9d5a4167bc60f250de65fe21497c1eb6/plot_ridge_path.py) example from the popular [scikit-learn](https://scikit-learn.org/stable/) library, which is a widely-used library for machine learning in Python.
-This demo demonstrates the effect of collinearity in the coefficients of an estimator by plotting the regularization selected against the coefficients that are learnt by the model.
-It also shows the usefulness of applying Ridge regression to highly ill-conditioned matrices. For such matrices, a slight change in the target variable can cause huge variances in the calculated weights. In such cases, it is useful to set a certain regularization (alpha) to reduce this variation (noise). You can play with the range of `Alpha` values and the `Training Size`
-When alpha is very large, the regularization effect dominates the squared loss function and the coefficients tend to zero. At the end of the path, as alpha tends toward zero and the solution tends towards the ordinary least squares, coefficients exhibit big oscillations. In practise it is necessary to tune alpha in such a way that a balance is maintained between both.
-## Model
-currentmodule: [sklearn.linear_model](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model)
-class:`Ridge` Regression is the estimator used in this example.
-Each color represents a different feature of the coefficient vector, and this is displayed as a function of the regularization parameter.
-"""
-
-with gr.Blocks(title=title) as demo:
- gr.Markdown('''
-
-
- # Plot Ridge coefficients as a function of the regularization
-
- ''')
- gr.Markdown(model_card)
- gr.Markdown("Author: sulpha")
- d0 = gr.Slider(1,101,value=10,step=10,label='Select Size of Training Set')
- with gr.Column():
- with gr.Tab('Select Alpha Range'):
- d1 = gr.Slider(-20,20,value=-10,step=1,label='Minimum alpha exponent (log10). A log-spaced array of regularization values between the two exponents is fed to the model and plotted against the learnt weights')
- d2 = gr.Slider(-20,20,value=-2,step=1,label='Maximum alpha exponent (log10)')
-
- o1=gr.Plot()
- #btn = gr.Button(value = 'Submit')
- d0.change(fn=make_plot,inputs=[d0,d1,d2],outputs=[o1])
- d1.change(fn=make_plot,inputs=[d0,d1,d2],outputs=[o1])
- d2.change(fn=make_plot,inputs=[d0,d1,d2],outputs=[o1])
-
- #btn.click(make_plot,inputs=[d0,d1,d2],outputs=[gr.Plot()])
-
-demo.launch()
-
diff --git a/spaces/society-ethics/disaggregators/app.py b/spaces/society-ethics/disaggregators/app.py
deleted file mode 100644
index 17c894e4e78cb6a9b274f9ec4cb2eeb268668ba5..0000000000000000000000000000000000000000
--- a/spaces/society-ethics/disaggregators/app.py
+++ /dev/null
@@ -1,547 +0,0 @@
-import gradio as gr
-from datasets import load_dataset
-import matplotlib as mpl
-mpl.use('Agg')
-from typing import List
-import matplotlib.pyplot as plt
-import numpy as np
-import joblib
-import itertools
-import pandas as pd
-
-cached_artifacts = joblib.load("cached_data.pkl")
-
-laion = load_dataset("society-ethics/laion2B-en_continents", split="train").to_pandas()
-medmcqa = load_dataset("society-ethics/medmcqa_age_gender_custom", split="train").to_pandas()
-stack = load_dataset("society-ethics/the-stack-tabs_spaces", split="train").to_pandas()\
- .drop(columns=["max_stars_repo_licenses", "max_issues_repo_licenses", "max_forks_repo_licenses"])
-
-cached_artifacts["laion"]["text"] = {
- "title": "Disaggregating by continent with a built-in module",
- "description": """
- The [`laion/laion2b-en` dataset](https://huggingface.co/datasets/laion/laion2B-en), created by [LAION](https://laion.ai), is used to train image generation models such as [Stable Diffusion](https://huggingface.co/spaces/stabilityai/stable-diffusion). The dataset contains pairs of images and captions, but we might also be curious about the distribution of specific topics, such as continents, mentioned in the captions.
-
- The original dataset doesn't contain metadata about specific continents, but we can attempt to infer it from the `TEXT` feature with `disaggregators`. Note that several factors contribute to a high incidence of false positives, such as the fact that country and city names are frequently used as names for fashion products.
- """,
- "visualization": """
- This view shows you a visualization of the relative proportion of each label in the disaggregated dataset. For this dataset, we've only disaggregated by one category (continent), but there are many possible values for it. While there are many rows that haven't been flagged with a continent (check "None" and see!), this disaggregator doesn't assign *Multiple* continents.
-
- To see examples of individual rows, click over to the "Inspect" tab!
- """,
- "code": """
- ```python
- from disaggregators import Disaggregator
- disaggregator = Disaggregator("continent", column="TEXT")
-
- # Note: this demo used a subset of the dataset
- from datasets import load_dataset
- ds = load_dataset("laion/laion2B-en", split="train", streaming=True).map(disaggregator)
- ```
- """
-}
-
-cached_artifacts["medmcqa"]["text"] = {
- "title": "Overriding configurations for built-in modules",
- "description": """
- Meta's [Galactica model](https://galactica.org) is trained on a large-scale scientific corpus, which includes the [`medmcqa` dataset](https://huggingface.co/datasets/medmcqa) of medical entrance exam questions. MedMCQA has a `question` feature which often contains a case scenario, where a hypothetical patient presents with a condition.
-
-    The original dataset doesn't contain metadata about age or binary gender, but we can infer them with the `age` and `gender` modules. If a module doesn't have the particular label options that you'd like, such as additional genders or specific age buckets, you can override the module's configuration. In this example we've configured the `age` module to use [NIH's MeSH age groups](https://www.ncbi.nlm.nih.gov/mesh/68009273).
- """,
- "visualization": """
- Since we've disaggregated the MedMCQA dataset by *two* categories (age and binary gender), we can click on "Age + Gender" to visualize the proportions of the *intersections* of each group.
-
- There are two things to note about this example:
- 1. The disaggregators for age and gender can flag rows as having more than one age or gender, which we've grouped here as "Multiple"
- 2. If you look at the data through the "Inspect" tab, you'll notice that there are some false positives. `disaggregators` is in early development, and these modules are in a very early "proof of concept" stage! Keep an eye out as we develop more sophisticated algorithms for disaggregation, and [join us over on GitHub](https://github.com/huggingface/disaggregators) if you'd like to contribute ideas, documentation, or code.
- """,
- "code": """
- ```python
- from disaggregators import Disaggregator
- from disaggregators.disaggregation_modules.age import Age, AgeLabels, AgeConfig
-
- class MeSHAgeLabels(AgeLabels):
- INFANT = "infant"
- CHILD_PRESCHOOL = "child_preschool"
- CHILD = "child"
- ADOLESCENT = "adolescent"
- ADULT = "adult"
- MIDDLE_AGED = "middle_aged"
- AGED = "aged"
- AGED_80_OVER = "aged_80_over"
-
- age_config = AgeConfig(
- labels=MeSHAgeLabels,
- ages=[list(MeSHAgeLabels)],
- breakpoints=[0, 2, 5, 12, 18, 44, 64, 79]
- )
-
- age = Age(config=age_config, column="question")
-
- disaggregator = Disaggregator([age, "gender"], column="question")
-
- from datasets import load_dataset
- ds = load_dataset("medmcqa", split="train").map(disaggregator)
- ```
- """
-}
-
-cached_artifacts["stack"]["text"] = {
- "title": "Creating custom disaggregators",
- "description": """
-    [The BigCode Project](https://www.bigcode-project.org/) recently released [`bigcode/the-stack`](https://huggingface.co/datasets/bigcode/the-stack), which contains over 6TB of permissively-licensed source code files covering 358 programming languages. One of the languages included is [JSX](https://reactjs.org/docs/introducing-jsx.html), which is an extension to JavaScript specifically designed for the [React UI library](https://reactjs.org/docs/introducing-jsx.html). Let's ask some questions about the React code in this dataset!
-
- 1. React lets developers define UI components [as functions or as classes](https://reactjs.org/docs/components-and-props.html#function-and-class-components). Which style is more popular in this dataset?
- 2. Programmers have long argued over using [tabs or spaces](https://www.youtube.com/watch?v=SsoOG6ZeyUI). Who's winning?
-
- `disaggregators` makes it easy to add your own disaggregation modules. See the code snippet below for an example 🤗
- """,
- "visualization": """
- Like the MedMCQA example, this dataset has also been disaggregated by more than one category. Using multiple disaggregation modules lets us get insights into interesting *intersections* of the subpopulations in our datasets.
- """,
- "code": """
- ```python
- from disaggregators import Disaggregator, DisaggregationModuleLabels, CustomDisaggregator
-
- class TabsSpacesLabels(DisaggregationModuleLabels):
- TABS = "tabs"
- SPACES = "spaces"
-
- class TabsSpaces(CustomDisaggregator):
- module_id = "tabs_spaces"
- labels = TabsSpacesLabels
-
- def __call__(self, row, *args, **kwargs):
- if "\\t" in row[self.column]:
- return {self.labels.TABS: True, self.labels.SPACES: False}
- else:
- return {self.labels.TABS: False, self.labels.SPACES: True}
-
- class ReactComponentLabels(DisaggregationModuleLabels):
- CLASS = "class"
- FUNCTION = "function"
-
-
- class ReactComponent(CustomDisaggregator):
- module_id = "react_component"
- labels = ReactComponentLabels
-
- def __call__(self, row, *args, **kwargs):
- if "extends React.Component" in row[self.column] or "extends Component" in row[self.column]:
- return {self.labels.CLASS: True, self.labels.FUNCTION: False}
- else:
- return {self.labels.CLASS: False, self.labels.FUNCTION: True}
-
- disaggregator = Disaggregator([TabsSpaces, ReactComponent], column="content")
-
- # Note: this demo used a subset of the dataset
- from datasets import load_dataset
- ds = load_dataset("bigcode/the-stack", data_dir="data/jsx", split="train", streaming=True).map(disaggregator)
- ```
- """
-}
-
-
-def create_plot(selected_fields, available_fields, distributions, feature_names, plot=None):
- plt.close('all')
- clean_fields = [field for field in selected_fields if field not in ["Multiple", "None"]]
- extra_options = [field for field in selected_fields if field in ["Multiple", "None"]]
-
- distributions = distributions.reorder_levels(
- sorted(list(available_fields)) + [idx for idx in distributions.index.names if idx not in available_fields]
- )
- distributions = distributions.sort_index()
-
- def get_tuple(field):
- return tuple(True if field == x else False for x in sorted(available_fields))
-
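-    # build one boolean index tuple per selected label: True only at that label's position among the sorted index levels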
- masks = [get_tuple(field) for field in sorted(clean_fields)]
- data = [distributions.get(mask, 0) for mask in masks]
- data = [x.sum() if type(x) != int else x for x in data]
-
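-    # "Multiple" sums over index tuples with more than one True label; "None" is the all-False tuple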
- if "Multiple" in extra_options:
- masks_mult = [el for el in itertools.product((True, False), repeat=len(available_fields)) if el.count(True) > 1]
- data = data + [sum([distributions.get(mask, pd.Series(dtype=float)).sum() for mask in masks_mult])]
-
- if "None" in extra_options:
- none_mask = tuple(False for x in available_fields)
- data = data + [distributions.get(none_mask, pd.Series(dtype=float)).sum()]
-
- fig, ax = plt.subplots()
-
- title = "Distribution "
- size = 0.3
-
- cmap = plt.colormaps["Set3"]
- outer_colors = cmap(np.arange(len(data)))
-
- total_sum = sum(data)
- all_fields = sorted(clean_fields) + sorted(extra_options)
- labels = [f"{feature_names.get(c, c)}\n{round(data[i] / total_sum * 100, 2)}%" for i, c in enumerate(all_fields)]
-
- ax.pie(data, radius=1, labels=labels, colors=outer_colors,
- wedgeprops=dict(width=size, edgecolor='w'))
-
- ax.set(aspect="equal", title=title)
-
- if plot is None:
- return gr.Plot(plt)
- else:
- new_plot = plot.update(plt)
- return new_plot
-
-
-# TODO: Consolidate with the other plot function...
-def create_nested_plot(selected_outer, available_outer, selected_inner, available_inner, distributions, feature_names, plot=None):
- plt.close('all')
-
- clean_outer = [field for field in selected_outer if field not in ["Multiple", "None"]]
- extra_outer = [field for field in selected_outer if field in ["Multiple", "None"]]
-
- clean_inner = [field for field in selected_inner if field not in ["Multiple", "None"]]
- extra_inner = [field for field in selected_inner if field in ["Multiple", "None"]]
-
- distributions = distributions.reorder_levels(
- sorted(list(available_outer)) + sorted(list(available_inner)) + sorted([idx for idx in distributions.index.names if idx not in (available_outer + available_inner)])
- )
- distributions = distributions.sort_index()
-
- def get_tuple(field, field_options):
- return tuple(True if field == x else False for x in sorted(field_options))
-
- masks_outer = [get_tuple(field, available_outer) for field in sorted(clean_outer)]
- masks_inner = [get_tuple(field, available_inner) for field in sorted(clean_inner)]
-
- data_inner = [[distributions.get(m_o + mask, 0) for mask in masks_inner] for m_o in masks_outer]
-
- masks_mult_inner = []
- masks_none_inner = []
-
- if "Multiple" in extra_inner:
- masks_mult_inner = [el for el in itertools.product((True, False), repeat=len(available_inner)) if el.count(True) > 1]
- masks_mult = [m_o + m_i for m_i in masks_mult_inner for m_o in masks_outer]
- mult_inner_count = [distributions.get(mask, pd.Series(dtype=float)).sum() for mask in masks_mult]
- data_inner = [di + [mult_inner_count[idx]] for idx, di in enumerate(data_inner)]
-
- if "None" in extra_inner:
- masks_none_inner = tuple(False for x in available_inner)
- masks_none = [m_o + masks_none_inner for m_o in masks_outer]
- none_inner_count = [distributions.get(mask, pd.Series(dtype=float)).sum() for mask in masks_none]
- data_inner = [di + [none_inner_count[idx]] for idx, di in enumerate(data_inner)]
- if len(available_inner) > 0:
- masks_none_inner = [masks_none_inner]
-
- if "Multiple" in extra_outer:
- masks_mult = [el for el in itertools.product((True, False), repeat=len(available_outer)) if el.count(True) > 1]
- data_inner = data_inner + [[
- sum([distributions.get(mask + mask_inner, pd.Series(dtype=float)).sum() for mask in masks_mult])
- for mask_inner in (masks_inner + masks_mult_inner + masks_none_inner)
- ]]
-
- if "None" in extra_outer:
- none_mask_outer = tuple(False for x in available_outer)
- data_inner = data_inner + [[distributions.get(none_mask_outer + mask, pd.Series(dtype=float)).sum() for mask in (masks_inner + masks_mult_inner + masks_none_inner)]]
-
- fig, ax = plt.subplots()
-
- title = "Distribution "
- size = 0.3
-
- cmap = plt.colormaps["Set3"]
- cmap2 = plt.colormaps["Set2"]
- outer_colors = cmap(np.arange(len(data_inner)))
- inner_colors = cmap2(np.arange(len(data_inner[0])))
-
- total_sum = sum(sum(data_inner, []))
- data_outer = [sum(x) for x in data_inner]
- all_fields_outer = sorted(clean_outer) + sorted(extra_outer)
-
- clean_labels_outer = [f"{feature_names.get(c, c)}\n{round(data_outer[i] / total_sum * 100, 2)}%" for i, c in enumerate(all_fields_outer)]
- clean_labels_inner = [feature_names[c] for c in sorted(clean_inner)]
-
- ax.pie(data_outer, radius=1, labels=clean_labels_outer, colors=outer_colors,
- wedgeprops=dict(width=size, edgecolor='w'))
-
- patches, _ = ax.pie(list(itertools.chain(*data_inner)), radius=1 - size, colors=inner_colors,
- wedgeprops=dict(width=size, edgecolor='w'))
-
- ax.set(aspect="equal", title=title)
- fig.legend(handles=patches, labels=clean_labels_inner + sorted(extra_inner), loc="lower left")
-
- if plot is None:
- return gr.Plot(plt)
- else:
- new_plot = plot.update(plt)
- return new_plot
-
-
-def select_new_base_plot(plot, disagg_check, disagg_by, artifacts):
- if disagg_by == "Both":
- disaggs = sorted(list(artifacts["disaggregators"]))
-
- all_choices = sorted([[x for x in artifacts["data_fields"] if x.startswith(d)] for d in disaggs], key=len, reverse=True)
-
- selected_choices = list(artifacts["data_fields"])
- choices = selected_choices + [f"{disagg}.{extra}" for disagg in disaggs for extra in ["Multiple", "None"]]
-
- # Map feature names to labels
- choices = [artifacts["feature_names"].get(x, x) for x in choices]
- selected_choices = [artifacts["feature_names"].get(x, x) for x in selected_choices]
-
- # Choose new options
- new_check = disagg_check.update(choices=sorted(choices), value=selected_choices)
-
- # Generate plot
- new_plot = create_nested_plot(
- all_choices[0], all_choices[0],
- all_choices[1], all_choices[1],
- artifacts["distributions"],
- artifacts["feature_names"],
- plot=plot
- )
-
- return new_plot, new_check
-
- else:
- selected_choices = [field for field in artifacts["data_fields"] if field.startswith(disagg_by)]
- choices = selected_choices + ["Multiple", "None"]
-
- # Map feature names to labels
- choices_for_check = [artifacts["feature_names"].get(x, x) for x in choices]
- selected_choices_for_check = [artifacts["feature_names"].get(x, x) for x in selected_choices]
-
- # Choose new options
- new_check = disagg_check.update(choices=choices_for_check, value=selected_choices_for_check)
-
- # Generate plot
- new_plot = create_plot(
- sorted(selected_choices), sorted(selected_choices), artifacts["distributions"], artifacts["feature_names"],
- plot=plot
- )
-
- return new_plot, new_check
-
-
-def select_new_sub_plot(plot, disagg_check, disagg_by, artifacts):
- if disagg_by == "Both":
- disaggs = sorted(list(artifacts["disaggregators"]))
-
- all_choices = sorted([[x for x in artifacts["data_fields"] if x.startswith(d)] for d in disaggs], key=len, reverse=True)
-
- choice1 = all_choices[0][0].split(".")[0]
- choice2 = all_choices[1][0].split(".")[0]
-
- check1 = [dc for dc in disagg_check if dc.startswith(choice1)]
- check2 = [dc for dc in disagg_check if dc.startswith(choice2)]
-
- check1 = ["Multiple" if c == f"{c.split('.')[0]}.Multiple" else c for c in check1]
- check1 = ["None" if c == f"{c.split('.')[0]}.None" else c for c in check1]
- check2 = ["Multiple" if c == f"{c.split('.')[0]}.Multiple" else c for c in check2]
- check2 = ["None" if c == f"{c.split('.')[0]}.None" else c for c in check2]
-
- new_plot = create_nested_plot(
- check1, all_choices[0],
- check2, all_choices[1],
- artifacts["distributions"],
- artifacts["feature_names"],
- plot=plot
- )
-
- return new_plot
- else:
- selected_choices = [field for field in artifacts["data_fields"] if field.startswith(disagg_by)]
-
- # Generate plot
- new_plot = create_plot(
- disagg_check, selected_choices, artifacts["distributions"], artifacts["feature_names"],
- plot=plot
- )
-
- return new_plot
-
-
-def visualization_filter(plot, artifacts, default_value, intersect=False):
- def map_labels_to_fields(labels: List[str]):
- return [list(artifacts["feature_names"].keys())[list(artifacts["feature_names"].values()).index(x)] if not any([extra in x for extra in ["Multiple", "None"]]) else x for x in labels]
-
- def map_category_to_disaggregator(category: str): # e.g. Gender, Age, Gender + Age -> gender, age, Both
- return list(artifacts["feature_names"].keys())[list(artifacts["feature_names"].values()).index(category)]
-
- choices = sorted(list(artifacts["disaggregators"]))
- if intersect:
- choices = choices + ["Both"]
-
- # Map categories to nice names
- choices = [artifacts["feature_names"][c] for c in choices]
-
- disagg_radio = gr.Radio(
- label="Disaggregate by...",
- choices=choices,
- value=artifacts["feature_names"][default_value],
- interactive=True
- )
-
- selected_choices = [field for field in artifacts["data_fields"] if field.startswith(f"{default_value}.")]
- choices = selected_choices + ["Multiple", "None"]
-
- # Map feature names to labels
- choices = [artifacts["feature_names"].get(x, x) for x in choices]
- selected_choices = [artifacts["feature_names"].get(x, x) for x in selected_choices]
-
- disagg_check = gr.CheckboxGroup(
- label="Features",
- choices=choices,
- interactive=True,
- value=selected_choices,
- )
-
- disagg_radio.change(
- lambda x: select_new_base_plot(plot, disagg_check, map_category_to_disaggregator(x), artifacts),
- inputs=[disagg_radio],
- outputs=[plot, disagg_check]
- )
-
- disagg_check.change(
- lambda x, y: select_new_sub_plot(plot, map_labels_to_fields(x), map_category_to_disaggregator(y), artifacts),
- inputs=[disagg_check, disagg_radio],
- outputs=[plot]
- )
-
-
-def generate_components(dataset, artifacts, intersect=True):
- gr.Markdown(f"### {artifacts['text']['title']}")
- gr.Markdown(artifacts['text']['description'])
-
- with gr.Accordion(label="💻 Click me to see the code!", open=False):
- gr.Markdown(artifacts["text"]["code"])
-
- with gr.Tab("Visualize"):
- with gr.Row(elem_id="visualization-window"):
- with gr.Column():
- disagg_by = sorted(list(artifacts["disaggregators"]))[0]
- selected_choices = [field for field in artifacts["data_fields"] if field.startswith(disagg_by)]
- plot = create_plot(
- sorted(selected_choices),
- sorted(selected_choices),
- artifacts["distributions"],
- artifacts["feature_names"]
- )
-
- with gr.Column():
- gr.Markdown("### Visualization")
- gr.Markdown(artifacts["text"]["visualization"])
- visualization_filter(plot, artifacts, disagg_by, intersect=intersect)
- with gr.Tab("Inspect"):
- with gr.Row():
- with gr.Column(scale=1):
- gr.Markdown("### Data Inspector")
- gr.Markdown("This tab lets you filter the disaggregated dataset and inspect individual elements. Set as many filters as you like, and then click \"Apply filters\" to fetch a random subset of rows that match *all* of the filters you've selected.")
-
- filter_groups = gr.CheckboxGroup(choices=sorted(list(artifacts["data_fields"])), label="Filters")
- fetch_subset = gr.Button("Apply filters")
-
- sample_dataframe = gr.State(value=dataset.sample(10))
-
- def fetch_new_samples(filters):
- if len(filters) == 0:
- new_dataset = dataset.sample(10)
- else:
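-                        # backtick-quote the column names so pandas .query() accepts labels containing dots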
- filter_query = " & ".join([f"`{f}`" for f in filters])
- new_dataset = dataset.query(filter_query)
-                        if new_dataset.shape[0] > 0:
-                            new_dataset = new_dataset.sample(min(10, new_dataset.shape[0]))
-
- new_samples = [[
- x[1][artifacts["column"]],
- ", ".join([col for col in artifacts["data_fields"] if x[1][col]]),
- ] for x in new_dataset.iterrows()]
- return sample_rows.update(samples=new_samples), new_dataset
-
- sample_rows = gr.Dataset(
- samples=[[
- x[1][artifacts["column"]],
- ", ".join([col for col in artifacts["data_fields"] if x[1][col]]),
- ] for x in sample_dataframe.value.iterrows()],
- components=[gr.Textbox(visible=False), gr.Textbox(visible=False)],
- type="index"
- )
- with gr.Column(scale=1):
- row_inspector = gr.DataFrame(
- wrap=True,
- visible=False
- )
-
- fetch_subset.click(
- fetch_new_samples,
- inputs=[filter_groups],
- outputs=[sample_rows, sample_dataframe],
- )
-
- sample_rows.click(
- lambda df, index: row_inspector.update(visible=True, value=df.iloc[index].reset_index()),
- inputs=[sample_dataframe, sample_rows],
- outputs=[row_inspector]
- )
-
-
-with gr.Blocks(css="#visualization-window {flex-direction: row-reverse;}") as demo:
- gr.Markdown("# Exploring Disaggregated Data with 🤗 Disaggregators")
- with gr.Accordion("About this demo 👀"):
- gr.Markdown("## What's in your dataset?")
- gr.Markdown("""
- Addressing fairness and bias in machine learning models is [more important than ever](https://www.vice.com/en/article/bvm35w/this-tool-lets-anyone-see-the-bias-in-ai-image-generators)!
- One form of fairness is equal performance across different groups or features.
- To measure this, evaluation datasets must be disaggregated across the different groups of interest.
- """)
-
- gr.Markdown("The `disaggregators` library ([GitHub](https://github.com/huggingface/disaggregators)) provides an interface and a collection of modules to help you disaggregate datasets by different groups. Click through each of the tabs below to see it in action!")
- gr.Markdown("""
- After tinkering with the demo, you can install 🤗 Disaggregators with:
- ```bash
- pip install disaggregators
- ```
- """)
- gr.Markdown("Each tab below will show you a feature of `disaggregators` used on a different dataset. First, you'll learn about using the built-in disaggregation modules. The second tab will show you how to override the configurations for the existing modules. Finally, the third tab will show you how to incorporate your own custom modules.")
-
- with gr.Tab("🐊 LAION: Built-in Modules Example"):
- generate_components(laion, cached_artifacts["laion"], intersect=False)
- with gr.Tab("🔧 MedMCQA: Configuration Example"):
- generate_components(medmcqa, cached_artifacts["medmcqa"])
- with gr.Tab("🎡 The Stack: Custom Disaggregation Example"):
- generate_components(stack, cached_artifacts["stack"])
-
- with gr.Accordion(label="💡How is this calculated?", open=False):
- gr.Markdown("""
- ## Continent
-
- Continents are inferred by identifying geographic terms and their related countries using [geograpy3](https://github.com/somnathrakshit/geograpy3). The results are then mapped to [their respective continents](https://github.com/bigscience-workshop/data_sourcing/blob/master/sourcing_sprint/resources/country_regions.json).
-
- ## Age
-
-    Ages are inferred by using [spaCy](https://spacy.io) to detect "date" tokens in strings.
-
- ## Gender
-
-    Binary gender is inferred by checking words against the [md_gender_bias](https://huggingface.co/datasets/md_gender_bias) dataset.
-
- ```
- @inproceedings{dinan-etal-2020-multi,
- title = "Multi-Dimensional Gender Bias Classification",
- author = "Dinan, Emily and
- Fan, Angela and
- Wu, Ledell and
- Weston, Jason and
- Kiela, Douwe and
- Williams, Adina",
- year = "2020",
- publisher = "Association for Computational Linguistics",
- url = "https://www.aclweb.org/anthology/2020.emnlp-main.23",
- doi = "10.18653/v1/2020.emnlp-main.23",
-    }
-    ```
-
- ## Learn more!
-
- Visit the [GitHub repository](https://github.com/huggingface/disaggregators) to learn about using the `disaggregators` library and to leave feedback 🤗
- """)
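For the continent module, the final mapping step described above is essentially a table lookup. The sketch below is illustrative only and is not the library's implementation; the country names and table are hypothetical stand-ins for the linked `country_regions.json`.

```python
# Illustrative only: map a detected country to its continent via a lookup table.
COUNTRY_TO_CONTINENT = {"France": "Europe", "Kenya": "Africa", "Japan": "Asia"}

def continent_of(country: str) -> str:
    return COUNTRY_TO_CONTINENT.get(country, "None")

print(continent_of("Kenya"))  # Africa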
-
-
-demo.launch()
diff --git a/spaces/srikanth-nm/ai_seeker/similarity.py b/spaces/srikanth-nm/ai_seeker/similarity.py
deleted file mode 100644
index a134ca35b16d1d9f12ee32bba4e6399cddf2396d..0000000000000000000000000000000000000000
--- a/spaces/srikanth-nm/ai_seeker/similarity.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from sentence_transformers import SentenceTransformer, util
-import json
-import numpy as np
-model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
-
-def similarity(strQuery):
-
-    with open('chunks.json', 'r') as f:
-        inputs = json.load(f)
-    lstCorpus = [dct['text'] for dct in inputs]
-
-    qryEmbedding = model.encode(strQuery, convert_to_tensor=True)
-    corpusEmbedding = model.encode(lstCorpus, convert_to_tensor=True)
-
-    sim_mat = util.pytorch_cos_sim(qryEmbedding, corpusEmbedding)
-    lstSim = sim_mat[0].tolist()
-    npSim = np.array(lstSim)
-    indexMax = npSim.argmax()  # index of the best-matching chunk
-    scoreMax = npSim.max()  # similarity score of the best match (not returned)
-
-    return (inputs[indexMax]['start'], inputs[indexMax]['end'])
-
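A minimal usage sketch, assuming `chunks.json` has been written elsewhere in the app with one `{"text": ..., "start": ..., "end": ...}` object per chunk; the query string is made up for illustration.

```python
# Returns the (start, end) span of the chunk most similar to the query.
start, end = similarity("Where is the warranty clause mentioned?")
print(f"Best-matching chunk spans {start} to {end}")
```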
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py
deleted file mode 100644
index 02be0e7fb4213b98798c85b79e9046e9990b97fc..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-from dataclasses import dataclass, field
-from typing import List, Optional, Tuple
-
-import torch
-from fairseq import utils
-from fairseq.data import (
- Dictionary,
- TokenBlockDataset,
- data_utils,
- iterators,
-)
-from fairseq.dataclass import FairseqDataclass
-from fairseq.distributed import utils as dist_utils
-from fairseq.tasks import FairseqTask, register_task
-from omegaconf import II
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class TruncatedBPTTLMConfig(FairseqDataclass):
- data: str = field(default="???", metadata={"help": "path to data directory"})
- tokens_per_sample: int = field(
- default=1024,
- metadata={"help": "max number of tokens per sequence"},
- )
- batch_size: int = II("dataset.batch_size")
- # Some models use *max_target_positions* to know how many positional
- # embeddings to learn. We use II(...) to make it default to
- # *tokens_per_sample*, but in principle there could be more positional
- # embeddings than tokens in a single batch. This may also be irrelevant for
- # custom model implementations.
- max_target_positions: int = II("task.tokens_per_sample")
- # these will be populated automatically if not provided
- data_parallel_rank: Optional[int] = None
- data_parallel_size: Optional[int] = None
-
-
-@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
-class TruncatedBPTTLMTask(FairseqTask):
- def __init__(self, cfg: TruncatedBPTTLMConfig):
- super().__init__(cfg)
-
- if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
- if torch.distributed.is_initialized():
- cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
- cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
- else:
- cfg.data_parallel_rank = 0
- cfg.data_parallel_size = 1
-
- # load the dictionary
- paths = utils.split_paths(cfg.data)
- assert len(paths) > 0
- self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
- logger.info("dictionary: {} types".format(len(self.dictionary)))
-
- def load_dataset(self, split, epoch=1, combine=False, **kwargs):
- """Load a given dataset split (e.g., train, valid, test)"""
-
- # support sharded datasets
- paths = utils.split_paths(self.cfg.data)
- assert len(paths) > 0
- data_path = paths[(epoch - 1) % len(paths)]
- split_path = os.path.join(data_path, split)
-
- # each element of *data* will be a tensorized line from the original
- # text dataset, similar to ``open(split_path).readlines()``
- data = data_utils.load_indexed_dataset(
- split_path, self.dictionary, combine=combine
- )
- if data is None:
- raise FileNotFoundError(
- "Dataset not found: {} ({})".format(split, split_path)
- )
-
- # this is similar to ``data.view(-1).split(tokens_per_sample)``
- data = TokenBlockDataset(
- data,
- data.sizes,
- block_size=self.cfg.tokens_per_sample,
- pad=None, # unused
- eos=None, # unused
- break_mode="none",
- )
-
- self.datasets[split] = TruncatedBPTTDataset(
- data=data,
- bsz_per_shard=self.cfg.batch_size,
- shard_id=self.cfg.data_parallel_rank,
- num_shards=self.cfg.data_parallel_size,
- )
-
- def dataset(self, split):
- return self.datasets[split]
-
- def get_batch_iterator(
- self, dataset, num_workers=0, epoch=1, data_buffer_size=0, **kwargs
- ):
- return iterators.EpochBatchIterator(
- dataset=dataset,
- collate_fn=self._collate_fn,
- num_workers=num_workers,
- epoch=epoch,
- buffer_size=data_buffer_size,
- # we don't use the batching functionality from EpochBatchIterator;
- # instead every item in *dataset* is a whole batch
- batch_sampler=[[i] for i in range(len(dataset))],
- disable_shuffling=True,
- )
-
- def _collate_fn(self, items: List[List[torch.Tensor]]):
-        # we don't use fairseq's batching functionality, so *items* is expected to
-        # hold a single (id, List[torch.Tensor]) batch produced by TruncatedBPTTDataset
- assert len(items) == 1
-
- # item will have shape B x T (the last batch may have length < T)
- id, item = items[0]
- item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
- B, T = item.size()
-
- # shift item one position over and append a padding token for the target
- target = torch.nn.functional.pad(
- item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
- )
-
- # fairseq expects batches to have the following structure
- return {
- "id": torch.tensor([id]*item.size(0)),
- "net_input": {
- "src_tokens": item,
- },
- "target": target,
- "nsentences": item.size(0),
- "ntokens": item.numel(),
- }
-
- def build_dataset_for_inference(
- self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
- ) -> torch.utils.data.Dataset:
- eos = self.source_dictionary.eos()
- dataset = TokenBlockDataset(
- src_tokens,
- src_lengths,
- block_size=None, # ignored for "eos" break mode
- pad=self.source_dictionary.pad(),
- eos=eos,
- break_mode="eos",
- )
-
- class Dataset(torch.utils.data.Dataset):
- def __getitem__(self, i):
- item = dataset[i]
- if item[-1] == eos:
- # remove eos to support generating with a prefix
- item = item[:-1]
- return (i, [item])
-
- def __len__(self):
- return len(dataset)
-
- return Dataset()
-
- def inference_step(
- self, generator, models, sample, prefix_tokens=None, constraints=None
- ):
- with torch.no_grad():
- if constraints is not None:
- raise NotImplementedError
-
- # SequenceGenerator doesn't use *src_tokens* directly, we need to
- # pass the *prefix_tokens* argument instead.
- if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
- prefix_tokens = sample["net_input"]["src_tokens"]
-
- # begin generation with the end-of-sentence token
- bos_token = self.source_dictionary.eos()
-
- return generator.generate(
- models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
- )
-
- def eval_lm_dataloader(
- self,
- dataset,
- max_tokens: Optional[int] = 36000,
- batch_size: Optional[int] = None,
- max_positions: Optional[int] = None,
- num_shards: int = 1,
- shard_id: int = 0,
- num_workers: int = 1,
- data_buffer_size: int = 10,
- context_window: int = 0,
- ):
- if context_window > 0:
- raise NotImplementedError(
- "Transformer-XL doesn't need --context-window, try "
- "--model-overrides '{\"mem_len\":42}' instead "
- )
- return self.get_batch_iterator(
- dataset=dataset,
- max_tokens=max_tokens,
- max_sentences=batch_size,
- max_positions=max_positions,
- ignore_invalid_inputs=True,
- num_shards=num_shards,
- shard_id=shard_id,
- num_workers=num_workers,
- data_buffer_size=data_buffer_size,
- ).next_epoch_itr(shuffle=False)
-
- @property
- def source_dictionary(self):
- return self.dictionary
-
- @property
- def target_dictionary(self):
- return self.dictionary
-
-
-class TruncatedBPTTDataset(torch.utils.data.Dataset):
- def __init__(
- self,
- data: List[torch.Tensor], # ordered list of items
-        bsz_per_shard,  # number of items processed per GPU per forward pass
- shard_id, # current GPU ID
- num_shards, # number of GPUs
- ):
- super().__init__()
- self.data = data
-
- def batchify(data, bsz):
- # Work out how cleanly we can divide the dataset into bsz parts.
- nbatch = data.size(0) // bsz
- # Trim off any extra elements that wouldn't cleanly fit (remainders).
- data = data.narrow(0, 0, nbatch * bsz)
- # Evenly divide the data across the bsz batches.
- data = data.view(bsz, -1).contiguous()
- return data
-
- # total number of sequences processed by all GPUs in each forward pass
- global_batch_size = bsz_per_shard * num_shards
-
- """
- With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
- *indices* might look like:
-
- indices = [[0, 1],
- [2, 3],
- [4, 5],
- [6, 7],
- [8, 9],
- [10, 11]]
-
- The size of the TruncatedBPTTDataset instance will be 2,
- and shard 1 will see items:
-
- [(0, [data[4], data[6]]),
- (1, [data[5], data[7]])]
- """
- indices = batchify(torch.arange(len(data)), global_batch_size)
- assert indices.size(0) == global_batch_size
-
- self.my_indices = indices[
- shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
- ]
- assert self.my_indices.size(0) == bsz_per_shard
-
- def __len__(self):
- return self.my_indices.size(1)
-
- def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
- return (i, [self.data[idx] for idx in self.my_indices[:, i]])
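A standalone sketch (illustration only) of the batchify-and-shard layout that the docstring above describes, using the same 16-item, `bsz_per_shard=2`, `num_shards=3` setup.

```python
import torch

items = torch.arange(16)
global_batch_size = 2 * 3                    # bsz_per_shard * num_shards
nbatch = items.size(0) // global_batch_size  # 2 full batches
indices = items.narrow(0, 0, nbatch * global_batch_size).view(global_batch_size, -1)
# indices == [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]

shard_id, bsz_per_shard = 1, 2
my_indices = indices[shard_id * bsz_per_shard:(shard_id + 1) * bsz_per_shard]
print(my_indices[:, 0].tolist(), my_indices[:, 1].tolist())  # [4, 6] [5, 7]
```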
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/iterative_refinement_generator.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/iterative_refinement_generator.py
deleted file mode 100644
index 4fb0946f499329ceb130761b59675d761df1c158..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/iterative_refinement_generator.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import namedtuple
-
-import numpy as np
-import torch
-from fairseq import utils
-
-
-DecoderOut = namedtuple(
- "IterativeRefinementDecoderOut",
- ["output_tokens", "output_scores", "attn", "step", "max_step", "history"],
-)
-
-
-class IterativeRefinementGenerator(object):
- def __init__(
- self,
- tgt_dict,
- models=None,
- eos_penalty=0.0,
- max_iter=10,
- max_ratio=2,
- beam_size=1,
- decoding_format=None,
- retain_dropout=False,
- adaptive=True,
- retain_history=False,
- reranking=False,
- ):
- """
- Generates translations based on iterative refinement.
-
- Args:
- tgt_dict: target dictionary
-            eos_penalty: if > 0.0, it penalizes early stopping in decoding
- max_iter: maximum number of refinement iterations
- max_ratio: generate sequences of maximum length ax, where x is the source length
- decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
-            retain_dropout: retain dropout during inference
-            adaptive: decode with early stopping
- """
- self.bos = tgt_dict.bos()
- self.pad = tgt_dict.pad()
- self.unk = tgt_dict.unk()
- self.eos = tgt_dict.eos()
- self.vocab_size = len(tgt_dict)
- self.eos_penalty = eos_penalty
- self.max_iter = max_iter
- self.max_ratio = max_ratio
- self.beam_size = beam_size
- self.reranking = reranking
- self.decoding_format = decoding_format
- self.retain_dropout = retain_dropout
- self.retain_history = retain_history
- self.adaptive = adaptive
- self.models = models
-
- def generate_batched_itr(
- self,
- data_itr,
- maxlen_a=None,
- maxlen_b=None,
- cuda=False,
- timer=None,
- prefix_size=0,
- ):
- """Iterate over a batched dataset and yield individual translations.
-
- Args:
- maxlen_a/b: generate sequences of maximum length ax + b,
- where x is the source sentence length.
- cuda: use GPU for generation
- timer: StopwatchMeter for timing generations.
- """
-
- for sample in data_itr:
- if "net_input" not in sample:
- continue
- if timer is not None:
- timer.start()
- with torch.no_grad():
- hypos = self.generate(
- self.models,
- sample,
- prefix_tokens=sample["target"][:, :prefix_size]
- if prefix_size > 0
- else None,
- )
- if timer is not None:
- timer.stop(sample["ntokens"])
- for i, id in enumerate(sample["id"]):
- # remove padding
- src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
- ref = utils.strip_pad(sample["target"][i, :], self.pad)
- yield id, src, ref, hypos[i]
-
- @torch.no_grad()
- def generate(self, models, sample, prefix_tokens=None, constraints=None):
- if constraints is not None:
- raise NotImplementedError(
- "Constrained decoding with the IterativeRefinementGenerator is not supported"
- )
-
- # TODO: iterative refinement generator does not support ensemble for now.
- if not self.retain_dropout:
- for model in models:
- model.eval()
-
- model, reranker = models[0], None
- if self.reranking:
- assert len(models) > 1, "Assuming the last checkpoint is the reranker"
- assert (
- self.beam_size > 1
-            ), "Reranking requires multiple translations for each example"
-
- reranker = models[-1]
- models = models[:-1]
-
- if len(models) > 1 and hasattr(model, "enable_ensemble"):
- assert model.allow_ensemble, "{} does not support ensembling".format(
- model.__class__.__name__
- )
- model.enable_ensemble(models)
-
- # TODO: better encoder inputs?
- src_tokens = sample["net_input"]["src_tokens"]
- src_lengths = sample["net_input"]["src_lengths"]
- bsz, src_len = src_tokens.size()
-
- # initialize
- encoder_out = model.forward_encoder([src_tokens, src_lengths])
- prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
-
- if self.beam_size > 1:
- assert (
- model.allow_length_beam
- ), "{} does not support decoding with length beam.".format(
- model.__class__.__name__
- )
-
- # regenerate data based on length-beam
- length_beam_order = (
- utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
- )
- encoder_out = model.encoder.reorder_encoder_out(
- encoder_out, length_beam_order
- )
- prev_decoder_out = model.regenerate_length_beam(
- prev_decoder_out, self.beam_size
- )
- bsz = bsz * self.beam_size
-
- sent_idxs = torch.arange(bsz)
- prev_output_tokens = prev_decoder_out.output_tokens.clone()
-
- if self.retain_history:
- prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
-
- finalized = [[] for _ in range(bsz)]
-
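-        # pad tokens/scores/attention to a common length, then flag hypotheses whose tokens did not change since the previous iteration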
- def is_a_loop(x, y, s, a):
- b, l_x, l_y = x.size(0), x.size(1), y.size(1)
- if l_x > l_y:
- y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
- s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
- if a is not None:
- a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
- elif l_x < l_y:
- x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
- return (x == y).all(1), y, s, a
-
- def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
- cutoff = prev_out_token.ne(self.pad)
- tokens = prev_out_token[cutoff]
- if prev_out_score is None:
- scores, score = None, None
- else:
- scores = prev_out_score[cutoff]
- score = scores.mean()
-
- if prev_out_attn is None:
- hypo_attn, alignment = None, None
- else:
- hypo_attn = prev_out_attn[cutoff]
- alignment = hypo_attn.max(dim=1)[1]
- return {
- "steps": step,
- "tokens": tokens,
- "positional_scores": scores,
- "score": score,
- "hypo_attn": hypo_attn,
- "alignment": alignment,
- }
-
- for step in range(self.max_iter + 1):
-
- decoder_options = {
- "eos_penalty": self.eos_penalty,
- "max_ratio": self.max_ratio,
- "decoding_format": self.decoding_format,
- }
- prev_decoder_out = prev_decoder_out._replace(
- step=step,
- max_step=self.max_iter + 1,
- )
-
- decoder_out = model.forward_decoder(
- prev_decoder_out, encoder_out, **decoder_options
- )
-
- if self.adaptive:
- # terminate if there is a loop
- terminated, out_tokens, out_scores, out_attn = is_a_loop(
- prev_output_tokens,
- decoder_out.output_tokens,
- decoder_out.output_scores,
- decoder_out.attn,
- )
- decoder_out = decoder_out._replace(
- output_tokens=out_tokens,
- output_scores=out_scores,
- attn=out_attn,
- )
-
- else:
- terminated = decoder_out.output_tokens.new_zeros(
- decoder_out.output_tokens.size(0)
- ).bool()
-
- if step == self.max_iter: # reach last iteration, terminate
- terminated.fill_(1)
-
- # collect finalized sentences
- finalized_idxs = sent_idxs[terminated]
- finalized_tokens = decoder_out.output_tokens[terminated]
- finalized_scores = decoder_out.output_scores[terminated]
- finalized_attn = (
- None
- if (decoder_out.attn is None or decoder_out.attn.size(0) == 0)
- else decoder_out.attn[terminated]
- )
-
- if self.retain_history:
- finalized_history_tokens = [h[terminated] for h in decoder_out.history]
-
- for i in range(finalized_idxs.size(0)):
- finalized[finalized_idxs[i]] = [
- finalized_hypos(
- step,
- finalized_tokens[i],
- finalized_scores[i],
- None if finalized_attn is None else finalized_attn[i],
- )
- ]
-
- if self.retain_history:
- finalized[finalized_idxs[i]][0]["history"] = []
- for j in range(len(finalized_history_tokens)):
- finalized[finalized_idxs[i]][0]["history"].append(
- finalized_hypos(
- step, finalized_history_tokens[j][i], None, None
- )
- )
-
- # check if all terminated
- if terminated.sum() == terminated.size(0):
- break
-
- # for next step
- not_terminated = ~terminated
- prev_decoder_out = decoder_out._replace(
- output_tokens=decoder_out.output_tokens[not_terminated],
- output_scores=decoder_out.output_scores[not_terminated],
- attn=decoder_out.attn[not_terminated]
- if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
- else None,
- history=[h[not_terminated] for h in decoder_out.history]
- if decoder_out.history is not None
- else None,
- )
- encoder_out = model.encoder.reorder_encoder_out(
- encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()
- )
- sent_idxs = sent_idxs[not_terminated]
- prev_output_tokens = prev_decoder_out.output_tokens.clone()
-
- if self.beam_size > 1:
- if reranker is not None:
- finalized = self.rerank(
- reranker, finalized, [src_tokens, src_lengths], self.beam_size
- )
-
- # aggregate information from length beam
- finalized = [
- finalized[
- np.argmax(
- [
- finalized[self.beam_size * i + j][0]["score"]
- for j in range(self.beam_size)
- ]
- )
- + self.beam_size * i
- ]
- for i in range(len(finalized) // self.beam_size)
- ]
-
- return finalized
-
- def rerank(self, reranker, finalized, encoder_input, beam_size):
- def rebuild_batch(finalized):
- finalized_tokens = [f[0]["tokens"] for f in finalized]
- finalized_maxlen = max(f.size(0) for f in finalized_tokens)
- final_output_tokens = (
- finalized_tokens[0]
- .new_zeros(len(finalized_tokens), finalized_maxlen)
- .fill_(self.pad)
- )
- for i, f in enumerate(finalized_tokens):
- final_output_tokens[i, : f.size(0)] = f
- return final_output_tokens
-
- final_output_tokens = rebuild_batch(finalized)
- final_output_tokens[
- :, 0
- ] = self.eos # autoregressive model assumes starting with EOS
-
- reranker_encoder_out = reranker.encoder(*encoder_input)
- length_beam_order = (
- utils.new_arange(
- final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
- )
- .t()
- .reshape(-1)
- )
- reranker_encoder_out = reranker.encoder.reorder_encoder_out(
- reranker_encoder_out, length_beam_order
- )
- reranking_scores = reranker.get_normalized_probs(
- reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
- True,
- None,
- )
- reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
- reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
- reranking_scores = (
- reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
- )
- reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
- reranking_scores
- )
-
- for i in range(len(finalized)):
- finalized[i][0]["score"] = reranking_scores[i]
-
- return finalized
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/logging/meters.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/logging/meters.py
deleted file mode 100644
index 2100b1fa0b2704b1c585f59e9349655bba0cc9e6..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/logging/meters.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import bisect
-import time
-from collections import OrderedDict
-from typing import Dict, Optional
-
-
-try:
- import torch
-
- def type_as(a, b):
- if torch.is_tensor(a) and torch.is_tensor(b):
- return a.to(b)
- else:
- return a
-
-
-except ImportError:
- torch = None
-
- def type_as(a, b):
- return a
-
-
-try:
- import numpy as np
-except ImportError:
- np = None
-
-
-class Meter(object):
- """Base class for Meters."""
-
- def __init__(self):
- pass
-
- def state_dict(self):
- return {}
-
- def load_state_dict(self, state_dict):
- pass
-
- def reset(self):
- raise NotImplementedError
-
- @property
- def smoothed_value(self) -> float:
- """Smoothed value used for logging."""
- raise NotImplementedError
-
-
-def safe_round(number, ndigits):
- if hasattr(number, "__round__"):
- return round(number, ndigits)
- elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
- return safe_round(number.item(), ndigits)
- elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
- return safe_round(number.item(), ndigits)
- else:
- return number
-
-
-class AverageMeter(Meter):
- """Computes and stores the average and current value"""
-
- def __init__(self, round: Optional[int] = None):
- self.round = round
- self.reset()
-
- def reset(self):
- self.val = None # most recent update
- self.sum = 0 # sum from all updates
- self.count = 0 # total n from all updates
-
- def update(self, val, n=1):
- if val is not None:
- self.val = val
- if n > 0:
- self.sum = type_as(self.sum, val) + (val * n)
- self.count = type_as(self.count, n) + n
-
- def state_dict(self):
- return {
- "val": self.val,
- "sum": self.sum,
- "count": self.count,
- "round": self.round,
- }
-
- def load_state_dict(self, state_dict):
- self.val = state_dict["val"]
- self.sum = state_dict["sum"]
- self.count = state_dict["count"]
- self.round = state_dict.get("round", None)
-
- @property
- def avg(self):
- return self.sum / self.count if self.count > 0 else self.val
-
- @property
- def smoothed_value(self) -> float:
- val = self.avg
- if self.round is not None and val is not None:
- val = safe_round(val, self.round)
- return val
-
-
-class SumMeter(Meter):
- """Computes and stores the sum"""
-
- def __init__(self, round: Optional[int] = None):
- self.round = round
- self.reset()
-
- def reset(self):
- self.sum = 0 # sum from all updates
-
- def update(self, val):
- if val is not None:
- self.sum = type_as(self.sum, val) + val
-
- def state_dict(self):
- return {
- "sum": self.sum,
- "round": self.round,
- }
-
- def load_state_dict(self, state_dict):
- self.sum = state_dict["sum"]
- self.round = state_dict.get("round", None)
-
- @property
- def smoothed_value(self) -> float:
- val = self.sum
- if self.round is not None and val is not None:
- val = safe_round(val, self.round)
- return val
-
-
-class TimeMeter(Meter):
- """Computes the average occurrence of some event per second"""
-
- def __init__(
- self,
- init: int = 0,
- n: int = 0,
- round: Optional[int] = None,
- ):
- self.round = round
- self.reset(init, n)
-
- def reset(self, init=0, n=0):
- self.init = init
- self.start = time.perf_counter()
- self.n = n
- self.i = 0
-
- def update(self, val=1):
- self.n = type_as(self.n, val) + val
- self.i += 1
-
- def state_dict(self):
- return {
- "init": self.elapsed_time,
- "n": self.n,
- "round": self.round,
- }
-
- def load_state_dict(self, state_dict):
- if "start" in state_dict:
- # backwards compatibility for old state_dicts
- self.reset(init=state_dict["init"])
- else:
- self.reset(init=state_dict["init"], n=state_dict["n"])
- self.round = state_dict.get("round", None)
-
- @property
- def avg(self):
- return self.n / self.elapsed_time
-
- @property
- def elapsed_time(self):
- return self.init + (time.perf_counter() - self.start)
-
- @property
- def smoothed_value(self) -> float:
- val = self.avg
- if self.round is not None and val is not None:
- val = safe_round(val, self.round)
- return val
-
-
-class StopwatchMeter(Meter):
- """Computes the sum/avg duration of some event in seconds"""
-
- def __init__(self, round: Optional[int] = None):
- self.round = round
- self.sum = 0
- self.n = 0
- self.start_time = None
-
- def start(self):
- self.start_time = time.perf_counter()
-
- def stop(self, n=1, prehook=None):
- if self.start_time is not None:
- if prehook is not None:
- prehook()
- delta = time.perf_counter() - self.start_time
- self.sum = self.sum + delta
- self.n = type_as(self.n, n) + n
-
- def reset(self):
- self.sum = 0 # cumulative time during which stopwatch was active
- self.n = 0 # total n across all start/stop
- self.start()
-
- def state_dict(self):
- return {
- "sum": self.sum,
- "n": self.n,
- "round": self.round,
- }
-
- def load_state_dict(self, state_dict):
- self.sum = state_dict["sum"]
- self.n = state_dict["n"]
- self.start_time = None
- self.round = state_dict.get("round", None)
-
- @property
- def avg(self):
- return self.sum / self.n if self.n > 0 else self.sum
-
- @property
- def elapsed_time(self):
- if self.start_time is None:
- return 0.0
- return time.perf_counter() - self.start_time
-
- @property
- def smoothed_value(self) -> float:
- val = self.avg if self.sum > 0 else self.elapsed_time
- if self.round is not None and val is not None:
- val = safe_round(val, self.round)
- return val
-
-
-class MetersDict(OrderedDict):
- """A sorted dictionary of :class:`Meters`.
-
- Meters are sorted according to a priority that is given when the
- meter is first added to the dictionary.
- """
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.priorities = []
-
- def __setitem__(self, key, value):
- assert key not in self, "MetersDict doesn't support reassignment"
- priority, value = value
- bisect.insort(self.priorities, (priority, len(self.priorities), key))
- super().__setitem__(key, value)
- for _, _, key in self.priorities: # reorder dict to match priorities
- self.move_to_end(key)
-
- def add_meter(self, key, meter, priority):
- self.__setitem__(key, (priority, meter))
-
- def state_dict(self):
- return [
- (pri, key, self[key].__class__.__name__, self[key].state_dict())
- for pri, _, key in self.priorities
- # can't serialize DerivedMeter instances
- if not isinstance(self[key], MetersDict._DerivedMeter)
- ]
-
- def load_state_dict(self, state_dict):
- self.clear()
- self.priorities.clear()
- for pri, key, meter_cls, meter_state in state_dict:
- meter = globals()[meter_cls]()
- meter.load_state_dict(meter_state)
- self.add_meter(key, meter, pri)
-
- def get_smoothed_value(self, key: str) -> float:
- """Get a single smoothed value."""
- meter = self[key]
- if isinstance(meter, MetersDict._DerivedMeter):
- return meter.fn(self)
- else:
- return meter.smoothed_value
-
- def get_smoothed_values(self) -> Dict[str, float]:
- """Get all smoothed values."""
- return OrderedDict(
- [
- (key, self.get_smoothed_value(key))
- for key in self.keys()
- if not key.startswith("_")
- ]
- )
-
- def reset(self):
- """Reset Meter instances."""
- for meter in self.values():
- if isinstance(meter, MetersDict._DerivedMeter):
- continue
- meter.reset()
-
- class _DerivedMeter(Meter):
- """A Meter whose values are derived from other Meters."""
-
- def __init__(self, fn):
- self.fn = fn
-
- def reset(self):
- pass
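A minimal usage sketch of the classes above (illustrative only; this is not how fairseq's `metrics` module wires these meters up internally).

```python
meters = MetersDict()
meters.add_meter("loss", AverageMeter(round=3), priority=0)
meters.add_meter("ntokens", SumMeter(), priority=10)

meters["loss"].update(2.5, n=4)   # mean loss 2.5 observed over 4 samples
meters["ntokens"].update(1024)

print(meters.get_smoothed_values())
# OrderedDict([('loss', 2.5), ('ntokens', 1024)])
```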
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/transformer/transformer_base.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/transformer/transformer_base.py
deleted file mode 100644
index b4d5604dbbae979b424650882d33b45ebab323e6..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/transformer/transformer_base.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Dict, List, Optional, Tuple
-
-import torch
-import torch.nn as nn
-from fairseq import utils
-from fairseq.dataclass.utils import gen_parser_from_dataclass
-from fairseq.distributed import fsdp_wrap
-from fairseq.models import FairseqEncoderDecoderModel
-from fairseq.models.transformer import (
- TransformerEncoderBase,
- TransformerDecoderBase,
- TransformerConfig,
-)
-from torch import Tensor
-
-
-class TransformerModelBase(FairseqEncoderDecoderModel):
- """
- Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
-    <https://arxiv.org/abs/1706.03762>`_.
-
- Args:
- encoder (TransformerEncoder): the encoder
- decoder (TransformerDecoder): the decoder
-
- The Transformer model provides the following named architectures and
- command-line arguments:
-
- .. argparse::
- :ref: fairseq.models.transformer_parser
- :prog:
- """
-
- def __init__(self, cfg, encoder, decoder):
- super().__init__(encoder, decoder)
- self.cfg = cfg
- self.supports_align_args = True
-
- @classmethod
- def add_args(cls, parser):
- """Add model-specific arguments to the parser."""
- # we want to build the args recursively in this case.
- gen_parser_from_dataclass(
- parser, TransformerConfig(), delete_default=False, with_prefix=""
- )
-
- @classmethod
- def build_model(cls, cfg, task):
- """Build a new model instance."""
-
- # -- TODO T96535332
- # bug caused by interaction between OmegaConf II and argparsing
- cfg.decoder.input_dim = int(cfg.decoder.input_dim)
- cfg.decoder.output_dim = int(cfg.decoder.output_dim)
- # --
-
- if cfg.encoder.layers_to_keep:
- cfg.encoder.layers = len(cfg.encoder.layers_to_keep.split(","))
- if cfg.decoder.layers_to_keep:
- cfg.decoder.layers = len(cfg.decoder.layers_to_keep.split(","))
-
- src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
-
- if cfg.share_all_embeddings:
- if src_dict != tgt_dict:
- raise ValueError("--share-all-embeddings requires a joined dictionary")
- if cfg.encoder.embed_dim != cfg.decoder.embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
- )
- if cfg.decoder.embed_path and (
- cfg.decoder.embed_path != cfg.encoder.embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- encoder_embed_tokens = cls.build_embedding(
- cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path
- )
- decoder_embed_tokens = encoder_embed_tokens
- cfg.share_decoder_input_output_embed = True
- else:
- encoder_embed_tokens = cls.build_embedding(
- cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path
- )
- decoder_embed_tokens = cls.build_embedding(
- cfg, tgt_dict, cfg.decoder.embed_dim, cfg.decoder.embed_path
- )
- if cfg.offload_activations:
- cfg.checkpoint_activations = True # offloading implies checkpointing
- encoder = cls.build_encoder(cfg, src_dict, encoder_embed_tokens)
- decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
- if not cfg.share_all_embeddings:
- # fsdp_wrap is a no-op when --ddp-backend != fully_sharded
- encoder = fsdp_wrap(encoder, min_num_params=cfg.min_params_to_wrap)
- decoder = fsdp_wrap(decoder, min_num_params=cfg.min_params_to_wrap)
- return cls(cfg, encoder, decoder)
-
- @classmethod
- def build_embedding(cls, cfg, dictionary, embed_dim, path=None):
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
-
- emb = Embedding(num_embeddings, embed_dim, padding_idx)
- # if provided, load from preloaded dictionaries
- if path:
- embed_dict = utils.parse_embedding(path)
- utils.load_embedding(embed_dict, dictionary, emb)
- return emb
-
- @classmethod
- def build_encoder(cls, cfg, src_dict, embed_tokens):
- return TransformerEncoderBase(cfg, src_dict, embed_tokens)
-
- @classmethod
- def build_decoder(cls, cfg, tgt_dict, embed_tokens):
- return TransformerDecoderBase(
- cfg,
- tgt_dict,
- embed_tokens,
- no_encoder_attn=cfg.no_cross_attention,
- )
-
- # TorchScript doesn't support optional arguments with variable length (**kwargs).
- # Current workaround is to add union of all arguments in child classes.
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens,
- return_all_hiddens: bool = True,
- features_only: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- """
- Run the forward pass for an encoder-decoder model.
-
- Copied from the base class, but without ``**kwargs``,
- which are not supported by TorchScript.
- """
- encoder_out = self.encoder(
- src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
- )
- decoder_out = self.decoder(
- prev_output_tokens,
- encoder_out=encoder_out,
- features_only=features_only,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- src_lengths=src_lengths,
- return_all_hiddens=return_all_hiddens,
- )
- return decoder_out
-
- # get_normalized_probs lives in FairseqModel, which is not scriptable, so we
- # override it here to call the scriptable helper from the base class.
- @torch.jit.export
- def get_normalized_probs(
- self,
- net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
- log_probs: bool,
- sample: Optional[Dict[str, Tensor]] = None,
- ):
- """Get normalized probabilities (or log probs) from a net's output."""
- return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
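
The kwargs-free `forward` signature and the `@torch.jit.export` marker above exist purely so the whole encoder-decoder can be compiled with TorchScript. A minimal, self-contained toy illustrating the same constraint (not fairseq code, just the pattern it follows):

```python
import torch
from torch import nn


class TinyEncDec(nn.Module):
    """Toy stand-in: a kwargs-free forward plus an explicitly exported helper,
    which is exactly what torch.jit.script requires of the model above."""

    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)

    def forward(self, x: torch.Tensor, lengths: torch.Tensor,
                return_all_hiddens: bool = True) -> torch.Tensor:
        # **kwargs would break scripting, so every argument is spelled out
        return self.proj(x)

    @torch.jit.export
    def get_normalized_probs(self, out: torch.Tensor, log_probs: bool) -> torch.Tensor:
        return out.log_softmax(-1) if log_probs else out.softmax(-1)


scripted = torch.jit.script(TinyEncDec())
y = scripted(torch.randn(2, 3, 8), torch.tensor([3, 3]))
print(scripted.get_normalized_probs(y, log_probs=True).shape)  # torch.Size([2, 3, 8])
```
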
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
- nn.init.constant_(m.weight[padding_idx], 0)
- return m
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/transformer_sentence_encoder.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/transformer_sentence_encoder.py
deleted file mode 100644
index d0540d69229fb994b9e573a5016c9f239b7929e2..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/transformer_sentence_encoder.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-from fairseq.modules import (
- FairseqDropout,
- LayerDropModuleList,
- LayerNorm,
- MultiheadAttention,
- PositionalEmbedding,
- TransformerSentenceEncoderLayer,
-)
-from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
-
-
-def init_bert_params(module):
- """
- Initialize the weights specific to the BERT Model.
- This overrides the default initializations depending on the specified arguments.
- 1. If normal_init_linear_weights is set then weights of linear
- layer will be initialized using the normal distribution and
- bias will be set to the specified value.
- 2. If normal_init_embed_weights is set then weights of embedding
- layer will be initialized using the normal distribution.
- 3. If normal_init_proj_weights is set then the in-projection
- weights of MultiheadAttention are initialized using
- the normal distribution (to be validated).
- """
-
- def normal_(data):
- # with FSDP, module params will be on CUDA, so we cast them back to CPU
- # so that the RNG is consistent with and without FSDP
- data.copy_(
- data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
- )
-
- if isinstance(module, nn.Linear):
- normal_(module.weight.data)
- if module.bias is not None:
- module.bias.data.zero_()
- if isinstance(module, nn.Embedding):
- normal_(module.weight.data)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
- if isinstance(module, MultiheadAttention):
- normal_(module.q_proj.weight.data)
- normal_(module.k_proj.weight.data)
- normal_(module.v_proj.weight.data)
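
`init_bert_params` is meant to be passed to `nn.Module.apply`, which walks every submodule; the encoder below does exactly that when `apply_bert_init` is set. A small self-contained sketch of the same call pattern, with a simplified initializer standing in for the fairseq one since `MultiheadAttention` is not imported here:

```python
import torch
from torch import nn


def tiny_bert_init(module):
    # Simplified stand-in for init_bert_params: N(0, 0.02) weights,
    # zeroed biases and padding rows; the MultiheadAttention branch is omitted.
    if isinstance(module, (nn.Linear, nn.Embedding)):
        nn.init.normal_(module.weight, mean=0.0, std=0.02)
    if isinstance(module, nn.Linear) and module.bias is not None:
        nn.init.zeros_(module.bias)
    if isinstance(module, nn.Embedding) and module.padding_idx is not None:
        module.weight.data[module.padding_idx].zero_()


model = nn.Sequential(nn.Embedding(100, 16, padding_idx=0), nn.Linear(16, 16))
model.apply(tiny_bert_init)  # same call pattern as self.apply(init_bert_params) below
print(float(model[1].weight.std()))  # roughly 0.02
```
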
-
-
-class TransformerSentenceEncoder(nn.Module):
- """
- Implementation for a Bi-directional Transformer based Sentence Encoder used
- in BERT/XLM style pre-trained models.
-
- This first computes the token embedding using the token embedding matrix,
- position embeddings (if specified) and segment embeddings
- (if specified). After applying the specified number of
- TransformerEncoderLayers, it outputs all the internal states of the
- encoder as well as the final representation associated with the first
- token (usually CLS token).
-
- Input:
- - tokens: B x T matrix representing sentences
- - segment_labels: B x T matrix representing segment label for tokens
-
- Output:
- - a tuple of the following:
- - a list of internal model states used to compute the
- predictions where each tensor has shape T x B x C
- - sentence representation associated with first input token
- in format B x C.
- """
-
- def __init__(
- self,
- padding_idx: int,
- vocab_size: int,
- num_encoder_layers: int = 6,
- embedding_dim: int = 768,
- ffn_embedding_dim: int = 3072,
- num_attention_heads: int = 8,
- dropout: float = 0.1,
- attention_dropout: float = 0.1,
- activation_dropout: float = 0.1,
- layerdrop: float = 0.0,
- max_seq_len: int = 256,
- num_segments: int = 2,
- use_position_embeddings: bool = True,
- offset_positions_by_padding: bool = True,
- encoder_normalize_before: bool = False,
- apply_bert_init: bool = False,
- activation_fn: str = "relu",
- learned_pos_embedding: bool = True,
- embed_scale: float = None,
- freeze_embeddings: bool = False,
- n_trans_layers_to_freeze: int = 0,
- export: bool = False,
- traceable: bool = False,
- q_noise: float = 0.0,
- qn_block_size: int = 8,
- ) -> None:
-
- super().__init__()
- self.padding_idx = padding_idx
- self.vocab_size = vocab_size
- self.dropout_module = FairseqDropout(
- dropout, module_name=self.__class__.__name__
- )
- self.layerdrop = layerdrop
- self.max_seq_len = max_seq_len
- self.embedding_dim = embedding_dim
- self.num_segments = num_segments
- self.use_position_embeddings = use_position_embeddings
- self.apply_bert_init = apply_bert_init
- self.learned_pos_embedding = learned_pos_embedding
- self.traceable = traceable
-
- self.embed_tokens = self.build_embedding(
- self.vocab_size, self.embedding_dim, self.padding_idx
- )
- self.embed_scale = embed_scale
-
- if q_noise > 0:
- self.quant_noise = apply_quant_noise_(
- nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
- q_noise,
- qn_block_size,
- )
- else:
- self.quant_noise = None
-
- self.segment_embeddings = (
- nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)
- if self.num_segments > 0
- else None
- )
-
- self.embed_positions = (
- PositionalEmbedding(
- self.max_seq_len,
- self.embedding_dim,
- padding_idx=(self.padding_idx if offset_positions_by_padding else None),
- learned=self.learned_pos_embedding,
- )
- if self.use_position_embeddings
- else None
- )
-
- if encoder_normalize_before:
- self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)
- else:
- self.emb_layer_norm = None
-
- if self.layerdrop > 0.0:
- self.layers = LayerDropModuleList(p=self.layerdrop)
- else:
- self.layers = nn.ModuleList([])
- self.layers.extend(
- [
- self.build_transformer_sentence_encoder_layer(
- embedding_dim=self.embedding_dim,
- ffn_embedding_dim=ffn_embedding_dim,
- num_attention_heads=num_attention_heads,
- dropout=self.dropout_module.p,
- attention_dropout=attention_dropout,
- activation_dropout=activation_dropout,
- activation_fn=activation_fn,
- export=export,
- q_noise=q_noise,
- qn_block_size=qn_block_size,
- )
- for _ in range(num_encoder_layers)
- ]
- )
-
- # Apply initialization of model params after building the model
- if self.apply_bert_init:
- self.apply(init_bert_params)
-
- def freeze_module_params(m):
- if m is not None:
- for p in m.parameters():
- p.requires_grad = False
-
- if freeze_embeddings:
- freeze_module_params(self.embed_tokens)
- freeze_module_params(self.segment_embeddings)
- freeze_module_params(self.embed_positions)
- freeze_module_params(self.emb_layer_norm)
-
- for layer in range(n_trans_layers_to_freeze):
- freeze_module_params(self.layers[layer])
-
- def build_embedding(self, vocab_size, embedding_dim, padding_idx):
- return nn.Embedding(vocab_size, embedding_dim, padding_idx)
-
- def build_transformer_sentence_encoder_layer(
- self,
- embedding_dim,
- ffn_embedding_dim,
- num_attention_heads,
- dropout,
- attention_dropout,
- activation_dropout,
- activation_fn,
- export,
- q_noise,
- qn_block_size,
- ):
- return TransformerSentenceEncoderLayer(
- embedding_dim=embedding_dim,
- ffn_embedding_dim=ffn_embedding_dim,
- num_attention_heads=num_attention_heads,
- dropout=dropout,
- attention_dropout=attention_dropout,
- activation_dropout=activation_dropout,
- activation_fn=activation_fn,
- export=export,
- q_noise=q_noise,
- qn_block_size=qn_block_size,
- )
-
- def forward(
- self,
- tokens: torch.Tensor,
- segment_labels: torch.Tensor = None,
- last_state_only: bool = False,
- positions: Optional[torch.Tensor] = None,
- token_embeddings: Optional[torch.Tensor] = None,
- attn_mask: Optional[torch.Tensor] = None,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- is_tpu = tokens.device.type == "xla"
-
- # compute padding mask. This is needed for multi-head attention
- padding_mask = tokens.eq(self.padding_idx)
- if not self.traceable and not is_tpu and not padding_mask.any():
- padding_mask = None
-
- if token_embeddings is not None:
- x = token_embeddings
- else:
- x = self.embed_tokens(tokens)
-
- if self.embed_scale is not None:
- x = x * self.embed_scale
-
- if self.embed_positions is not None:
- x = x + self.embed_positions(tokens, positions=positions)
-
- if self.segment_embeddings is not None and segment_labels is not None:
- x = x + self.segment_embeddings(segment_labels)
-
- if self.quant_noise is not None:
- x = self.quant_noise(x)
-
- if self.emb_layer_norm is not None:
- x = self.emb_layer_norm(x)
-
- x = self.dropout_module(x)
-
- # account for padding while computing the representation
- if padding_mask is not None:
- x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- inner_states = []
- if not last_state_only:
- inner_states.append(x)
-
- for layer in self.layers:
- x, _ = layer(x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask)
- if not last_state_only:
- inner_states.append(x)
-
- sentence_rep = x[0, :, :]
-
- if last_state_only:
- inner_states = [x]
-
- if self.traceable:
- return torch.stack(inner_states), sentence_rep
- else:
- return inner_states, sentence_rep
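
A usage sketch for the encoder above, assuming a fairseq installation that still exports `TransformerSentenceEncoder` from `fairseq.modules` (the hyperparameters below are arbitrary small values, not the BERT defaults):

```python
import torch
from fairseq.modules import TransformerSentenceEncoder  # assumes fairseq is installed

encoder = TransformerSentenceEncoder(
    padding_idx=0,
    vocab_size=1000,
    num_encoder_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=256,
    num_attention_heads=4,
)

tokens = torch.randint(1, 1000, (8, 32))  # B x T
tokens[:, -4:] = 0                        # right-pad the last four positions

inner_states, sentence_rep = encoder(tokens)
print(len(inner_states), inner_states[-1].shape)  # num_layers + 1 states, each T x B x C
print(sentence_rep.shape)                         # B x C, representation of the first token
```
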
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/translation_from_pretrained_bart.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/translation_from_pretrained_bart.py
deleted file mode 100644
index 0fd7a5b29f0e34699b5d5ef7574bc39b8c6052c9..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/translation_from_pretrained_bart.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-from fairseq import utils
-from fairseq.data import LanguagePairDataset
-
-from . import register_task
-from .translation import TranslationTask, load_langpair_dataset
-
-
-@register_task("translation_from_pretrained_bart")
-class TranslationFromPretrainedBARTTask(TranslationTask):
- """
- Translate from source language to target language with a model initialized from a multilingual pretrained model (e.g. mBART).
-
- Args:
- src_dict (~fairseq.data.Dictionary): dictionary for the source language
- tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
-
- .. note::
-
- The translation task is compatible with :mod:`fairseq-train`,
- :mod:`fairseq-generate` and :mod:`fairseq-interactive`.
-
- The translation task provides the following additional command-line
- arguments:
-
- .. argparse::
- :ref: fairseq.tasks.translation_parser
- :prog:
- """
-
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- # fmt: off
- TranslationTask.add_args(parser)
- parser.add_argument('--langs', type=str, metavar='LANG',
- help='comma-separated list of monolingual languages, '
- 'for example, "en,de,fr". These should match the '
- 'langs from pretraining (and be in the same order). '
- 'You should always include all pretraining language '
- 'indices during fine-tuning.')
- parser.add_argument('--prepend-bos', action='store_true',
- help='prepend bos token to each sentence, which matches '
- 'mBART pretraining')
- # fmt: on
-
- def __init__(self, args, src_dict, tgt_dict):
- super().__init__(args, src_dict, tgt_dict)
- self.langs = args.langs.split(",")
- for d in [src_dict, tgt_dict]:
- for l in self.langs:
- d.add_symbol("[{}]".format(l))
- d.add_symbol("<mask>")
-
- def load_dataset(self, split, epoch=1, combine=False, **kwargs):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- paths = utils.split_paths(self.args.data)
- assert len(paths) > 0
- data_path = paths[(epoch - 1) % len(paths)]
-
- # infer langcode
- src, tgt = self.args.source_lang, self.args.target_lang
-
- self.datasets[split] = load_langpair_dataset(
- data_path,
- split,
- src,
- self.src_dict,
- tgt,
- self.tgt_dict,
- combine=combine,
- dataset_impl=self.args.dataset_impl,
- upsample_primary=self.args.upsample_primary,
- left_pad_source=self.args.left_pad_source,
- left_pad_target=self.args.left_pad_target,
- max_source_positions=getattr(self.args, "max_source_positions", 1024),
- max_target_positions=getattr(self.args, "max_target_positions", 1024),
- load_alignments=self.args.load_alignments,
- prepend_bos=getattr(self.args, "prepend_bos", False),
- append_source_id=True,
- )
-
- def build_generator(self, models, args, **unused):
- if getattr(args, "score_reference", False):
- from fairseq.sequence_scorer import SequenceScorer
-
- return SequenceScorer(
- self.target_dictionary,
- eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
- )
- else:
- from fairseq.sequence_generator import SequenceGenerator
-
- return SequenceGenerator(
- models,
- self.target_dictionary,
- beam_size=getattr(args, "beam", 5),
- max_len_a=getattr(args, "max_len_a", 0),
- max_len_b=getattr(args, "max_len_b", 200),
- min_len=getattr(args, "min_len", 1),
- normalize_scores=(not getattr(args, "unnormalized", False)),
- len_penalty=getattr(args, "lenpen", 1),
- unk_penalty=getattr(args, "unkpen", 0),
- temperature=getattr(args, "temperature", 1.0),
- match_source_len=getattr(args, "match_source_len", False),
- no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
- eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
- )
-
- def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
- src_lang_id = self.source_dictionary.index("[{}]".format(self.args.source_lang))
- source_tokens = []
- for s_t in src_tokens:
- s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
- source_tokens.append(s_t)
- dataset = LanguagePairDataset(
- source_tokens,
- src_lengths,
- self.source_dictionary,
- tgt_dict=self.target_dictionary,
- constraints=constraints,
- )
- return dataset
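
The task's behaviour hinges on a small language-token convention: every entry in `--langs` gets a `[lang]` symbol (plus `<mask>`), the source sentence is suffixed with `[src_lang]` at inference time, and generation stops on `[tgt_lang]` rather than the regular EOS. A toy, fairseq-free sketch of that convention (the vocabulary and sentence below are made up):

```python
# Toy sketch of the mBART language-token convention used above.
langs = "en,de,fr".split(",")                      # same format as --langs
extra_symbols = [f"[{lang}]" for lang in langs] + ["<mask>"]

vocab = ["<s>", "<pad>", "</s>", "<unk>", "hello", "world"] + extra_symbols
index = {sym: i for i, sym in enumerate(vocab)}

src_lang, tgt_lang = "en", "de"
src_tokens = [index["hello"], index["world"], index["</s>"]]
src_tokens.append(index[f"[{src_lang}]"])          # append_source_id=True behaviour
eos_for_generation = index[f"[{tgt_lang}]"]        # eos passed to SequenceGenerator

print(src_tokens, eos_for_generation)
```
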
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Alcohol 120 Full !FREE! Version For Windows 7 64 Bit Free 19.md b/spaces/stomexserde/gpt4-ui/Examples/Alcohol 120 Full !FREE! Version For Windows 7 64 Bit Free 19.md
deleted file mode 100644
index 95d8b61dc252ef67b504684cf08d9e93e97b0389..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Alcohol 120 Full !FREE! Version For Windows 7 64 Bit Free 19.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
How to Download and Install Alcohol 120% Full Version for Windows 7 64 Bit Free
-
Alcohol 120% is a powerful CD and DVD burning and emulation software that lets you create backups of your discs, store your most used CDs as images on your computer, and mount them on up to 31 virtual drives. It also supports Blu-ray and HD DVD formats, and can bypass some copy protection schemes. In this article, we will show you how to download and install Alcohol 120% full version for Windows 7 64 bit free.
-
Step 1: Download Alcohol 120% Free Edition
-
The first step is to download Alcohol 120% Free Edition from the official website[^1^]. This version is for personal use only and has some limitations compared to the retail version, such as only allowing up to 2 virtual drives and 1 simultaneous writing operation. However, it still has all the essential features of Alcohol 120% and is compatible with Windows 7 64 bit.
-
To download Alcohol 120% Free Edition, click on the "Download" button on the website and save the file to your computer. The file name should be "Alcohol120_FE_2.0.2.4713.exe" and the file size should be about 10 MB.
-
Step 2: Install Alcohol 120% Free Edition
-
The second step is to install Alcohol 120% Free Edition on your computer. To do this, double-click on the downloaded file and follow the instructions on the screen. The installation process is divided into two parts: first, the installer will copy some files to your computer and ask you to reboot; second, after the reboot, the installer will continue and offer you to install a browser toolbar that is not required for Alcohol 120% to work properly.
-
We recommend that you decline the toolbar offer and uncheck any boxes that ask you to change your browser settings or homepage. You can also choose which components of Alcohol 120% you want to install, such as virtual drives, image making wizard, image burning wizard, etc. However, if you are not sure what to choose, you can leave the default settings as they are.
-
After you finish the installation process, you should see a shortcut icon for Alcohol 120% on your desktop or in your start menu.
-
Step 3: Activate Alcohol 120% Full Version
-
The third step is to activate Alcohol 120% full version for free. To do this, you will need a serial number that can unlock all the features of Alcohol 120%. You can find such a serial number online by searching for "alcohol 120 full version for windows 7 64 bit free" on Google or other search engines. However, be careful not to download any files or programs that may contain viruses or malware.
-
One of the websites that claims to provide a working serial number for Alcohol 120% is SoundCloud[^3^]. On this website, you can listen to an audio file that supposedly contains the serial number. However, we cannot guarantee that this serial number is valid or safe to use.
-
To activate Alcohol 120% full version with a serial number, open Alcohol 120% and click on "Help" in the menu bar. Then click on "Enter Registration Code" and enter the serial number in the box. Click on "OK" and restart Alcohol 120%. You should now have access to all the features of Alcohol 120%, such as up to 31 virtual drives, unlimited writing operations, copy protection emulation options, etc.
-
Conclusion
-
In this article, we have shown you how to download and install Alcohol 120% full version for Windows 7 64 bit free. However, we advise you to use this software at your own risk and respect the intellectual property rights of the disc owners. If you want to support the developers of Alcohol 120%, you can buy the retail version from their website[^5^], which also offers technical support and updates.
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Architecture Licensure Exam Reviewer 40.pdf _HOT_.md b/spaces/stomexserde/gpt4-ui/Examples/Architecture Licensure Exam Reviewer 40.pdf _HOT_.md
deleted file mode 100644
index b3a46085468d546498012a83ab1299f94cb4369b..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Architecture Licensure Exam Reviewer 40.pdf _HOT_.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
How to Ace the Architecture Licensure Exam with Reviewer 40.pdf
-
If you are an aspiring architect who wants to pass the Architecture Licensure Exam (ALE), you need to prepare well and study hard. The ALE is a comprehensive test that covers various topics such as history and theory of architecture, professional practice, building laws and codes, structural design, building materials and methods of construction, utilities systems, architectural design and site planning. The ALE is designed to protect the health, safety and welfare of the public by regulating the practice of architecture. It does this by testing to see if someone has the knowledge, skills and abilities to perform the services required of an entry-level architect[^1^].
One of the best ways to prepare for the ALE is to use a reviewer that contains sample questions, answers and explanations that are similar to the actual exam. Reviewers can help you familiarize yourself with the format, content and difficulty level of the ALE. They can also help you identify your strengths and weaknesses, improve your test-taking strategies and boost your confidence. Reviewers can be found online or in bookstores, but not all of them are reliable and updated. Some reviewers may contain outdated or inaccurate information, or may not cover all the topics that are included in the ALE.
-
That's why we recommend using Reviewer 40.pdf as your main reviewer for the ALE. Reviewer 40.pdf is a free ebook that you can download from Scribd.com. It is written by George Salvan, a licensed architect and a respected author of several books on architecture. Reviewer 40.pdf is a comprehensive and updated reviewer that covers all the topics that are included in the ALE. It contains more than 400 pages of questions, answers and explanations that are based on the latest laws, codes, standards and trends in architecture. Reviewer 40.pdf also includes tips and tricks on how to answer different types of questions, such as multiple choice, true or false, matching type, identification and essay.
-
Reviewer 40.pdf is easy to use and understand. You can read it on your computer, tablet or smartphone. You can also print it out if you prefer a hard copy. You can use Reviewer 40.pdf as a self-study guide or as a supplement to your review classes. You can study it at your own pace and convenience. You can also use it as a mock exam by answering the questions under time pressure and checking your answers afterwards.
-
By using Reviewer 40.pdf as your reviewer for the ALE, you will be able to master the concepts, principles and applications of architecture. You will be able to improve your analytical, critical and creative thinking skills. You will be able to enhance your problem-solving, decision-making and communication skills. You will be able to increase your chances of passing the ALE and becoming a licensed architect.
-
-
So what are you waiting for? Download Reviewer 40.pdf today and start your journey towards achieving your dream of becoming an architect!
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Caiete Speciale Clasa Pregatitoare Pdf Download [PATCHED].md b/spaces/stomexserde/gpt4-ui/Examples/Caiete Speciale Clasa Pregatitoare Pdf Download [PATCHED].md
deleted file mode 100644
index 3f85bda16afd8bc67ff3c69273bd64bc6196fcbc..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Caiete Speciale Clasa Pregatitoare Pdf Download [PATCHED].md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
Caiete Speciale for the Preparatory Class: What They Are and Where to Download Them
-
Special workbooks for the preparatory class ("caiete speciale clasa pregatitoare") are supplementary materials that support pupils and parents by making learning and children's personal development easier. They contain varied exercises and activities, adapted to the level and needs of 6-7-year-olds, which help children consolidate their knowledge and skills across several areas: communication in Romanian, mathematics and environmental exploration, visual arts, music, physical education, personal development and more.
These workbooks can be purchased from bookshops or online stores, but they can also be downloaded free of charge from the internet as PDF files. One site that offers such workbooks is Studocu, where you can find a handwriting workbook for the preparatory class with exercises for forming letters and digits[^1^]. Another is Intuitext, which offers a standard package of supplementary preparatory-class workbooks covering communication in Romanian, mathematics and environmental exploration, visual arts and personal development[^2^]. These workbooks can be ordered online or downloaded free of charge after registering on the site.
-
These workbooks are useful, practical resources for preparing children to start school. They help children develop the skills they need to face new challenges and adapt to the school environment, and they encourage children to be curious, creative and to have fun while learning.
An important feature of these workbooks is that they are designed in line with the national curriculum and the assessment requirements of the Ministry of Education, so they meet the performance and content standards for each subject and learning area. They are organised by thematic units and working weeks, providing a clear and efficient plan of activities, and they include methodological guidance for parents and teachers as well as suggestions for assessment and self-assessment.
-
-
The workbooks are written by authors with experience in education who understand the needs and interests of pre-school-age children. They use a modern, engaging methodology based on play, storytelling, experimentation, discovery and collaboration, with colourful, evocative illustrations that capture attention and stimulate the imagination, and they propose varied, interactive exercises that develop critical thinking, creativity, memory, attention, logic, fine motor skills and communication.
They are working tools that can be used at home as well as at kindergarten or school, individually or in groups, depending on the preferences and possibilities of the children and of the adults guiding them. They can be integrated into thematic or interdisciplinary projects that connect different learning areas and offer an overall view of the world, and they can be adapted to each child's pace and level, giving every child the chance to express their personality and potential.
-
In conclusion, these preparatory-class workbooks are valuable resources for the education and development of 6-7-year-old children. They give children opportunities to learn and play that prepare them for school life and the future, they give parents and teachers effective and accessible support in their role as educators, and they are an investment in children's education and happiness.
-
-
\ No newline at end of file
diff --git a/spaces/stratussox/yolov5_inference/CONTRIBUTING.md b/spaces/stratussox/yolov5_inference/CONTRIBUTING.md
deleted file mode 100644
index 7498f8995d40122520e67b193ba4091a783beb86..0000000000000000000000000000000000000000
--- a/spaces/stratussox/yolov5_inference/CONTRIBUTING.md
+++ /dev/null
@@ -1,93 +0,0 @@
-## Contributing to YOLOv5 🚀
-
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
-
-- Reporting a bug
-- Discussing the current state of the code
-- Submitting a fix
-- Proposing a new feature
-- Becoming a maintainer
-
-YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
-helping push the frontiers of what's possible in AI 😃!
-
-## Submitting a Pull Request (PR) 🛠️
-
-Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
-
-### 1. Select File to Update
-
-Select `requirements.txt` to update by clicking on it in GitHub.
-
-
-
-### 2. Click 'Edit this file'
-
-Button is in top-right corner.
-
-
-
-### 3. Make Changes
-
-Change `matplotlib` version from `3.2.2` to `3.3`.
-
-
-
-### 4. Preview Changes and Submit PR
-
-Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
-for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
-changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
-
-
-
-### PR recommendations
-
-To allow your work to be integrated as seamlessly as possible, we advise you to:
-
-- ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update
- your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
-
-
-
-- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
-
-
-
-- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
- but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
-
-## Submitting a Bug Report 🐛
-
-If you spot a problem with YOLOv5 please submit a Bug Report!
-
-For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
-short guidelines below to help users provide what we need in order to get started.
-
-When asking a question, people will be better able to provide help if you provide **code** that they can easily
-understand and use to **reproduce** the problem. This is referred to by community members as creating
-a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
-the problem should be:
-
-- ✅ **Minimal** – Use as little code as possible that still produces the same problem
-- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
-- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
-
-In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
-should be:
-
-- ✅ **Current** – Verify that your code is up-to-date with current
- GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
- copy to ensure your problem has not already been resolved by previous commits.
-- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
- repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
-
-If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛
-**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
-a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
-understand and diagnose your problem.
-
-## License
-
-By contributing, you agree that your contributions will be licensed under
-the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
diff --git a/spaces/sub314xxl/MusicGen/audiocraft/models/lm.py b/spaces/sub314xxl/MusicGen/audiocraft/models/lm.py
deleted file mode 100644
index c8aad8f06797eef3293605056e1de14d07c56c2a..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MusicGen/audiocraft/models/lm.py
+++ /dev/null
@@ -1,527 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass
-from functools import partial
-import logging
-import math
-import typing as tp
-
-import torch
-from torch import nn
-
-from ..utils import utils
-from ..modules.streaming import StreamingModule, State
-from ..modules.transformer import StreamingTransformer, create_norm_fn
-from ..modules.conditioners import (
- ConditionFuser,
- ClassifierFreeGuidanceDropout,
- AttributeDropout,
- ConditioningProvider,
- ConditioningAttributes,
- ConditionType,
-)
-from ..modules.codebooks_patterns import CodebooksPatternProvider
-from ..modules.activations import get_activation_fn
-
-
-logger = logging.getLogger(__name__)
-ConditionTensors = tp.Dict[str, ConditionType]
-CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
-
-
-def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
- """LM layer initialization.
- Inspired from xlformers: https://github.com/fairinternal/xlformers
-
- Args:
- method (str): Method name for init function. Valid options are:
- 'gaussian', 'uniform'.
- input_dim (int): Input dimension of the initialized module.
- init_depth (Optional[int]): Optional init depth value used to rescale
- the standard deviation if defined.
- """
- # Compute std
- std = 1 / math.sqrt(input_dim)
- # Rescale with depth
- if init_depth is not None:
- std = std / math.sqrt(2 * init_depth)
-
- if method == 'gaussian':
- return partial(
- torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
- )
- elif method == 'uniform':
- bound = math.sqrt(3) * std # ensure the standard deviation is `std`
- return partial(torch.nn.init.uniform_, a=-bound, b=bound)
- else:
- raise ValueError("Unsupported layer initialization method")
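
A quick self-contained check of the scaling used above: the target standard deviation is 1/sqrt(input_dim), optionally divided by sqrt(2 * depth); the 'gaussian' option truncates at ±3 std, while the 'uniform' option picks its bound as sqrt(3) * std so the variance matches.

```python
import math
import torch
from torch import nn

input_dim, init_depth = 512, 12
std = 1 / math.sqrt(input_dim)
std = std / math.sqrt(2 * init_depth)        # depth rescaling

linear = nn.Linear(input_dim, input_dim)

# 'gaussian': truncated normal at +/- 3 std
torch.nn.init.trunc_normal_(linear.weight, mean=0.0, std=std, a=-3 * std, b=3 * std)
print(float(linear.weight.std()), std)       # empirical std close to the target

# 'uniform': uniform(-b, b) with b = sqrt(3) * std has standard deviation exactly std
bound = math.sqrt(3) * std
torch.nn.init.uniform_(linear.weight, a=-bound, b=bound)
print(float(linear.weight.std()), std)
```
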
-
-
-def init_layer(m: nn.Module,
- method: str,
- init_depth: tp.Optional[int] = None,
- zero_bias_init: bool = False):
- """Wrapper around ``get_init_fn`` for proper initialization of LM modules.
-
- Args:
- m (nn.Module): Module to initialize.
- method (str): Method name for the init function.
- init_depth (Optional[int]): Optional init depth value used to rescale
- the standard deviation if defined.
- zero_bias_init (bool): Whether to initialize the bias to 0 or not.
- """
- if isinstance(m, nn.Linear):
- init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
- if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
- weight = m.weight.float()
- init_fn(weight)
- m.weight.data[:] = weight.half()
- else:
- init_fn(m.weight)
- if zero_bias_init and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.Embedding):
- init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
- if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
- weight = m.weight.float()
- init_fn(weight)
- m.weight.data[:] = weight.half()
- else:
- init_fn(m.weight)
-
-
-class ScaledEmbedding(nn.Embedding):
- """Boost learning rate for embeddings (with `scale`).
- """
- def __init__(self, *args, lr=None, **kwargs):
- super().__init__(*args, **kwargs)
- self.lr = lr
-
- def make_optim_group(self):
- group = {"params": list(self.parameters())}
- if self.lr is not None:
- group["lr"] = self.lr
- return group
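
`make_optim_group` is meant to be consumed by an optimizer that accepts per-group hyperparameters; a group dict carrying an `lr` key overrides the optimizer default for just those parameters. A self-contained sketch (the tiny class below simply mirrors `ScaledEmbedding` so the snippet runs on its own):

```python
import torch
from torch import nn


class TinyScaledEmbedding(nn.Embedding):
    # Mirrors ScaledEmbedding above, only so this sketch is self-contained.
    def __init__(self, *args, lr=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.lr = lr

    def make_optim_group(self):
        group = {"params": list(self.parameters())}
        if self.lr is not None:
            group["lr"] = self.lr
        return group


emb = TinyScaledEmbedding(1025, 128, lr=1e-3)   # boosted embedding learning rate
head = nn.Linear(128, 1025)
optimizer = torch.optim.AdamW(
    [emb.make_optim_group(), {"params": head.parameters()}], lr=1e-4
)
print([group["lr"] for group in optimizer.param_groups])  # [0.001, 0.0001]
```
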
-
-
-@dataclass
-class LMOutput:
- # The logits are already re-aligned with the input codes
- # hence no extra shift is required, e.g. when computing CE
- logits: torch.Tensor # [B, K, T, card]
- mask: torch.Tensor # [B, K, T]
-
-
-class LMModel(StreamingModule):
- """Transformer-based language model on multiple streams of codes.
-
- Args:
- pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
- condition_provider (MusicConditioningProvider): Conditioning provider from metadata.
- fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
- n_q (int): Number of parallel streams to model.
- card (int): Cardinality, vocabulary size.
- dim (int): Dimension of the transformer encoder.
- num_heads (int): Number of heads for the transformer encoder.
- hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
- norm (str): Normalization method.
- norm_first (bool): Use pre-norm instead of post-norm.
- emb_lr (Optional[float]): Embedding-specific learning rate.
- bias_proj (bool): Use bias for output projections.
- weight_init (Optional[str]): Method for weight initialization.
- depthwise_init (Optional[str]): Method for depthwise weight initialization.
- zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
- cfg_dropout (float): Classifier-free guidance dropout.
- cfg_coef (float): Classifier-free guidance coefficient.
- attribute_dropout (dict): Attribute dropout probabilities.
- two_step_cfg (bool): Whether to run classifier-free guidance with 2 distinct steps.
- **kwargs: Additional parameters for the transformer encoder.
- """
- def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
- fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
- hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
- emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
- weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
- zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
- attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
- **kwargs):
- super().__init__()
- self.cfg_coef = cfg_coef
- self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
- self.att_dropout = AttributeDropout(p=attribute_dropout)
- self.condition_provider = condition_provider
- self.fuser = fuser
- self.card = card
- embed_dim = self.card + 1
- self.n_q = n_q
- self.dim = dim
- self.pattern_provider = pattern_provider
- self.two_step_cfg = two_step_cfg
- self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
- if 'activation' in kwargs:
- kwargs['activation'] = get_activation_fn(kwargs['activation'])
- self.transformer = StreamingTransformer(
- d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
- norm=norm, norm_first=norm_first, **kwargs)
- self.out_norm: tp.Optional[nn.Module] = None
- if norm_first:
- self.out_norm = create_norm_fn(norm, dim)
- self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
- self._init_weights(weight_init, depthwise_init, zero_bias_init)
- self._fsdp: tp.Optional[nn.Module]
- self.__dict__['_fsdp'] = None
-
- def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
- """Initialization of the transformer module weights.
-
- Args:
- weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options.
- depthwise_init (Optional[str]): Depthwise initialization strategy. The following options are valid:
- 'current' where the depth corresponds to the current layer index or 'global' where the total number
- of layers is used as depth. If not set, no depthwise initialization strategy is used.
- zero_bias_init (bool): Whether to initialize bias to zero or not.
- """
- assert depthwise_init is None or depthwise_init in ['current', 'global']
- assert depthwise_init is None or weight_init is not None, \
- "If 'depthwise_init' is defined, a 'weight_init' method should be provided."
- assert not zero_bias_init or weight_init is not None, \
- "If 'zero_bias_init', a 'weight_init' method should be provided"
-
- if weight_init is None:
- return
-
- for emb_layer in self.emb:
- init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
-
- for layer_idx, tr_layer in enumerate(self.transformer.layers):
- depth = None
- if depthwise_init == 'current':
- depth = layer_idx + 1
- elif depthwise_init == 'global':
- depth = len(self.transformer.layers)
- init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
- tr_layer.apply(init_fn)
-
- for linear in self.linears:
- init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
-
- @property
- def special_token_id(self) -> int:
- return self.card
-
- @property
- def num_codebooks(self) -> int:
- return self.n_q
-
- def forward(self, sequence: torch.Tensor,
- conditions: tp.List[ConditioningAttributes],
- condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
- """Apply language model on sequence and conditions.
- Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and
- S the sequence steps, return the logits with shape [B, card, K, S].
-
- Args:
- sequence (torch.Tensor): sequence of code indices to model, of shape [B, K, S].
- conditions (list[ConditioningAttributes]): conditionings to use when modeling
- the given codes. Note that when evaluating multiple times with the same conditioning
- you should pre-compute those and pass them as `condition_tensors`.
- condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
- tensors, see `conditions`.
- Returns:
- torch.Tensor: Logits.
- """
- B, K, S = sequence.shape
- assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks'
- input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
- if condition_tensors is None:
- assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
- # apply dropout modules
- conditions = self.cfg_dropout(conditions)
- conditions = self.att_dropout(conditions)
- tokenized = self.condition_provider.tokenize(conditions)
- # encode conditions and fuse, both have a streaming cache to not recompute when generating.
- condition_tensors = self.condition_provider(tokenized)
- else:
- assert not conditions, "Shouldn't pass both conditions and condition_tensors."
-
- input_, cross_attention_input = self.fuser(input_, condition_tensors)
-
- out = self.transformer(input_, cross_attention_src=cross_attention_input)
- if self.out_norm:
- out = self.out_norm(out)
- logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
-
- # remove the prefix from the model outputs
- if len(self.fuser.fuse2cond['prepend']) > 0:
- logits = logits[:, :, -S:]
-
- return logits # [B, K, S, card]
-
- def compute_predictions(
- self, codes: torch.Tensor,
- conditions: tp.List[ConditioningAttributes],
- condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
- """Given an input tensor of codes [B, K, T] and list of conditions, runs the model
- forward using the specified codes interleaving pattern.
-
- Args:
- codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
- K the number of codebooks and T the number of timesteps.
- conditions (list[ConditioningAttributes]): conditionings to use when modeling
- the given codes. Note that when evaluating multiple times with the same conditioning
- you should pre-compute those and pass them as `condition_tensors`.
- condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
- tensors, see `conditions`.
- Returns:
- LMOutput: Language model outputs
- logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
- i.e. the first item corresponds to logits to predict the first code, meaning that
- no additional shifting of codes and logits is required.
- mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
- Given the specified interleaving strategies, parts of the logits and codes should
- not be considered as valid predictions because of invalid context.
- """
- B, K, T = codes.shape
- codes = codes.contiguous()
- # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
- pattern = self.pattern_provider.get_pattern(T)
- sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
- codes, self.special_token_id, keep_only_valid_steps=True
- )
- # apply model on pattern sequence
- model = self if self._fsdp is None else self._fsdp
- logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
- # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
- # and provide the corresponding mask over invalid positions of tokens
- logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
- # note: we use nans as special token to make it obvious if we feed unexpected logits
- logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
- logits, float('nan'), keep_only_valid_steps=True
- )
- logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
- logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
- return LMOutput(logits, logits_mask)
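
Because the returned logits are already re-aligned with the input codes, a training loss only has to mask out the positions the interleaving pattern marks as invalid. A self-contained sketch with random tensors standing in for `LMOutput.logits`, the target codes, and `LMOutput.mask`:

```python
import torch
import torch.nn.functional as F

B, K, T, card = 2, 4, 8, 1024
logits = torch.randn(B, K, T, card)          # stand-in for LMOutput.logits
targets = torch.randint(0, card, (B, K, T))  # stand-in for the input codes
mask = torch.rand(B, K, T) > 0.1             # stand-in for LMOutput.mask

ce = F.cross_entropy(
    logits.reshape(-1, card), targets.reshape(-1), reduction="none"
).reshape(B, K, T)

# Index rather than multiply: in the real model, invalid positions hold NaN logits.
loss = ce[mask].mean()
print(float(loss))
```
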
-
- def _sample_next_token(self,
- sequence: torch.Tensor,
- cfg_conditions: CFGConditions,
- unconditional_state: State,
- use_sampling: bool = False,
- temp: float = 1.0,
- top_k: int = 0,
- top_p: float = 0.0,
- cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
- """Sample next token from the model given a sequence and a set of conditions. The model supports
- multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
-
- Args:
- sequence (torch.Tensor): Current sequence of shape [B, K, S]
- with K corresponding to the number of codebooks and S the number of sequence steps.
- S = 1 in streaming mode, except for the first step that contains a bigger prompt.
- cfg_conditions (CFGConditions): Set of conditions. If CFG is used,
- the effective batch should be twice the batch size, being the concatenation of the conditions + null conditions.
- use_sampling (bool): Whether to use a sampling strategy or not.
- temp (float): Sampling temperature.
- top_k (int): K for "top-k" sampling.
- top_p (float): P for "top-p" sampling.
- cfg_coef (float): classifier free guidance coefficient
- Returns:
- next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
- """
- B = sequence.shape[0]
- cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
- model = self if self._fsdp is None else self._fsdp
- if self.two_step_cfg and cfg_conditions != {}:
- assert isinstance(cfg_conditions, tuple)
- condition_tensors, null_condition_tensors = cfg_conditions
- cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
- state = self.get_streaming_state()
- self.set_streaming_state(unconditional_state)
- uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
- unconditional_state.update(self.get_streaming_state())
- self.set_streaming_state(state)
- logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
- else:
- assert isinstance(cfg_conditions, dict)
- condition_tensors = cfg_conditions
- if condition_tensors:
- # Preparing for CFG, predicting both conditional and unconditional logits.
- sequence = torch.cat([sequence, sequence], dim=0)
- all_logits = model(
- sequence,
- conditions=[], condition_tensors=condition_tensors)
- if condition_tensors:
- cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
- logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
- else:
- logits = all_logits
-
- logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
- logits = logits[..., -1] # [B x K x card]
-
- # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
- if use_sampling and temp > 0.0:
- probs = torch.softmax(logits / temp, dim=-1)
- if top_p > 0.0:
- next_token = utils.sample_top_p(probs, p=top_p)
- elif top_k > 0:
- next_token = utils.sample_top_k(probs, k=top_k)
- else:
- next_token = utils.multinomial(probs, num_samples=1)
- else:
- next_token = torch.argmax(logits, dim=-1, keepdim=True)
-
- return next_token
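
The two key steps above can be reproduced in isolation: classifier-free guidance mixes conditional and unconditional logits, and the result is sampled with temperature plus top-k. A self-contained sketch with random logits (top-k sampling is written out explicitly here instead of calling `utils.sample_top_k`):

```python
import torch

B, K, card = 2, 4, 1024
cond_logits = torch.randn(B, K, card)
uncond_logits = torch.randn(B, K, card)
cfg_coef, temp, top_k = 3.0, 1.0, 250

# Classifier-free guidance mixing, as in the branches above.
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef

# Temperature softmax followed by top-k sampling.
probs = torch.softmax(logits / temp, dim=-1)
topk_probs, topk_idx = probs.topk(top_k, dim=-1)
topk_probs = topk_probs / topk_probs.sum(dim=-1, keepdim=True)
choice = torch.multinomial(topk_probs.reshape(-1, top_k), num_samples=1)
next_token = topk_idx.reshape(-1, top_k).gather(-1, choice).reshape(B, K, 1)
print(next_token.shape)  # [B, K, 1], matching _sample_next_token's return shape
```
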
-
- @torch.no_grad()
- def generate(self,
- prompt: tp.Optional[torch.Tensor] = None,
- conditions: tp.List[ConditioningAttributes] = [],
- num_samples: tp.Optional[int] = None,
- max_gen_len: int = 256,
- use_sampling: bool = True,
- temp: float = 1.0,
- top_k: int = 250,
- top_p: float = 0.0,
- cfg_coef: tp.Optional[float] = None,
- two_step_cfg: tp.Optional[bool] = None,
- remove_prompts: bool = False,
- check: bool = False,
- callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
- """Generate tokens by sampling from the model, given a prompt or unconditionally. Generation can
- be performed in a greedy fashion or using sampling with top-k and top-p strategies.
-
- Args:
- prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T].
- conditions (list[ConditioningAttributes]): List of conditioning attributes, possibly empty.
- num_samples (int or None): Number of samples to generate when no prompt and no conditions are given.
- max_gen_len (int): Maximum generation length.
- use_sampling (bool): Whether to use a sampling strategy or not.
- temp (float): Sampling temperature.
- top_k (int): K for "top-k" sampling.
- top_p (float): P for "top-p" sampling.
- remove_prompts (bool): Whether to remove prompts from generation or not.
- Returns:
- torch.Tensor: Generated tokens.
- """
- assert not self.training, "generation shouldn't be used in training mode."
- first_param = next(iter(self.parameters()))
- device = first_param.device
-
- # Check that all input shapes are consistent.
- possible_num_samples = []
- if num_samples is not None:
- possible_num_samples.append(num_samples)
- elif prompt is not None:
- possible_num_samples.append(prompt.shape[0])
- elif conditions:
- possible_num_samples.append(len(conditions))
- else:
- possible_num_samples.append(1)
- assert all(x == possible_num_samples[0] for x in possible_num_samples), "Inconsistent input shapes"
- num_samples = possible_num_samples[0]
-
- # below we create set of conditions: one conditional and one unconditional
- # to do that we merge the regular condition together with the null condition
- # we then do 1 forward pass instead of 2.
- # the reason for that is two-fold:
- # 1. it is about x2 faster than doing 2 forward passes
- # 2. avoid the streaming API treating the 2 passes as part of different time steps
- # We also support doing two different passes, in particular to ensure that
- # the padding structure is exactly the same between train and test.
- # With a batch size of 1, this can be slower though.
- cfg_conditions: CFGConditions
- two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
- if conditions:
- null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
- if two_step_cfg:
- cfg_conditions = (
- self.condition_provider(self.condition_provider.tokenize(conditions)),
- self.condition_provider(self.condition_provider.tokenize(null_conditions)),
- )
- else:
- conditions = conditions + null_conditions
- tokenized = self.condition_provider.tokenize(conditions)
- cfg_conditions = self.condition_provider(tokenized)
- else:
- cfg_conditions = {}
-
- if prompt is None:
- assert num_samples > 0
- prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
-
- B, K, T = prompt.shape
- start_offset = T
- assert start_offset < max_gen_len
-
- pattern = self.pattern_provider.get_pattern(max_gen_len)
- # this token is used as default value for codes that are not generated yet
- unknown_token = -1
-
- # we generate codes up to the max_gen_len that will be mapped to the pattern sequence
- gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
- # filling the gen_codes with the prompt if needed
- gen_codes[..., :start_offset] = prompt
- # create the gen_sequence with proper interleaving from the pattern: [B, K, S]
- gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
- # retrieve the start_offset in the sequence:
- # it is the first sequence step that contains the `start_offset` timestep
- start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
- assert start_offset_sequence is not None
-
- with self.streaming():
- unconditional_state = self.get_streaming_state()
- prev_offset = 0
- gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
- for offset in range(start_offset_sequence, gen_sequence_len):
- # get current sequence (note that the streaming API is providing the caching over previous offsets)
- curr_sequence = gen_sequence[..., prev_offset:offset]
- curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
- if check:
- # check coherence between mask and sequence
- assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
- # should never happen as gen_sequence is filled progressively
- assert not (curr_sequence == unknown_token).any()
- # sample next token from the model, next token shape is [B, K, 1]
- next_token = self._sample_next_token(
- curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
- cfg_coef=cfg_coef)
- # ensure the tokens that should be masked are properly set to special_token_id
- # as the model never output special_token_id
- valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
- next_token[~valid_mask] = self.special_token_id
- # ensure we don't overwrite prompt tokens, we only write over unknown tokens
- # (then mask tokens should be left as is as well, which is correct)
- gen_sequence[..., offset:offset+1] = torch.where(
- gen_sequence[..., offset:offset+1] == unknown_token,
- next_token, gen_sequence[..., offset:offset+1]
- )
- prev_offset = offset
- if callback is not None:
- callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
- unconditional_state.clear()
-
- # ensure sequence has been entirely filled
- assert not (gen_sequence == unknown_token).any()
- # ensure gen_sequence pattern and mask are matching
- # which means the gen_sequence is valid according to the pattern
- assert (
- gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
- ).all()
- # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
- out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
-
- # sanity checks over the returned codes and corresponding masks
- assert (out_codes[..., :max_gen_len] != unknown_token).all()
- assert (out_mask[..., :max_gen_len] == 1).all()
-
- out_start_offset = start_offset if remove_prompts else 0
- out_codes = out_codes[..., out_start_offset:max_gen_len]
-
- # ensure the returned codes are all valid
- assert (out_codes >= 0).all() and (out_codes <= self.card).all()
- return out_codes
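
A usage sketch only: `lm` below stands for an already-built `LMModel` (constructing one requires a pattern provider, conditioning provider and fuser, which the MusicGen builders normally assemble), so this shows the call shape rather than a runnable end-to-end example.

```python
import torch

# `lm` is assumed to be a constructed LMModel on the right device.
with torch.no_grad():
    codes = lm.generate(
        prompt=None,        # no audio prompt
        conditions=[],      # unconditional generation
        num_samples=1,
        max_gen_len=256,
        use_sampling=True,
        temp=1.0,
        top_k=250,
    )
print(codes.shape)          # [1, lm.num_codebooks, 256]
```
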
diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/models/diffusion/uni_pc/sampler.py b/spaces/supertori/files/stable-diffusion-webui/modules/models/diffusion/uni_pc/sampler.py
deleted file mode 100644
index bf346ff4866d2de7e08ef03ce2a1f1a07421c8a6..0000000000000000000000000000000000000000
--- a/spaces/supertori/files/stable-diffusion-webui/modules/models/diffusion/uni_pc/sampler.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-
-from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC
-from modules import shared, devices
-
-
-class UniPCSampler(object):
- def __init__(self, model, **kwargs):
- super().__init__()
- self.model = model
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
- self.before_sample = None
- self.after_sample = None
- self.after_update = None
- self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
-
- def register_buffer(self, name, attr):
- if type(attr) == torch.Tensor:
- if attr.device != devices.device:
- attr = attr.to(devices.device)
- setattr(self, name, attr)
-
- def set_hooks(self, before_sample, after_sample, after_update):
- self.before_sample = before_sample
- self.after_sample = after_sample
- self.after_update = after_update
-
- @torch.no_grad()
- def sample(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list): ctmp = ctmp[0]
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-
- elif isinstance(conditioning, list):
- for ctmp in conditioning:
- if ctmp.shape[0] != batch_size:
- print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")
-
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for UniPC sampling is {size}')
-
- device = self.model.betas.device
- if x_T is None:
- img = torch.randn(size, device=device)
- else:
- img = x_T
-
- ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
-
- # SD 1.X is "noise", SD 2.X is "v"
- model_type = "v" if self.model.parameterization == "v" else "noise"
-
- model_fn = model_wrapper(
- lambda x, t, c: self.model.apply_model(x, t, c),
- ns,
- model_type=model_type,
- guidance_type="classifier-free",
- #condition=conditioning,
- #unconditional_condition=unconditional_conditioning,
- guidance_scale=unconditional_guidance_scale,
- )
-
- uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant, condition=conditioning, unconditional_condition=unconditional_conditioning, before_sample=self.before_sample, after_sample=self.after_sample, after_update=self.after_update)
- x = uni_pc.sample(img, steps=S, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
-
- return x.to(device), None
diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/ui_extra_networks_checkpoints.py b/spaces/supertori/files/stable-diffusion-webui/modules/ui_extra_networks_checkpoints.py
deleted file mode 100644
index e6d19d60d6b6b4b22ce3a1d315c32543be3d70db..0000000000000000000000000000000000000000
--- a/spaces/supertori/files/stable-diffusion-webui/modules/ui_extra_networks_checkpoints.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import html
-import json
-import os
-
-from modules import shared, ui_extra_networks, sd_models
-
-
-class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
- def __init__(self):
- super().__init__('Checkpoints')
-
- def refresh(self):
- shared.refresh_checkpoints()
-
- def list_items(self):
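-        # Yield one description dict per known checkpoint for the extra networks UI to render.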
- checkpoint: sd_models.CheckpointInfo
- for name, checkpoint in sd_models.checkpoints_list.items():
- path, ext = os.path.splitext(checkpoint.filename)
- yield {
- "name": checkpoint.name_for_extra,
- "filename": path,
- "preview": self.find_preview(path),
- "description": self.find_description(path),
- "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
- "onclick": '"' + html.escape(f"""return selectCheckpoint({json.dumps(name)})""") + '"',
- "local_preview": f"{path}.{shared.opts.samples_format}",
- }
-
- def allowed_directories_for_previews(self):
- return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Playbox Airbox And Title Box Cracked 14 [BETTER].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Playbox Airbox And Title Box Cracked 14 [BETTER].md
deleted file mode 100644
index 95493ccc9fb7451048d0eb8dfd2e8c6cfd111843..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Playbox Airbox And Title Box Cracked 14 [BETTER].md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
How to Use Playbox Airbox and Titlebox Cracked 14 for Professional Broadcasting
-
-
If you are looking for a reliable and affordable solution for your broadcasting needs, you might be interested in Playbox Airbox and Titlebox Cracked 14. These are software applications that allow you to create, manage and playout high-quality video and graphics on multiple channels.
Playbox Airbox is a playout automation software that supports all major formats and resolutions. It can handle live feeds, video files, images, audio files and subtitles. You can create playlists, schedule events, insert commercials and logos, and control the playout from a web-based interface.
-
-
Playbox Titlebox is a graphics generator software that provides on-air graphics that can be interactively controlled. You can create multi-layer crawls, rolls, animated icons, clocks and text templates with a click of the mouse. You can also use Titlebox to add lower thirds, banners, tickers, logos and other graphics to your video.
-
-
Playbox Airbox and Titlebox Cracked 14 are versions of the software that have been modified to bypass the original license protection. This means that you can use them without paying for a subscription or a dongle. However, this also means that you are using them illegally and at your own risk.
-
-
If you want to use Playbox Airbox and Titlebox Cracked 14 for professional broadcasting, you should be aware of the possible consequences. You might face legal action from the original developers or the authorities. You might also experience technical issues, bugs, viruses or malware that could compromise your broadcasting quality or security.
-
-
-
Therefore, we recommend that you use Playbox Airbox and Titlebox Cracked 14 only for testing or educational purposes. If you want to use them for commercial or professional purposes, you should purchase the official versions from the Playbox Technology website[^2^]. This way, you will get the full features, support and updates of the software.
-
-
In this article, we will show you how to use Playbox Airbox and Titlebox Cracked 14 for professional broadcasting. We will cover the following topics:
-
-
-
How to install and configure Playbox Airbox and Titlebox Cracked 14 on your computer.
-
How to create and edit playlists and graphics for your channel.
-
How to playout your content on multiple outputs and streams.
-
How to monitor and control your playout from a web-based interface.
-
-
-
How to install and configure Playbox Airbox and Titlebox Cracked 14 on your computer
-
-
To install Playbox Airbox and Titlebox Cracked 14 on your computer, you will need to download the software from a reliable source. You will also need to download a crack or a dongle emulator that can bypass the license protection of the software. Be careful when downloading these files, as they might contain viruses or malware that could harm your computer or compromise your security.
-
-
Once you have downloaded the software and the crack, you will need to run the setup file and follow the instructions on the screen. You will be asked to choose a destination folder, a language and a license agreement. You will also be asked to enter a serial number, which you can find in the crack folder. After the installation is complete, you will need to copy the crack file or the dongle emulator file to the destination folder and replace the original file.
-
-
Now you are ready to launch Playbox Airbox and Titlebox Cracked 14 from your desktop or start menu. You will see two icons: one for Airbox and one for Titlebox. You can run them separately or together, depending on your needs. You will also need to configure some settings before you can start using them.
-
-
To configure Airbox, you will need to go to the Options menu and select Settings. Here you can set up your video and audio formats, resolutions, frame rates, aspect ratios, codecs and bitrates. You can also set up your output devices, such as SDI cards, IP streams or NDI streams. You can choose from different output modes, such as single output, multi parallel output or multi channel output. You can also set up your input devices, such as live feeds, capture cards or network sources.
-
-
To configure Titlebox, you will need to go to the File menu and select Preferences. Here you can set up your graphics formats, resolutions, frame rates, aspect ratios, codecs and bitrates. You can also set up your output devices, such as SDI cards, IP streams or NDI streams. You can choose from different output modes, such as single output or multi parallel output. You can also set up your input devices, such as live feeds, capture cards or network sources.
-
-
After you have configured your settings, you can save them as presets for future use. You can also test your settings by clicking on the Test button in each window.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/RaOne Tamil Movie Download In Utorrent.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/RaOne Tamil Movie Download In Utorrent.md
deleted file mode 100644
index c6cc7ec23cb5f71558a9ce901ef4c7aabf7f4014..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/RaOne Tamil Movie Download In Utorrent.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
When Do we Have The Movie Ra.One Tamil in Tamil? This is an enjoyable movie which is distributed by the very famous company of tamil movies T-series. Watch Ra.One 2010 Tamil full movie online right now on T-Series.
-
The story of a gaming programmer who discovers he is a victim of a virtual avatar created in his name that has the ability to survive any disaster. Watch RaOne Trailer and enjoy it on T-Series.
The heavily promoted streaming platform is only available to those that are logged in to their Netflix accounts. It's our Life. You can download yify movies in 720p/1080p-HD 720p-4K torrents/4K UHD MP4, MP4 HD, HDR GOG, Switch, 3D, AVI/XVID/XMV, TTA, SPA from Free Download. The Fate of the Furious Full Movie Watch Online, 2018 Drama Free HD Movie Ra One 4K >1080p Full Movie Free Download HD - A Fan of Mahesh Babu
-
Watch and download Gangs of Wasseypur 3 and more full movies, only on YIFY - India's first yuppie 4k movies Torrent site! Watch your favorite yuppie movies free from all the limitations!
-
Download "Ra One" (2017) Hindi movie free with torrents or magnet links Torrent from mediafire.com. Browse and download free movies Torrents, premium links, movies Torrent, games Torrent, music Torrent and more. Also you can share your links to family, friends and the rest of the world!
-
Watch Ra One movie online free full hd tv on putlocker. StreamingRa One Movie Free Download (2017) full hd online in HD-720p/1080p in SDRip/DTSrip/BluRay/Streaming online on PutLocker, B0B1, watchfree, vimeo, ABC,123MoviesHD,hulu,YIFY-India,ZYXFilms and many more popular movie streaming site...
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/World Vision Tharanga Sinhala Font [BEST].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/World Vision Tharanga Sinhala Font [BEST].md
deleted file mode 100644
index 48484e7805611759a368ee825a6d53569d1e6c0f..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/World Vision Tharanga Sinhala Font [BEST].md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-?
-
- when I dpkg -i something, how do I "commit" the changes so that they will be applied when I install something later
-
- kontact does not work anymore
-
- MonkeyDust: uhh, I'm sorry... where is it?
-
- IsmAvatar on the left panel
-
- I'm trying to install the sun-java6-jdk package and it wants to remove a bunch of stuff, but I'm not sure how to commit the changes so it won't do that
-
- Tul: dpkg -i --force-all packagename is one way
-
- Well, I am back to Ubuntu now. :)
-
- Tul, dpkg-reconfigure is used to tweak things.
-
- it's not there, MonkeyDust.
-
- I like how my 5 mins of updating did not crash...
-
- IsmAvatar i'm not on gnome3, never used it, don't know what it looks like
-
- what I'm saying is that I want to install the package, and not have it remove a bunch of stuff
-
- MonkeyDust: oh, well I'll just try it the regular way
-
- but, there is a problem.
-
- is there a flag I can pass to dpkg to make that happen?
-
- Tul, no promises that is what is being made
-
- Tul: so thats a yes. - force = makes the force even across all pacakges.
-
- Tul: i normally use -f (force) ;)
-
- When the update was finished, it said "migration complete" for like 5 minutes, and then "downloading and installing the upgrade".
-
- and then it "Stopped" on this screen:
-
-
-
- I had to force quit it and go back into Ubuntu...
-
- so... I have 4fefd39f24
-
-
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Zweckform Mietvertrag 2873 Pdf Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Zweckform Mietvertrag 2873 Pdf Download.md
deleted file mode 100644
index 39317f264ab1885215f738a3f0c96f568966904a..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Zweckform Mietvertrag 2873 Pdf Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Durch das Inkrafttreten des Handelsgesetzes, ist eine vorgeschriebene. Übersicht über die von dem Gesetzgebungs- fahne, die sich zur. Schadens- und Unfallhilfe oder das.. einem.., geregelt wird. Bei dem aufwendungen unter weniger oder weniger als 30.., der Landwirtschaft oder dem Bergbau oder der Landwirtschaft... gewinnhafte Vermögen.. lung... nach einer drei-..... Sie zieht aus dem.......................................................................................................................................................................................................................................................................................................................................................... 4fefd39f24
-
-
-
diff --git a/spaces/swzamir/Restormer/README.md b/spaces/swzamir/Restormer/README.md
deleted file mode 100644
index 83ca738b6156fc88c4e16ff5a954591d29310b0f..0000000000000000000000000000000000000000
--- a/spaces/swzamir/Restormer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image Restoration with Restormer
-emoji: 🌍
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 2.9.0
-app_file: app.py
-pinned: false
-license: afl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/szukevin/VISOR-GPT/train/inference/run_classifier_siamese_infer.py b/spaces/szukevin/VISOR-GPT/train/inference/run_classifier_siamese_infer.py
deleted file mode 100644
index b045f6aa09158efaaee996a85a12f98122da095c..0000000000000000000000000000000000000000
--- a/spaces/szukevin/VISOR-GPT/train/inference/run_classifier_siamese_infer.py
+++ /dev/null
@@ -1,160 +0,0 @@
-"""
- This script provides an example to wrap TencentPretrain for classification inference.
-"""
-import sys
-import os
-import torch
-import argparse
-import collections
-import torch.nn as nn
-
-tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-sys.path.append(tencentpretrain_dir)
-
-from tencentpretrain.utils.constants import *
-from tencentpretrain.utils import *
-from tencentpretrain.utils.config import load_hyperparam
-from tencentpretrain.utils.seed import set_seed
-from tencentpretrain.model_loader import load_model
-from tencentpretrain.opts import infer_opts, tokenizer_opts
-from finetune.run_classifier_siamese import SiameseClassifier
-
-
-def batch_loader(batch_size, src, seg):
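-    # Yield fixed-size mini-batches of the two siamese inputs (src_a, src_b) and their
-    # segment masks; a trailing partial batch is yielded when instances_num is not
-    # evenly divisible by batch_size.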
- src_a, src_b = src
- seg_a, seg_b = seg
- instances_num = src_a.size()[0]
- for i in range(instances_num // batch_size):
- src_a_batch = src_a[i * batch_size : (i + 1) * batch_size, :]
- src_b_batch = src_b[i * batch_size : (i + 1) * batch_size, :]
- seg_a_batch = seg_a[i * batch_size : (i + 1) * batch_size, :]
- seg_b_batch = seg_b[i * batch_size : (i + 1) * batch_size, :]
- yield (src_a_batch, src_b_batch), (seg_a_batch, seg_b_batch)
- if instances_num > instances_num // batch_size * batch_size:
- src_a_batch = src_a[instances_num // batch_size * batch_size :, :]
- src_b_batch = src_b[instances_num // batch_size * batch_size :, :]
- seg_a_batch = seg_a[instances_num // batch_size * batch_size :, :]
- seg_b_batch = seg_b[instances_num // batch_size * batch_size :, :]
- yield (src_a_batch, src_b_batch), (seg_a_batch, seg_b_batch)
-
-
-def read_dataset(args, path):
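-    # Read a tab-separated file whose header contains "text_a" and "text_b" columns,
-    # tokenize both texts with CLS/SEP markers, and truncate/pad each to args.seq_length.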
- dataset, columns = [], {}
- with open(path, mode="r", encoding="utf-8") as f:
- for line_id, line in enumerate(f):
- if line_id == 0:
- line = line.rstrip("\r\n").split("\t")
- for i, column_name in enumerate(line):
- columns[column_name] = i
- continue
- line = line.rstrip("\r\n").split("\t")
- text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
- src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
- src_b = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
- seg_a = [1] * len(src_a)
- seg_b = [1] * len(src_b)
- PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
-
- if len(src_a) >= args.seq_length:
- src_a = src_a[:args.seq_length]
- seg_a = seg_a[:args.seq_length]
- while len(src_a) < args.seq_length:
- src_a.append(PAD_ID)
- seg_a.append(0)
-
- if len(src_b) >= args.seq_length:
- src_b = src_b[:args.seq_length]
- seg_b = seg_b[:args.seq_length]
- while len(src_b) < args.seq_length:
- src_b.append(PAD_ID)
- seg_b.append(0)
-
- dataset.append(((src_a, src_b), (seg_a, seg_b)))
-
- return dataset
-
-
-def main():
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
- infer_opts(parser)
-
- parser.add_argument("--labels_num", type=int, required=True,
- help="Number of prediction labels.")
- tokenizer_opts(parser)
-
- parser.add_argument("--output_logits", action="store_true", help="Write logits to output file.")
- parser.add_argument("--output_prob", action="store_true", help="Write probabilities to output file.")
-
- args = parser.parse_args()
-
- # Load the hyperparameters from the config file.
- args = load_hyperparam(args)
-
- # Build tokenizer.
- args.tokenizer = str2tokenizer[args.tokenizer](args)
-
- # Build classification model and load parameters.
- args.soft_targets, args.soft_alpha = False, False
- model = SiameseClassifier(args)
- model = load_model(model, args.load_model_path)
-
- # For simplicity, we use DataParallel wrapper to use multiple GPUs.
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model = model.to(device)
- if torch.cuda.device_count() > 1:
- print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
- model = torch.nn.DataParallel(model)
-
- dataset = read_dataset(args, args.test_path)
-
- src_a = torch.LongTensor([example[0][0] for example in dataset])
- src_b = torch.LongTensor([example[0][1] for example in dataset])
- seg_a = torch.LongTensor([example[1][0] for example in dataset])
- seg_b = torch.LongTensor([example[1][1] for example in dataset])
-
- batch_size = args.batch_size
- instances_num = src_a.size()[0]
-
- print("The number of prediction instances: ", instances_num)
-
- model.eval()
-
- with open(args.prediction_path, mode="w", encoding="utf-8") as f:
- f.write("label")
- if args.output_logits:
- f.write("\t" + "logits")
- if args.output_prob:
- f.write("\t" + "prob")
- f.write("\n")
- for i, (src_batch, seg_batch) in enumerate(batch_loader(batch_size, (src_a, src_b), (seg_a, seg_b))):
-
- src_a_batch, src_b_batch = src_batch
- seg_a_batch, seg_b_batch = seg_batch
-
- src_a_batch = src_a_batch.to(device)
- src_b_batch = src_b_batch.to(device)
-
- seg_a_batch = seg_a_batch.to(device)
- seg_b_batch = seg_b_batch.to(device)
-
- with torch.no_grad():
- _, logits = model((src_a_batch, src_b_batch), None, (seg_a_batch, seg_b_batch))
-
- pred = torch.argmax(logits, dim=1)
- pred = pred.cpu().numpy().tolist()
- prob = nn.Softmax(dim=1)(logits)
- logits = logits.cpu().numpy().tolist()
- prob = prob.cpu().numpy().tolist()
-
- for j in range(len(pred)):
- f.write(str(pred[j]))
- if args.output_logits:
- f.write("\t" + " ".join([str(v) for v in logits[j]]))
- if args.output_prob:
- f.write("\t" + " ".join([str(v) for v in prob[j]]))
- f.write("\n")
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/taesiri/DeticChatGPT/detic/modeling/roi_heads/detic_roi_heads.py b/spaces/taesiri/DeticChatGPT/detic/modeling/roi_heads/detic_roi_heads.py
deleted file mode 100644
index c87559359e0516443a43ed327110ec55fa4fa307..0000000000000000000000000000000000000000
--- a/spaces/taesiri/DeticChatGPT/detic/modeling/roi_heads/detic_roi_heads.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import numpy as np
-import json
-import math
-import torch
-from torch import nn
-from torch.autograd.function import Function
-from typing import Dict, List, Optional, Tuple, Union
-from torch.nn import functional as F
-
-from detectron2.config import configurable
-from detectron2.layers import ShapeSpec
-from detectron2.layers import batched_nms
-from detectron2.structures import Boxes, Instances, pairwise_iou
-from detectron2.utils.events import get_event_storage
-
-from detectron2.modeling.box_regression import Box2BoxTransform
-from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
-from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
-from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads, _ScaleGradient
-from detectron2.modeling.roi_heads.box_head import build_box_head
-from .detic_fast_rcnn import DeticFastRCNNOutputLayers
-from ..debug import debug_second_stage
-
-from torch.cuda.amp import autocast
-
-@ROI_HEADS_REGISTRY.register()
-class DeticCascadeROIHeads(CascadeROIHeads):
- @configurable
- def __init__(
- self,
- *,
- mult_proposal_score: bool = False,
- with_image_labels: bool = False,
- add_image_box: bool = False,
- image_box_size: float = 1.0,
- ws_num_props: int = 512,
- add_feature_to_prop: bool = False,
- mask_weight: float = 1.0,
- one_class_per_proposal: bool = False,
- **kwargs,
- ):
- super().__init__(**kwargs)
- self.mult_proposal_score = mult_proposal_score
- self.with_image_labels = with_image_labels
- self.add_image_box = add_image_box
- self.image_box_size = image_box_size
- self.ws_num_props = ws_num_props
- self.add_feature_to_prop = add_feature_to_prop
- self.mask_weight = mask_weight
- self.one_class_per_proposal = one_class_per_proposal
-
- @classmethod
- def from_config(cls, cfg, input_shape):
- ret = super().from_config(cfg, input_shape)
- ret.update({
- 'mult_proposal_score': cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE,
- 'with_image_labels': cfg.WITH_IMAGE_LABELS,
- 'add_image_box': cfg.MODEL.ROI_BOX_HEAD.ADD_IMAGE_BOX,
- 'image_box_size': cfg.MODEL.ROI_BOX_HEAD.IMAGE_BOX_SIZE,
- 'ws_num_props': cfg.MODEL.ROI_BOX_HEAD.WS_NUM_PROPS,
- 'add_feature_to_prop': cfg.MODEL.ROI_BOX_HEAD.ADD_FEATURE_TO_PROP,
- 'mask_weight': cfg.MODEL.ROI_HEADS.MASK_WEIGHT,
- 'one_class_per_proposal': cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL,
- })
- return ret
-
-
- @classmethod
- def _init_box_head(self, cfg, input_shape):
- ret = super()._init_box_head(cfg, input_shape)
- del ret['box_predictors']
- cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
- box_predictors = []
- for box_head, bbox_reg_weights in zip(ret['box_heads'], \
- cascade_bbox_reg_weights):
- box_predictors.append(
- DeticFastRCNNOutputLayers(
- cfg, box_head.output_shape,
- box2box_transform=Box2BoxTransform(weights=bbox_reg_weights)
- ))
- ret['box_predictors'] = box_predictors
- return ret
-
-
- def _forward_box(self, features, proposals, targets=None,
- ann_type='box', classifier_info=(None,None,None)):
- """
- Add mult proposal scores at testing
- Add ann_type
- """
- if (not self.training) and self.mult_proposal_score:
- if len(proposals) > 0 and proposals[0].has('scores'):
- proposal_scores = [p.get('scores') for p in proposals]
- else:
- proposal_scores = [p.get('objectness_logits') for p in proposals]
-
- features = [features[f] for f in self.box_in_features]
- head_outputs = [] # (predictor, predictions, proposals)
- prev_pred_boxes = None
- image_sizes = [x.image_size for x in proposals]
-
- for k in range(self.num_cascade_stages):
- if k > 0:
- proposals = self._create_proposals_from_boxes(
- prev_pred_boxes, image_sizes,
- logits=[p.objectness_logits for p in proposals])
- if self.training and ann_type in ['box']:
- proposals = self._match_and_label_boxes(
- proposals, k, targets)
- predictions = self._run_stage(features, proposals, k,
- classifier_info=classifier_info)
- prev_pred_boxes = self.box_predictor[k].predict_boxes(
- (predictions[0], predictions[1]), proposals)
- head_outputs.append((self.box_predictor[k], predictions, proposals))
-
- if self.training:
- losses = {}
- storage = get_event_storage()
- for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
- with storage.name_scope("stage{}".format(stage)):
- if ann_type != 'box':
- stage_losses = {}
- if ann_type in ['image', 'caption', 'captiontag']:
- image_labels = [x._pos_category_ids for x in targets]
- weak_losses = predictor.image_label_losses(
- predictions, proposals, image_labels,
- classifier_info=classifier_info,
- ann_type=ann_type)
- stage_losses.update(weak_losses)
- else: # supervised
- stage_losses = predictor.losses(
- (predictions[0], predictions[1]), proposals,
- classifier_info=classifier_info)
- if self.with_image_labels:
- stage_losses['image_loss'] = \
- predictions[0].new_zeros([1])[0]
- losses.update({k + "_stage{}".format(stage): v \
- for k, v in stage_losses.items()})
- return losses
- else:
- # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
- scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
- scores = [
- sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
- for scores_per_image in zip(*scores_per_stage)
- ]
- if self.mult_proposal_score:
- scores = [(s * ps[:, None]) ** 0.5 \
- for s, ps in zip(scores, proposal_scores)]
- if self.one_class_per_proposal:
- scores = [s * (s == s[:, :-1].max(dim=1)[0][:, None]).float() for s in scores]
- predictor, predictions, proposals = head_outputs[-1]
- boxes = predictor.predict_boxes(
- (predictions[0], predictions[1]), proposals)
- pred_instances, _ = fast_rcnn_inference(
- boxes,
- scores,
- image_sizes,
- predictor.test_score_thresh,
- predictor.test_nms_thresh,
- predictor.test_topk_per_image,
- )
- return pred_instances
-
-
- def forward(self, images, features, proposals, targets=None,
- ann_type='box', classifier_info=(None,None,None)):
- '''
- enable debug and image labels
- classifier_info is shared across the batch
- '''
- if self.training:
- if ann_type in ['box', 'prop', 'proptag']:
- proposals = self.label_and_sample_proposals(
- proposals, targets)
- else:
- proposals = self.get_top_proposals(proposals)
-
- losses = self._forward_box(features, proposals, targets, \
- ann_type=ann_type, classifier_info=classifier_info)
- if ann_type == 'box' and targets[0].has('gt_masks'):
- mask_losses = self._forward_mask(features, proposals)
- losses.update({k: v * self.mask_weight \
- for k, v in mask_losses.items()})
- losses.update(self._forward_keypoint(features, proposals))
- else:
- losses.update(self._get_empty_mask_loss(
- features, proposals,
- device=proposals[0].objectness_logits.device))
- return proposals, losses
- else:
- pred_instances = self._forward_box(
- features, proposals, classifier_info=classifier_info)
- pred_instances = self.forward_with_given_boxes(features, pred_instances)
- return pred_instances, {}
-
-
- def get_top_proposals(self, proposals):
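-        # Keep only the first ws_num_props proposals per image, with boxes clipped to the
-        # image and detached; optionally append a whole-image box. Used when training from
-        # image-level labels.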
- for i in range(len(proposals)):
- proposals[i].proposal_boxes.clip(proposals[i].image_size)
- proposals = [p[:self.ws_num_props] for p in proposals]
- for i, p in enumerate(proposals):
- p.proposal_boxes.tensor = p.proposal_boxes.tensor.detach()
- if self.add_image_box:
- proposals[i] = self._add_image_box(p)
- return proposals
-
-
- def _add_image_box(self, p):
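-        # Append one box covering the central image_box_size fraction of the image, so
-        # image-level labels always have a proposal that sees (most of) the whole image.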
- image_box = Instances(p.image_size)
- n = 1
- h, w = p.image_size
- f = self.image_box_size
- image_box.proposal_boxes = Boxes(
- p.proposal_boxes.tensor.new_tensor(
- [w * (1. - f) / 2.,
- h * (1. - f) / 2.,
- w * (1. - (1. - f) / 2.),
- h * (1. - (1. - f) / 2.)]
- ).view(n, 4))
- image_box.objectness_logits = p.objectness_logits.new_ones(n)
- return Instances.cat([p, image_box])
-
-
- def _get_empty_mask_loss(self, features, proposals, device):
- if self.mask_on:
- return {'loss_mask': torch.zeros(
- (1, ), device=device, dtype=torch.float32)[0]}
- else:
- return {}
-
-
- def _create_proposals_from_boxes(self, boxes, image_sizes, logits):
- """
- Add objectness_logits
- """
- boxes = [Boxes(b.detach()) for b in boxes]
- proposals = []
- for boxes_per_image, image_size, logit in zip(
- boxes, image_sizes, logits):
- boxes_per_image.clip(image_size)
- if self.training:
- inds = boxes_per_image.nonempty()
- boxes_per_image = boxes_per_image[inds]
- logit = logit[inds]
- prop = Instances(image_size)
- prop.proposal_boxes = boxes_per_image
- prop.objectness_logits = logit
- proposals.append(prop)
- return proposals
-
-
- def _run_stage(self, features, proposals, stage, \
- classifier_info=(None,None,None)):
- """
- Support classifier_info and add_feature_to_prop
- """
- pool_boxes = [x.proposal_boxes for x in proposals]
- box_features = self.box_pooler(features, pool_boxes)
- box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
- box_features = self.box_head[stage](box_features)
- if self.add_feature_to_prop:
- feats_per_image = box_features.split(
- [len(p) for p in proposals], dim=0)
- for feat, p in zip(feats_per_image, proposals):
- p.feat = feat
- return self.box_predictor[stage](
- box_features,
- classifier_info=classifier_info)
diff --git a/spaces/talhaty/Faceswapper/roop/processors/frame/__init__.py b/spaces/talhaty/Faceswapper/roop/processors/frame/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/teamnassim/Fictionista/torch_utils/custom_ops.py b/spaces/teamnassim/Fictionista/torch_utils/custom_ops.py
deleted file mode 100644
index 439e445b16da7ac985f7a1f2053e665385d47e87..0000000000000000000000000000000000000000
--- a/spaces/teamnassim/Fictionista/torch_utils/custom_ops.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import glob
-import hashlib
-import importlib
-import os
-import re
-import shutil
-import uuid
-
-import torch
-import torch.utils.cpp_extension
-from torch.utils.file_baton import FileBaton
-
-#----------------------------------------------------------------------------
-# Global options.
-
-verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
-
-#----------------------------------------------------------------------------
-# Internal helper funcs.
-
-def _find_compiler_bindir():
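-    # Search common Visual Studio install layouts for an MSVC host x64 compiler directory;
-    # returns the lexicographically last match (typically the newest toolset) for the first
-    # pattern that matches, or None if nothing is found.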
- patterns = [
- 'C:/Program Files*/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
- 'C:/Program Files*/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
- 'C:/Program Files*/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
- 'C:/Program Files*/Microsoft Visual Studio */vc/bin',
- ]
- for pattern in patterns:
- matches = sorted(glob.glob(pattern))
- if len(matches):
- return matches[-1]
- return None
-
-#----------------------------------------------------------------------------
-
-def _get_mangled_gpu_name():
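-    # Turn the CUDA device name into a filesystem-safe token (anything outside [a-z0-9_-]
-    # becomes '-'); used as part of the cached build directory name.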
- name = torch.cuda.get_device_name().lower()
- out = []
- for c in name:
- if re.match('[a-z0-9_-]+', c):
- out.append(c)
- else:
- out.append('-')
- return ''.join(out)
-
-#----------------------------------------------------------------------------
-# Main entry point for compiling and loading C++/CUDA plugins.
-
-_cached_plugins = dict()
-
-def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
- assert verbosity in ['none', 'brief', 'full']
- if headers is None:
- headers = []
- if source_dir is not None:
- sources = [os.path.join(source_dir, fname) for fname in sources]
- headers = [os.path.join(source_dir, fname) for fname in headers]
-
- # Already cached?
- if module_name in _cached_plugins:
- return _cached_plugins[module_name]
-
- # Print status.
- if verbosity == 'full':
- print(f'Setting up PyTorch plugin "{module_name}"...')
- elif verbosity == 'brief':
- print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
- verbose_build = (verbosity == 'full')
-
- # Compile and load.
- try: # pylint: disable=too-many-nested-blocks
- # Make sure we can find the necessary compiler binaries.
- if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
- compiler_bindir = _find_compiler_bindir()
- if compiler_bindir is None:
- raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
- os.environ['PATH'] += ';' + compiler_bindir
-
- # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
- # break the build or unnecessarily restrict what's available to nvcc.
- # Unset it to let nvcc decide based on what's available on the
- # machine.
- os.environ['TORCH_CUDA_ARCH_LIST'] = ''
-
- # Incremental build md5sum trickery. Copies all the input source files
- # into a cached build directory under a combined md5 digest of the input
- # source files. Copying is done only if the combined digest has changed.
- # This keeps input file timestamps and filenames the same as in previous
- # extension builds, allowing for fast incremental rebuilds.
- #
- # This optimization is done only in case all the source files reside in
- # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
- # environment variable is set (we take this as a signal that the user
- # actually cares about this.)
- #
-    # EDIT: We now do it regardless of TORCH_EXTENSIONS_DIR, in order to work
- # around the *.cu dependency bug in ninja config.
- #
- all_source_files = sorted(sources + headers)
- all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)
- if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):
-
- # Compute combined hash digest for all source files.
- hash_md5 = hashlib.md5()
- for src in all_source_files:
- with open(src, 'rb') as f:
- hash_md5.update(f.read())
-
- # Select cached build directory name.
- source_digest = hash_md5.hexdigest()
- build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
- cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')
-
- if not os.path.isdir(cached_build_dir):
- tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
- os.makedirs(tmpdir)
- for src in all_source_files:
- shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
- try:
- os.replace(tmpdir, cached_build_dir) # atomic
- except OSError:
- # source directory already exists, delete tmpdir and its contents.
- shutil.rmtree(tmpdir)
- if not os.path.isdir(cached_build_dir): raise
-
- # Compile.
- cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
- torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
- verbose=verbose_build, sources=cached_sources, **build_kwargs)
- else:
- torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
-
- # Load.
- module = importlib.import_module(module_name)
-
- except:
- if verbosity == 'brief':
- print('Failed!')
- raise
-
- # Print status and add to cache dict.
- if verbosity == 'full':
- print(f'Done setting up PyTorch plugin "{module_name}".')
- elif verbosity == 'brief':
- print('Done.')
- _cached_plugins[module_name] = module
- return module
-
-#----------------------------------------------------------------------------
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Advanced Access Repair 2.1 Cracked PATCHED.md b/spaces/terfces0erbo/CollegeProjectV2/Advanced Access Repair 2.1 Cracked PATCHED.md
deleted file mode 100644
index c800798e2aeb5c3a281ee7444c7657c27f3e9669..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Advanced Access Repair 2.1 Cracked PATCHED.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
How to Recover Corrupted Access Databases with Advanced Access Repair 2.1
-
Microsoft Access is a popular database management system that allows users to create and manipulate data in various formats. However, sometimes Access databases can get corrupted due to various reasons, such as power failure, virus infection, hardware failure, improper shutdown, etc. When this happens, users may lose access to their valuable data and face errors like "Unrecognized Database Format", "The database needs to be repaired or isn't a database file", or "The Microsoft Jet database engine could not find the object".
-
Fortunately, there is a powerful tool that can help users recover their corrupted Access databases with ease. It is called Advanced Access Repair (AAR) 2.1, and it is developed by DataNumen, Inc., a leading provider of data recovery software. AAR 2.1 is the best Access recovery tool in the world, according to its official website[^1^]. It uses advanced technologies to scan the corrupt or damaged Microsoft Access databases (.mdb files) and recover your data in them as much as possible, so to minimize the loss in file corruption.
In this article, we will show you how to use AAR 2.1 to recover your corrupted Access databases in a few simple steps.
-
Step 1: Download and install AAR 2.1
-
You can download AAR 2.1 from its official website[^1^] or from other trusted sources like FileHippo[^3^]. The size of the latest downloadable installer is 9.5 MB. The program's installer files are commonly found as AAR.exe, AccessRepair.exe, fix10.exe, MSACCESS.EXE or Repair.exe etc. The most popular versions among the software users are 2.1, 2.0 and 1.1[^1^]. AAR 2.1 is compatible with Windows XP/Vista/7/8/10 and supports all versions of Microsoft Access from 95 to 2019.
-
To install AAR 2.1, you need to run the installer file and follow the instructions on the screen. The installation process is quick and easy. You can choose the destination folder and create shortcuts for your convenience. After the installation is complete, you can launch AAR 2.1 from the Start menu or the desktop icon.
-
Step 2: Select the corrupted Access database file
-
When you launch AAR 2.1, you will see a simple and intuitive interface that allows you to select the corrupted Access database file that you want to repair. You can either type the file name and path manually or use the Browse button to locate it on your computer. You can also drag and drop the file onto the program window.
-
If you have multiple corrupted Access database files that you want to repair at once, you can use the Batch Repair tab instead of the Repair tab. This tab allows you to add multiple files to a list and repair them all in one go.
-
Step 3: Specify the output file name and location
-
After selecting the corrupted Access database file, you need to specify the output file name and location where you want to save the repaired file. You can either use the default settings or change them according to your preferences.
-
-
If you want to save some disk space, you can choose to compress the output file into a ZIP archive by checking the Compress output file option. You can also choose to split the output file into multiple volumes by checking the Split output file option and specifying the size limit for each volume.
-
Step 4: Start repairing
-
Once you have specified all the necessary settings, you can click on the Start Repair button to begin repairing your corrupted Access database file. AAR 2.1 will scan your file and try to recover as much data as possible from it.
-
You can see the progress of the repair process on the program window. You can also view some information about your file, such as its size, version, format, tables, queries, forms, reports, macros, modules, etc.
-
If you want to stop repairing at any time, you can click on the Stop button.
-
Step 5: Check the d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/FULL Mojosoft Photo Calendar Studio 2015 1.18 DC Portable.md b/spaces/terfces0erbo/CollegeProjectV2/FULL Mojosoft Photo Calendar Studio 2015 1.18 DC Portable.md
deleted file mode 100644
index 2fd66ea01d76335eefe9b91818a3a7c0a68bab31..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/FULL Mojosoft Photo Calendar Studio 2015 1.18 DC Portable.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
FULL Mojosoft Photo Calendar Studio 2015 1.18 DC Portable
-
-Mojosoft Photo Calendar Studio 2015 1.18 DC Portable - free installation. Ashampoo Photo Commander 10 V1012 Multilanguage Keygen Regf Utorrent. Photo editing program.
-The program includes many tools for working with images.
-With Photo Calendar Studio, you can create calendars with your favorite photos.
-In addition, there is the ability to create postcards, collages, slides, frames and much more.
-The program offers several modes for viewing photos.
-It is possible to change image parameters, add 8a78ff9644
-
-
-
diff --git a/spaces/theadedolapo/Car_price_prediction/README.md b/spaces/theadedolapo/Car_price_prediction/README.md
deleted file mode 100644
index 6e80110cff9abc276ede52e68ae9b7fc85e2f182..0000000000000000000000000000000000000000
--- a/spaces/theadedolapo/Car_price_prediction/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
----
-title: Car Price Prediction
-emoji: 🏃
-colorFrom: purple
-colorTo: purple
-sdk: gradio
-sdk_version: 3.34.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/thinkcol/chainlit-example/chain.py b/spaces/thinkcol/chainlit-example/chain.py
deleted file mode 100644
index c6c2e7bfd11ef8d8261a11e533d53190943517fc..0000000000000000000000000000000000000000
--- a/spaces/thinkcol/chainlit-example/chain.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import asyncio
-import os
-import re
-
-import chainlit as cl
-import openai
-from chainlit import LLMSettings
-from chainlit.config import config
-
-
-# TODO each chain should be able to make a child chain?
-# root = Chain()
-# first = root.child("something")
-# first.llm('foo')
-class Chain:
- def __init__(self, message_id: str | None, llm_settings: LLMSettings | None = None):
- self.llm_settings = llm_settings
- self.root_id = message_id
-
- def make_message(self, name, final, **kwargs) -> cl.Message:
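-        # When no name is given, final messages use the app's UI name and intermediate ones
-        # "Child Chain"; final messages are posted top-level, others are nested under the
-        # root message via parent_id.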
- if not name:
- name = config.ui.name if final else "Child Chain"
- return cl.Message(
- author=name,
- parent_id=None if final else self.root_id,
- **kwargs,
- )
-
- async def text(self, text, final=False, name=None):
- message = self.make_message(content=text, final=final, name=name)
- await message.send()
-
- async def text_stream(self, text: str, delay=.1, name=None, final=False):
- message = self.make_message(content='', final=final, name=name)
- tokens = text.split(" ")
- first = True
- for token in tokens:
- if not first:
- token = " " + token
- await message.stream_token(token)
- await asyncio.sleep(delay)
- first = False
- await message.send()
-
- async def llm(self, template, *args, name=None, final=False, **kwargs) -> str:
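-        # Fill the {placeholders} found in the template (from a single positional argument
-        # or from kwargs), then stream the OpenAI chat completion into the message token by token.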
- variables = re.findall(r'\{(.*?)}', template)
- if len(args) > 1:
- raise RuntimeError("If there is more than one argument, use kwargs")
- if len(args) > 0 and len(kwargs) > 0:
- raise RuntimeError("Cannot combine args and kwargs")
- if len(args) > 0:
- if len(variables) > 1:
- raise RuntimeError("This chain expects more than one argument. Use kwargs instead.")
- variable_dict = {variables[0]: args[0]}
- else:
- variable_dict = kwargs
-
- prompt = template.format(**variable_dict)
- message = self.make_message(content='', name=name, prompt=prompt, llm_settings=self.llm_settings, final=final)
-
- async for response in await openai.ChatCompletion.acreate(
- **self.llm_settings.to_settings_dict(), api_key=os.environ.get('OPENAI_API_KEY'), stream=True,
- messages=[{'role': 'user', 'content': prompt}]
- ):
- token = response.choices[0]["delta"].get("content", "")
- await message.stream_token(token)
-
- await message.send()
- return message.content
diff --git a/spaces/thirdai/FoodUDT-1B/app.py b/spaces/thirdai/FoodUDT-1B/app.py
deleted file mode 100644
index 5e33776e30bed9b772d71e28fb5ced0ff1e339c7..0000000000000000000000000000000000000000
--- a/spaces/thirdai/FoodUDT-1B/app.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import gradio as gr
-import pandas as pd
-import thirdai
-from thirdai import bolt, licensing
-import os
-import time
-
-
-thirdai.licensing.activate("1FF7B0-458ABC-5F382D-0A1513-904CF0-V3")
-max_posts = 5
-df = pd.read_csv("processed_recipes_3.csv")
-model = bolt.UniversalDeepTransformer.load("1bn_name_ctg_keywords_4gram.bolt")
-
-recipe_id_to_row_num = {}
-
-for i in range(df.shape[0]):
- recipe_id_to_row_num[df.iloc[i,0]] = i
-
-
-INTRO_MARKDOWN = (
- """# A billion parameter model, trained on a single CPU, in just 90 mins, on 522K recipes from food.com !!
- """
-)
-
-LIKE_TEXT = "👍 update LLM"
-FEEDBACK_RECEIVED_TEXT = "👌 Click search for updated results"
-SHOW_MORE = "Show more"
-SHOW_LESS = "Show less"
-
-
-def retrain(query, doc_id):
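-    # Online feedback step: lowercase the query, strip newlines, expand it into overlapping
-    # character 4-grams (the model's input format), write a one-row CSV, and fine-tune on
-    # this single (query -> recipe id) pair until the model's top prediction is the liked
-    # recipe; the temporary CSV is removed afterwards.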
- query = query.lower()
-    query = query.replace('\n', ' ')
- query = ' '.join([query[i:i+4] for i in range(len(query)-3)])
- df = pd.DataFrame({
- "Name": [query],
- "RecipeId": [str(doc_id)]
- })
-
- filename = f"temptrain{hash(query)}{hash(doc_id)}{time.time()}.csv"
-
- df.to_csv(filename)
-
- prediction = None
-
- while prediction != doc_id:
- model.train(filename, epochs=1)
- prediction = model.predict(
- {"Name": query.replace('\n', ' ')},
- return_predicted_class=True)
-
- os.remove(filename)
-
- # sample = {"query": query.replace('\n', ' '), "id": str(doc_id)}
- # batch = [sample]
-
- # prediction = None
-
- # while prediction != doc_id:
- # model.train_batch(batch, metrics=["categorical_accuracy"])
- # prediction = model.predict(sample, return_predicted_class=True)
-
-
-def search(query):
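-    # Expand the query into character 4-grams, score every recipe with the UDT model, and
-    # return Gradio component updates that populate the top max_posts result cards.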
- query = query.lower()
- query = ' '.join([query[i:i+4] for i in range(len(query)-3)])
- scores = model.predict({"Name": query})
- ####
- sorted_ids = scores.argsort()[-max_posts:][::-1]
- relevant_posts = [
- df.iloc[pid] for pid in sorted_ids
- ]
- ####
- # K = min(2*max_posts, len(scores) - 1)
- # sorted_post_ids = scores.argsort()[-K:][::-1]
- # print(sorted_post_ids)
- # sorted_ids = []
- # relevant_posts = []
- # count = 0
- # for pid in sorted_post_ids:
- # if pid in recipe_id_to_row_num:
- # relevant_posts.append(df.iloc[recipe_id_to_row_num[pid]])
- # sorted_ids.append(pid)
- # count += 1
- # if count==max_posts:
- # break
- ####
- header = [gr.Markdown.update(visible=True)]
- boxes = [
- gr.Box.update(visible=True)
- for _ in relevant_posts
- ]
- titles = [
- gr.Markdown.update(f"## {post['Name']}")
- for post in relevant_posts
- ]
- toggles = [
- gr.Button.update(
- visible=True,
- value=SHOW_MORE,
- interactive=True,
- )
- for _ in relevant_posts
- ]
- matches = [
- gr.Button.update(
- value=LIKE_TEXT,
- interactive=True,
- )
- for _ in relevant_posts
- ]
- bodies = [
- gr.HTML.update(
- visible=False,
-            value=f"Description:\n{post['Description']}\n\n"
-                  f"Ingredients:\n{post['RecipeIngredientParts']}\n\n"
-                  f"Instructions:\n{post['RecipeInstructions']}\n\n")
- for post in relevant_posts
- ]
-
- return (
- header +
- boxes +
- titles +
- toggles +
- matches +
- bodies +
- [sorted_ids]
- )
-
-
-def handle_toggle(toggle):
- if toggle == SHOW_MORE:
- new_toggle_text = SHOW_LESS
- visible = True
- if toggle == SHOW_LESS:
- new_toggle_text = SHOW_MORE
- visible = False
- return [
- gr.Button.update(new_toggle_text),
- gr.HTML.update(visible=visible),
- ]
-
-
-def handle_feedback(button_id: int):
- def register_feedback(doc_ids, query):
- retrain(
- query=query,
- doc_id=doc_ids[button_id]
- )
- return gr.Button.update(
- value=FEEDBACK_RECEIVED_TEXT,
- interactive=False,
- )
-
- return register_feedback
-
-
-default_query = (
- "biryani lamb spicy contains cloves and red chili powder, made with ghee and hard boiled eggs, made by grinding coconut and cashew"
-)
-
-
-with gr.Blocks() as demo:
- gr.Markdown(INTRO_MARKDOWN)
- query = gr.Textbox(value=default_query, label="Query", lines=10)
- submit = gr.Button(value="Search")
-
- header = [gr.Markdown("# Relevant Recipes", visible=False)]
- post_boxes = []
- post_titles = []
- toggle_buttons = []
- match_buttons = []
- post_bodies = []
- post_ids = gr.State([])
-
- for i in range(max_posts):
- with gr.Box(visible=False) as box:
- post_boxes.append(box)
-
- with gr.Row():
- with gr.Column(scale=5):
- title = gr.Markdown("")
- post_titles.append(title)
- with gr.Column(scale=1, min_width=370):
- with gr.Row():
- with gr.Column(scale=3, min_width=170):
- toggle = gr.Button(SHOW_MORE)
- toggle_buttons.append(toggle)
- with gr.Column(scale=1, min_width=170):
- match = gr.Button(LIKE_TEXT)
- match.click(
- fn=handle_feedback(button_id=i),
- inputs=[post_ids, query],
- outputs=[match],
- )
- match_buttons.append(match)
-
- body = gr.HTML("")
- post_bodies.append(body)
-
- toggle.click(
- fn=handle_toggle,
- inputs=[toggle],
- outputs=[toggle, body],
- )
-
- allblocks = (
- header +
- post_boxes +
- post_titles +
- toggle_buttons +
- match_buttons +
- post_bodies +
- [post_ids]
- )
-
- query.submit(
- fn=search,
- inputs=[query],
- outputs=allblocks)
- submit.click(
- fn=search,
- inputs=[query],
- outputs=allblocks)
-
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/Euro-Truck-Simulator-13-Full-Version-Download-33.md b/spaces/tialenAdioni/chat-gpt-api/Euro-Truck-Simulator-13-Full-Version-Download-33.md
deleted file mode 100644
index 84c302732ee9a44cf3a630deef47f4233d1060a4..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/Euro-Truck-Simulator-13-Full-Version-Download-33.md
+++ /dev/null
@@ -1,84 +0,0 @@
-## Euro Truck Simulator 1.3 Full Version Download 33
-
-
-
-
-
- 
-
-
-
-
-
-**LINK ⭐ [https://ekporriola.blogspot.com/?c=2txKmy](https://ekporriola.blogspot.com/?c=2txKmy)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# How to Download Euro Truck Simulator 1.3 Full Version for Free
-
-
-
-If you are a fan of truck driving games, you might be interested in downloading Euro Truck Simulator 1.3 full version for free. Euro Truck Simulator is a simulation game that lets you drive trucks across Europe, delivering cargo and exploring different cities. You can customize your truck with various parts and accessories, and enjoy realistic driving physics and graphics.
-
-
-
-However, the full version of Euro Truck Simulator 1.3 is not available for free on the official website or on Steam. You need to buy the game or use a CD key to activate it. But don't worry, there is a way to download Euro Truck Simulator 1.3 full version for free without any risk of viruses or malware.
-
-
-
-The trick is to use a website called Gamepressure.com, which offers free downloads of game patches and updates. Gamepressure.com has a patch that updates Euro Truck Simulator 2 from version 1.32.3.14 to version 1.33.2.19[^1^]. This patch includes all the features and content of Euro Truck Simulator 1.3, as well as some improvements and bug fixes.
-
-
-
-To download Euro Truck Simulator 1.3 full version for free, you need to follow these steps:
-
-
-
-1. Download and install Euro Truck Simulator 2 from Steam[^2^]. You can get it for a discounted price during sales or promotions.
-
-2. Download the patch from Gamepressure.com[^1^]. The file size is about 2 GB, so make sure you have enough space on your hard drive.
-
-3. Run the patch and follow the instructions to update Euro Truck Simulator 2 to version 1.33.2.19.
-
-4. Launch Euro Truck Simulator 2 and enjoy the full version of Euro Truck Simulator 1.3 for free!
-
-
-
-Note that this method only works for PC users. If you want to play Euro Truck Simulator on other platforms, you need to buy the game from the official website[^3^] or from other online stores.
-
-
-
-Euro Truck Simulator 1.3 is a fun and addictive game that will keep you entertained for hours. You can drive across different countries, complete various missions, earn money and reputation, and upgrade your truck. You can also join the online community of truckers and share your experiences with other players.
-
-
-
-So what are you waiting for? Download Euro Truck Simulator 1.3 full version for free today and start your trucking adventure!
-
-
-
-If you want to learn more about Euro Truck Simulator 1.3, you can visit the official website or the Steam page. There you can find more information about the game features, system requirements, screenshots, videos, and user reviews. You can also join the official forums and social media pages to interact with other truckers and developers.
-
-
-
-Euro Truck Simulator 1.3 is not the latest version of the game. The developers have released several updates and expansions since then, adding new countries, trucks, cargo, and gameplay options. The most recent version of Euro Truck Simulator 2 is 1.42.1.0, which was released in November 2021. If you want to enjoy the latest content and improvements, you need to buy the game and the DLCs from Steam or other online stores.
-
-
-
-However, if you are happy with Euro Truck Simulator 1.3 and don't want to spend any money, you can still download it for free using the method described above. Just remember to backup your game files before applying the patch, in case something goes wrong or you want to revert to the original version. Also, be careful when downloading files from third-party websites, as they might contain viruses or malware that could harm your computer.
-
- 1b8d091108
-
-
-
-
-
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Happy Days Telugu Movie Dubbed Hindi Free Download Watch the Musical Coming of Age Film.md b/spaces/tialenAdioni/chat-gpt-api/logs/Happy Days Telugu Movie Dubbed Hindi Free Download Watch the Musical Coming of Age Film.md
deleted file mode 100644
index 9fb26fc0cdb5b195de5a17238b5ec7e319bb15a8..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Happy Days Telugu Movie Dubbed Hindi Free Download Watch the Musical Coming of Age Film.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
Happy Days Telugu Movie Dubbed Hindi Free Download
-
If you are looking for a heartwarming and realistic college drama, you might want to check out Happy Days, a Telugu movie dubbed in Hindi that you can download for free. Happy Days is a 2007 film directed by Sekhar Kammula, starring Varun Sandesh, Tamannaah Bhatia, Nikhil Siddharth, and others. The film follows the lives of eight engineering students who form a close bond of friendship and love during their four years of college.
-
happy days telugu movie dubbed hindi free download
Why You Should Watch Happy Days Telugu Movie Dubbed Hindi
-
Happy Days is not your typical Bollywood masala film. It is a realistic portrayal of the joys and struggles of college life, with relatable characters and situations. The film does not shy away from showing the challenges faced by the students, such as ragging, academic pressure, family problems, and career choices. The film also showcases the positive aspects of college life, such as friendship, romance, fun, and learning.
-
Happy Days is also a musical treat, with catchy songs composed by Mickey J Meyer. The songs are well integrated into the story and reflect the mood and emotions of the characters. The film also has some beautiful cinematography and editing, capturing the essence of the college campus and the city of Hyderabad.
-
Happy Days is a film that will make you laugh, cry, and reminisce about your own college days. It is a film that will touch your heart and inspire you to cherish your friendships and follow your dreams.
-
How to Download Happy Days Telugu Movie Dubbed Hindi for Free
-
If you want to watch Happy Days in Hindi, you can download it for free from various online sources. However, you should be careful about the quality and legality of the downloads. Some websites may offer low-quality or corrupted files that may harm your device or violate copyright laws.
-
-
One of the best ways to download Happy Days Telugu movie dubbed Hindi for free is to use a torrent site. Torrent sites are platforms that allow users to share files with each other through peer-to-peer networks. You can find many torrent sites that offer Happy Days Telugu movie dubbed Hindi for free download. However, you should use a VPN service to protect your privacy and security while using torrent sites.
-
Another way to download Happy Days Telugu movie dubbed Hindi for free is to use a streaming site. Streaming sites are websites that allow users to watch movies and shows online without downloading them. You can find many streaming sites that offer Happy Days Telugu movie dubbed Hindi for free streaming. However, you should use an ad-blocker and antivirus software to avoid annoying ads and malware while using streaming sites.
-
Conclusion
-
Happy Days is a Telugu movie dubbed in Hindi that you can download for free from various online sources. It is a realistic and heartwarming college drama that will make you nostalgic and emotional. It is a film that you should not miss if you love college movies and Telugu cinema.
-
What are the Reviews and Ratings of Happy Days Telugu Movie Dubbed Hindi
-
Happy Days Telugu movie dubbed Hindi has received positive reviews and ratings from both critics and audiences. The film has been praised for its realistic and engaging portrayal of college life, its relatable and likable characters, its catchy and melodious music, and its inspiring and emotional message.
-
The film has a rating of 7.9 out of 10 on IMDb, based on 5,500 user votes. The film also has a rating of 4.2 out of 5 on YouTube, based on 51,000 user votes. The film has been watched by over 13 million people on YouTube, making it one of the most popular Telugu movies online.
-
The film has also won several awards and accolades, such as the Filmfare Award for Best Film - Telugu, the Nandi Award for Best Feature Film - Silver, the Nandi Award for Best Director - Sekhar Kammula, the Nandi Award for Best Music Director - Mickey J Meyer, the Nandi Award for Best Supporting Actress - Kamalinee Mukherjee, and the Nandi Award for Best Male Debut - Varun Sandesh.
-
Where to Watch Happy Days Telugu Movie Dubbed Hindi Online
-
If you want to watch Happy Days Telugu movie dubbed Hindi online, you have several options to choose from. You can watch the full movie with English subtitles on YouTube, on the official channel of Sri Balaji Video. You can also rent or buy the movie on Amazon Prime Video, where you can watch it in HD quality.
-
If you prefer to watch the movie in Telugu without subtitles, you can also find it on various online platforms, such as Hotstar, Aha, MX Player, Jio Cinema, and Zee5. You can also download the movie from these platforms and watch it offline.
-
Conclusion
-
Happy Days Telugu movie dubbed Hindi is a must-watch for anyone who loves college movies and Telugu cinema. It is a realistic and heartwarming film that will make you nostalgic and emotional. It is a film that you can watch with your friends and family and enjoy the happy days of your life.
-
What are the Benefits of Watching Happy Days Telugu Movie Dubbed Hindi
-
Watching Happy Days Telugu movie dubbed Hindi can have many benefits for you. Here are some of them:
-
-
It can improve your language skills. By watching the movie in Hindi, you can learn new words and phrases, improve your pronunciation and comprehension, and expand your vocabulary.
-
It can enhance your cultural awareness. Because the film is rooted in Telugu culture, you can learn about the customs, traditions, values, and beliefs of the people of Telangana and Andhra Pradesh. You can also appreciate the diversity and richness of Indian cinema.
-
It can boost your mood and emotions. Because it is a feel-good film, you can experience positive feelings such as joy, laughter, excitement, and nostalgia. You can also empathize with the characters and their situations, and feel inspired by their stories.
-
-
What are the Challenges of Watching Happy Days Telugu Movie Dubbed Hindi
-
Watching Happy Days Telugu movie dubbed Hindi can also have some challenges for you. Here are some of them:
-
-
It can dilute the original experience. By watching a dubbed version, you may miss out on some of the nuances and subtleties of the original language and voices. You may also lose some of the authenticity and charm of the original actors and dialogues.
-
It can create some confusion and misunderstanding. By watching the movie in a different language and culture, you may encounter some terms and references that are unfamiliar or unclear to you. You may also face some difficulties in following the plot and characters.
-
It can expose you to some risks and dangers. By watching the movie from an online source, you may encounter some issues such as low-quality or corrupted files, annoying ads or malware, illegal or unethical downloads, or privacy or security breaches.
-
-
Conclusion
-
Happy Days Telugu movie dubbed Hindi is a great option for you if you want to watch a realistic and heartwarming college drama in a different language and culture. However, you should also be aware of the challenges and risks involved in watching it online. You should always use a reliable and safe source to download or stream the movie, and enjoy it with caution and respect.
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download and Install YouTube Videos for Free in 2021.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Download and Install YouTube Videos for Free in 2021.md
deleted file mode 100644
index bc449a8e7727ff5faddb5a50d0aba3822e01c1d5..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download and Install YouTube Videos for Free in 2021.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
How do I download and install YouTube videos for free?
-
YouTube is one of the most popular platforms for watching and sharing videos online. But sometimes you may want to download and install YouTube videos on your device for offline viewing, editing, or sharing. How can you do that for free?
-
In this article, we will show you some easy and legal ways to download and install YouTube videos for free. You will need a computer, an internet connection, and a web browser.
-
Method 1: Use a website
-
One of the simplest ways to download and install YouTube videos for free is to use a website that converts YouTube videos into downloadable files. There are many websites that offer this service, but some of them may contain ads, malware, or viruses. So be careful and choose a reputable website.
-
Here are the steps to use a website to download and install YouTube videos for free:
-
-
-
Open your web browser and go to YouTube. Find the video you want to download and copy its URL from the address bar.
-
Open a new tab and go to a website that converts YouTube videos into downloadable files. Some examples are y2mate.com, savefrom.net, and ytmp3.cc.
-
Paste the URL of the YouTube video into the input box on the website and click on the download button.
-
Select the format and quality of the video you want to download. You can choose from MP4, MP3, WEBM, or other formats. You can also choose the resolution of the video, such as 720p, 480p, or 360p.
-
Click on the download button again and wait for the file to be generated. Then click on the save button and choose a location on your computer to save the file.
-
Once the file is downloaded, you can open it with any media player or editor on your computer. You can also transfer it to your mobile device or USB drive.
-
-
Method 2: Use a desktop program
-
Another way to download and install YouTube videos for free is to use a desktop program that can download videos from various websites. Many programs offer this feature, but some of them may require payment, registration, or installation, so make sure you read the terms and conditions before using them. A scripted alternative is also sketched after the steps below.
-
Here are the steps to use such a program to download and install YouTube videos for free:
-
-
Download and install a program that can download videos from YouTube. Some examples are 4K Video Downloader, Freemake Video Downloader, and WinX YouTube Downloader.
-
Open the program, then go to YouTube in your web browser. Find the video you want to download and copy its URL from the address bar.
-
Paste the URL of the YouTube video into the software and click on the analyze button.
-
Select the format and quality of the video you want to download. You can choose from MP4, MP3, WEBM, or other formats. You can also choose the resolution of the video, such as 4K, HD, or SD.
-
Click on the download button and choose a location on your computer to save the file.
-
Once the file is downloaded, you can open it with any media player or editor on your computer. You can also transfer it to your mobile device or USB drive.
-
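Neither method above requires writing any code, but if you are comfortable with Python, the open-source yt-dlp library can do the same job in a few lines. This is only a sketch, not part of the methods described above; the video URL is a placeholder, and the same terms-of-service and copyright caveats apply:

```python
# pip install yt-dlp
from yt_dlp import YoutubeDL

options = {
    "format": "bestvideo+bestaudio/best",  # pick the best available quality
    "outtmpl": "%(title)s.%(ext)s",        # save as <video title>.<extension>
}

# The URL below is a placeholder; replace it with the video you want to save.
with YoutubeDL(options) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=VIDEO_ID"])
```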
-
Conclusion
-
In this article, we have shown you two easy and legal ways to download and install YouTube videos for free. You can use either a website or a software to do that. However, remember that downloading YouTube videos may violate their terms of service or copyright laws. So make sure you have permission from the video owner or use them for personal use only.
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Cara Download APK di PC dengan Mudah dan Cepat.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Cara Download APK di PC dengan Mudah dan Cepat.md
deleted file mode 100644
index ec61cf01b1be7dcdd94594c1cdc03f24f1621598..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Cara Download APK di PC dengan Mudah dan Cepat.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
How to Download APK Files on Your PC
-
If you are an Android user, you might be familiar with the term APK file. APK stands for Android Package Kit, and it is the file format used by Android to distribute and install apps. You can download APK files from various sources, such as Google Play Store, third-party websites, or even your own device. But did you know that you can also download APK files on your PC? In this article, we will show you how to do that, as well as the benefits and risks of doing so.
What is an APK File?
-
An APK file is a compressed archive that contains all the files and resources needed to run an Android app. It usually has a .apk extension, and it can be installed on any Android device that meets the app's minimum requirements. An APK file can contain code, images, audio, video, the app manifest, certificates, and other data.
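Because an APK file is essentially a ZIP archive, you can confirm this yourself with Python's standard zipfile module. A minimal sketch, assuming you already have an APK on disk (the file name is a placeholder):

```python
import zipfile

# Open the APK as the ZIP archive it is and list some of the files inside.
with zipfile.ZipFile("example.apk") as apk:  # placeholder path
    for name in apk.namelist()[:15]:
        print(name)  # e.g. AndroidManifest.xml, classes.dex, res/..., META-INF/...
```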
-
Benefits of Downloading APK Files on PC
-
There are several reasons why you might want to download APK files on your PC. Some of them are:
-
-
You can backup your favorite apps and games on your PC in case you lose your device or need to reset it.
-
You can access apps and games that are not available in your region or device by downloading them from other sources.
-
You can test apps and games before installing them on your device by using an emulator or a virtual machine.
-
You can modify or customize apps and games by editing their files or using tools like APK Editor.
-
-
Risks of Downloading APK Files on PC
-
However, downloading APK files on your PC also comes with some risks. Some of them are:
-
-
You might download malware or viruses that can harm your PC or device by downloading APK files from untrusted sources.
-
You might violate the terms and conditions of the app developers or Google by downloading APK files from unauthorized sources.
-
You might encounter compatibility issues or errors by downloading APK files that are not compatible with your device or Android version.
-
You might lose some features or functionality by downloading APK files that are not updated or optimized for your device.
-
-
How to Download APK Files from Google Play Store on PC
-
The easiest way to download APK files on your PC is from the official source, which is Google Play Store. However, Google Play Store does not allow you to directly download APK files on your PC. You need to use some tools or websites that can extract the APK files from Google Play Store. Here are two methods that you can use to download APK files from Google Play Store on your PC.
-
Method 1: Using Real APK Leecher Tool
-
Real APK Leecher is a free tool that allows you to download APK files from Google Play Store on your PC. It works by simulating an Android device and using your Google account to access the Google Play Store. Here are the steps to use this tool:
-
Step 1: Download and Install Real APK Leecher Tool
-
You can download the Real APK Leecher tool from its official website. After downloading, extract the zip file and run the Real APK Leecher.exe file. You might need to install Java Runtime Environment (JRE) on your PC if you don't have it already.
-
Step 2: Configure Real APK Leecher Tool
-
When you run the tool for the first time, you will see a configuration window. Here, you need to enter your Google account email and password, as well as the device ID of your Android device. You can find your device ID by dialing *#*#8255#*#* on your Android phone and looking for the Device ID field. Alternatively, you can use an online device ID generator to generate a random device ID. You can also choose the language and country of the Google Play Store that you want to access.
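If you only need something in the right shape, you can also generate a random 16-character hexadecimal string locally instead of using an online generator. This is just a sketch and assumes the tool only checks the format of the ID, not whether it belongs to a real device:

```python
import secrets

# Produce a random 16-character hexadecimal string shaped like an Android
# device ID (assumption: the tool accepts any value in this format).
device_id = secrets.token_hex(8)  # 8 random bytes -> 16 hex characters
print(device_id)
```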
-
Step 3: Search and Download APK Files
-
After configuring the tool, you can search for any app or game that you want to download from Google Play Store. You can enter the name of the app or game, or the package name, or the Google Play URL in the search box. You can also filter the results by category, price, rating, etc. Once you find the app or game that you want to download, right-click on it and select Download this app. The tool will download the APK file to your PC and save it in the same folder as the tool.
-
-
Method 2: Using Online APK Downloader Websites
-
If you don't want to install any tool on your PC, you can use online APK downloader websites that can download APK files from Google Play Store on your PC. These websites work by fetching the APK files from Google Play Store using their own servers and providing you with a download link. Here are the steps to use these websites:
-
Step 1: Find the Package Name or Google Play URL of the App
-
To use online APK downloader websites, you need to know either the package name or the Google Play URL of the app or game that you want to download. The package name is a unique identifier for each app or game, and it usually looks like com.example.appname. The Google Play URL is the web address of the app or game on Google Play Store, and it usually looks like https://play.google.com/store/apps/details?id=com.example.appname. You can find either of these by visiting Google Play Store in your browser and searching for the app or game that you want to download. The short snippet below shows how the two are related.
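The package name and the Google Play URL carry the same information: the package name is simply the value of the URL's id query parameter. A short Python helper, using only the standard library, makes the relationship explicit:

```python
from urllib.parse import urlparse, parse_qs

def package_name_from_play_url(url: str) -> str:
    """Return the value of the ?id= parameter of a Google Play Store URL."""
    return parse_qs(urlparse(url).query)["id"][0]

print(package_name_from_play_url(
    "https://play.google.com/store/apps/details?id=com.example.appname"
))  # prints: com.example.appname
```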
-
Step 2: Visit an Online APK Downloader Website
-
There are many online APK downloader websites that you can use to download APK files from Google Play Store on your PC. Some of them are:
-
-
APKPure: A popular website that offers a large collection of APK files from various sources, including Google Play Store.
-
APKMirror: A trusted website that provides original and safe APK files from Google Play Store and other developers.
-
Evozi: A simple website that allows you to download APK files from Google Play Store by entering either the package name or the Google Play URL.
-
-
Step 3: Enter the Package Name or Google Play URL and Download the APK File
-
Once you visit any of these websites, you will see a box where you can enter either the package name or the Google Play URL of the app or game that you want to download. After entering this information, click on the Generate Download Link or Download APK button. The website will fetch the APK file from Google Play Store and provide you with a download link. Click on the link to download the APK file to your PC.
-
How to Download APK Files from Other Sources on PC
-
If you want to download APK files from sources other than Google Play Store, such as third-party websites, forums, blogs, etc., you need to use some tools or apps that can install these APK files on your PC. Here are two methods that you can use to download APK files from other sources on your PC.
-
Method 1: Using BlueStacks App Player
-
BlueStacks App Player is a free program that allows you to run Android apps and games on your PC. It works by creating a virtual Android device on your PC and letting you access the Google Play Store and other sources to install apps and games. Here are the steps to use this software:
-
Step 1: Download and Install BlueStacks App Player
-
You can download the BlueStacks App Player from its official website. After downloading, run the installer and follow the instructions to install the software on your PC. You might need to enable virtualization technology in your PC's BIOS settings if it is not already enabled.
-
Step 2: Launch BlueStacks and Sign in with Google Account
-
After installing the software, launch it and sign in with your Google account. This will allow you to access the Google Play Store and sync your apps and data with your Android device. You can also create a new Google account if you don't have one.
-
Step 3: Search and Install Apps from BlueStacks App Center or Google Play Store
-
Once you sign in with your Google account, you can search and install apps and games from the BlueStacks App Center or the Google Play Store. You can also browse through various categories, genres, and recommendations. To install an app or game, just click on the Install button and wait for it to download and install.
-
Step 4: Locate the Installed Apps and Export the APK Files
-
After installing an app or game, you can find it on the My Apps tab or the Home screen of BlueStacks. To export the APK file of an app or game, right-click on its icon and select Export to Windows. This will save the APK file to your PC in a folder named BstSharedFolder.
-
Method 2: Using Microsoft Apps Store
-
If you are using a Windows 10 PC, you can use the Microsoft Apps Store to download APK files from other sources on your PC. The Microsoft Apps Store is a built-in app store that allows you to download various apps for Windows 10, including some APK installers that can install APK files on your PC. Here are the steps to use this method:
-
Step 1: Download and Install Microsoft Apps Store on Your PC
-
If you don't have the Microsoft Apps Store on your PC, you can download it from its official website. After downloading, run the installer and follow the instructions to install the app store on your PC.
-
Step 2: Search and Install APK Installers App from Microsoft Apps Store
-
After installing the Microsoft Apps Store, launch it and search for APK installers app. This is an app that allows you to install APK files on your PC by creating a virtual Android device on your PC. You can also browse through other similar apps that can do the same function. To install an app, just click on the Get button and wait for it to download and install.
-
Step 3: Double-Click the APK File to Install It on Your PC
-
After installing an APK installers app, you can double-click any APK file that you have downloaded from other sources on your PC. This will launch the app and prompt you to install the APK file on your PC. Just follow the instructions to complete the installation process.
-
Conclusion
-
In this article, we have shown you how to download APK files on your PC from various sources, such as Google Play Store, third-party websites, or even your own device. We have also discussed the benefits and risks of doing so, as well as some tools and methods that you can use to download and install APK files on your PC. We hope that this article has been helpful for you and that you have learned something new today.
-
Frequently Asked Questions
-
-
What is an APK file?
-
An APK file is a compressed archive that contains all the files and resources needed to run an Android app.
-
Why would I want to download APK files on my PC?
-
You might want to download APK files on your PC for various reasons, such as backing up, accessing, testing, modifying, or customizing apps and games.
-
How can I download APK files from Google Play Store on my PC?
-
You can use tools like Real APK Leecher or websites like APKPure, APKMirror, or Evozi to download APK files from Google Play Store on your PC.
-
How can I download APK files from other sources on my PC?
-
You can use tools like BlueStacks App Player or Microsoft Apps Store to download and install APK files from other sources on your PC.
-
What are the benefits and risks of downloading APK files on my PC?
-
The benefits of downloading APK files on your PC include backing up, accessing, testing, modifying, or customizing apps and games. The risks include malware, terms-of-service violations, and compatibility or functionality issues.
-
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download FM WhatsApp v9.60 and Experience the Ultimate WhatsApp Customization.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download FM WhatsApp v9.60 and Experience the Ultimate WhatsApp Customization.md
deleted file mode 100644
index 6e935e3a57b67bc9756c780de5f3c622dc39b578..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download FM WhatsApp v9.60 and Experience the Ultimate WhatsApp Customization.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
Download FM WhatsApp v9.60: A Guide for Android Users
-
WhatsApp is one of the most popular messaging apps in the world, with over two billion users. However, some people may not be satisfied with the official app and its limitations. That's why there are many WhatsApp mods available that offer extra features and customization options. One of them is FM WhatsApp, a modified version of WhatsApp developed by Fouad Mokdad.
FM WhatsApp is a WhatsApp mod that allows you to change the appearance, privacy, and functionality of your WhatsApp app. It also lets you use two WhatsApp accounts on the same device, view deleted messages and statuses, send large files and media, and much more. If you are looking for a way to enhance your WhatsApp experience, you may want to download FM WhatsApp v9.60, the latest version of this mod.
-
In this article, we will cover the main features of FM WhatsApp v9.60, explain how to download and install it on your Android device, go over the risks and precautions of using a WhatsApp mod, and answer some frequently asked questions about FM WhatsApp. Let's get started!
-
Main Features of FM WhatsApp v9.60
-
FM WhatsApp v9.60 is the latest version of this mod as of June 2023. It comes with many new features and improvements that make it one of the best WhatsApp mods out there. Here are some of the main features of FM WhatsApp v9.60:
-
-
Anti-ban: You don't have to worry about getting banned by WhatsApp for using this mod. FM WhatsApp has an anti-ban feature that prevents your account from being detected and blocked by WhatsApp.
-
Customization: You can change the theme, font, emoji, wallpaper, and icon of your WhatsApp app with FM WhatsApp. There are thousands of themes and fonts to choose from in the theme store. You can also create your own theme and share it with others.
-
Privacy: You can control your online status, last seen, blue ticks, typing status, and view status with FM WhatsApp. You can also freeze your last seen, hide your online status, disable forwarded tag, and enable anti-delete messages and statuses.
-
Functionality: You can send up to 90 images at once and video files up to 700 MB with FM WhatsApp. You can also increase the quality of images and videos while sending them. Moreover, you can pin up to 100 chats on the app home screen, schedule messages, send broadcast messages to up to 600 contacts, and use multiple WhatsApp accounts on the same device.
-
-
How to Download and Install FM WhatsApp v9.60 on Your Android Device
-
If you want to download FM WhatsApp v9.60 on your Android device, you need to follow these steps:
-
-
First, you need to enable installation from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Next, you need to download the FM WhatsApp v9.60 APK file from a trusted source, such as the developer's official website.
-
After downloading the APK file, locate it in your file manager and tap on it to install it.
-
Once the installation is complete, open the app and agree to the terms and conditions.
-
Then, enter your phone number and verify it with an OTP code.
-
Finally, enter your name and profile picture and start using FM WhatsApp.
-
-
Risks and Precautions of Using FM WhatsApp Mod
-
While FM WhatsApp mod offers many benefits and features that are not available in the official app, it also comes with some risks and drawbacks that you should be aware of before using it. Here are some of the risks and precautions of using FM WhatsApp mod:
-
-
Data security: FM WhatsApp mod is not an official app and it is not endorsed by WhatsApp. Therefore, it may not have the same level of data encryption and protection as the official app. This means that your messages, media, and personal information may be vulnerable to hackers, malware, or third-party access. You should be careful about what you share and with whom you share on FM WhatsApp mod.
-
Updates and bugs: FM WhatsApp mod is not updated as frequently as the official app and it may not be compatible with the latest version of WhatsApp. This may cause some issues and glitches in the app performance and functionality. You may also miss out on some new features and improvements that are available in the official app. You should always check for updates and download them from a reliable source.
-
Legal issues: FM WhatsApp mod violates the terms and conditions of WhatsApp and it may be considered illegal in some countries. You may face legal action or penalties from WhatsApp or the authorities for using this mod. You should be aware of the laws and regulations of your country and respect them.
-
-
Conclusion: Summary and Recommendations
-
FM WhatsApp mod is a modified version of WhatsApp that offers many extra features and customization options that are not available in the official app. It allows you to change the appearance, privacy, and functionality of your WhatsApp app. It also lets you use two WhatsApp accounts on the same device, view deleted messages and statuses, send large files and media, and much more.
-
-
However, FM WhatsApp mod also comes with some risks and drawbacks that you should be aware of before using it. It may not have the same level of data security and protection as the official app, it may not be updated as frequently and may have some bugs, and it may violate the terms and conditions of WhatsApp and the laws of your country.
-
Therefore, we recommend that you use FM WhatsApp mod at your own risk and discretion. You should always backup your data before installing this mod, download it from a trusted source, check for updates regularly, and be careful about what you share and with whom you share on this mod. You should also respect the rights and privacy of other WhatsApp users and do not use this mod for any illegal or unethical purposes.
-
FAQs: Five Common Questions and Answers About FM WhatsApp
-
Here are some of the most common questions and answers about FM WhatsApp that you may find helpful:
-
-
Question
Answer
-
Is FM WhatsApp safe to use?
FM WhatsApp is not an official app and it is not endorsed by WhatsApp. Therefore, it may not have the same level of data security and protection as the official app. Your messages, media, and personal information may be vulnerable to hackers, malware, or third-party access. You should be careful about what you share and with whom you share on FM WhatsApp.
-
Is FM WhatsApp free to use?
Yes, FM WhatsApp is free to use. However, you may see some ads or pop-ups in the app that support the developer. You can also donate to the developer if you want to support his work.
-
How can I update FM WhatsApp?
You can check for updates in the app settings or visit the official website of FM WhatsApp to download the latest version. You should always update your app from a reliable source to avoid any issues or malware.
-
Can I use FM WhatsApp with the official app?
Yes, you can use FM WhatsApp with the official app on the same device. However, you need to use different phone numbers for each app. You can also clone your official app with FM WhatsApp using the dual space feature.
-
Can I restore my chats from the official app to FM WhatsApp?
Yes, you can restore your chats from the official app to FM WhatsApp. To do this, you need to backup your chats in the official app using Google Drive or local storage. Then, install FM WhatsApp on your device and verify your phone number. After that, you will see an option to restore your chats from Google Drive or local storage. Choose the option that suits you and wait for the restoration process to complete.
-
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Arturia Presets Floyd Tribute MAC.md b/spaces/tioseFevbu/cartoon-converter/scripts/Arturia Presets Floyd Tribute MAC.md
deleted file mode 100644
index e6eb671135a6cf2d9811d757918bf31cad7473a4..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Arturia Presets Floyd Tribute MAC.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
How to get the Pink Floyd sound with Arturia Presets Floyd Tribute MAC
-
If you are a fan of Pink Floyd and want to recreate their iconic sounds on your Mac, you might be interested in the Arturia Presets Floyd Tribute MAC sound banks. These are collections of presets for Arturia's Analog Lab software that emulate the original instruments used by the legendary band.
-
Arturia Presets Floyd Tribute MAC consists of two sound banks: Floyd Tribute and Floyd Tribute II. Each sound bank contains 32 presets that cover a wide range of Pink Floyd's songs, from The Dark Side of the Moon to The Wall. You can find sounds like the synth lead from Shine On You Crazy Diamond, the organ from Echoes, the guitar from Comfortably Numb, and many more.
To use these sound banks, you need to have Arturia's Analog Lab software installed on your Mac. Analog Lab is a powerful and versatile software that gives you access to thousands of sounds from Arturia's award-winning virtual instruments. You can edit the presets in Analog Lab or in the individual instruments if you own them.
-
The Arturia Presets Floyd Tribute MAC sound banks are protected by the Arturia Software Center, which is a simple and easy way to activate and manage your Arturia products. You can learn more about it on Arturia's website.
-
If you want to get the Pink Floyd sound with Arturia Presets Floyd Tribute MAC, you can purchase the sound banks from Arturia's website for US$ 7.99 (Floyd Tribute) and US$ 9.99 (Floyd Tribute II). You can also listen to audio demos and read more details about each preset on the product pages.
-
Arturia Presets Floyd Tribute MAC is a great way to explore and enjoy the classic sounds of Pink Floyd on your Mac. Whether you want to play along with your favorite tracks, create your own compositions, or just have fun with some legendary sounds, these sound banks will give you what you need.
One of the advantages of using Arturia Presets Floyd Tribute MAC is that you can customize the sounds to your liking. You can tweak the parameters of each preset in Analog Lab, such as the filter, envelope, effects, and more. You can also layer up to four presets together and create your own combinations. This way, you can create your own unique sounds inspired by Pink Floyd.
-
-
Another benefit of using Arturia Presets Floyd Tribute MAC is that you can learn from the masters. By studying how each preset was made, you can gain insight into how Pink Floyd achieved their signature sounds. You can also see which instruments they used and how they set them up. This can help you improve your own sound design skills and musical knowledge.
-
Finally, using Arturia Presets Floyd Tribute MAC is a lot of fun. You can play with some of the most iconic sounds in rock history and feel like you are part of the band. You can also experiment with different sounds and genres and discover new possibilities. You can even use these presets for other purposes, such as film scoring, ambient music, or sound effects.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Dreyer - Schmitt - Practice Grammar Of German [rutracker.org].pdf BEST.md b/spaces/tioseFevbu/cartoon-converter/scripts/Dreyer - Schmitt - Practice Grammar Of German [rutracker.org].pdf BEST.md
deleted file mode 100644
index bd8448c7c1af528145254d2f8b165186fea8bfb4..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Dreyer - Schmitt - Practice Grammar Of German [rutracker.org].pdf BEST.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
How to Learn German Grammar with Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf
-
-
If you are looking for a comprehensive and practical guide to learn German grammar, you might want to check out Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf. This is a PDF version of a popular grammar book that was first published in 1985 and has been revised and updated several times since then.
-
-
Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf is suitable for lower-intermediate and intermediate learners of German who want to acquire a solid and coherent knowledge of German grammar. It offers simple, sometimes simplified rules with extensive examples, lists and tables for reference, and a large number of exercises. The exercises are not made up of single, individual sentences, but constitute one continuous text, which makes them more realistic and meaningful. The book also comes with a separate key to the exercises, which enables learners working on their own to check their answers.
-
The book is clearly structured into five parts: Parts I and II deal with the parts of the simple sentence, Part III with the adjective declensions, Part IV with the subjunctive, and Part V with the use of prepositions. The book also has a fold-out section for quick reference, with the most important features of verb conjugation and noun and adjective declension. The terminology used in the book corresponds to that generally used in German-as-a-foreign-language teaching today.
-
-
Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf is available for download from RuTracker.org, a Russian torrent tracker that hosts a large collection of educational materials in various languages. However, please note that downloading copyrighted materials from torrent sites may be illegal in your country, so proceed at your own risk.
-
-
Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf is a great resource for anyone who wants to improve their German grammar skills in a systematic and practical way. It can be used as a self-study material or as a supplement to other courses or textbooks. By following the rules and doing the exercises in this book, you will be able to master the grammar of one of the most beautiful and complex languages in the world.
-
-
How can you get the most out of Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf? Here are some tips and tricks to help you learn German grammar effectively with this book:
-
-
-
Start with the topics that are most relevant and challenging for you. You don't have to follow the order of the book, but you can choose the chapters that suit your needs and interests.
-
Read the explanations carefully and compare them with your own language. Try to understand the logic and the patterns behind the grammar rules, and not just memorize them.
-
Do the exercises regularly and check your answers with the key. If you make mistakes, go back to the explanations and review them until you grasp the concept.
-
Use additional resources to supplement your learning. For example, you can watch German videos on FluentU to see how native speakers use grammar in real-life situations. You can also use apps like Anki or Memrise to create flashcards and review vocabulary and grammar points.
-
Practice what you learn by using it in communication. You can write sentences or paragraphs using the grammar structures you learned, or speak them aloud to yourself or a partner. You can also join online language exchanges or communities to practice with native speakers or other learners.
-
-
-
Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf is not the only German grammar book out there. There are many other options that you can explore, depending on your level, goals, and preferences. Here are some of the best German grammar books that you can find online or in bookstores:
-
-
-
English Grammar for Students of German: This book is ideal for beginners who want to learn German grammar by comparing it with English grammar. It explains each grammar point in simple English and gives examples in both languages.
-
Complete German Grammar: This book is perfect for intermediate learners who want to practice their grammar skills with exercises and quizzes. It covers all the essential grammar topics and provides clear explanations and examples.
-
German Quickly: A Grammar for Reading German: This book is designed for advanced learners who want to read German texts with ease and confidence. It focuses on the grammar aspects that are most relevant for reading comprehension and offers exercises based on authentic texts.
-
Sprichst Du Deutsch? German Vocabulary: This book is a great companion for any grammar book, as it helps you expand your vocabulary and learn how to use it correctly. It covers various topics and themes, such as family, hobbies, travel, work, etc., and provides exercises and games to practice.
-
German: How to Speak and Write It: This book is a classic that has been used by generations of learners. It combines grammar lessons with dialogues, stories, poems, and songs that illustrate how German is used in real life. It also includes cultural notes and tips on pronunciation and spelling.
-
-
-
With these books and Dreyer - Schmitt - Practice Grammar of German [rutracker.org].pdf, you have everything you need to master German grammar and achieve fluency in the language. Happy learning!
-
-
\ No newline at end of file
diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py
deleted file mode 100644
index 0879088e14c2af9224a6cde62d220539ba9c34e1..0000000000000000000000000000000000000000
--- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from typing import cast, List, Optional, TYPE_CHECKING, Union
-
-from ._spinners import SPINNERS
-from .measure import Measurement
-from .table import Table
-from .text import Text
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderResult, RenderableType
- from .style import StyleType
-
-
-class Spinner:
- def __init__(
- self,
- name: str,
- text: "RenderableType" = "",
- *,
- style: Optional["StyleType"] = None,
- speed: float = 1.0,
- ) -> None:
- """A spinner animation.
-
- Args:
- name (str): Name of spinner (run python -m rich.spinner).
- text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
- style (StyleType, optional): Style for spinner animation. Defaults to None.
- speed (float, optional): Speed factor for animation. Defaults to 1.0.
-
- Raises:
- KeyError: If name isn't one of the supported spinner animations.
- """
- try:
- spinner = SPINNERS[name]
- except KeyError:
- raise KeyError(f"no spinner called {name!r}")
- self.text: "Union[RenderableType, Text]" = (
- Text.from_markup(text) if isinstance(text, str) else text
- )
- self.frames = cast(List[str], spinner["frames"])[:]
- self.interval = cast(float, spinner["interval"])
- self.start_time: Optional[float] = None
- self.style = style
- self.speed = speed
- self.frame_no_offset: float = 0.0
- self._update_speed = 0.0
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- yield self.render(console.get_time())
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> Measurement:
- text = self.render(0)
- return Measurement.get(console, options, text)
-
- def render(self, time: float) -> "RenderableType":
- """Render the spinner for a given time.
-
- Args:
- time (float): Time in seconds.
-
- Returns:
- RenderableType: A renderable containing animation frame.
- """
- if self.start_time is None:
- self.start_time = time
-
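- # Convert the elapsed time (scaled by the speed factor) into a frame index;
- # the spinner interval is specified in milliseconds.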
- frame_no = ((time - self.start_time) * self.speed) / (
- self.interval / 1000.0
- ) + self.frame_no_offset
- frame = Text(
- self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
- )
-
- if self._update_speed:
- self.frame_no_offset = frame_no
- self.start_time = time
- self.speed = self._update_speed
- self._update_speed = 0.0
-
- if not self.text:
- return frame
- elif isinstance(self.text, (str, Text)):
- return Text.assemble(frame, " ", self.text)
- else:
- table = Table.grid(padding=1)
- table.add_row(frame, self.text)
- return table
-
- def update(
- self,
- *,
- text: "RenderableType" = "",
- style: Optional["StyleType"] = None,
- speed: Optional[float] = None,
- ) -> None:
- """Updates attributes of a spinner after it has been started.
-
- Args:
- text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
- style (StyleType, optional): Style for spinner animation. Defaults to None.
- speed (float, optional): Speed factor for animation. Defaults to None.
- """
- if text:
- self.text = Text.from_markup(text) if isinstance(text, str) else text
- if style:
- self.style = style
- if speed:
- self._update_speed = speed
-
-
-if __name__ == "__main__": # pragma: no cover
- from time import sleep
-
- from .columns import Columns
- from .panel import Panel
- from .live import Live
-
- all_spinners = Columns(
- [
- Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
- for spinner_name in sorted(SPINNERS.keys())
- ],
- column_first=True,
- expand=True,
- )
-
- with Live(
- Panel(all_spinners, title="Spinners", border_style="blue"),
- refresh_per_second=20,
- ) as live:
- while True:
- sleep(0.1)
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/README.md
deleted file mode 100644
index b52e3067dba84e32dffe473a8496c9349d08ef8b..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/README.md
+++ /dev/null
@@ -1,191 +0,0 @@
-This software was developed by Morpho AI Solutions, Inc. under contract to the National Diet Library.
-This software is largely based on the following repositories.
-
-- [open-mmlab/mmdetection](https://github.com/open-mmlab/mmdetection)
-
-The following files are also based on [eqlv2](https://github.com/tztztztztz/eqlv2)
-
-- [mmdet/core/post_processing/bbox_nms.py](mmdet/core/post_processing/bbox_nms.py)
-- [mmdet/core/post_processing/merge_augs.py](mmdet/core/post_processing/merge_augs.py)
-- [mmdet/datasets/builder.py](mmdet/datasets/builder.py)
-- [mmdet/datasets/class_balance_dataset_wrapper.py](mmdet/datasets/class_balance_dataset_wrapper.py)
-- [mmdet/datasets/max_iter_dataset_wrapper.py](mmdet/datasets/max_iter_dataset_wrapper.py)
-- [mmdet/models/losses/eql.py](mmdet/models/losses/eql.py)
-- [mmdet/models/losses/eqlv2.py](mmdet/models/losses/eqlv2.py)
-- [mmdet/models/losses/group_softmax.py](mmdet/models/losses/group_softmax.py)
-
-The newly developed portion of this program is released by the National Diet Library under a CC BY 4.0 license. For more information, see [LICENSE](./LICENSE).
-
-
-
-
-
-**News**: We released the technical report on [ArXiv](https://arxiv.org/abs/1906.07155).
-
-Documentation: https://mmdetection.readthedocs.io/
-
-## Introduction
-
-English | [简体中文](README_zh-CN.md)
-
-MMDetection is an open source object detection toolbox based on PyTorch. It is
-a part of the [OpenMMLab](https://openmmlab.com/) project.
-
-The master branch works with **PyTorch 1.3+**.
-The old v1.x branch works with PyTorch 1.1 to 1.4, but v2.0 is strongly recommended for faster speed, higher performance, better design and more friendly usage.
-
-
-
-### Major features
-
-- **Modular Design**
-
- We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.
-
-- **Support of multiple frameworks out of box**
-
- The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc.
-
-- **High efficiency**
-
- All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).
-
-- **State of the art**
-
- The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.
-
-Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox.
-
-## License
-
-This project is released under the [Apache 2.0 license](LICENSE).
-
-## Changelog
-
-v2.11.0 was released in 01/04/2021.
-Please refer to [changelog.md](docs/changelog.md) for details and release history.
-A comparison between v1.x and v2.0 codebases can be found in [compatibility.md](docs/compatibility.md).
-
-## Benchmark and model zoo
-
-Results and models are available in the [model zoo](docs/model_zoo.md).
-
-Supported backbones:
-
-- [x] ResNet (CVPR'2016)
-- [x] ResNeXt (CVPR'2017)
-- [x] VGG (ICLR'2015)
-- [x] HRNet (CVPR'2019)
-- [x] RegNet (CVPR'2020)
-- [x] Res2Net (TPAMI'2020)
-- [x] ResNeSt (ArXiv'2020)
-
-Supported methods:
-
-- [x] [RPN (NeurIPS'2015)](configs/rpn)
-- [x] [Fast R-CNN (ICCV'2015)](configs/fast_rcnn)
-- [x] [Faster R-CNN (NeurIPS'2015)](configs/faster_rcnn)
-- [x] [Mask R-CNN (ICCV'2017)](configs/mask_rcnn)
-- [x] [Cascade R-CNN (CVPR'2018)](configs/cascade_rcnn)
-- [x] [Cascade Mask R-CNN (CVPR'2018)](configs/cascade_rcnn)
-- [x] [SSD (ECCV'2016)](configs/ssd)
-- [x] [RetinaNet (ICCV'2017)](configs/retinanet)
-- [x] [GHM (AAAI'2019)](configs/ghm)
-- [x] [Mask Scoring R-CNN (CVPR'2019)](configs/ms_rcnn)
-- [x] [Double-Head R-CNN (CVPR'2020)](configs/double_heads)
-- [x] [Hybrid Task Cascade (CVPR'2019)](configs/htc)
-- [x] [Libra R-CNN (CVPR'2019)](configs/libra_rcnn)
-- [x] [Guided Anchoring (CVPR'2019)](configs/guided_anchoring)
-- [x] [FCOS (ICCV'2019)](configs/fcos)
-- [x] [RepPoints (ICCV'2019)](configs/reppoints)
-- [x] [Foveabox (TIP'2020)](configs/foveabox)
-- [x] [FreeAnchor (NeurIPS'2019)](configs/free_anchor)
-- [x] [NAS-FPN (CVPR'2019)](configs/nas_fpn)
-- [x] [ATSS (CVPR'2020)](configs/atss)
-- [x] [FSAF (CVPR'2019)](configs/fsaf)
-- [x] [PAFPN (CVPR'2018)](configs/pafpn)
-- [x] [Dynamic R-CNN (ECCV'2020)](configs/dynamic_rcnn)
-- [x] [PointRend (CVPR'2020)](configs/point_rend)
-- [x] [CARAFE (ICCV'2019)](configs/carafe/README.md)
-- [x] [DCNv2 (CVPR'2019)](configs/dcn/README.md)
-- [x] [Group Normalization (ECCV'2018)](configs/gn/README.md)
-- [x] [Weight Standardization (ArXiv'2019)](configs/gn+ws/README.md)
-- [x] [OHEM (CVPR'2016)](configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py)
-- [x] [Soft-NMS (ICCV'2017)](configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py)
-- [x] [Generalized Attention (ICCV'2019)](configs/empirical_attention/README.md)
-- [x] [GCNet (ICCVW'2019)](configs/gcnet/README.md)
-- [x] [Mixed Precision (FP16) Training (ArXiv'2017)](configs/fp16/README.md)
-- [x] [InstaBoost (ICCV'2019)](configs/instaboost/README.md)
-- [x] [GRoIE (ICPR'2020)](configs/groie/README.md)
-- [x] [DetectoRS (ArXiv'2020)](configs/detectors/README.md)
-- [x] [Generalized Focal Loss (NeurIPS'2020)](configs/gfl/README.md)
-- [x] [CornerNet (ECCV'2018)](configs/cornernet/README.md)
-- [x] [Side-Aware Boundary Localization (ECCV'2020)](configs/sabl/README.md)
-- [x] [YOLOv3 (ArXiv'2018)](configs/yolo/README.md)
-- [x] [PAA (ECCV'2020)](configs/paa/README.md)
-- [x] [YOLACT (ICCV'2019)](configs/yolact/README.md)
-- [x] [CentripetalNet (CVPR'2020)](configs/centripetalnet/README.md)
-- [x] [VFNet (ArXiv'2020)](configs/vfnet/README.md)
-- [x] [DETR (ECCV'2020)](configs/detr/README.md)
-- [x] [Deformable DETR (ICLR'2021)](configs/deformable_detr/README.md)
-- [x] [CascadeRPN (NeurIPS'2019)](configs/cascade_rpn/README.md)
-- [x] [SCNet (AAAI'2021)](configs/scnet/README.md)
-- [x] [AutoAssign (ArXiv'2020)](configs/autoassign/README.md)
-- [x] [YOLOF (CVPR'2021)](configs/yolof/README.md)
-
-
-Some other methods are also supported in [projects using MMDetection](./docs/projects.md).
-
-## Installation
-
-Please refer to [get_started.md](docs/get_started.md) for installation.
-
-## Getting Started
-
-Please see [get_started.md](docs/get_started.md) for the basic usage of MMDetection.
-We provide [colab tutorial](demo/MMDet_Tutorial.ipynb), and full guidance for quick run [with existing dataset](docs/1_exist_data_model.md) and [with new dataset](docs/2_new_data_model.md) for beginners.
-There are also tutorials for [finetuning models](docs/tutorials/finetune.md), [adding new dataset](docs/tutorials/new_dataset.md), [designing data pipeline](docs/tutorials/data_pipeline.md), [customizing models](docs/tutorials/customize_models.md), [customizing runtime settings](docs/tutorials/customize_runtime.md) and [useful tools](docs/useful_tools.md).
-
-Please refer to [FAQ](docs/faq.md) for frequently asked questions.
-
-## Contributing
-
-We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
-
-## Acknowledgement
-
-MMDetection is an open source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
-We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors.
-
-## Citation
-
-If you use this toolbox or benchmark in your research, please cite this project.
-
-```
-@article{mmdetection,
- title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
- author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
- Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
- Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
- Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
- Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
- and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
- journal= {arXiv preprint arXiv:1906.07155},
- year={2019}
-}
-```
-
-## Projects in OpenMMLab
-
-- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
-- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
-- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
-- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
-- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
-- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
-- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
-- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
-- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
-- [MMOCR](https://github.com/open-mmlab/mmocr): A Comprehensive Toolbox for Text Detection, Recognition and Understanding.
-- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/useful_tools.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/useful_tools.md
deleted file mode 100644
index 25fcf6a97ce07996088e782e3a353146f4f3f357..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/useful_tools.md
+++ /dev/null
@@ -1,384 +0,0 @@
-Apart from training and testing scripts, we provide lots of useful tools under the `tools/` directory.
-
-## Log Analysis
-
-`tools/analysis_tools/analyze_logs.py` plots loss/mAP curves given a training
- log file. Run `pip install seaborn` first to install the dependency.
-
-```shell
-python tools/analysis_tools/analyze_logs.py plot_curve [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}]
-```
-
-
-
-Examples:
-
-- Plot the classification loss of some run.
-
- ```shell
- python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls --legend loss_cls
- ```
-
-- Plot the classification and regression loss of some run, and save the figure to a pdf.
-
- ```shell
- python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls loss_bbox --out losses.pdf
- ```
-
-- Compare the bbox mAP of two runs in the same figure.
-
- ```shell
- python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys bbox_mAP --legend run1 run2
- ```
-
-- Compute the average training speed.
-
- ```shell
- python tools/analysis_tools/analyze_logs.py cal_train_time log.json [--include-outliers]
- ```
-
- The output is expected to be like the following.
-
- ```text
- -----Analyze train time of work_dirs/some_exp/20190611_192040.log.json-----
- slowest epoch 11, average time is 1.2024
- fastest epoch 1, average time is 1.1909
- time std over epochs is 0.0028
- average iter time: 1.1959 s/iter
- ```
-
-## Result Analysis
-
-`tools/analysis_tools/analyze_results.py` calculates single image mAP and saves or shows the topk images with the highest and lowest scores based on prediction results.
-
-**Usage**
-
-```shell
-python tools/analysis_tools/analyze_results.py \
- ${CONFIG} \
- ${PREDICTION_PATH} \
- ${SHOW_DIR} \
- [--show] \
- [--wait-time ${WAIT_TIME}] \
- [--topk ${TOPK}] \
- [--show-score-thr ${SHOW_SCORE_THR}] \
- [--cfg-options ${CFG_OPTIONS}]
-```
-
-Description of all arguments:
-
-- `config`: The path of a model config file.
-- `prediction_path`: Output result file in pickle format from `tools/test.py`.
-- `show_dir`: Directory where painted GT and detection images will be saved.
-- `--show`: Determines whether to show painted images. If not specified, it will be set to `False`.
-- `--wait-time`: The interval (in seconds) between two displayed images; `0` blocks until the window is closed.
-- `--topk`: The number of saved images that have the highest and lowest `topk` scores after sorting. If not specified, it will be set to `20`.
-- `--show-score-thr`: Show score threshold. If not specified, it will be set to `0`.
-- `--cfg-options`: If specified, the key-value pair optional config options will be merged into the config file.
-
-**Examples**:
-
-Assume that you have the result file in pickle format from `tools/test.py` at the path `./result.pkl`.
-
-1. Test Faster R-CNN and visualize the results, save images to the directory `results/`
-
-```shell
-python tools/analysis_tools/analyze_results.py \
- configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
- result.pkl \
- results \
- --show
-```
-
-2. Test Faster R-CNN with `topk` set to 50 and save images to the directory `results/`
-
-```shell
-python tools/analysis_tools/analyze_results.py \
- configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
- result.pkl \
- results \
- --topk 50
-```
-
-3. If you want to filter out low-score predictions, you can specify the `--show-score-thr` parameter
-
-```shell
-python tools/analysis_tools/analyze_results.py \
- configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
- result.pkl \
- results \
- --show-score-thr 0.3
-```
-
-## Visualization
-
-### Visualize Datasets
-
-`tools/misc/browse_dataset.py` helps the user to browse a detection dataset (both
- images and bounding box annotations) visually, or save the image to a
- designated directory.
-
-```shell
-python tools/misc/browse_dataset.py ${CONFIG} [-h] [--skip-type ${SKIP_TYPE[SKIP_TYPE...]}] [--output-dir ${OUTPUT_DIR}] [--not-show] [--show-interval ${SHOW_INTERVAL}]
-```
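-
-For example, the following command (the output directory name is only illustrative) saves the visualized samples of a Faster R-CNN config instead of displaying them:
-
-```shell
-python tools/misc/browse_dataset.py \
-    configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
-    --output-dir vis_dataset \
-    --not-show
-```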
-
-### Visualize Models
-
-First, convert the model to ONNX as described
-[here](#convert-mmdetection-model-to-onnx-experimental).
-Note that currently only RetinaNet is supported; support for other models will come in later versions.
-The converted model could be visualized by tools like [Netron](https://github.com/lutzroeder/netron).
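-
-As a minimal sketch (the file name below is only an illustration), the exported file can be opened with the Netron Python package:
-
-```shell
-pip install netron
-netron tmp.onnx  # starts a local web viewer for the exported graph
-```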
-
-### Visualize Predictions
-
-If you need a lightweight GUI for visualizing the detection results, you can refer [DetVisGUI project](https://github.com/Chien-Hung/DetVisGUI/tree/mmdetection).
-
-## Error Analysis
-
-`tools/analysis_tools/coco_error_analysis.py` analyzes COCO results per category and by different criteria. It can also make a plot to provide useful information.
-
-```shell
-python tools/analysis_tools/coco_error_analysis.py ${RESULT} ${OUT_DIR} [-h] [--ann ${ANN}] [--types ${TYPES[TYPES...]}]
-```
-
-Example:
-
-Assume that you have the [Mask R-CNN checkpoint file](http://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) in the path `checkpoint/`. For other checkpoints, please refer to our [model zoo](./model_zoo.md). You can use the following command to get the bbox and segmentation JSON result files.
-
-```shell
-# out: results.bbox.json and results.segm.json
-python tools/test.py \
- configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \
- checkpoint/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \
- --format-only \
- --options "jsonfile_prefix=./results"
-```
-
-1. Get COCO bbox error results per category and save the analysis images to the directory `results/`
-
-```shell
-python tools/analysis_tools/coco_error_analysis.py \
- results.bbox.json \
- results \
-    --ann=data/coco/annotations/instances_val2017.json
-```
-
-2. Get COCO segmentation error results per category and save the analysis images to the directory `results/`
-
-```shell
-python tools/analysis_tools/coco_error_analysis.py \
- results.segm.json \
- results \
- --ann=data/coco/annotations/instances_val2017.json \
- --types='segm'
-```
-
-## Model Serving
-
-In order to serve an `MMDetection` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps:
-
-### 1. Convert model from MMDetection to TorchServe
-
-```shell
-python tools/deployment/mmdet2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
---output-folder ${MODEL_STORE} \
---model-name ${MODEL_NAME}
-```
-
-**Note**: `${MODEL_STORE}` needs to be an absolute path to a folder.
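-
-For example (the checkpoint path, model store and model name below are only illustrative):
-
-```shell
-python tools/deployment/mmdet2torchserve.py \
-    configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
-    checkpoints/faster_rcnn_r50_fpn_1x_coco.pth \
-    --output-folder /home/user/model-store \
-    --model-name faster_rcnn
-```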
-
-### 2. Build `mmdet-serve` docker image
-
-```shell
-docker build -t mmdet-serve:latest docker/serve/
-```
-
-### 3. Run `mmdet-serve`
-
-Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment).
-
-To run on a GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument to run on CPU.
-
-Example:
-
-```shell
-docker run --rm \
---cpus 8 \
---gpus device=0 \
--p8080:8080 -p8081:8081 -p8082:8082 \
---mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \
-mmdet-serve:latest
-```
-
-[Read the docs](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md) about the Inference (8080), Management (8081) and Metrics (8082) APIs.
-
-### 4. Test deployment
-
-```shell
-curl -O https://raw.githubusercontent.com/pytorch/serve/master/docs/images/3dogs.jpg
-curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T 3dogs.jpg
-```
-
-You should obtain a response similar to:
-
-```json
-[
- {
- "dog": [
- 402.9117736816406,
- 124.19664001464844,
- 571.7910766601562,
- 292.6463623046875
- ],
- "score": 0.9561963081359863
- },
- {
- "dog": [
- 293.90057373046875,
- 196.2908477783203,
- 417.4869079589844,
- 286.2522277832031
- ],
- "score": 0.9179860353469849
- },
- {
- "dog": [
- 202.178466796875,
- 86.3709487915039,
- 311.9863586425781,
- 276.28411865234375
- ],
- "score": 0.8933767080307007
- }
-]
-```
-
-## Model Complexity
-
-`tools/analysis_tools/get_flops.py` is a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model.
-
-```shell
-python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}]
-```
-
-You will get the results like this.
-
-```text
-==============================
-Input shape: (3, 1280, 800)
-Flops: 239.32 GFLOPs
-Params: 37.74 M
-==============================
-```
-
-**Note**: This tool is still experimental and we do not guarantee that the number is absolutely correct. You may use the result for simple comparisons, but double-check it before adopting it in technical reports or papers.
-
-1. FLOPs are related to the input shape while parameters are not. The default
- input shape is (1, 3, 1280, 800).
-2. Some operators, such as GN and custom operators, are not counted in FLOPs. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details; a minimal direct-call sketch follows this list.
-3. The FLOPs of two-stage detectors are dependent on the number of proposals.
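-
-The sketch below assumes `model` is an already-built detector; switching to `forward_dummy` mirrors what `tools/analysis_tools/get_flops.py` does so the counter can feed a plain tensor:
-
-```python
-from mmcv.cnn import get_model_complexity_info
-
-# Detectors normally take dict inputs, so use the plain-tensor dummy forward for counting.
-model.forward = model.forward_dummy
-# Returns human-readable strings by default, e.g. '239.32 GFLOPs' and '37.74 M'.
-flops, params = get_model_complexity_info(model, (3, 1280, 800))
-print(f'Flops: {flops}\nParams: {params}')
-```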
-
-## Model conversion
-
-### MMDetection model to ONNX (experimental)
-
-We provide a script to convert a model to [ONNX](https://github.com/onnx/onnx) format. We also support comparing the outputs of the PyTorch and ONNX models for verification.
-
-```shell
-python tools/deployment/pytorch2onnx.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --output_file ${ONNX_FILE} [--shape ${INPUT_SHAPE} --verify]
-```
-
-**Note**: This tool is still experimental. Some customized operators are not supported for now. For a detailed description of the usage and the list of supported models, please refer to [pytorch2onnx](tutorials/pytorch2onnx.md).
-
-### MMDetection 1.x model to MMDetection 2.x
-
-`tools/model_converters/upgrade_model_version.py` upgrades a previous MMDetection checkpoint
- to the new version. Note that this script is not guaranteed to work as some
- breaking changes are introduced in the new version. It is recommended to
- directly use the new checkpoints.
-
-```shell
-python tools/model_converters/upgrade_model_version.py ${IN_FILE} ${OUT_FILE} [-h] [--num-classes NUM_CLASSES]
-```
-
-### RegNet model to MMDetection
-
-`tools/model_converters/regnet2mmdet.py` converts keys in pycls pretrained RegNet models to
- MMDetection style.
-
-```shell
-python tools/model_converters/regnet2mmdet.py ${SRC} ${DST} [-h]
-```
-
-### Detectron ResNet to Pytorch
-
-`tools/model_converters/detectron2pytorch.py` converts keys in the original Detectron pretrained
- ResNet models to PyTorch style.
-
-```shell
-python tools/model_converters/detectron2pytorch.py ${SRC} ${DST} ${DEPTH} [-h]
-```
-
-### Prepare a model for publishing
-
-`tools/model_converters/publish_model.py` helps users to prepare their model for publishing.
-
-Before you upload a model to AWS, you may want to
-
-1. convert model weights to CPU tensors
-2. delete the optimizer states and
-3. compute the hash of the checkpoint file and append the hash id to the
- filename.
-
-```shell
-python tools/model_converters/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME}
-```
-
-E.g.,
-
-```shell
-python tools/model_converters/publish_model.py work_dirs/faster_rcnn/latest.pth faster_rcnn_r50_fpn_1x_20190801.pth
-```
-
-The final output filename will be `faster_rcnn_r50_fpn_1x_20190801-{hash id}.pth`.
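-
-Conceptually, the three steps above amount to something like the following sketch (a simplified illustration using the example filenames from above, not the script itself):
-
-```python
-import hashlib
-import os
-
-import torch
-
-ckpt = torch.load('work_dirs/faster_rcnn/latest.pth', map_location='cpu')  # 1. CPU tensors
-ckpt.pop('optimizer', None)                                                # 2. drop optimizer states
-torch.save(ckpt, 'faster_rcnn_r50_fpn_1x_20190801.pth')
-with open('faster_rcnn_r50_fpn_1x_20190801.pth', 'rb') as f:
-    sha = hashlib.sha256(f.read()).hexdigest()[:8]                         # 3. hash of the final file
-os.rename('faster_rcnn_r50_fpn_1x_20190801.pth',
-          f'faster_rcnn_r50_fpn_1x_20190801-{sha}.pth')
-```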
-
-## Dataset Conversion
-
-`tools/dataset_converters/` contains tools to convert the Cityscapes dataset
- and Pascal VOC dataset to the COCO format.
-
-```shell
-python tools/dataset_converters/cityscapes.py ${CITYSCAPES_PATH} [-h] [--img-dir ${IMG_DIR}] [--gt-dir ${GT_DIR}] [-o ${OUT_DIR}] [--nproc ${NPROC}]
-python tools/dataset_converters/pascal_voc.py ${DEVKIT_PATH} [-h] [-o ${OUT_DIR}]
-```
-
-## Robust Detection Benchmark
-
-`tools/analysis_tools/test_robustness.py` and `tools/analysis_tools/robustness_eval.py` help users to evaluate model robustness. The core idea comes from [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484). For more information on how to evaluate models on corrupted images and for results on a set of standard models, please refer to [robustness_benchmarking.md](robustness_benchmarking.md).
-
-## Miscellaneous
-
-### Evaluating a metric
-
-`tools/analysis_tools/eval_metric.py` evaluates certain metrics of a pkl result file
- according to a config file.
-
-```shell
-python tools/analysis_tools/eval_metric.py ${CONFIG} ${PKL_RESULTS} [-h] [--format-only] [--eval ${EVAL[EVAL ...]}]
- [--cfg-options ${CFG_OPTIONS [CFG_OPTIONS ...]}]
- [--eval-options ${EVAL_OPTIONS [EVAL_OPTIONS ...]}]
-```
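-
-For example, to evaluate bbox mAP from the `result.pkl` file used in the earlier examples (the file name is illustrative):
-
-```shell
-python tools/analysis_tools/eval_metric.py \
-    configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
-    result.pkl \
-    --eval bbox
-```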
-
-### Print the entire config
-
-`tools/misc/print_config.py` prints the whole config verbatim, expanding all its
- imports.
-
-```shell
-python tools/misc/print_config.py ${CONFIG} [-h] [--options ${OPTIONS [OPTIONS...]}]
-```
diff --git a/spaces/tonwuaso/SentimentAnalysisModel/README.md b/spaces/tonwuaso/SentimentAnalysisModel/README.md
deleted file mode 100644
index 8374b583829efe3310a1280c1c1fbd6d015ce97a..0000000000000000000000000000000000000000
--- a/spaces/tonwuaso/SentimentAnalysisModel/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: SentimentAnalysisModel
-emoji: 🏆
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.32.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/trttung1610/musicgen/README.md b/spaces/trttung1610/musicgen/README.md
deleted file mode 100644
index 215eb424f4d2efd9d3295c0b6763b9f205b45c7d..0000000000000000000000000000000000000000
--- a/spaces/trttung1610/musicgen/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AudioCraft Plus v2.0.0a (MusicGen + AudioGen)
-emoji: 🎶
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: true
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/tvt/Real-CUGAN/app.py b/spaces/tvt/Real-CUGAN/app.py
deleted file mode 100644
index 8694d698f10e3660a5107b7feec5e80e5a203a67..0000000000000000000000000000000000000000
--- a/spaces/tvt/Real-CUGAN/app.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from upcunet_v3 import RealWaifuUpScaler
-import gradio as gr
-import time
-import logging
-import os
-from PIL import ImageOps
-import numpy as np
-import math
-
-
-def greet(input_img, input_model_name, input_tile_mode):
- # if input_img.size[0] * input_img.size[1] > 256 * 256:
- # y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1]))
- # x = int(input_img.size[0]/input_img.size[1]*y)
- # input_img = ImageOps.fit(input_img, (x, y))
- input_img = np.array(input_img)
- if input_model_name not in model_cache:
- t1 = time.time()
- upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu")
- t2 = time.time()
- logger.info(f'load model time, {t2 - t1}')
- model_cache[input_model_name] = upscaler
- else:
- upscaler = model_cache[input_model_name]
- logger.info(f'load model from cache')
-
- start = time.time()
- result = upscaler(input_img, tile_mode=input_tile_mode)
- end = time.time()
- logger.info(f'input_model_name, {input_model_name}')
- logger.info(f'input_tile_mode, {input_tile_mode}')
- logger.info(f'input shape, {input_img.shape}')
- logger.info(f'output shape, {result.shape}')
- logger.info(f'speed time, {end - start}')
- return result
-
-
-if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s")
- logger = logging.getLogger()
-
- ModelPath = "weights_v3/"
- model_cache = {}
-
-    input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='Select model')
-    input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='Select tile_mode')
- input_img = gr.inputs.Image(label='image', type='pil')
-
- inputs = [input_img, input_model_name, input_tile_mode]
- outputs = "image"
- iface = gr.Interface(fn=greet,
- inputs=inputs,
- outputs=outputs,
- allow_screenshot=False,
- allow_flagging='never',
- examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]],
-                         article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN) '
-                                 'Thanks to the project open-sourced by Bilibili. Very large images cause out-of-memory errors, so inputs are cropped to a smaller size here; to try the effect on large images, please use the link above. '
-                                 'The weight path can be changed; the leading number is the upscaling factor, and too large a factor runs out of memory. '
-                                 'Denoise version (denoise): recommended when the source is noisy or badly compressed; the 2x model currently supports 3 denoising levels. '
-                                 'No-denoise version (no-denoise): recommended when the source has little noise and acceptable compression but you want higher resolution/sharpness or general enhancement and restoration. '
-                                 'Conservative version (conservative): recommended if you worry about losing texture, an altered art style, over-boosted colors, or heavy AI processing traces in general. '
-                                 'The larger the tile_mode, the less VRAM is used, but the slower the processing.')
- iface.launch()
diff --git a/spaces/udion/BayesCap/src/networks_T1toT2.py b/spaces/udion/BayesCap/src/networks_T1toT2.py
deleted file mode 100644
index 0a4957071e817fb551bc1fc86fe1cc5dc4e75cfe..0000000000000000000000000000000000000000
--- a/spaces/udion/BayesCap/src/networks_T1toT2.py
+++ /dev/null
@@ -1,477 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import functools
-
-### components
-class ResConv(nn.Module):
- """
- Residual convolutional block, where
- convolutional block consists: (convolution => [BN] => ReLU) * 3
- residual connection adds the input to the output
- """
- def __init__(self, in_channels, out_channels, mid_channels=None):
- super().__init__()
- if not mid_channels:
- mid_channels = out_channels
- self.double_conv = nn.Sequential(
- nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
- nn.BatchNorm2d(mid_channels),
- nn.ReLU(inplace=True),
- nn.Conv2d(mid_channels, mid_channels, kernel_size=3, padding=1),
- nn.BatchNorm2d(mid_channels),
- nn.ReLU(inplace=True),
- nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
- nn.BatchNorm2d(out_channels),
- nn.ReLU(inplace=True)
- )
- self.double_conv1 = nn.Sequential(
- nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
- nn.BatchNorm2d(out_channels),
- nn.ReLU(inplace=True),
- )
- def forward(self, x):
- x_in = self.double_conv1(x)
- x1 = self.double_conv(x)
-        return x1 + x_in  # reuse the residual branch computed above instead of running double_conv twice
-
-class Down(nn.Module):
- """Downscaling with maxpool then Resconv"""
- def __init__(self, in_channels, out_channels):
- super().__init__()
- self.maxpool_conv = nn.Sequential(
- nn.MaxPool2d(2),
- ResConv(in_channels, out_channels)
- )
- def forward(self, x):
- return self.maxpool_conv(x)
-
-class Up(nn.Module):
- """Upscaling then double conv"""
- def __init__(self, in_channels, out_channels, bilinear=True):
- super().__init__()
- # if bilinear, use the normal convolutions to reduce the number of channels
- if bilinear:
- self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
- self.conv = ResConv(in_channels, out_channels, in_channels // 2)
- else:
- self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
- self.conv = ResConv(in_channels, out_channels)
- def forward(self, x1, x2):
- x1 = self.up(x1)
- # input is CHW
- diffY = x2.size()[2] - x1.size()[2]
- diffX = x2.size()[3] - x1.size()[3]
- x1 = F.pad(
- x1,
- [
- diffX // 2, diffX - diffX // 2,
- diffY // 2, diffY - diffY // 2
- ]
- )
- # if you have padding issues, see
- # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
- # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
- x = torch.cat([x2, x1], dim=1)
- return self.conv(x)
-
-class OutConv(nn.Module):
- def __init__(self, in_channels, out_channels):
- super(OutConv, self).__init__()
- self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
- def forward(self, x):
- # return F.relu(self.conv(x))
- return self.conv(x)
-
-##### The composite networks
-class UNet(nn.Module):
- def __init__(self, n_channels, out_channels, bilinear=True):
- super(UNet, self).__init__()
- self.n_channels = n_channels
- self.out_channels = out_channels
- self.bilinear = bilinear
- ####
- self.inc = ResConv(n_channels, 64)
- self.down1 = Down(64, 128)
- self.down2 = Down(128, 256)
- self.down3 = Down(256, 512)
- factor = 2 if bilinear else 1
- self.down4 = Down(512, 1024 // factor)
- self.up1 = Up(1024, 512 // factor, bilinear)
- self.up2 = Up(512, 256 // factor, bilinear)
- self.up3 = Up(256, 128 // factor, bilinear)
- self.up4 = Up(128, 64, bilinear)
- self.outc = OutConv(64, out_channels)
- def forward(self, x):
- x1 = self.inc(x)
- x2 = self.down1(x1)
- x3 = self.down2(x2)
- x4 = self.down3(x3)
- x5 = self.down4(x4)
- x = self.up1(x5, x4)
- x = self.up2(x, x3)
- x = self.up3(x, x2)
- x = self.up4(x, x1)
- y = self.outc(x)
- return y
-
-class CasUNet(nn.Module):
- def __init__(self, n_unet, io_channels, bilinear=True):
- super(CasUNet, self).__init__()
- self.n_unet = n_unet
- self.io_channels = io_channels
- self.bilinear = bilinear
- ####
- self.unet_list = nn.ModuleList()
- for i in range(self.n_unet):
- self.unet_list.append(UNet(self.io_channels, self.io_channels, self.bilinear))
- def forward(self, x, dop=None):
- y = x
- for i in range(self.n_unet):
- if i==0:
- if dop is not None:
- y = F.dropout2d(self.unet_list[i](y), p=dop)
- else:
- y = self.unet_list[i](y)
- else:
- y = self.unet_list[i](y+x)
- return y
-
-class CasUNet_2head(nn.Module):
- def __init__(self, n_unet, io_channels, bilinear=True):
- super(CasUNet_2head, self).__init__()
- self.n_unet = n_unet
- self.io_channels = io_channels
- self.bilinear = bilinear
- ####
- self.unet_list = nn.ModuleList()
- for i in range(self.n_unet):
- if i != self.n_unet-1:
- self.unet_list.append(UNet(self.io_channels, self.io_channels, self.bilinear))
- else:
- self.unet_list.append(UNet_2head(self.io_channels, self.io_channels, self.bilinear))
- def forward(self, x):
- y = x
- for i in range(self.n_unet):
- if i==0:
- y = self.unet_list[i](y)
- else:
- y = self.unet_list[i](y+x)
- y_mean, y_sigma = y[0], y[1]
- return y_mean, y_sigma
-
-class CasUNet_3head(nn.Module):
- def __init__(self, n_unet, io_channels, bilinear=True):
- super(CasUNet_3head, self).__init__()
- self.n_unet = n_unet
- self.io_channels = io_channels
- self.bilinear = bilinear
- ####
- self.unet_list = nn.ModuleList()
- for i in range(self.n_unet):
- if i != self.n_unet-1:
- self.unet_list.append(UNet(self.io_channels, self.io_channels, self.bilinear))
- else:
- self.unet_list.append(UNet_3head(self.io_channels, self.io_channels, self.bilinear))
- def forward(self, x):
- y = x
- for i in range(self.n_unet):
- if i==0:
- y = self.unet_list[i](y)
- else:
- y = self.unet_list[i](y+x)
- y_mean, y_alpha, y_beta = y[0], y[1], y[2]
- return y_mean, y_alpha, y_beta
-
-class UNet_2head(nn.Module):
- def __init__(self, n_channels, out_channels, bilinear=True):
- super(UNet_2head, self).__init__()
- self.n_channels = n_channels
- self.out_channels = out_channels
- self.bilinear = bilinear
- ####
- self.inc = ResConv(n_channels, 64)
- self.down1 = Down(64, 128)
- self.down2 = Down(128, 256)
- self.down3 = Down(256, 512)
- factor = 2 if bilinear else 1
- self.down4 = Down(512, 1024 // factor)
- self.up1 = Up(1024, 512 // factor, bilinear)
- self.up2 = Up(512, 256 // factor, bilinear)
- self.up3 = Up(256, 128 // factor, bilinear)
- self.up4 = Up(128, 64, bilinear)
- #per pixel multiple channels may exist
- self.out_mean = OutConv(64, out_channels)
- #variance will always be a single number for a pixel
- self.out_var = nn.Sequential(
- OutConv(64, 128),
- OutConv(128, 1),
- )
- def forward(self, x):
- x1 = self.inc(x)
- x2 = self.down1(x1)
- x3 = self.down2(x2)
- x4 = self.down3(x3)
- x5 = self.down4(x4)
- x = self.up1(x5, x4)
- x = self.up2(x, x3)
- x = self.up3(x, x2)
- x = self.up4(x, x1)
- y_mean, y_var = self.out_mean(x), self.out_var(x)
- return y_mean, y_var
-
-class UNet_3head(nn.Module):
- def __init__(self, n_channels, out_channels, bilinear=True):
- super(UNet_3head, self).__init__()
- self.n_channels = n_channels
- self.out_channels = out_channels
- self.bilinear = bilinear
- ####
- self.inc = ResConv(n_channels, 64)
- self.down1 = Down(64, 128)
- self.down2 = Down(128, 256)
- self.down3 = Down(256, 512)
- factor = 2 if bilinear else 1
- self.down4 = Down(512, 1024 // factor)
- self.up1 = Up(1024, 512 // factor, bilinear)
- self.up2 = Up(512, 256 // factor, bilinear)
- self.up3 = Up(256, 128 // factor, bilinear)
- self.up4 = Up(128, 64, bilinear)
- #per pixel multiple channels may exist
- self.out_mean = OutConv(64, out_channels)
-        # the alpha and beta uncertainty heads each output a single channel per pixel
- self.out_alpha = nn.Sequential(
- OutConv(64, 128),
- OutConv(128, 1),
- nn.ReLU()
- )
- self.out_beta = nn.Sequential(
- OutConv(64, 128),
- OutConv(128, 1),
- nn.ReLU()
- )
- def forward(self, x):
- x1 = self.inc(x)
- x2 = self.down1(x1)
- x3 = self.down2(x2)
- x4 = self.down3(x3)
- x5 = self.down4(x4)
- x = self.up1(x5, x4)
- x = self.up2(x, x3)
- x = self.up3(x, x2)
- x = self.up4(x, x1)
- y_mean, y_alpha, y_beta = self.out_mean(x), \
- self.out_alpha(x), self.out_beta(x)
- return y_mean, y_alpha, y_beta
-
-class ResidualBlock(nn.Module):
- def __init__(self, in_features):
- super(ResidualBlock, self).__init__()
- conv_block = [
- nn.ReflectionPad2d(1),
- nn.Conv2d(in_features, in_features, 3),
- nn.InstanceNorm2d(in_features),
- nn.ReLU(inplace=True),
- nn.ReflectionPad2d(1),
- nn.Conv2d(in_features, in_features, 3),
- nn.InstanceNorm2d(in_features)
- ]
- self.conv_block = nn.Sequential(*conv_block)
- def forward(self, x):
- return x + self.conv_block(x)
-
-class Generator(nn.Module):
- def __init__(self, input_nc, output_nc, n_residual_blocks=9):
- super(Generator, self).__init__()
- # Initial convolution block
- model = [
- nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, 7),
- nn.InstanceNorm2d(64), nn.ReLU(inplace=True)
- ]
- # Downsampling
- in_features = 64
- out_features = in_features*2
- for _ in range(2):
- model += [
- nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
- nn.InstanceNorm2d(out_features),
- nn.ReLU(inplace=True)
- ]
- in_features = out_features
- out_features = in_features*2
- # Residual blocks
- for _ in range(n_residual_blocks):
- model += [ResidualBlock(in_features)]
- # Upsampling
- out_features = in_features//2
- for _ in range(2):
- model += [
- nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
- nn.InstanceNorm2d(out_features),
- nn.ReLU(inplace=True)
- ]
- in_features = out_features
- out_features = in_features//2
- # Output layer
- model += [nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, 7), nn.Tanh()]
- self.model = nn.Sequential(*model)
- def forward(self, x):
- return self.model(x)
-
-
-class ResnetGenerator(nn.Module):
- """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
- We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
- """
-
- def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
- """Construct a Resnet-based generator
- Parameters:
- input_nc (int) -- the number of channels in input images
- output_nc (int) -- the number of channels in output images
- ngf (int) -- the number of filters in the last conv layer
- norm_layer -- normalization layer
- use_dropout (bool) -- if use dropout layers
- n_blocks (int) -- the number of ResNet blocks
- padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
- """
- assert(n_blocks >= 0)
- super(ResnetGenerator, self).__init__()
- if type(norm_layer) == functools.partial:
- use_bias = norm_layer.func == nn.InstanceNorm2d
- else:
- use_bias = norm_layer == nn.InstanceNorm2d
-
- model = [nn.ReflectionPad2d(3),
- nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
- norm_layer(ngf),
- nn.ReLU(True)]
-
- n_downsampling = 2
- for i in range(n_downsampling): # add downsampling layers
- mult = 2 ** i
- model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
- norm_layer(ngf * mult * 2),
- nn.ReLU(True)]
-
- mult = 2 ** n_downsampling
- for i in range(n_blocks): # add ResNet blocks
-
- model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
-
- for i in range(n_downsampling): # add upsampling layers
- mult = 2 ** (n_downsampling - i)
- model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
- kernel_size=3, stride=2,
- padding=1, output_padding=1,
- bias=use_bias),
- norm_layer(int(ngf * mult / 2)),
- nn.ReLU(True)]
- model += [nn.ReflectionPad2d(3)]
- model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
- model += [nn.Tanh()]
-
- self.model = nn.Sequential(*model)
-
- def forward(self, input):
- """Standard forward"""
- return self.model(input)
-
-
-class ResnetBlock(nn.Module):
- """Define a Resnet block"""
-
- def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
- """Initialize the Resnet block
- A resnet block is a conv block with skip connections
- We construct a conv block with build_conv_block function,
- and implement skip connections in function.
- Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
- """
- super(ResnetBlock, self).__init__()
- self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
-
- def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
- """Construct a convolutional block.
- Parameters:
- dim (int) -- the number of channels in the conv layer.
- padding_type (str) -- the name of padding layer: reflect | replicate | zero
- norm_layer -- normalization layer
- use_dropout (bool) -- if use dropout layers.
- use_bias (bool) -- if the conv layer uses bias or not
- Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
- """
- conv_block = []
- p = 0
- if padding_type == 'reflect':
- conv_block += [nn.ReflectionPad2d(1)]
- elif padding_type == 'replicate':
- conv_block += [nn.ReplicationPad2d(1)]
- elif padding_type == 'zero':
- p = 1
- else:
- raise NotImplementedError('padding [%s] is not implemented' % padding_type)
-
- conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
- if use_dropout:
- conv_block += [nn.Dropout(0.5)]
-
- p = 0
- if padding_type == 'reflect':
- conv_block += [nn.ReflectionPad2d(1)]
- elif padding_type == 'replicate':
- conv_block += [nn.ReplicationPad2d(1)]
- elif padding_type == 'zero':
- p = 1
- else:
- raise NotImplementedError('padding [%s] is not implemented' % padding_type)
- conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
-
- return nn.Sequential(*conv_block)
-
- def forward(self, x):
- """Forward function (with skip connections)"""
- out = x + self.conv_block(x) # add skip connections
- return out
-
-### discriminator
-class NLayerDiscriminator(nn.Module):
- """Defines a PatchGAN discriminator"""
- def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
- """Construct a PatchGAN discriminator
- Parameters:
- input_nc (int) -- the number of channels in input images
- ndf (int) -- the number of filters in the last conv layer
- n_layers (int) -- the number of conv layers in the discriminator
- norm_layer -- normalization layer
- """
- super(NLayerDiscriminator, self).__init__()
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
- use_bias = norm_layer.func == nn.InstanceNorm2d
- else:
- use_bias = norm_layer == nn.InstanceNorm2d
- kw = 4
- padw = 1
- sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
- nf_mult = 1
- nf_mult_prev = 1
- for n in range(1, n_layers): # gradually increase the number of filters
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n, 8)
- sequence += [
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n_layers, 8)
- sequence += [
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
- sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
- self.model = nn.Sequential(*sequence)
- def forward(self, input):
- """Standard forward."""
- return self.model(input)
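-
-
-if __name__ == '__main__':
-    # Hypothetical smoke test (not part of the original module): push a dummy
-    # single-channel batch through the 3-head cascaded U-Net and check that the
-    # mean/alpha/beta maps keep the input spatial size.
-    net = CasUNet_3head(n_unet=1, io_channels=1)
-    x = torch.randn(2, 1, 64, 64)
-    y_mean, y_alpha, y_beta = net(x)
-    print(y_mean.shape, y_alpha.shape, y_beta.shape)  # each torch.Size([2, 1, 64, 64])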
\ No newline at end of file
diff --git a/spaces/ulysses115/Nogizaka46-so/inference/slicer.py b/spaces/ulysses115/Nogizaka46-so/inference/slicer.py
deleted file mode 100644
index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000
--- a/spaces/ulysses115/Nogizaka46-so/inference/slicer.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import librosa
-import torch
-import torchaudio
-
-
-class Slicer:
- def __init__(self,
- sr: int,
- threshold: float = -40.,
- min_length: int = 5000,
- min_interval: int = 300,
- hop_size: int = 20,
- max_sil_kept: int = 5000):
- if not min_length >= min_interval >= hop_size:
- raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
- if not max_sil_kept >= hop_size:
- raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
- min_interval = sr * min_interval / 1000
- self.threshold = 10 ** (threshold / 20.)
- self.hop_size = round(sr * hop_size / 1000)
- self.win_size = min(round(min_interval), 4 * self.hop_size)
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
- self.min_interval = round(min_interval / self.hop_size)
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
- def _apply_slice(self, waveform, begin, end):
- if len(waveform.shape) > 1:
- return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
- else:
- return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
-
- # @timeit
- def slice(self, waveform):
- if len(waveform.shape) > 1:
- samples = librosa.to_mono(waveform)
- else:
- samples = waveform
- if samples.shape[0] <= self.min_length:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
- sil_tags = []
- silence_start = None
- clip_start = 0
- for i, rms in enumerate(rms_list):
- # Keep looping while frame is silent.
- if rms < self.threshold:
- # Record start of silent frames.
- if silence_start is None:
- silence_start = i
- continue
- # Keep looping while frame is not silent and silence start has not been recorded.
- if silence_start is None:
- continue
- # Clear recorded silence start if interval is not enough or clip is too short
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
- need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
- if not is_leading_silence and not need_slice_middle:
- silence_start = None
- continue
- # Need slicing. Record the range of silent frames to be removed.
- if i - silence_start <= self.max_sil_kept:
- pos = rms_list[silence_start: i + 1].argmin() + silence_start
- if silence_start == 0:
- sil_tags.append((0, pos))
- else:
- sil_tags.append((pos, pos))
- clip_start = pos
- elif i - silence_start <= self.max_sil_kept * 2:
- pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
- pos += i - self.max_sil_kept
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- clip_start = pos_r
- else:
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
- clip_start = max(pos_r, pos)
- else:
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- else:
- sil_tags.append((pos_l, pos_r))
- clip_start = pos_r
- silence_start = None
- # Deal with trailing silence.
- total_frames = rms_list.shape[0]
- if silence_start is not None and total_frames - silence_start >= self.min_interval:
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
- pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
- sil_tags.append((pos, total_frames + 1))
- # Apply and return slices.
- if len(sil_tags) == 0:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- else:
- chunks = []
-            # The first silent segment does not start at the beginning, so add the leading voiced chunk
- if sil_tags[0][0]:
- chunks.append(
- {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
- for i in range(0, len(sil_tags)):
-                # Mark the voiced chunk (skipped for the first tag)
- if i:
- chunks.append({"slice": False,
- "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
-                # Mark every silent chunk
- chunks.append({"slice": True,
- "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
-            # The last silent segment does not reach the end, so add the trailing chunk
- if sil_tags[-1][1] * self.hop_size < len(waveform):
- chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
- chunk_dict = {}
- for i in range(len(chunks)):
- chunk_dict[str(i)] = chunks[i]
- return chunk_dict
-
-
-def cut(audio_path, db_thresh=-30, min_len=5000):
- audio, sr = librosa.load(audio_path, sr=None)
- slicer = Slicer(
- sr=sr,
- threshold=db_thresh,
- min_length=min_len
- )
- chunks = slicer.slice(audio)
- return chunks
-
-
-def chunks2audio(audio_path, chunks):
- chunks = dict(chunks)
- audio, sr = torchaudio.load(audio_path)
- if len(audio.shape) == 2 and audio.shape[1] >= 2:
- audio = torch.mean(audio, dim=0).unsqueeze(0)
- audio = audio.cpu().numpy()[0]
- result = []
- for k, v in chunks.items():
- tag = v["split_time"].split(",")
- if tag[0] != tag[1]:
- result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
- return result, sr
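-
-
-if __name__ == '__main__':
-    # Hypothetical usage sketch (not part of the original module): slice a file and
-    # write the non-silent chunks to disk. `soundfile` and `input.wav` are assumptions;
-    # any writer that accepts a numpy array plus a sample rate would work.
-    import soundfile as sf
-
-    chunks = cut('input.wav', db_thresh=-30, min_len=5000)
-    segments, sr = chunks2audio('input.wav', chunks)
-    for idx, (is_silence, data) in enumerate(segments):
-        if not is_silence:
-            sf.write(f'segment_{idx}.wav', data, sr)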
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Autodwg Dwf To Dwg Converter Pro 2015 Crack PATCHED.md b/spaces/usbethFlerru/sovits-modelsV2/example/Autodwg Dwf To Dwg Converter Pro 2015 Crack PATCHED.md
deleted file mode 100644
index cfa0ce5c5be1bd08449f9a63ce9e4bc8baad2598..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/Autodwg Dwf To Dwg Converter Pro 2015 Crack PATCHED.md
+++ /dev/null
@@ -1,284 +0,0 @@
-
-
Autodwg DWF to DWG Converter Pro 2015 Crack: How to Convert DWF Files to DWG Files Easily and Quickly
-
-
If you are working with CAD files, you might have encountered the problem of converting DWF files to DWG files. DWF files are a secure file format developed by Autodesk for the efficient distribution and communication of rich design data. However, they are not editable and can only be viewed, reviewed or printed. DWG files are a binary file format used for storing two- and three-dimensional design data and metadata. They are the native format for several CAD packages including AutoCAD and IntelliCAD. They are editable and can be used for further design or modification.
-
-
So, how can you convert DWF files to DWG files easily and quickly? One of the best solutions is to use Autodwg DWF to DWG Converter Pro 2015, a powerful and easy-to-use tool that helps you convert DWF files to editable DWG files, supporting AutoCAD drawing R14-2023. In this article, we will show you how to download and install Autodwg DWF to DWG Converter Pro 2015 crack, how to use it to convert DWF files to DWG files, and what are the benefits of using it.
How to Download and Install Autodwg DWF to DWG Converter Pro 2015 Crack?
-
-
Downloading and installing Autodwg DWF to DWG Converter Pro 2015 crack is very easy and simple. Here are the steps that you need to follow:
-
-
-
First, you need to have a computer that meets the minimum system requirements for Autodwg DWF to DWG Converter Pro 2015. These are: Windows XP/Vista/7/8/10, 32-bit or 64-bit; Pentium III processor or equivalent; 512 MB of RAM; 50 MB of free hard disk space; Internet connection.
-
Next, you need to find a reliable source that offers Autodwg DWF to DWG Converter Pro 2015 crack for free download. You can use any search engine or torrent site to find one. However, be careful of malware or viruses that might harm your computer.
-
Once you find a source that offers Autodwg DWF to DWG Converter Pro 2015 crack for free download, click on the download button or link. You might need to complete some surveys or offers before you can access the download link.
-
After downloading Autodwg DWF to DWG Converter Pro 2015 crack, you need to extract it to your computer. You can use any program that can handle zip or rar files, such as WinZip or WinRAR.
-
Finally, you need to run the setup file and follow the instructions on the screen. You might need to enter a serial number or a license key that is provided in the crack folder. You might also need to copy and paste some files from the crack folder to the installation folder.
-
-
-
That's it! You have successfully downloaded and installed Autodwg DWF to DWG Converter Pro 2015 crack. Now you can use it to convert DWF files to DWG files easily and quickly.
-
-
How to Use Autodwg DWF to DWG Converter Pro 2015 Crack to Convert DWF Files to DWG Files?
-
-
Using Autodwg DWF to DWG Converter Pro 2015 crack to convert DWF files to DWG files is very easy and simple. Here are the steps that you need to follow:
-
-
-
First, you need to open Autodwg DWF to DWG Converter Pro 2015 by clicking on its icon on your desktop or start menu.
-
Next, you need to add the DWF files that you want to convert by clicking on the Add button or dragging and dropping them from your file explorer.
-
Then, you need to choose the output format and folder by clicking on the Options button. You can choose between DWG or DXF format, and select the version of AutoCAD that you want to support. You can also choose the output folder where you want to save the converted files.
-
Finally, you need to start the conversion process by clicking on the Convert button. You can see the progress and status of each file on the screen. You can also stop or pause the conversion at any time by clicking on the Stop or Pause button.
-
-
-
That's it! You have successfully used Autodwg DWF to DWG Converter Pro 2015 crack to convert DWF files to DWG files easily and quickly. Now you can open and edit them with any CAD software that supports DWG format.
-
-
What are the Benefits of Using Autodwg DWF to DWG Converter Pro 2015 Crack?
-
-
Using Autodwg DWF to DWG Converter Pro 2015 crack can bring many benefits to your work, such as:
-
-
-
You can save time and effort by converting thousands of DWF files into DWG files in one go.
-
You can retain all DWF elements generated by AutoCAD Plot to DWF, including true type fonts, ellipses, arcs and elliptical arcs.
-
You can recreate layers saved in DWF in DWG format.
-
You can extract embedded images from DWF files.
-
You can convert new DWF v6.0 multi page specification.
-
You can get high quality, high performance and very easy-to-use conversion results.
-
-
-
Autodwg DWF to DWG Converter Pro 2015 crack is a great tool for converting DWF files to DWG files easily and quickly. It offers free, high-quality, diverse and easy-to-use conversion features that can suit any work needs.
-
-
How to Crack Autodwg DWF to DWG Converter Pro 2015?
-
-
If you want to use Autodwg DWF to DWG Converter Pro 2015 without paying for it, you might be tempted to crack it. Cracking is the process of modifying the software to bypass its security features and remove its limitations. However, cracking is not a legal or ethical practice, and it can have serious consequences for you and your computer. Here are some of the risks and disadvantages of cracking Autodwg DWF to DWG Converter Pro 2015:
-
-
-
You can violate the intellectual property rights of the software developer and face legal actions or penalties.
-
You can expose your computer to malware or viruses that can harm your system or steal your data.
-
You can lose the technical support and updates from the software developer and face compatibility or performance issues.
-
You can damage the reputation and quality of the software industry and discourage innovation and development.
-
-
-
Therefore, we strongly advise you not to crack Autodwg DWF to DWG Converter Pro 2015, and instead use it legally and ethically. You can either purchase a license from the official website or use the free trial version that offers limited features and functions.
-
-
How to Uninstall Autodwg DWF to DWG Converter Pro 2015?
-
-
If you want to uninstall Autodwg DWF to DWG Converter Pro 2015 from your computer, you can do so easily and quickly. Here are the steps that you need to follow:
-
-
-
First, you need to close Autodwg DWF to DWG Converter Pro 2015 if it is running on your computer.
-
Next, you need to open the Control Panel by clicking on the Start menu and selecting Control Panel.
-
Then, you need to click on the Programs and Features option under the Programs category.
-
After that, you need to find Autodwg DWF to DWG Converter Pro 2015 from the list of installed programs and click on it.
-
Finally, you need to click on the Uninstall button and follow the instructions on the screen.
-
-
-
That's it! You have successfully uninstalled Autodwg DWF to DWG Converter Pro 2015 from your computer. You can also delete any leftover files or folders from your hard drive if you want.
-
-
Conclusion
-
-
-In this article, we have shown you how to download and install Autodwg DWF to DWG Converter Pro 2015 crack, how to use it to convert DWF files to DWG files, what are the benefits of using it, how to crack it, and how to uninstall it. We hope that this guide has been helpful and informative for you. If you have any questions or comments, feel free to leave them below. Thank you for reading!
-
Autodwg DWF to DWG Converter Pro 2015 Crack: How to Convert DWF Files to DWG Files Easily and Quickly
-
-
If you are working with CAD files, you might have encountered the problem of converting DWF files to DWG files. DWF files are a secure file format developed by Autodesk for the efficient distribution and communication of rich design data. However, they are not editable and can only be viewed, reviewed or printed. DWG files are a binary file format used for storing two- and three-dimensional design data and metadata. They are the native format for several CAD packages including AutoCAD and IntelliCAD. They are editable and can be used for further design or modification.
-
-
So, how can you convert DWF files to DWG files easily and quickly? One of the best solutions is to use Autodwg DWF to DWG Converter Pro 2015, a powerful and easy-to-use tool that helps you convert DWF files to editable DWG files, supporting AutoCAD drawing R14-2023. In this article, we will show you how to download and install Autodwg DWF to DWG Converter Pro 2015 crack, how to use it to convert DWF files to DWG files, and what are the benefits of using it.
-
-
How to Download and Install Autodwg DWF to DWG Converter Pro 2015 Crack?
-
-
Downloading and installing Autodwg DWF to DWG Converter Pro 2015 crack is very easy and simple. Here are the steps that you need to follow:
-
-
-
First, you need to have a computer that meets the minimum system requirements for Autodwg DWF to DWG Converter Pro 2015. These are: Windows XP/Vista/7/8/10, 32-bit or 64-bit; Pentium III processor or equivalent; 512 MB of RAM; 50 MB of free hard disk space; Internet connection.
-
Next, you need to find a reliable source that offers Autodwg DWF to DWG Converter Pro 2015 crack for free download. You can use any search engine or torrent site to find one. However, be careful of malware or viruses that might harm your computer.
-
Once you find a source that offers Autodwg DWF to DWG Converter Pro 2015 crack for free download, click on the download button or link. You might need to complete some surveys or offers before you can access the download link.
-
After downloading Autodwg DWF to DWG Converter Pro 2015 crack, you need to extract it to your computer. You can use any program that can handle zip or rar files, such as WinZip or WinRAR.
-
Finally, you need to run the setup file and follow the instructions on the screen. You might need to enter a serial number or a license key that is provided in the crack folder. You might also need to copy and paste some files from the crack folder to the installation folder.
-
-
-
That's it! You have successfully downloaded and installed Autodwg DWF to DWG Converter Pro 2015 crack. Now you can use it to convert DWF files to DWG files easily and quickly.
-
-
How to Use Autodwg DWF to DWG Converter Pro 2015 Crack to Convert DWF Files to DWG Files?
-
-
Using Autodwg DWF to DWG Converter Pro 2015 crack
-to convert DWF files
-to DWG files is very easy and simple. Here are the steps that you need
-to follow:
-
-
-
First,
-you need
-to open Autodwg DWF
-to DWG Converter Pro 2015 by clicking on its icon on your desktop or start menu.
-
Next,
-you need
-to add the DWF files that you want
-to convert by clicking on the Add button or dragging and dropping them from your file explorer.
-
Then,
-you need
-to choose the output format and folder by clicking on the Options button. You can choose between DWG or DXF format, and select the version of AutoCAD that you want
-to support. You can also choose the output folder where you want
-to save the converted files.
-
Finally,
-you need
-to start the conversion process by clicking on the Convert button. You can see the progress and status of each file on the screen. You can also stop or pause the conversion at any time by clicking on the Stop or Pause button.
-
-
-
That's it! You have successfully used Autodwg DWF
-to DWG Converter Pro 2015 crack
-to convert DWF files
-to DWG files easily and quickly. Now you can open and edit them with any CAD software that supports DWG format.
-
-
What are the Benefits of Using Autodwg DWF
-to DWG Converter Pro 2015 Crack?
-
-
Using Autodwg DWF
-to DWG Converter Pro 2015 crack can bring many benefits
-to your work, such as:
-
-
-
You can save time and effort by converting thousands of DWF files into DWG files in one go.
-
You can retain all DWF elements generated by AutoCAD Plot
-to DWF
-, including true type fonts, ellipses, arcs and elliptical arcs.
-
You can recreate layers saved in DWF in DWG format.
-
You can extract embedded images from DWF files.
-
You can convert new DWF v6.0 multi page specification.
-
You can get high quality, high performance and very easy-to-use conversion results.
-
-
-
Autodwg DWF
-to DWG Converter Pro 2015 crack is a great tool for converting DWF files
-to DWG files easily and quickly. It offers free, high-quality, diverse and easy-to-use conversion features that can suit any work needs.
-
-
How
-to Crack Autodwg DWF
-to DWG Converter Pro 2015?
-
-
If you want
-to use Autodwg DWF
-to DWG Converter Pro 2015 without paying for it, you might be tempted
-to crack it. Cracking is the process of modifying the software
-to bypass its security features and remove its limitations. However, cracking is not a legal or ethical practice,
-and it can have serious consequences for you and your computer. Here are some of
-the risks and disadvantages of cracking Autodwg DWF
-to DWG Converter Pro 2015:
-
-
-
You can violate the intellectual property rights of
-the software developer and face legal actions or penalties.
-
You can expose your computer
-to malware or viruses that can harm your system or steal your data.
-
You can lose
-the technical support and updates from
-the software developer and face compatibility or performance issues.
-
You can damage
-the reputation and quality of
-the software industry and discourage innovation and development.
-
-
-
Therefore,
-we strongly advise you not
-to crack Autodwg DWF
-to DWG Converter Pro 2015,
-and instead use it legally and ethically. You can either purchase a license from
-the official website or use
-the free trial version that offers limited features and functions.
-
-
How
-to Uninstall Autodwg DWF
-to DWG Converter Pro 2015?
-
-
If you want
-to uninstall Autodwg DWF
-to DWG Converter Pro 2015 from your computer,
-you can do so easily and quickly.
-Here are the steps that you need
-to follow:
-
-
-
First,
-you need
-to close Autodwg DWF
-to DWG Converter Pro 2015 if it is running on your computer.
-
Next,
-you need
-to open
-the Control Panel by clicking on
-the Start menu and selecting Control Panel.
-
Then,
-you need
-to click on
-the Programs and Features option under
-the Programs category.
-
After that,
-you need
-to find Autodwg DWF
-to DWG Converter Pro 2015 from
-the list of installed programs and click on it.
-
Finally,
-you need
-to click on
-the Uninstall button and follow
-the instructions on
-the screen.
-
-
-
That's it! You have successfully uninstalled Autodwg DWF
-to DWG Converter Pro 2015 from your computer.
-You can also delete any leftover files or folders from your hard drive if you want.
-
-
Conclusion
-
-
In this article,
-we have shown you how
-to download and install Autodwg DWF
-to DWG Converter Pro 2015 crack,
-how
-to use it
-to convert DWF files
-to DWG files,
-what are
-the benefits of using it,
-how
-to crack it,
-and how
-to uninstall it.
-We hope that this guide has been helpful
-and informative for you.
-If you have any questions or comments,
-feel free
-to leave them below.
-Thank you for reading!
-
-
-
\ No newline at end of file
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/architecture_description.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/architecture_description.md
deleted file mode 100644
index 26a2d91166185580f5a44e8b50a9cdba49866963..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/architecture_description.md
+++ /dev/null
@@ -1,224 +0,0 @@
----
-comments: true
-description: Explore the details of Ultralytics YOLOv5 architecture, a comprehensive guide to its model structure, data augmentation techniques, training strategies, and various features. Understand the intricacies of object detection algorithms and improve your skills in the machine learning field.
-keywords: yolov5 architecture, data augmentation, training strategies, object detection, yolo docs, ultralytics
----
-
-# Ultralytics YOLOv5 Architecture
-
-YOLOv5 (v6.0/6.1) is a powerful object detection algorithm developed by Ultralytics. This article dives deep into the YOLOv5 architecture, data augmentation strategies, training methodologies, and loss computation techniques. This comprehensive understanding will help improve your practical application of object detection in various fields, including surveillance, autonomous vehicles, and image recognition.
-
-## 1. Model Structure
-
-YOLOv5's architecture consists of three main parts:
-
-- **Backbone**: This is the main body of the network. For YOLOv5, the backbone is designed using the `New CSP-Darknet53` structure, a modification of the Darknet architecture used in previous versions.
-- **Neck**: This part connects the backbone and the head. In YOLOv5, `SPPF` and `New CSP-PAN` structures are utilized.
-- **Head**: This part is responsible for generating the final output. YOLOv5 uses the `YOLOv3 Head` for this purpose.
-
-The structure of the model is depicted in the image below. The model structure details can be found in `yolov5l.yaml`.
-
-
-
-YOLOv5 introduces some minor changes compared to its predecessors:
-
-1. The `Focus` structure, found in earlier versions, is replaced with a `6x6 Conv2d` structure. This change boosts efficiency [#4825](https://github.com/ultralytics/yolov5/issues/4825).
-2. The `SPP` structure is replaced with `SPPF`. This alteration more than doubles the speed of processing.
-
-To test the speed of `SPP` and `SPPF`, the following code can be used:
-
-
-SPP vs SPPF speed profiling example (click to open)
-
-```python
-import time
-import torch
-import torch.nn as nn
-
-
-class SPP(nn.Module):
- def __init__(self):
- super().__init__()
- self.maxpool1 = nn.MaxPool2d(5, 1, padding=2)
- self.maxpool2 = nn.MaxPool2d(9, 1, padding=4)
- self.maxpool3 = nn.MaxPool2d(13, 1, padding=6)
-
- def forward(self, x):
- o1 = self.maxpool1(x)
- o2 = self.maxpool2(x)
- o3 = self.maxpool3(x)
- return torch.cat([x, o1, o2, o3], dim=1)
-
-
-class SPPF(nn.Module):
- def __init__(self):
- super().__init__()
- self.maxpool = nn.MaxPool2d(5, 1, padding=2)
-
- def forward(self, x):
- o1 = self.maxpool(x)
- o2 = self.maxpool(o1)
- o3 = self.maxpool(o2)
- return torch.cat([x, o1, o2, o3], dim=1)
-
-
-def main():
- input_tensor = torch.rand(8, 32, 16, 16)
- spp = SPP()
- sppf = SPPF()
- output1 = spp(input_tensor)
- output2 = sppf(input_tensor)
-
- print(torch.equal(output1, output2))
-
- t_start = time.time()
- for _ in range(100):
- spp(input_tensor)
- print(f"SPP time: {time.time() - t_start}")
-
- t_start = time.time()
- for _ in range(100):
- sppf(input_tensor)
- print(f"SPPF time: {time.time() - t_start}")
-
-
-if __name__ == '__main__':
- main()
-```
-
-result:
-
-```
-True
-SPP time: 0.5373051166534424
-SPPF time: 0.20780706405639648
-```
-
-
-
-## 2. Data Augmentation Techniques
-
-YOLOv5 employs various data augmentation techniques to improve the model's ability to generalize and reduce overfitting. These techniques include:
-
-- **Mosaic Augmentation**: An image processing technique that combines four training images into one in ways that encourage object detection models to better handle various object scales and translations.
-
- 
-
-- **Copy-Paste Augmentation**: An innovative data augmentation method that copies random patches from an image and pastes them onto another randomly chosen image, effectively generating a new training sample.
-
- 
-
-- **Random Affine Transformations**: This includes random rotation, scaling, translation, and shearing of the images.
-
- 
-
-- **MixUp Augmentation**: A method that creates composite images by taking a linear combination of two images and their associated labels.
-
- 
-
-- **Albumentations**: A powerful library for image augmenting that supports a wide variety of augmentation techniques.
-- **HSV Augmentation**: Random changes to the Hue, Saturation, and Value of the images.
-
- 
-
-- **Random Horizontal Flip**: An augmentation method that randomly flips images horizontally.
-
- 
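-
-To make the MixUp item above concrete, here is a minimal sketch of the general idea, assuming two same-sized images and label arrays of shape (n, 5); it illustrates the technique rather than the exact YOLOv5 implementation:
-
-```python
-import numpy as np
-
-
-def mixup(image1, labels1, image2, labels2):
-    # Blend the two images with a ratio drawn from a Beta(32, 32) distribution
-    r = np.random.beta(32.0, 32.0)
-    image = (image1 * r + image2 * (1 - r)).astype(image1.dtype)
-    # Keep the boxes of both source images as targets for the mixed image
-    labels = np.concatenate((labels1, labels2), 0)
-    return image, labels
-```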
-
-## 3. Training Strategies
-
-YOLOv5 applies several sophisticated training strategies to enhance the model's performance. They include:
-
-- **Multiscale Training**: The input images are randomly rescaled within a range of 0.5 to 1.5 times their original size during the training process.
-- **AutoAnchor**: This strategy optimizes the prior anchor boxes to match the statistical characteristics of the ground truth boxes in your custom data.
-- **Warmup and Cosine LR Scheduler**: A method to adjust the learning rate to enhance model performance.
-- **Exponential Moving Average (EMA)**: A strategy that uses the average of parameters over past steps to stabilize the training process and reduce generalization error.
-- **Mixed Precision Training**: A method to perform operations in half-precision format, reducing memory usage and enhancing computational speed.
-- **Hyperparameter Evolution**: A strategy to automatically tune hyperparameters to achieve optimal performance.
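-
-As a rough sketch of the warmup plus cosine LR strategy listed above (the step counts and learning-rate bounds are assumptions, not YOLOv5's exact hyperparameters):
-
-```python
-import math
-
-
-def lr_at_step(step, warmup_steps=1000, total_steps=100000, lr_max=0.01, lr_min=0.0002):
-    # Linear warmup from 0 to lr_max, then cosine decay from lr_max down to lr_min
-    if step < warmup_steps:
-        return lr_max * step / warmup_steps
-    progress = (step - warmup_steps) / (total_steps - warmup_steps)
-    return lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * progress))
-```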
-
-## 4. Additional Features
-
-### 4.1 Compute Losses
-
-The loss in YOLOv5 is computed as a combination of three individual loss components:
-
-- **Classes Loss (BCE Loss)**: Binary Cross-Entropy loss, measures the error for the classification task.
-- **Objectness Loss (BCE Loss)**: Another Binary Cross-Entropy loss, calculates the error in detecting whether an object is present in a particular grid cell or not.
-- **Location Loss (CIoU Loss)**: Complete IoU loss, measures the error in localizing the object within the grid cell.
-
-The overall loss function is depicted by:
-
-Loss = λ1 · L_cls + λ2 · L_obj + λ3 · L_loc
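-
-A schematic sketch of how the three components are combined into a single training loss; the gain names and default-like values are assumptions, not the exact implementation:
-
-```python
-def total_loss(l_box, l_obj, l_cls, batch_size, gain_box=0.05, gain_obj=1.0, gain_cls=0.5):
-    # Weight each component loss by its gain, then scale the sum by the batch size,
-    # mirroring the overall formula above (gains are illustrative defaults)
-    return (gain_box * l_box + gain_obj * l_obj + gain_cls * l_cls) * batch_size
-```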
-
-### 4.2 Balance Losses
-
-The objectness losses of the three prediction layers (`P3`, `P4`, `P5`) are weighted differently. The balance weights are `[4.0, 1.0, 0.4]` respectively. This approach ensures that the predictions at different scales contribute appropriately to the total loss.
-
-L_obj = 4.0 · L_obj_small + 1.0 · L_obj_medium + 0.4 · L_obj_large
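-
-A minimal sketch of applying these balance weights to the per-layer objectness losses (illustrative only):
-
-```python
-def balanced_objectness_loss(obj_losses, balance=(4.0, 1.0, 0.4)):
-    # obj_losses: per-layer BCE objectness losses for P3, P4 and P5, in that order
-    return sum(w * l for w, l in zip(balance, obj_losses))
-```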
-
-### 4.3 Eliminate Grid Sensitivity
-
-The YOLOv5 architecture makes some important changes to the box prediction strategy compared to earlier versions of YOLO. In YOLOv2 and YOLOv3, the box coordinates were directly predicted using the activation of the last layer.
-
-b_x = σ(t_x) + c_x
-b_y = σ(t_y) + c_y
-b_w = p_w · e^(t_w)
-b_h = p_h · e^(t_h)
-
-
-However, in YOLOv5, the formula for predicting the box coordinates has been updated to reduce grid sensitivity and prevent the model from predicting unbounded box dimensions.
-
-The revised formulas for calculating the predicted bounding box are as follows:
-
-b_x = (2 · σ(t_x) - 0.5) + c_x
-b_y = (2 · σ(t_y) - 0.5) + c_y
-b_w = p_w · (2 · σ(t_w))^2
-b_h = p_h · (2 · σ(t_h))^2
-
-Compare the center point offset before and after scaling. The center point offset range is adjusted from (0, 1) to (-0.5, 1.5),
-so an offset of exactly 0 or 1 can now be reached easily, which a plain sigmoid could only approach asymptotically.
-
-
-
-Compare the height and width scaling ratio (relative to the anchor) before and after adjustment. The original yolo/darknet box equations have a serious flaw: width and height are completely unbounded, as they are simply out = exp(in), which is dangerous, since it can lead to runaway gradients, instabilities, NaN losses and ultimately a complete loss of training. [Refer to this issue](https://github.com/ultralytics/yolov5/issues/471#issuecomment-662009779).
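-
-A small sketch of this revised decoding, assuming `raw` holds the network outputs and `grid_xy` / `anchor_wh` the cell offsets and anchor sizes (the tensor names are illustrative):
-
-```python
-import torch
-
-
-def decode_boxes(raw, grid_xy, anchor_wh):
-    # raw[..., 0:2]: raw x/y outputs, raw[..., 2:4]: raw w/h outputs
-    xy = raw[..., 0:2].sigmoid() * 2.0 - 0.5 + grid_xy      # offset range (-0.5, 1.5)
-    wh = (raw[..., 2:4].sigmoid() * 2.0) ** 2 * anchor_wh   # bounded scaling, no exp()
-    return torch.cat((xy, wh), dim=-1)
-```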
-
-
-
-### 4.4 Build Targets
-
-The build target process in YOLOv5 is critical for training efficiency and model accuracy. It involves assigning ground truth boxes to the appropriate grid cells in the output map and matching them with the appropriate anchor boxes.
-
-This process follows these steps:
-
-- Calculate the ratio of the ground truth box dimensions and the dimensions of each anchor template.
-
-
-r_w = w_gt / w_anchor
-r_h = h_gt / h_anchor
-r_w^max = max(r_w, 1 / r_w)
-r_h^max = max(r_h, 1 / r_h)
-r^max = max(r_w^max, r_h^max)
-
-
-- If the calculated ratio is within the threshold, match the ground truth box with the corresponding anchor.
-
-
-
-- Assign the matched anchor to the appropriate cells, keeping in mind that, because the center point offset range is adjusted from (0, 1) to (-0.5, 1.5), a ground truth box can be assigned to more than one anchor and to neighbouring grid cells.
-
-
-
-This way, the build targets process ensures that each ground truth object is properly assigned and matched during the training process, allowing YOLOv5 to learn the task of object detection more effectively.
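-
-The ratio and matching steps above can be sketched as follows; the shapes and the `anchor_t` threshold name are assumptions based on the hyperparameter of the same name:
-
-```python
-import torch
-
-
-def match_anchors(gt_wh, anchor_wh, anchor_t=4.0):
-    # gt_wh: (n, 2) ground-truth widths/heights, anchor_wh: (m, 2) anchor template sizes
-    r = gt_wh[:, None, :] / anchor_wh[None, :, :]        # (n, m, 2) width/height ratios
-    r_max = torch.max(r, 1.0 / r).max(dim=2).values      # worst-case ratio per pair
-    return r_max < anchor_t                              # (n, m) boolean match matrix
-```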
-
-## Conclusion
-
-In conclusion, YOLOv5 represents a significant step forward in the development of real-time object detection models. By incorporating various new features, enhancements, and training strategies, it surpasses previous versions of the YOLO family in performance and efficiency.
-
-The primary enhancements in YOLOv5 include the use of a dynamic architecture, an extensive range of data augmentation techniques, innovative training strategies, as well as important adjustments in computing losses and the process of building targets. All these innovations significantly improve the accuracy and efficiency of object detection while retaining a high degree of speed, which is the trademark of YOLO models.
\ No newline at end of file
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/clearml_logging_integration.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/clearml_logging_integration.md
deleted file mode 100644
index 3d8672d09cc46e42a9e2f8ebe69323876cb388e0..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/clearml_logging_integration.md
+++ /dev/null
@@ -1,243 +0,0 @@
----
-comments: true
-description: Integrate ClearML with YOLOv5 to track experiments and manage data versions. Optimize hyperparameters and remotely monitor your runs.
-keywords: YOLOv5, ClearML, experiment manager, remotely train, monitor, hyperparameter optimization, data versioning tool, HPO, data version management, optimization locally, agent, training progress, custom YOLOv5, AI development, model building
----
-
-# ClearML Integration
-
-
-
-## About ClearML
-
-[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️.
-
-🔨 Track every YOLOv5 training run in the experiment manager
-
-🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool
-
-🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent
-
-🔬 Get the very best mAP using ClearML Hyperparameter Optimization
-
-🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving
-
-
-And so much more. It's up to you how many of these tools you want to use; you can stick to the experiment manager, or chain them all together into an impressive pipeline!
-
-
-
-
-
-
-
-
-## 🦾 Setting Things Up
-
-To keep track of your experiments and/or data, ClearML needs to communicate with a server. You have 2 options to get one:
-
-Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). The server itself is open-source too, so even if you're dealing with sensitive data, you should be good to go!
-
-1. Install the `clearml` python package:
-
- ```bash
- pip install clearml
- ```
-
-2. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions:
-
- ```bash
- clearml-init
- ```
-
-That's it! You're done 😎
-
-
-
-## 🚀 Training YOLOv5 With ClearML
-
-To enable ClearML experiment tracking, simply install the ClearML pip package.
-
-```bash
-pip install clearml>=1.2.0
-```
-
-This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager.
-
-If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script. By default, the project will be called `YOLOv5` and the task `Training`.
-PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!
-
-```bash
-python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
-```
-
-or with custom project and task name:
-
-```bash
-python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
-```
-
-This will capture:
-
-- Source code + uncommitted changes
-- Installed packages
-- (Hyper)parameters
-- Model files (use `--save-period n` to save a checkpoint every n epochs)
-- Console output
-- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...)
-- General info such as machine details, runtime, creation date etc.
-- All produced plots such as label correlogram and confusion matrix
-- Images with bounding boxes per epoch
-- Mosaic per epoch
-- Validation images per epoch
-- ...
-
-That's a lot right? 🤯
-Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them!
-
-There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
-
-
-
-## 🔗 Dataset Version Management
-
-Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!
-
-
-
-### Prepare Your Dataset
-
-The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default datasets are downloaded to the `../datasets` folder in relation to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure:
-
-```
-..
-|_ yolov5
-|_ datasets
- |_ coco128
- |_ images
- |_ labels
- |_ LICENSE
- |_ README.txt
-```
-
-But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure.
-
-Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls.
-
-Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`.
-
-```
-..
-|_ yolov5
-|_ datasets
- |_ coco128
- |_ images
- |_ labels
- |_ coco128.yaml # <---- HERE!
- |_ LICENSE
- |_ README.txt
-```
-
-### Upload Your Dataset
-
-To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command:
-
-```bash
-cd coco128
-clearml-data sync --project YOLOv5 --name coco128 --folder .
-```
-
-The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other:
-
-```bash
-# Optionally add --parent if you want to base
-# this version on another dataset version, so no duplicate files are uploaded!
-clearml-data create --name coco128 --project YOLOv5
-clearml-data add --files .
-clearml-data close
-```
-
-### Run Training Using A ClearML Dataset
-
-Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models!
-
-```bash
-python train.py --img 640 --batch 16 --epochs 3 --data clearml://<your_dataset_id> --weights yolov5s.pt --cache
-```
-
-
-
-## 👀 Hyperparameter Optimization
-
-Now that we have our experiments and data versioned, it's time to take a look at what we can build on top!
-
-Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically; this is basically what HPO does!
-
-To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager; we will essentially clone it and change its hyperparameters.
-
-You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead.
-
-```bash
-# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch
-pip install optuna
-python utils/loggers/clearml/hpo.py
-```
-
-
-
-## 🤯 Remote Execution (advanced)
-
-Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs.
-This is where the ClearML Agent comes into play. Check out what the agent can do here:
-
-- [YouTube video](https://youtu.be/MX3BrXnaULs)
-- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent)
-
-In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager.
-
-You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running:
-
-```bash
-clearml-agent daemon --queue <queues_to_listen_to> [--docker]
-```
-
-### Cloning, Editing And Enqueuing
-
-With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too!
-
-🪄 Clone the experiment by right-clicking it
-
-🎯 Edit the hyperparameters to what you wish them to be
-
-⏳ Enqueue the task to any of the queues by right-clicking it
-
-
-
-### Executing A Task Remotely
-
-Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on!
-
-To run the YOLOv5 training script remotely, all you have to do is add this line to the train.py script after the ClearML logger has been instantiated:
-
-```python
-# ...
-# Loggers
-data_dict = None
-if RANK in {-1, 0}:
- loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
- if loggers.clearml:
- loggers.clearml.task.execute_remotely(queue="my_queue") # <------ ADD THIS LINE
- # data_dict is either None if the user did not choose a ClearML dataset, or is filled in by ClearML
- data_dict = loggers.clearml.data_dict
-# ...
-```
-
-When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead!
-
-### Autoscaling workers
-
-ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying!
-
-Check out the autoscalers getting started video below.
-
-[](https://youtu.be/j4XVMAaUt3E)
\ No newline at end of file
diff --git a/spaces/vobecant/DaS/segmenter_model/vit_dino.py b/spaces/vobecant/DaS/segmenter_model/vit_dino.py
deleted file mode 100644
index d733c0505f603a43de4616354f6694d02ebf7888..0000000000000000000000000000000000000000
--- a/spaces/vobecant/DaS/segmenter_model/vit_dino.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copied from DINO
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Mostly copy-paste from timm library.
-https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
-"""
-import math
-import warnings
-from functools import partial
-
-import torch
-import torch.nn as nn
-
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
- # Cut & paste from PyTorch official master until it's in a few official releases - RW
- # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
- def norm_cdf(x):
- # Computes standard normal cumulative distribution function
- return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
- if (mean < a - 2 * std) or (mean > b + 2 * std):
- warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
- "The distribution of values may be incorrect.",
- stacklevel=2)
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- l = norm_cdf((a - mean) / std)
- u = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [l, u], then translate to
- # [2l-1, 2u-1].
- tensor.uniform_(2 * l - 1, 2 * u - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
- # type: (Tensor, float, float, float, float) -> Tensor
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-
-def drop_path(x, drop_prob: float = 0., training: bool = False):
- if drop_prob == 0. or not training:
- return x
- keep_prob = 1 - drop_prob
- shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
- random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
- random_tensor.floor_() # binarize
- output = x.div(keep_prob) * random_tensor
- return output
-
-
-class DropPath(nn.Module):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- """
-
- def __init__(self, drop_prob=None):
- super(DropPath, self).__init__()
- self.drop_prob = drop_prob
-
- def forward(self, x):
- return drop_path(x, self.drop_prob, self.training)
-
-
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class Attention(nn.Module):
- def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
- super().__init__()
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- def forward(self, x):
- B, N, C = x.shape
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2]
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x, attn
-
-
-class Block(nn.Module):
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.norm1 = norm_layer(dim)
- self.attn = Attention(
- dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x, return_attention=False):
- y, attn = self.attn(self.norm1(x))
- if return_attention:
- return attn
- x = x + self.drop_path(y)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- return x
-
-
-class PatchEmbed(nn.Module):
- """ Image to Patch Embedding
- """
-
- def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
- super().__init__()
- num_patches = (img_size // patch_size) * (img_size // patch_size)
- self.img_size = img_size
- self.patch_size = patch_size
- self.num_patches = num_patches
-
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-
- def forward(self, x):
- B, C, H, W = x.shape
- x = self.proj(x).flatten(2).transpose(1, 2)
- return x
-
-
-class VisionTransformer(nn.Module):
- """ Vision Transformer """
-
- def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
- num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
- drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
- super().__init__()
- self.num_features = self.embed_dim = embed_dim
-
- self.patch_embed = PatchEmbed(
- img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
- num_patches = self.patch_embed.num_patches
-
- self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
- self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
- self.blocks = nn.ModuleList([
- Block(
- dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
- for i in range(depth)])
- self.norm = norm_layer(embed_dim)
-
- # Classifier head
- self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
-
- trunc_normal_(self.pos_embed, std=.02)
- trunc_normal_(self.cls_token, std=.02)
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- def interpolate_pos_encoding(self, x, w, h):
- npatch = x.shape[1] - 1
- N = self.pos_embed.shape[1] - 1
- if npatch == N and w == h:
- return self.pos_embed
- class_pos_embed = self.pos_embed[:, 0]
- patch_pos_embed = self.pos_embed[:, 1:]
- dim = x.shape[-1]
- w0 = w // self.patch_embed.patch_size
- h0 = h // self.patch_embed.patch_size
- # we add a small number to avoid floating point error in the interpolation
- # see discussion at https://github.com/facebookresearch/dino/issues/8
- w0, h0 = w0 + 0.1, h0 + 0.1
- patch_pos_embed = nn.functional.interpolate(
- patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
- scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
- mode='bicubic',
- )
- assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
- patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
- return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
-
- def prepare_tokens(self, x):
- B, nc, w, h = x.shape
- x = self.patch_embed(x) # patch linear embedding
-
- # add the [CLS] token to the embed patch tokens
- cls_tokens = self.cls_token.expand(B, -1, -1)
- x = torch.cat((cls_tokens, x), dim=1)
-
- # add positional encoding to each token
- x = x + self.interpolate_pos_encoding(x, w, h)
-
- return self.pos_drop(x)
-
- def forward(self, x):
- x = self.prepare_tokens(x)
- for blk in self.blocks:
- x = blk(x)
- x = self.norm(x)
- return x[:, 0]
-
- def get_last_selfattention(self, x):
- x = self.prepare_tokens(x)
- for i, blk in enumerate(self.blocks):
- if i < len(self.blocks) - 1:
- x = blk(x)
- else:
- # return attention of the last block
- return blk(x, return_attention=True)
-
- def get_n_last_selfattentions(self, x, layers_from_end=(1,)):
- x = self.prepare_tokens(x)
- attentions = []
- for i, blk in enumerate(self.blocks):
- num_from_end = len(self.blocks) - i
- if num_from_end in layers_from_end:
- # get attention of the block
- attn = blk(x, return_attention=True)
- attentions.append(attn)
- x = blk(x)
- return attentions
-
- def get_intermediate_layers(self, x, n=1):
- x = self.prepare_tokens(x)
- # we return the output tokens from the `n` last blocks
- output = []
- for i, blk in enumerate(self.blocks):
- x = blk(x)
- if len(self.blocks) - i <= n:
- output.append(self.norm(x))
- return output
-
-
-def vit_tiny(patch_size=16, **kwargs):
- model = VisionTransformer(
- patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
- qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
- return model
-
-
-def vit_small(patch_size=16, **kwargs):
- model = VisionTransformer(
- patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
- qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
- return model
-
-
-def vit_base(patch_size=16, **kwargs):
- model = VisionTransformer(
- patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
- qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
- return model
-
-
-class DINOHead(nn.Module):
- def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048,
- bottleneck_dim=256):
- super().__init__()
- nlayers = max(nlayers, 1)
- if nlayers == 1:
- self.mlp = nn.Linear(in_dim, bottleneck_dim)
- else:
- layers = [nn.Linear(in_dim, hidden_dim)]
- if use_bn:
- layers.append(nn.BatchNorm1d(hidden_dim))
- layers.append(nn.GELU())
- for _ in range(nlayers - 2):
- layers.append(nn.Linear(hidden_dim, hidden_dim))
- if use_bn:
- layers.append(nn.BatchNorm1d(hidden_dim))
- layers.append(nn.GELU())
- layers.append(nn.Linear(hidden_dim, bottleneck_dim))
- self.mlp = nn.Sequential(*layers)
- self.apply(self._init_weights)
- self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
- self.last_layer.weight_g.data.fill_(1)
- if norm_last_layer:
- self.last_layer.weight_g.requires_grad = False
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x = self.mlp(x)
- x = nn.functional.normalize(x, dim=-1, p=2)
- x = self.last_layer(x)
- return x
diff --git a/spaces/volhack/vits-uma-genshin-honkai/models.py b/spaces/volhack/vits-uma-genshin-honkai/models.py
deleted file mode 100644
index 52e15d1b9775038fd6e82b2efe6f95f51c66802d..0000000000000000000000000000000000000000
--- a/spaces/volhack/vits-uma-genshin-honkai/models.py
+++ /dev/null
@@ -1,534 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # it needs to be removed from future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if use_sdp:
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- else:
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
- if self.use_sdp:
- l_length = self.dp(x, x_mask, w, g=g)
- l_length = l_length / torch.sum(x_mask)
- else:
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
- device = next(self.parameters()).device # get the device the model is on
- x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device))
- if self.n_speakers > 0:
- g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- if self.use_sdp:
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- else:
- logw = self.dp(x, x_mask, g=g)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
- assert self.n_speakers > 0, "n_speakers have to be larger than 0."
- g_src = self.emb_g(sid_src).unsqueeze(-1)
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
- z_p = self.flow(z, y_mask, g=g_src)
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
- return o_hat, y_mask, (z, z_p, z_hat)
-
diff --git a/spaces/vrajeshbhatt/Automated-Ticket-Management-System/README.md b/spaces/vrajeshbhatt/Automated-Ticket-Management-System/README.md
deleted file mode 100644
index d8bc01f098f26c48857cd89bd18a5c9eebbac0b3..0000000000000000000000000000000000000000
--- a/spaces/vrajeshbhatt/Automated-Ticket-Management-System/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Automated Ticket Management System
-emoji: 🚀
-colorFrom: blue
-colorTo: blue
-sdk: gradio
-sdk_version: 2.8.13
-python_version: 3.7.9
-app_file: app.py
-pinned: false
----
\ No newline at end of file
diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py
deleted file mode 100644
index b45e758ac6cf8dfb0382d072fe09125bc7e9b888..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-
-from torch import nn
-from torch.nn import functional as F
-
-from .registry import CONV_LAYERS
-
-
-@CONV_LAYERS.register_module()
-class Conv2dAdaptivePadding(nn.Conv2d):
- """Implementation of 2D convolution in tensorflow with `padding` as "same",
- which applies padding to input (if needed) so that input image gets fully
- covered by filter and stride you specified. For stride 1, this will ensure
- that output image size is same as input. For stride of 2, output dimensions
- will be half, for example.
-
- Args:
- in_channels (int): Number of channels in the input image
- out_channels (int): Number of channels produced by the convolution
- kernel_size (int or tuple): Size of the convolving kernel
- stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- dilation (int or tuple, optional): Spacing between kernel elements.
- Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If ``True``, adds a learnable bias to the
- output. Default: ``True``
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True):
- super().__init__(in_channels, out_channels, kernel_size, stride, 0,
- dilation, groups, bias)
-
- def forward(self, x):
- img_h, img_w = x.size()[-2:]
- kernel_h, kernel_w = self.weight.size()[-2:]
- stride_h, stride_w = self.stride
- output_h = math.ceil(img_h / stride_h)
- output_w = math.ceil(img_w / stride_w)
- pad_h = (
- max((output_h - 1) * self.stride[0] +
- (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0))
- pad_w = (
- max((output_w - 1) * self.stride[1] +
- (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0))
- if pad_h > 0 or pad_w > 0:
- x = F.pad(x, [
- pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
- ])
- return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
- self.dilation, self.groups)
diff --git a/spaces/wydgg/bingo-wyd-ai/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/wydgg/bingo-wyd-ai/src/lib/hooks/use-copy-to-clipboard.tsx
deleted file mode 100644
index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000
--- a/spaces/wydgg/bingo-wyd-ai/src/lib/hooks/use-copy-to-clipboard.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-export interface useCopyToClipboardProps {
- timeout?: number
-}
-
-export function useCopyToClipboard({
- timeout = 2000
-}: useCopyToClipboardProps) {
- const [isCopied, setIsCopied] = React.useState(false)
-
- const copyToClipboard = (value: string) => {
- if (typeof window === 'undefined' || !navigator.clipboard?.writeText) {
- return
- }
-
- if (!value) {
- return
- }
-
- navigator.clipboard.writeText(value).then(() => {
- setIsCopied(true)
-
- setTimeout(() => {
- setIsCopied(false)
- }, timeout)
- })
- }
-
- return { isCopied, copyToClipboard }
-}
diff --git a/spaces/wydgg/bingo-wyd-ai/tests/parse.ts b/spaces/wydgg/bingo-wyd-ai/tests/parse.ts
deleted file mode 100644
index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000
--- a/spaces/wydgg/bingo-wyd-ai/tests/parse.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import { promises as fs } from 'fs'
-import { join } from 'path'
-import { parseHeadersFromCurl } from '@/lib/utils'
-
-(async () => {
- const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8')
- const headers = parseHeadersFromCurl(content)
- console.log(headers)
-
- const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8')
- const cmdHeaders = parseHeadersFromCurl(cmdContent)
- console.log(cmdHeaders)
-})()
diff --git a/spaces/xl2533/MakeInstruction/app.py b/spaces/xl2533/MakeInstruction/app.py
deleted file mode 100644
index 1d609fc81b98af7daf5f8e5c4255d9e02e87a7a2..0000000000000000000000000000000000000000
--- a/spaces/xl2533/MakeInstruction/app.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# -*-coding:utf-8 -*-
-import os
-import gradio as gr
-from ape.instance import LoadFactory
-from ape.prompt import MyTemplate
-from ape.ape import *
-from self.generate import init_instance, generate_instruction
-from self.prompt import self_prompt
-
-with gr.Blocks(title="Automatic Prompt Engineer", theme=gr.themes.Glass()) as demo:
- gr.Markdown("# Automatic Prompt Engineer")
- with gr.Row().style(equal_height=True):
- with gr.Column(scale=2):
- gr.Markdown("## 第一步:输入参数")
- with gr.Row():
- openai_key = gr.Textbox(type='password', label='输入 API key')
- with gr.Row():
- n_train = gr.Slider(label="训练样本数", minimum=1, maximum=20, step=1, value=5)
- n_few_shot = gr.Slider(label="每组几个样例", minimum=1, maximum=20, step=1, value=5)
-
- with gr.Row():
- n_eval = gr.Slider(label="评估样本数", minimum=1, maximum=30, step=5, value=20)
-
- with gr.Column(scale=3):
- gr.Markdown("## 第二步:加载数据(选任务或上传数据)")
- with gr.Tab("选择数据"):
- with gr.Row().style(equal_height=True):
- file = gr.File(label='上传txt文件,input[空格]output[换行]')
- with gr.Row().style(equal_height=True):
- task = gr.Dropdown(label="Chosse Existing Task", choices=list(LoadFactory.keys()), value=None)
- with gr.Row().style(equal_height=True):
- instance = gr.State()
- load_button = gr.Button("Load Task")
- load_flag = gr.Textbox()
- sample_button = gr.Button('Sample Data')
- sample_flag = gr.Textbox()
-
- with gr.Tab("展示数据"):
- with gr.Row():
- train_str = gr.Textbox(max_lines=100, lines=10, label="Data for prompt generation")
- eval_str = gr.Textbox(max_lines=100, lines=10, label="Data for scoring")
-
- with gr.Row().style(equal_height=True):
- with gr.Column(scale=2):
- gr.Markdown("## 第三步: Run APE(可替换默认指令)")
- gen_prompt = gr.Textbox(max_lines=100, lines=3, interactive=True,
- placeholder=MyTemplate['gen_user_prompt'],
- value='', label="Prompt for generation")
- eval_prompt = gr.Textbox(max_lines=100, lines=3, interactive=True,
- placeholder=MyTemplate['eval_prompt'],
- value='', label="Prompt for Evaluation")
- test_prompt = gr.Textbox(max_lines=100, lines=3, interactive=True,
- placeholder=MyTemplate['test_prompt'],
- value='', label="Prompt for Single Test")
-
- with gr.Row().style(equal_height=True):
- cost = gr.Textbox(lines=1, value="", label="Estimated Cost ($)")
- cost_button = gr.Button("Estimate Cost")
- with gr.Row().style(equal_height=True):
- gen_button = gr.Button("Generate")
- eval_button = gr.Button("Eval")
-
- with gr.Column(scale=3):
- gr.Markdown("## 第四步:APE 结果")
- with gr.Tab("生成指令"):
- all_prompt = gr.Textbox(label='Generated Prompt')
- # Display all generated prompt with log probs
- output_df = gr.DataFrame(type='pandas', headers=['Prompt', 'Likelihood'], wrap=True, interactive=False)
-
- with gr.Tab("指令单测"):
- # Test the output of LLM using prompt
- with gr.Row().style(equal_height=True):
- with gr.Column(scale=1):
- test_instruction = gr.Textbox(lines=4, value="", label="Prompt to test")
- test_input = gr.Textbox(lines=4, value="", label="Inputs used to test prompt[多个输入以换行分割]")
- test_button = gr.Button("Test")
- with gr.Column(scale=1):
- test_output = gr.Textbox(lines=9, value="", label="Model Output")
-
- with gr.Tab("指令评估"):
- # By Default use the Evaluation Set in APE
- with gr.Row().style(equal_height=True):
- with gr.Column(scale=1):
- score_instruction = gr.Textbox(lines=3, value="",
- label="Prompt to Evaluate")
- score_button = gr.Button("Evaluate")
- with gr.Column(scale=1):
- test_score = gr.Textbox(lines=1, value="", label="Log(p)", disabled=True)
-
- gr.Markdown('\n\n')
- gr.Markdown('--------')
- gr.Markdown('\n\n')
- gr.Markdown("# SELF INSTRUCT")
- gr.Markdown('## 第一步:确认参数并上传种子指令')
- with gr.Row().style(equal_height=True):
- with gr.Column():
- n_human = gr.Slider(label="人工指令数", minimum=1, maximum=5, step=1, value=2)
- n_machine = gr.Slider(label="机器指令数", minimum=1, maximum=5, step=1, value=1)
- n_instruct = gr.Slider(label="生成指令数", minimum=1, maximum=100, step=1, value=4, help="生成指令数>人工+机器")
- self_prompt_input = gr.Textbox(max_lines=100, lines=20, interactive=True,
- placeholder=self_prompt,
- value='', label="Prompt for self-instruct")
- with gr.Column():
- openai_key2 = gr.Textbox(type='password', label='输入 API key')
- seed_file = gr.File(label='上传json文件, 格式参考./self/data/seed_task.json')
- self_submit = gr.Button('上传')
- self_instance = gr.State()
-
- gr.Markdown('\n\n')
- gr.Markdown('## 第二步:采样并生成新指令,每点一次会重采样并生成,生成结果会累计')
- with gr.Row().style(equal_height=True):
- with gr.Column(scale=1):
- fewshot = gr.Textbox(label='采样few-shot')
- with gr.Column(scale=1):
- gen_data = gr.JSON(label='新生成指令样本')
-
- with gr.Row().style(equal_height=True):
- with gr.Column(scale=7):
- generate_instruct_button = gr.Button("指令生成")
- with gr.Column(scale=1):
- counter = gr.Textbox()
-
- """
- APE Callback
- """
- # 1. Select an existing task or upload a file, then instantiate the Instance
- load_button.click(load_task, [task, file], [instance, load_flag])
-
- # 2. Sample data according to the configuration to get the training and evaluation sets and show them in the UI; re-sampling is supported
- sample_button.click(sample_data, [instance, n_train, n_few_shot, n_eval],
- [train_str, eval_str, instance, sample_flag])
-
- # 3. Estimate Cost for train + Eval
- cost_button.click(esttimate_cost, [instance], [cost])
-
- # 4. Run APE -> all generated prompts
- gen_button.click(generate, [gen_prompt, instance, openai_key], [all_prompt])
-
- # 5. Evaluate -> get the log prob of every generated prompt
- eval_button.click(evaluate, [eval_prompt, all_prompt, instance, openai_key], [output_df])
-
- # 6. Single test of a user-provided prompt
- test_button.click(single_test, [test_prompt, test_instruction, test_input, openai_key], [test_output])
-
- # 7. Score a user-provided prompt
- score_button.click(score_single, [eval_prompt, instance, score_instruction, openai_key], [test_score])
-
- """
- SELF Callback
- """
- # 1. Load the seed file
- self_submit.click(init_instance, inputs=[seed_file, openai_key2, n_human, n_machine, n_instruct, self_prompt_input],
- outputs=[self_instance])
-
- # 2. Generate
- generate_instruct_button.click(generate_instruction, inputs=[self_instance], outputs=[fewshot, gen_data, counter])
-
- demo.launch(show_error=True)
diff --git a/spaces/xnetba/Chat_advance/assets/custom.js b/spaces/xnetba/Chat_advance/assets/custom.js
deleted file mode 100644
index f013209931218fd054979e290706f1945de76856..0000000000000000000000000000000000000000
--- a/spaces/xnetba/Chat_advance/assets/custom.js
+++ /dev/null
@@ -1,502 +0,0 @@
-
-// custom javascript here
-
-const MAX_HISTORY_LENGTH = 32;
-
-var key_down_history = [];
-var currentIndex = -1;
-var user_input_ta;
-
-var gradioContainer = null;
-var user_input_ta = null;
-var user_input_tb = null;
-var userInfoDiv = null;
-var appTitleDiv = null;
-var chatbot = null;
-var chatbotWrap = null;
-var apSwitch = null;
-var empty_botton = null;
-var messageBotDivs = null;
-var loginUserForm = null;
-var logginUser = null;
-
-var userLogged = false;
-var usernameGotten = false;
-var historyLoaded = false;
-
-var ga = document.getElementsByTagName("gradio-app");
-var targetNode = ga[0];
-var isInIframe = (window.self !== window.top);
-var language = navigator.language.slice(0,2);
-
-var forView_i18n = {
- 'zh': "仅供查看",
- 'en': "For viewing only",
- 'ja': "閲覧専用",
- 'fr': "Pour consultation seulement",
- 'es': "Solo para visualización",
-};
-
-// Has the gradio page finished loading? Can we touch its elements yet?
-function gradioLoaded(mutations) {
- for (var i = 0; i < mutations.length; i++) {
- if (mutations[i].addedNodes.length) {
- loginUserForm = document.querySelector(".gradio-container > .main > .wrap > .panel > .form")
- gradioContainer = document.querySelector(".gradio-container");
- user_input_tb = document.getElementById('user_input_tb');
- userInfoDiv = document.getElementById("user_info");
- appTitleDiv = document.getElementById("app_title");
- chatbot = document.querySelector('#chuanhu_chatbot');
- chatbotWrap = document.querySelector('#chuanhu_chatbot > .wrap');
- apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
- empty_botton = document.getElementById("empty_btn")
-
- if (loginUserForm) {
- localStorage.setItem("userLogged", true);
- userLogged = true;
- }
-
- if (gradioContainer && apSwitch) { // has gradioContainer been rendered yet?
- adjustDarkMode();
- }
- if (user_input_tb) { // has user_input_tb been rendered yet?
- selectHistory();
- }
- if (userInfoDiv && appTitleDiv) { // have userInfoDiv and appTitleDiv been rendered yet?
- if (!usernameGotten) {
- getUserInfo();
- }
- setTimeout(showOrHideUserInfo, 2000);
- }
- if (chatbot) { // has the chatbot been rendered yet?
- setChatbotHeight();
- }
- if (chatbotWrap) {
- if (!historyLoaded) {
- loadHistoryHtml();
- }
- setChatbotScroll();
- }
- if (empty_botton) {
- emptyHistory();
- }
- }
- }
-}
-
-function webLocale() {
- console.log("webLocale", language);
- if (forView_i18n.hasOwnProperty(language)) {
- var forView = forView_i18n[language];
- var forViewStyle = document.createElement('style');
- forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }';
- document.head.appendChild(forViewStyle);
- // console.log("added forViewStyle", forView);
- }
-}
-
-function selectHistory() {
- user_input_ta = user_input_tb.querySelector("textarea");
- if (user_input_ta) {
- observer.disconnect(); // stop observing
- // listen for keydown events on the textarea
- user_input_ta.addEventListener("keydown", function (event) {
- var value = user_input_ta.value.trim();
- // check whether an arrow key was pressed
- if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
- // if an arrow key was pressed, the input box has content, and that content is not yet in the history, do nothing
- if (value && key_down_history.indexOf(value) === -1)
- return;
- // prevent the default behavior for the actions we handle
- event.preventDefault();
- var length = key_down_history.length;
- if (length === 0) {
- currentIndex = -1; // if the history is empty, just reset the current selection
- return;
- }
- if (currentIndex === -1) {
- currentIndex = length;
- }
- if (event.code === 'ArrowUp' && currentIndex > 0) {
- currentIndex--;
- user_input_ta.value = key_down_history[currentIndex];
- } else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
- currentIndex++;
- user_input_ta.value = key_down_history[currentIndex];
- }
- user_input_ta.selectionStart = user_input_ta.value.length;
- user_input_ta.selectionEnd = user_input_ta.value.length;
- const input_event = new InputEvent("input", { bubbles: true, cancelable: true });
- user_input_ta.dispatchEvent(input_event);
- } else if (event.code === "Enter") {
- if (value) {
- currentIndex = -1;
- if (key_down_history.indexOf(value) === -1) {
- key_down_history.push(value);
- if (key_down_history.length > MAX_HISTORY_LENGTH) {
- key_down_history.shift();
- }
- }
- }
- }
- });
- }
-}
-
-var username = null;
-function getUserInfo() {
- if (usernameGotten) {
- return;
- }
- userLogged = localStorage.getItem('userLogged');
- if (userLogged) {
- username = userInfoDiv.innerText;
- if (username) {
- if (username.includes("getting user info…")) {
- setTimeout(getUserInfo, 500);
- return;
- } else if (username === " ") {
- localStorage.removeItem("username");
- localStorage.removeItem("userLogged")
- userLogged = false;
- usernameGotten = true;
- return;
- } else {
- username = username.match(/User:\s*(.*)/)[1] || username;
- localStorage.setItem("username", username);
- usernameGotten = true;
- clearHistoryHtml();
- }
- }
- }
-}
-
-function toggleUserInfoVisibility(shouldHide) {
- if (userInfoDiv) {
- if (shouldHide) {
- userInfoDiv.classList.add("hideK");
- } else {
- userInfoDiv.classList.remove("hideK");
- }
- }
-}
-function showOrHideUserInfo() {
- var sendBtn = document.getElementById("submit_btn");
-
- // Bind mouse/touch events to show/hide user info
- appTitleDiv.addEventListener("mouseenter", function () {
- toggleUserInfoVisibility(false);
- });
- userInfoDiv.addEventListener("mouseenter", function () {
- toggleUserInfoVisibility(false);
- });
- sendBtn.addEventListener("mouseenter", function () {
- toggleUserInfoVisibility(false);
- });
-
- appTitleDiv.addEventListener("mouseleave", function () {
- toggleUserInfoVisibility(true);
- });
- userInfoDiv.addEventListener("mouseleave", function () {
- toggleUserInfoVisibility(true);
- });
- sendBtn.addEventListener("mouseleave", function () {
- toggleUserInfoVisibility(true);
- });
-
- appTitleDiv.ontouchstart = function () {
- toggleUserInfoVisibility(false);
- };
- userInfoDiv.ontouchstart = function () {
- toggleUserInfoVisibility(false);
- };
- sendBtn.ontouchstart = function () {
- toggleUserInfoVisibility(false);
- };
-
- appTitleDiv.ontouchend = function () {
- setTimeout(function () {
- toggleUserInfoVisibility(true);
- }, 3000);
- };
- userInfoDiv.ontouchend = function () {
- setTimeout(function () {
- toggleUserInfoVisibility(true);
- }, 3000);
- };
- sendBtn.ontouchend = function () {
- setTimeout(function () {
- toggleUserInfoVisibility(true);
- }, 3000); // delay 3 seconds before hiding user info
- };
-
- // Hide user info after 2 seconds
- setTimeout(function () {
- toggleUserInfoVisibility(true);
- }, 2000);
-}
-
-function toggleDarkMode(isEnabled) {
- if (isEnabled) {
- document.body.classList.add("dark");
- document.body.style.setProperty("background-color", "var(--neutral-950)", "important");
- } else {
- document.body.classList.remove("dark");
- document.body.style.backgroundColor = "";
- }
-}
-function adjustDarkMode() {
- const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)");
-
- // set the initial state from the current color scheme
- apSwitch.checked = darkModeQuery.matches;
- toggleDarkMode(darkModeQuery.matches);
- // listen for color-scheme changes
- darkModeQuery.addEventListener("change", (e) => {
- apSwitch.checked = e.matches;
- toggleDarkMode(e.matches);
- });
- // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
- apSwitch.addEventListener("change", (e) => {
- toggleDarkMode(e.target.checked);
- });
-}
-
-function setChatbotHeight() {
- const screenWidth = window.innerWidth;
- const statusDisplay = document.querySelector('#status_display');
- const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
- const wrap = chatbot.querySelector('.wrap');
- const vh = window.innerHeight * 0.01;
- document.documentElement.style.setProperty('--vh', `${vh}px`);
- if (isInIframe) {
- chatbot.style.height = `700px`;
- wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
- } else {
- if (screenWidth <= 320) {
- chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
- wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
- } else if (screenWidth <= 499) {
- chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
- wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
- } else {
- chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
- wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
- }
- }
-}
-function setChatbotScroll() {
- var scrollHeight = chatbotWrap.scrollHeight;
- chatbotWrap.scrollTo(0,scrollHeight)
-}
-var rangeInputs = null;
-var numberInputs = null;
-function setSlider() {
- rangeInputs = document.querySelectorAll('input[type="range"]');
- numberInputs = document.querySelectorAll('input[type="number"]')
- setSliderRange();
- rangeInputs.forEach(rangeInput => {
- rangeInput.addEventListener('input', setSliderRange);
- });
- numberInputs.forEach(numberInput => {
- numberInput.addEventListener('input', setSliderRange);
- })
-}
-function setSliderRange() {
- var range = document.querySelectorAll('input[type="range"]');
- range.forEach(range => {
- range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%';
- });
-}
-
-function addChuanhuButton(botElement) {
- var rawMessage = null;
- var mdMessage = null;
- rawMessage = botElement.querySelector('.raw-message');
- mdMessage = botElement.querySelector('.md-message');
- if (!rawMessage) {
- var buttons = botElement.querySelectorAll('button.chuanhu-btn');
- for (var i = 0; i < buttons.length; i++) {
- buttons[i].parentNode.removeChild(buttons[i]);
- }
- return;
- }
- var copyButton = null;
- var toggleButton = null;
- copyButton = botElement.querySelector('button.copy-bot-btn');
- toggleButton = botElement.querySelector('button.toggle-md-btn');
- if (copyButton) copyButton.remove();
- if (toggleButton) toggleButton.remove();
-
- // Copy bot button
- var copyButton = document.createElement('button');
- copyButton.classList.add('chuanhu-btn');
- copyButton.classList.add('copy-bot-btn');
- copyButton.setAttribute('aria-label', 'Copy');
- copyButton.innerHTML = copyIcon;
- copyButton.addEventListener('click', () => {
- const textToCopy = rawMessage.innerText;
- navigator.clipboard
- .writeText(textToCopy)
- .then(() => {
- copyButton.innerHTML = copiedIcon;
- setTimeout(() => {
- copyButton.innerHTML = copyIcon;
- }, 1500);
- })
- .catch(() => {
- console.error("copy failed");
- });
- });
- botElement.appendChild(copyButton);
-
- // Toggle button
- var toggleButton = document.createElement('button');
- toggleButton.classList.add('chuanhu-btn');
- toggleButton.classList.add('toggle-md-btn');
- toggleButton.setAttribute('aria-label', 'Toggle');
- var renderMarkdown = mdMessage.classList.contains('hideM');
- toggleButton.innerHTML = renderMarkdown ? mdIcon : rawIcon;
- toggleButton.addEventListener('click', () => {
- renderMarkdown = mdMessage.classList.contains('hideM');
- if (renderMarkdown){
- renderMarkdownText(botElement);
- toggleButton.innerHTML=rawIcon;
- } else {
- removeMarkdownText(botElement);
- toggleButton.innerHTML=mdIcon;
- }
- });
- botElement.insertBefore(toggleButton, copyButton);
-}
-
-function renderMarkdownText(message) {
- var mdDiv = message.querySelector('.md-message');
- if (mdDiv) mdDiv.classList.remove('hideM');
- var rawDiv = message.querySelector('.raw-message');
- if (rawDiv) rawDiv.classList.add('hideM');
-}
-function removeMarkdownText(message) {
- var rawDiv = message.querySelector('.raw-message');
- if (rawDiv) rawDiv.classList.remove('hideM');
- var mdDiv = message.querySelector('.md-message');
- if (mdDiv) mdDiv.classList.add('hideM');
-}
-
-let timeoutId;
-let isThrottled = false;
-var mmutation
-// Watch bot messages across the page and add a copy button to each of them.
-var mObserver = new MutationObserver(function (mutationsList) {
- for (mmutation of mutationsList) {
- if (mmutation.type === 'childList') {
- for (var node of mmutation.addedNodes) {
- if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
- saveHistoryHtml();
- document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
- }
- if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') {
- setSlider();
- }
- }
- for (var node of mmutation.removedNodes) {
- if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
- saveHistoryHtml();
- document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
- }
- }
- } else if (mmutation.type === 'attributes') {
- if (mmutation.target.nodeType === 1 && mmutation.target.classList.contains('message') && mmutation.target.getAttribute('data-testid') === 'bot') {
- if (isThrottled) break; // throttle to avoid re-rendering over and over
- isThrottled = true;
- clearTimeout(timeoutId);
- timeoutId = setTimeout(() => {
- isThrottled = false;
- document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
- saveHistoryHtml();
- }, 500);
- }
- }
- }
-});
-mObserver.observe(document.documentElement, { attributes: true, childList: true, subtree: true });
-
-var loadhistorytime = 0; // for debugging
-function saveHistoryHtml() {
- var historyHtml = document.querySelector('#chuanhu_chatbot > .wrap');
- localStorage.setItem('chatHistory', historyHtml.innerHTML);
- // console.log("History Saved")
- historyLoaded = false;
-}
-function loadHistoryHtml() {
- var historyHtml = localStorage.getItem('chatHistory');
- if (!historyHtml) {
- historyLoaded = true;
- return; // no history, do nothing
- }
- userLogged = localStorage.getItem('userLogged');
- if (userLogged){
- historyLoaded = true;
- return; // logged in, do nothing
- }
- if (!historyLoaded) {
- var tempDiv = document.createElement('div');
- tempDiv.innerHTML = historyHtml;
- var buttons = tempDiv.querySelectorAll('button.chuanhu-btn');
- var gradioCopyButtons = tempDiv.querySelectorAll('button.copy_code_button');
- for (var i = 0; i < buttons.length; i++) {
- buttons[i].parentNode.removeChild(buttons[i]);
- }
- for (var i = 0; i < gradioCopyButtons.length; i++) {
- gradioCopyButtons[i].parentNode.removeChild(gradioCopyButtons[i]);
- }
- var fakeHistory = document.createElement('div');
- fakeHistory.classList.add('history-message');
- fakeHistory.innerHTML = tempDiv.innerHTML;
- webLocale();
- chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild);
- // var fakeHistory = document.createElement('div');
- // fakeHistory.classList.add('history-message');
- // fakeHistory.innerHTML = historyHtml;
- // chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild);
- historyLoaded = true;
- console.log("History Loaded");
- loadhistorytime += 1; // for debugging
- } else {
- historyLoaded = false;
- }
-}
-function clearHistoryHtml() {
- localStorage.removeItem("chatHistory");
- var historyMessages = chatbotWrap.querySelector('.history-message');
- if (historyMessages) {
- chatbotWrap.removeChild(historyMessages);
- console.log("History Cleared");
- }
-}
-function emptyHistory() {
- empty_botton.addEventListener("click", function () {
- clearHistoryHtml();
- });
-}
-
-// watch for DOM changes inside the page
-var observer = new MutationObserver(function (mutations) {
- gradioLoaded(mutations);
-});
-observer.observe(targetNode, { childList: true, subtree: true });
-
-// watch for page-level changes
-window.addEventListener("DOMContentLoaded", function () {
- isInIframe = (window.self !== window.top);
- historyLoaded = false;
-});
-window.addEventListener('resize', setChatbotHeight);
-window.addEventListener('scroll', setChatbotHeight);
-window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);
-
-// button svg code
-const copyIcon = '';
-const copiedIcon = '';
-const mdIcon = '';
-const rawIcon = '';
diff --git a/spaces/xuetao/bingo3/src/components/tailwind-indicator.tsx b/spaces/xuetao/bingo3/src/components/tailwind-indicator.tsx
deleted file mode 100644
index f2a1291213dd67055fcebe67fab574c8441338df..0000000000000000000000000000000000000000
--- a/spaces/xuetao/bingo3/src/components/tailwind-indicator.tsx
+++ /dev/null
@@ -1,14 +0,0 @@
-export function TailwindIndicator() {
- if (process.env.NODE_ENV === 'production') return null
-
- return (
- <div className="fixed bottom-1 left-1 z-50 flex h-6 w-6 items-center justify-center rounded-full bg-gray-800 p-3 font-mono text-xs text-white">
- <div className="block sm:hidden">xs</div>
- <div className="hidden sm:block md:hidden">sm</div>
- <div className="hidden md:block lg:hidden">md</div>
- <div className="hidden lg:block xl:hidden">lg</div>
- <div className="hidden xl:block 2xl:hidden">xl</div>
- <div className="hidden 2xl:block">2xl</div>
- </div>
- )
-}
diff --git a/spaces/yangheng/Waifu2X-Image-Scale/Waifu2x/model_check_points/ReadME.md b/spaces/yangheng/Waifu2X-Image-Scale/Waifu2x/model_check_points/ReadME.md
deleted file mode 100644
index e432a1c7bd5de45f086ba1fd6b06ca712a1d806b..0000000000000000000000000000000000000000
--- a/spaces/yangheng/Waifu2X-Image-Scale/Waifu2x/model_check_points/ReadME.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Resume & Use Model Check Points
-
-This folder contains check points for models and their weights. They are generated from [PyTorch's pickle](https://pytorch.org/docs/master/notes/serialization.html).
-
-Model specifications are in each folder's ReadME.
-
-Pickle names with "model" contain the entire models, and they can be used as an freeze module by calling the "forward_checkpoint" function to generate images.
-
-Example:
-```python
-import torch
-# No need to reconstruct the model
-model = torch.load("./DCSCN/DCSCN_model_387epos_L12_noise_1.pt")
-x = torch.randn((1,3,10,10)), torch.randn((1,3,20,20))
-out = model.forward_checkpoint(x)
-```
-
-Pickle names with "weights" are model weights, and they are named dictionaries.
-
-Example:
-```python
-model = DCSCN(...)  # the constructor settings must match those used when the checkpoint weights were saved
-model.load_state_dict(torch.load("./DCSCN/DCSCN_weights_387epos_L12_noise_1.pt"))
-# then you can resume the model training
-```
-
-Model check points in Upconv_7 and vgg_7 are from [waifu2x's repo](https://github.com/nagadomi/waifu2x/tree/master/models). To load their weights into a model, please use the ```load_pre_train_weights``` function.
-
-Example:
-```python
-model = UpConv_7()
-model.load_pre_train_weights(json_file=...)
-# then the model is ready to use
-```
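-
-A quick way to tell the two pickle flavours apart at load time (a sketch, not from the original code; the file name is a placeholder and `model` is assumed to be constructed as in the examples above):
-
-```python
-import torch
-
-obj = torch.load("./DCSCN/some_check_point.pt", map_location="cpu")
-if isinstance(obj, dict):
-    # "weights" pickle: a named state dict, load it into an already-built model
-    model.load_state_dict(obj)
-else:
-    # "model" pickle: the full module object, ready for forward_checkpoint
-    model = obj
-```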
diff --git a/spaces/yanli01/wrwj/Dockerfile b/spaces/yanli01/wrwj/Dockerfile
deleted file mode 100644
index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000
--- a/spaces/yanli01/wrwj/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
-COPY requirements.txt .
-RUN pip install --user -r requirements.txt
-
-FROM python:3.9
-MAINTAINER iskoldt
-COPY --from=builder /root/.local /root/.local
-ENV PATH=/root/.local/bin:$PATH
-COPY . /app
-WORKDIR /app
-ENV my_api_key empty
-ENV dockerrun yes
-CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
diff --git a/spaces/yaoshining/text-generation-webui/extensions/superbooga/download_urls.py b/spaces/yaoshining/text-generation-webui/extensions/superbooga/download_urls.py
deleted file mode 100644
index efe300d28393e4550f241808073f04c98fb33ace..0000000000000000000000000000000000000000
--- a/spaces/yaoshining/text-generation-webui/extensions/superbooga/download_urls.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import concurrent.futures
-
-import requests
-
-
-def download_single(url):
- response = requests.get(url, timeout=5)
- if response.status_code == 200:
- return response.content
- else:
- raise Exception("Failed to download URL")
-
-
-def download_urls(urls, threads=1):
- with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
- futures = []
- for url in urls:
- future = executor.submit(download_single, url)
- futures.append(future)
-
- results = []
- i = 0
- for future in concurrent.futures.as_completed(futures):
- try:
- result = future.result()
- results.append(result)
- i += 1
- yield f"{i}/{len(urls)}", results
- except Exception:
- pass
-
- yield "Done", results
diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/EventEditor/EventController.ts b/spaces/yderre-aubay/midi-player-demo/src/main/components/EventEditor/EventController.ts
deleted file mode 100644
index 46cde0b0bcdeb9ed951dff24e358ece32a7ff538..0000000000000000000000000000000000000000
--- a/spaces/yderre-aubay/midi-player-demo/src/main/components/EventEditor/EventController.ts
+++ /dev/null
@@ -1,103 +0,0 @@
-import { controllerTypeString } from "../../../common/helpers/noteNumberString"
-import { TrackEvent } from "../../../common/track"
-
-export type EventInputProp =
- | {
- type: "number"
- value: number
- }
- | {
- type: "text"
- value: string
- }
-
-export type EventValueUpdator = {
- update: (value: number | string) => any
-}
-
-// Abstraction Layer for manipulating TrackEvent on EventList
-export type EventController = {
- name: string
- gate?: EventInputProp & EventValueUpdator
- value?: EventInputProp & EventValueUpdator
-}
-
-export function getEventController<T extends TrackEvent>(
- e: T,
-): EventController {
- switch (e.type) {
- case "channel":
- switch (e.subtype) {
- case "controller":
- return {
- name:
- controllerTypeString(e.controllerType) ?? `CC${e.controllerType}`,
- value: {
- value: e.value,
- type: "number",
- update: (value) => ({ value }),
- },
- }
- case "note":
- return {
- name: e.subtype,
- value: {
- type: "number",
- value: e.velocity,
- update: (velocity) => ({ velocity }),
- },
- gate: {
- type: "number",
- value: e.duration,
- update: (duration) => ({ duration }),
- },
- }
- case "programChange":
- return {
- name: e.subtype,
- value: {
- type: "number",
- value: e.value,
- update: (value) => ({ value }),
- },
- }
- case "pitchBend":
- return {
- name: e.subtype,
- value: {
- type: "number",
- value: e.value,
- update: (value) => ({ value }),
- },
- }
- default:
- return { name: e.subtype }
- }
- case "meta":
- switch (e.subtype) {
- case "trackName":
- return {
- name: e.subtype,
- value: {
- value: e.text,
- type: "text",
- update: (text) => ({ text }),
- },
- }
- case "midiChannelPrefix":
- return {
- name: e.subtype,
- value: {
- value: e.value,
- type: "number",
- update: (channel) => ({ channel }),
- },
- }
- default:
- return { name: e.subtype }
- }
- case "dividedSysEx":
- case "sysEx":
- return { name: e.type }
- }
-}
diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/stores/ControlStore.ts b/spaces/yderre-aubay/midi-player-demo/src/main/stores/ControlStore.ts
deleted file mode 100644
index 3ac69bbe7ea8e01e3d410677495dca8140004d33..0000000000000000000000000000000000000000
--- a/spaces/yderre-aubay/midi-player-demo/src/main/stores/ControlStore.ts
+++ /dev/null
@@ -1,139 +0,0 @@
-import { ControllerEvent, MIDIControlEvents, PitchBendEvent } from "midifile-ts"
-import { computed, makeObservable, observable } from "mobx"
-import { makePersistable } from "mobx-persist-store"
-import {
- ValueEventType,
- isEqualValueEventType,
-} from "../../common/helpers/valueEvent"
-import { ControlSelection } from "../../common/selection/ControlSelection"
-import {
- TrackEventOf,
- isControllerEventWithType,
- isPitchBendEvent,
-} from "../../common/track"
-import PianoRollStore from "./PianoRollStore"
-
-export type ControlMode = { type: "velocity" } | ValueEventType
-
-export const isEqualControlMode = (a: ControlMode, b: ControlMode) => {
- switch (a.type) {
- case "velocity":
- case "pitchBend":
- return a.type === b.type
- case "controller":
- switch (b.type) {
- case "velocity":
- case "pitchBend":
- return false
- case "controller":
- return isEqualValueEventType(a, b)
- }
- }
-}
-
-export class ControlStore {
- controlMode: ControlMode = { type: "velocity" }
- selection: ControlSelection | null = null
- selectedEventIds: number[] = []
-
- controlModes: ControlMode[] = [
- {
- type: "velocity",
- },
- {
- type: "pitchBend",
- },
- {
- type: "controller",
- controllerType: MIDIControlEvents.MSB_MAIN_VOLUME,
- },
- {
- type: "controller",
- controllerType: MIDIControlEvents.MSB_PAN,
- },
- {
- type: "controller",
- controllerType: MIDIControlEvents.MSB_EXPRESSION,
- },
- {
- type: "controller",
- controllerType: MIDIControlEvents.SUSTAIN,
- },
- {
- type: "controller",
- controllerType: MIDIControlEvents.MSB_MODWHEEL,
- },
- ]
-
- constructor(private readonly pianoRollStore: PianoRollStore) {
- makeObservable(this, {
- controlMode: observable,
- selection: observable,
- selectedEventIds: observable,
- controlModes: observable,
- scrollLeft: computed,
- cursorX: computed,
- transform: computed,
- rulerStore: computed,
- selectedTrack: computed,
- quantizer: computed,
- mouseMode: computed,
- cursor: computed,
- })
-
- makePersistable(this, {
- name: "ControlStore",
- properties: ["controlModes"],
- storage: window.localStorage,
- })
- }
-
- get controlValueEvents(): (
- | TrackEventOf<ControllerEvent>
- | TrackEventOf<PitchBendEvent>
- )[] {
- const { controlMode } = this
- switch (controlMode.type) {
- case "velocity":
- throw new Error("don't use this method for velocity")
- case "pitchBend":
- return this.pianoRollStore.filteredEvents(isPitchBendEvent)
- case "controller":
- return this.pianoRollStore.filteredEvents(
- isControllerEventWithType(controlMode.controllerType),
- )
- }
- }
-
- get scrollLeft() {
- return this.pianoRollStore.scrollLeft
- }
-
- get cursorX() {
- return this.pianoRollStore.cursorX
- }
-
- get transform() {
- return this.pianoRollStore.transform
- }
-
- get rulerStore() {
- return this.pianoRollStore.rulerStore
- }
-
- get selectedTrack() {
- return this.pianoRollStore.selectedTrack
- }
-
- get quantizer() {
- return this.pianoRollStore.quantizer
- }
-
- get mouseMode() {
- return this.pianoRollStore.mouseMode
- }
-
- get cursor() {
- return this.pianoRollStore.controlCursor
- }
-}
diff --git a/spaces/yerfor/SyntaSpeech/tasks/run.py b/spaces/yerfor/SyntaSpeech/tasks/run.py
deleted file mode 100644
index ef2b0a319cb5cd7baf87e5224ab545412715fb69..0000000000000000000000000000000000000000
--- a/spaces/yerfor/SyntaSpeech/tasks/run.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import os
-
-os.environ["OMP_NUM_THREADS"] = "1"
-
-from utils.commons.hparams import hparams, set_hparams
-import importlib
-
-
-def run_task():
- assert hparams['task_cls'] != ''
- pkg = ".".join(hparams["task_cls"].split(".")[:-1])
- cls_name = hparams["task_cls"].split(".")[-1]
- task_cls = getattr(importlib.import_module(pkg), cls_name)
- task_cls.start()
-
-
-if __name__ == '__main__':
- set_hparams()
- run_task()
diff --git a/spaces/ygangang/VToonify/vtoonify/model/stylegan/prepare_data.py b/spaces/ygangang/VToonify/vtoonify/model/stylegan/prepare_data.py
deleted file mode 100644
index aa385d0ac13550e1ae5513f7a20b35997a5c3ea6..0000000000000000000000000000000000000000
--- a/spaces/ygangang/VToonify/vtoonify/model/stylegan/prepare_data.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import argparse
-from io import BytesIO
-import multiprocessing
-from functools import partial
-
-import os
-from PIL import Image
-import lmdb
-from tqdm import tqdm
-from torchvision import datasets
-from torchvision.transforms import functional as trans_fn
-
-
-def resize_and_convert(img, size, resample, quality=100):
- img = trans_fn.resize(img, size, resample)
- img = trans_fn.center_crop(img, size)
- buffer = BytesIO()
- img.save(buffer, format="jpeg", quality=quality)
- val = buffer.getvalue()
-
- return val
-
-
-def resize_multiple(
- img, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS, quality=100
-):
- imgs = []
-
- for size in sizes:
- imgs.append(resize_and_convert(img, size, resample, quality))
-
- return imgs
-
-
-def resize_worker(img_file, sizes, resample):
- i, file = img_file
- img = Image.open(file)
- img = img.convert("RGB")
- out = resize_multiple(img, sizes=sizes, resample=resample)
-
- return i, out
-
-
-def prepare(
- env, dataset, n_worker, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS
-):
- resize_fn = partial(resize_worker, sizes=sizes, resample=resample)
-
- files = sorted(dataset.imgs, key=lambda x: x[0])
- files = [(i, file) for i, (file, label) in enumerate(files)]
- total = 0
-
- with multiprocessing.Pool(n_worker) as pool:
- for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)):
- for size, img in zip(sizes, imgs):
- key = f"{size}-{str(i).zfill(5)}".encode("utf-8")
-
- with env.begin(write=True) as txn:
- txn.put(key, img)
-
- total += 1
-
- with env.begin(write=True) as txn:
- txn.put("length".encode("utf-8"), str(total).encode("utf-8"))
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Preprocess images for model training")
- parser.add_argument("--out", type=str, help="filename of the result lmdb dataset")
- parser.add_argument(
- "--size",
- type=str,
- default="128,256,512,1024",
- help="resolutions of images for the dataset",
- )
- parser.add_argument(
- "--n_worker",
- type=int,
- default=8,
- help="number of workers for preparing dataset",
- )
- parser.add_argument(
- "--resample",
- type=str,
- default="lanczos",
- help="resampling methods for resizing images",
- )
- parser.add_argument("path", type=str, help="path to the image dataset")
-
- args = parser.parse_args()
-
- if not os.path.exists(args.out):
- os.makedirs(args.out)
-
- resample_map = {"lanczos": Image.LANCZOS, "bilinear": Image.BILINEAR}
- resample = resample_map[args.resample]
-
- sizes = [int(s.strip()) for s in args.size.split(",")]
-
- print(f"Make dataset of image sizes:", ", ".join(str(s) for s in sizes))
-
- imgset = datasets.ImageFolder(args.path)
-
- with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
- prepare(env, imgset, args.n_worker, sizes=sizes, resample=resample)
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpr/modeling_dpr.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpr/modeling_dpr.py
deleted file mode 100644
index 944ce142b0ad0236186d8f91b4240949d3a0299c..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpr/modeling_dpr.py
+++ /dev/null
@@ -1,673 +0,0 @@
-# coding=utf-8
-# Copyright 2018 DPR Authors, The Hugging Face Team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" PyTorch DPR model for Open Domain Question Answering."""
-
-
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import torch
-from torch import Tensor, nn
-
-from ...modeling_outputs import BaseModelOutputWithPooling
-from ...modeling_utils import PreTrainedModel
-from ...utils import (
- ModelOutput,
- add_start_docstrings,
- add_start_docstrings_to_model_forward,
- logging,
- replace_return_docstrings,
-)
-from ..bert.modeling_bert import BertEncoder, BertModel
-from .configuration_dpr import DPRConfig
-
-
-logger = logging.get_logger(__name__)
-
-_CONFIG_FOR_DOC = "DPRConfig"
-_CHECKPOINT_FOR_DOC = "facebook/dpr-ctx_encoder-single-nq-base"
-
-DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "facebook/dpr-ctx_encoder-single-nq-base",
- "facebook/dpr-ctx_encoder-multiset-base",
-]
-DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "facebook/dpr-question_encoder-single-nq-base",
- "facebook/dpr-question_encoder-multiset-base",
-]
-DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "facebook/dpr-reader-single-nq-base",
- "facebook/dpr-reader-multiset-base",
-]
-
-
-##########
-# Outputs
-##########
-
-
-@dataclass
-class DPRContextEncoderOutput(ModelOutput):
- """
- Class for outputs of [`DPRContextEncoder`].
-
- Args:
- pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
- The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer
- hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
- This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
- shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
- Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
-
- pooler_output: torch.FloatTensor
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
-@dataclass
-class DPRQuestionEncoderOutput(ModelOutput):
- """
- Class for outputs of [`DPRQuestionEncoder`].
-
- Args:
- pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
- The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer
- hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
- This output is to be used to embed questions for nearest neighbors queries with context embeddings.
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
- shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
- Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
-
- pooler_output: torch.FloatTensor
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
-@dataclass
-class DPRReaderOutput(ModelOutput):
- """
- Class for outputs of [`DPRReader`].
-
- Args:
- start_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`):
- Logits of the start index of the span for each passage.
- end_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`):
- Logits of the end index of the span for each passage.
- relevance_logits (`torch.FloatTensor` of shape `(n_passages, )`):
- Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the
- question, compared to all the other passages.
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
- shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
- Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
-
- start_logits: torch.FloatTensor
- end_logits: torch.FloatTensor = None
- relevance_logits: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
-class DPRPreTrainedModel(PreTrainedModel):
- def _init_weights(self, module):
- """Initialize the weights"""
- if isinstance(module, nn.Linear):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, BertEncoder):
- module.gradient_checkpointing = value
-
-
-class DPREncoder(DPRPreTrainedModel):
- base_model_prefix = "bert_model"
-
- def __init__(self, config: DPRConfig):
- super().__init__(config)
- self.bert_model = BertModel(config, add_pooling_layer=False)
- if self.bert_model.config.hidden_size <= 0:
- raise ValueError("Encoder hidden_size can't be zero")
- self.projection_dim = config.projection_dim
- if self.projection_dim > 0:
- self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim)
- # Initialize weights and apply final processing
- self.post_init()
-
- def forward(
- self,
- input_ids: Tensor,
- attention_mask: Optional[Tensor] = None,
- token_type_ids: Optional[Tensor] = None,
- inputs_embeds: Optional[Tensor] = None,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = False,
- ) -> Union[BaseModelOutputWithPooling, Tuple[Tensor, ...]]:
- outputs = self.bert_model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- sequence_output = outputs[0]
- pooled_output = sequence_output[:, 0, :]
-
- if self.projection_dim > 0:
- pooled_output = self.encode_proj(pooled_output)
-
- if not return_dict:
- return (sequence_output, pooled_output) + outputs[2:]
-
- return BaseModelOutputWithPooling(
- last_hidden_state=sequence_output,
- pooler_output=pooled_output,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
- @property
- def embeddings_size(self) -> int:
- if self.projection_dim > 0:
- return self.encode_proj.out_features
- return self.bert_model.config.hidden_size
-
-
-class DPRSpanPredictor(DPRPreTrainedModel):
- base_model_prefix = "encoder"
-
- def __init__(self, config: DPRConfig):
- super().__init__(config)
- self.encoder = DPREncoder(config)
- self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2)
- self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1)
- # Initialize weights and apply final processing
- self.post_init()
-
- def forward(
- self,
- input_ids: Tensor,
- attention_mask: Tensor,
- inputs_embeds: Optional[Tensor] = None,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = False,
- ) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]:
- # notations: N - number of questions in a batch, M - number of passages per question, L - sequence length
- n_passages, sequence_length = input_ids.size() if input_ids is not None else inputs_embeds.size()[:2]
- # feed encoder
- outputs = self.encoder(
- input_ids,
- attention_mask=attention_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- sequence_output = outputs[0]
-
- # compute logits
- logits = self.qa_outputs(sequence_output)
- start_logits, end_logits = logits.split(1, dim=-1)
- start_logits = start_logits.squeeze(-1).contiguous()
- end_logits = end_logits.squeeze(-1).contiguous()
- relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
-
- # resize
- start_logits = start_logits.view(n_passages, sequence_length)
- end_logits = end_logits.view(n_passages, sequence_length)
- relevance_logits = relevance_logits.view(n_passages)
-
- if not return_dict:
- return (start_logits, end_logits, relevance_logits) + outputs[2:]
-
- return DPRReaderOutput(
- start_logits=start_logits,
- end_logits=end_logits,
- relevance_logits=relevance_logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
-
-##################
-# PreTrainedModel
-##################
-
-
-class DPRPretrainedContextEncoder(DPRPreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = DPRConfig
- load_tf_weights = None
- base_model_prefix = "ctx_encoder"
-
-
-class DPRPretrainedQuestionEncoder(DPRPreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = DPRConfig
- load_tf_weights = None
- base_model_prefix = "question_encoder"
-
-
-class DPRPretrainedReader(DPRPreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = DPRConfig
- load_tf_weights = None
- base_model_prefix = "span_predictor"
-
-
-###############
-# Actual Models
-###############
-
-
-DPR_START_DOCSTRING = r"""
-
- This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
- library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
- etc.)
-
- This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
- and behavior.
-
- Parameters:
- config ([`DPRConfig`]): Model configuration class with all the parameters of the model.
- Initializing with a config file does not load the weights associated with the model, only the
- configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-DPR_ENCODERS_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be
- formatted with [CLS] and [SEP] tokens as follows:
-
- (a) For sequence pairs (for a pair title+text for example):
-
- ```
- tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
- token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
- ```
-
- (b) For single sequences (for a question for example):
-
- ```
- tokens: [CLS] the dog is hairy . [SEP]
- token_type_ids: 0 0 0 0 0 0 0
- ```
-
- DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
- rather than the left.
-
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
- token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
- 1]`:
-
- - 0 corresponds to a *sentence A* token,
- - 1 corresponds to a *sentence B* token.
-
- [What are token type IDs?](../glossary#token-type-ids)
- inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
- model's internal embedding lookup matrix.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-DPR_READER_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`Tuple[torch.LongTensor]` of shapes `(n_passages, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question
- and 2) the passages titles and 3) the passages texts. To match pretraining, DPR `input_ids` sequence should
- be formatted with [CLS] and [SEP] with the format:
-
- `[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>`
-
- DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
- rather than the left.
-
- Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.FloatTensor` of shape `(n_passages, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
- inputs_embeds (`torch.FloatTensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
- model's internal embedding lookup matrix.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-@add_start_docstrings(
- "The bare DPRContextEncoder transformer outputting pooler outputs as context representations.",
- DPR_START_DOCSTRING,
-)
-class DPRContextEncoder(DPRPretrainedContextEncoder):
- def __init__(self, config: DPRConfig):
- super().__init__(config)
- self.config = config
- self.ctx_encoder = DPREncoder(config)
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: Optional[Tensor] = None,
- attention_mask: Optional[Tensor] = None,
- token_type_ids: Optional[Tensor] = None,
- inputs_embeds: Optional[Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[DPRContextEncoderOutput, Tuple[Tensor, ...]]:
- r"""
- Return:
-
- Examples:
-
- ```python
- >>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
-
- >>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
- >>> model = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
- >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
- >>> embeddings = model(input_ids).pooler_output
- ```"""
-
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- if attention_mask is None:
- attention_mask = (
- torch.ones(input_shape, device=device)
- if input_ids is None
- else (input_ids != self.config.pad_token_id)
- )
- if token_type_ids is None:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- outputs = self.ctx_encoder(
- input_ids=input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- if not return_dict:
- return outputs[1:]
- return DPRContextEncoderOutput(
- pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
- )
-
-
-@add_start_docstrings(
- "The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.",
- DPR_START_DOCSTRING,
-)
-class DPRQuestionEncoder(DPRPretrainedQuestionEncoder):
- def __init__(self, config: DPRConfig):
- super().__init__(config)
- self.config = config
- self.question_encoder = DPREncoder(config)
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: Optional[Tensor] = None,
- attention_mask: Optional[Tensor] = None,
- token_type_ids: Optional[Tensor] = None,
- inputs_embeds: Optional[Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[DPRQuestionEncoderOutput, Tuple[Tensor, ...]]:
- r"""
- Return:
-
- Examples:
-
- ```python
- >>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
-
- >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
- >>> model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
- >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
- >>> embeddings = model(input_ids).pooler_output
- ```
- """
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
- input_shape = input_ids.size()
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- if attention_mask is None:
- attention_mask = (
- torch.ones(input_shape, device=device)
- if input_ids is None
- else (input_ids != self.config.pad_token_id)
- )
- if token_type_ids is None:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- outputs = self.question_encoder(
- input_ids=input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- if not return_dict:
- return outputs[1:]
- return DPRQuestionEncoderOutput(
- pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
- )
-
-
-@add_start_docstrings(
- "The bare DPRReader transformer outputting span predictions.",
- DPR_START_DOCSTRING,
-)
-class DPRReader(DPRPretrainedReader):
- def __init__(self, config: DPRConfig):
- super().__init__(config)
- self.config = config
- self.span_predictor = DPRSpanPredictor(config)
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: Optional[Tensor] = None,
- attention_mask: Optional[Tensor] = None,
- inputs_embeds: Optional[Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]:
- r"""
- Return:
-
- Examples:
-
- ```python
- >>> from transformers import DPRReader, DPRReaderTokenizer
-
- >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
- >>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
- >>> encoded_inputs = tokenizer(
- ... questions=["What is love ?"],
- ... titles=["Haddaway"],
- ... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
- ... return_tensors="pt",
- ... )
- >>> outputs = model(**encoded_inputs)
- >>> start_logits = outputs.start_logits
- >>> end_logits = outputs.end_logits
- >>> relevance_logits = outputs.relevance_logits
- ```
- """
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
- input_shape = input_ids.size()
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- if attention_mask is None:
- attention_mask = torch.ones(input_shape, device=device)
-
- return self.span_predictor(
- input_ids,
- attention_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/levit/feature_extraction_levit.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/levit/feature_extraction_levit.py
deleted file mode 100644
index 91308cf0ba18d211daea38b4edb4ac7b52900803..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/levit/feature_extraction_levit.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# coding=utf-8
-# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Feature extractor class for LeViT."""
-
-import warnings
-
-from ...utils import logging
-from .image_processing_levit import LevitImageProcessor
-
-
-logger = logging.get_logger(__name__)
-
-
-class LevitFeatureExtractor(LevitImageProcessor):
- def __init__(self, *args, **kwargs) -> None:
- warnings.warn(
- "The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
- " use LevitImageProcessor instead.",
- FutureWarning,
- )
- super().__init__(*args, **kwargs)
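-
-# Illustrative migration sketch: replace the deprecated feature extractor with the image
-# processor directly. The checkpoint id and the `image` variable below are examples for
-# illustration, not part of this module.
-#
-#   from transformers import LevitImageProcessor
-#
-#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
-#   inputs = processor(images=image, return_tensors="pt")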
diff --git a/spaces/ysharma/ChatGPTwithAPI/README.md b/spaces/ysharma/ChatGPTwithAPI/README.md
deleted file mode 100644
index 744520207dc3dfb3b335f622d9f98183b9133a45..0000000000000000000000000000000000000000
--- a/spaces/ysharma/ChatGPTwithAPI/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ChatGPTwithAPI
-emoji: 🚀
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/zhangbo2008/chainyo-alpaca-lora-7b/README.md b/spaces/zhangbo2008/chainyo-alpaca-lora-7b/README.md
deleted file mode 100644
index 1d4dec470999355f21f610b1be2900b294f643bf..0000000000000000000000000000000000000000
--- a/spaces/zhangbo2008/chainyo-alpaca-lora-7b/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Chainyo Alpaca Lora 7b
-emoji: ⚡
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/zideliu/styledrop/open_clip/transformer.py b/spaces/zideliu/styledrop/open_clip/transformer.py
deleted file mode 100644
index 94620a4205a8b0b802616cbe675f0895c80691ec..0000000000000000000000000000000000000000
--- a/spaces/zideliu/styledrop/open_clip/transformer.py
+++ /dev/null
@@ -1,742 +0,0 @@
-import os
-from collections import OrderedDict
-import math
-from typing import Callable, Optional, Sequence, Tuple
-
-import torch
-from loguru import logger
-from torch import nn
-from torch.nn import functional as F
-from torch.utils.checkpoint import checkpoint
-
-from .utils import to_2tuple
-try:
- import xformers.ops as xops
-except ImportError:
- xops = None
- print("Please 'pip install xformers'")
-
-
-
-class LayerNormFp32(nn.LayerNorm):
- """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
-
- def forward(self, x: torch.Tensor):
- orig_type = x.dtype
- x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)
- return x.to(orig_type)
-
-
-class LayerNorm(nn.LayerNorm):
- """Subclass torch's LayerNorm (with cast back to input dtype)."""
-
- def forward(self, x: torch.Tensor):
- orig_type = x.dtype
- x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
- return x.to(orig_type)
-
-
-class QuickGELU(nn.Module):
- # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
- def forward(self, x: torch.Tensor):
- return x * torch.sigmoid(1.702 * x)
-
-
-class LayerScale(nn.Module):
- def __init__(self, dim, init_values=1e-5, inplace=False):
- super().__init__()
- self.inplace = inplace
- self.gamma = nn.Parameter(init_values * torch.ones(dim))
-
- def forward(self, x):
- return x.mul_(self.gamma) if self.inplace else x * self.gamma
-
-
-class PatchDropout(nn.Module):
- """
- https://arxiv.org/abs/2212.00794
- """
-
- def __init__(self, prob, exclude_first_token=True):
- super().__init__()
- assert 0 <= prob < 1.
- self.prob = prob
- self.exclude_first_token = exclude_first_token # exclude CLS token
-
- def forward(self, x):
- if not self.training or self.prob == 0.:
- return x
-
- if self.exclude_first_token:
- cls_tokens, x = x[:, :1], x[:, 1:]
- else:
- cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
-
- batch = x.size()[0]
- num_tokens = x.size()[1]
-
- batch_indices = torch.arange(batch)
- batch_indices = batch_indices[..., None]
-
- keep_prob = 1 - self.prob
- num_patches_keep = max(1, int(num_tokens * keep_prob))
-
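-        # pick a random subset of patch tokens per example: topk over random noise yields
-        # num_patches_keep distinct token indices to keep for this forward pass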
- rand = torch.randn(batch, num_tokens)
- patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
-
- x = x[batch_indices, patch_indices_keep]
-
- if self.exclude_first_token:
- x = torch.cat((cls_tokens, x), dim=1)
-
- return x
-
-
-class Attention(nn.Module):
- def __init__(
- self,
- dim,
- num_heads=8,
- qkv_bias=True,
- scaled_cosine=False,
- scale_heads=False,
- logit_scale_max=math.log(1. / 0.01),
- attn_drop=0.,
- proj_drop=0.,
- xattn=False,
- ):
- super().__init__()
- self.scaled_cosine = scaled_cosine
- self.scale_heads = scale_heads
- assert dim % num_heads == 0, 'dim should be divisible by num_heads'
- self.num_heads = num_heads
- self.head_dim = dim // num_heads
- self.scale = self.head_dim ** -0.5
- self.logit_scale_max = logit_scale_max
-
- # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
- self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
- if qkv_bias:
- self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
- else:
- self.in_proj_bias = None
-
- if self.scaled_cosine:
- self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
- else:
- self.logit_scale = None
- self.attn_drop = nn.Dropout(attn_drop)
- if self.scale_heads:
- self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
- else:
- self.head_scale = None
- self.out_proj = nn.Linear(dim, dim)
- self.out_drop = nn.Dropout(proj_drop)
- self.xattn_drop = attn_drop
- self.xattn = xattn
-
- def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
- L, N, C = x.shape
- q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
- if self.xattn:
- q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
- k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
- v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
-
- # logger.debug(f'using memory efficient attention')
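-            # xformers memory-efficient attention: avoids materializing the full LxL attention
-            # matrix (requires xformers; xops is None when the import above failed)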
- x = xops.memory_efficient_attention(
- q, k, v,
- p=self.xattn_drop,
- scale=self.scale if self.logit_scale is None else None,
- attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,
- # op=xops.MemoryEfficientAttentionFlashAttentionOp
- )
- else:
- q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
- k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
- v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
-
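-            # scaled-cosine attention: similarity of L2-normalized q and k, scaled by a learned,
-            # clamped per-head logit scale instead of 1/sqrt(head_dim)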
- if self.logit_scale is not None:
- attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
- logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
- attn = attn.view(N, self.num_heads, L, L) * logit_scale
- attn = attn.view(-1, L, L)
- else:
- q = q * self.scale
- attn = torch.bmm(q, k.transpose(-1, -2))
-
- if attn_mask is not None:
- if attn_mask.dtype == torch.bool:
- new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
- new_attn_mask.masked_fill_(attn_mask, float("-inf"))
- attn_mask = new_attn_mask
- attn += attn_mask
-
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = torch.bmm(attn, v)
- if self.head_scale is not None:
- x = x.view(N, self.num_heads, L, C) * self.head_scale
- x = x.view(-1, L, C)
- x = x.transpose(0, 1).reshape(L, N, C)
- x = self.out_proj(x)
- x = self.out_drop(x)
- return x
-
-
-class AttentionalPooler(nn.Module):
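-    # pools a variable-length token sequence into a fixed number of outputs (n_queries) by
-    # cross-attending from learned query embeddings to the input tokens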
- def __init__(
- self,
- d_model: int,
- context_dim: int,
- n_head: int = 8,
- n_queries: int = 256,
- norm_layer: Callable = LayerNorm
- ):
- super().__init__()
- self.query = nn.Parameter(torch.randn(n_queries, d_model))
- self.attn = nn.MultiheadAttention(d_model, n_head, kdim=context_dim, vdim=context_dim)
- self.ln_q = norm_layer(d_model)
- self.ln_k = norm_layer(context_dim)
-
- def forward(self, x: torch.Tensor):
- x = self.ln_k(x).permute(1, 0, 2) # NLD -> LND
- N = x.shape[1]
- q = self.ln_q(self.query)
- out = self.attn(self._repeat(q, N), x, x, need_weights=False)[0]
- return out.permute(1, 0, 2) # LND -> NLD
-
- def _repeat(self, query, N: int):
- return query.unsqueeze(1).repeat(1, N, 1)
-
-
-class ResidualAttentionBlock(nn.Module):
-    def __init__(
-            self,
-            d_model: int,
-            n_head: int,
-            mlp_ratio: float = 4.0,
-            ls_init_value: float = None,
-            act_layer: Callable = nn.GELU,
-            norm_layer: Callable = LayerNorm,
-            xattn: bool = False,
-            is_cross_attention: bool = False,
-    ):
-        super().__init__()
-
-        self.ln_1 = norm_layer(d_model)
-        if xattn:
-            # the custom memory-efficient Attention above only implements self-attention
-            assert not is_cross_attention, 'xattn is not supported in cross-attention blocks'
-            self.attn = Attention(d_model, n_head, xattn=True)
-        else:
-            self.attn = nn.MultiheadAttention(d_model, n_head)
-        if is_cross_attention:
-            self.ln_1_kv = norm_layer(d_model)
-        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
-
-        self.ln_2 = norm_layer(d_model)
-        mlp_width = int(d_model * mlp_ratio)
-        self.mlp = nn.Sequential(OrderedDict([
-            ("c_fc", nn.Linear(d_model, mlp_width)),
-            ("gelu", act_layer()),
-            ("c_proj", nn.Linear(mlp_width, d_model))
-        ]))
-        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
-        self.xattn = xattn
-
-    def attention(
-            self,
-            q_x: torch.Tensor,
-            k_x: Optional[torch.Tensor] = None,
-            v_x: Optional[torch.Tensor] = None,
-            attn_mask: Optional[torch.Tensor] = None,
-    ):
-        # self-attention unless cross-attention keys/values are provided
-        k_x = k_x if k_x is not None else q_x
-        v_x = v_x if v_x is not None else q_x
-        attn_mask = attn_mask.to(q_x.dtype) if attn_mask is not None else None
-        if self.xattn:
-            return self.attn(q_x, attn_mask=attn_mask)
-        return self.attn(q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask)[0]
-
-    def forward(
-            self,
-            q_x: torch.Tensor,
-            k_x: Optional[torch.Tensor] = None,
-            v_x: Optional[torch.Tensor] = None,
-            attn_mask: Optional[torch.Tensor] = None,
-    ):
-        # cross-attention keys/values (e.g. image tokens in MultimodalTransformer) get their own LayerNorm
-        k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
-        v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
-
-        x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
-        x = x + self.ls_2(self.mlp(self.ln_2(x)))
-        return x
-
-
-class CustomResidualAttentionBlock(nn.Module):
- def __init__(
- self,
- d_model: int,
- n_head: int,
- mlp_ratio: float = 4.0,
- ls_init_value: float = None,
- act_layer: Callable = nn.GELU,
- norm_layer: Callable = LayerNorm,
- scale_cosine_attn: bool = False,
- scale_heads: bool = False,
- scale_attn: bool = False,
- scale_fc: bool = False,
- ):
- super().__init__()
-
- self.ln_1 = norm_layer(d_model)
- self.attn = Attention(
- d_model, n_head,
- scaled_cosine=scale_cosine_attn,
- scale_heads=scale_heads,
- )
- self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
- self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
-
- self.ln_2 = norm_layer(d_model)
- mlp_width = int(d_model * mlp_ratio)
- self.mlp = nn.Sequential(OrderedDict([
- ("c_fc", nn.Linear(d_model, mlp_width)),
- ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
- ("gelu", act_layer()),
- ("c_proj", nn.Linear(mlp_width, d_model))
- ]))
- self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
-
- def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
- x = x + self.ls_1(self.ln_attn(self.attn(self.ln_1(x), attn_mask=attn_mask)))
- x = x + self.ls_2(self.mlp(self.ln_2(x)))
- return x
-
-
-class Transformer(nn.Module):
- def __init__(
- self,
- width: int,
- layers: int,
- heads: int,
- mlp_ratio: float = 4.0,
- ls_init_value: float = None,
- act_layer: Callable = nn.GELU,
- norm_layer: Callable = LayerNorm,
- xattn=False,
- ):
- super().__init__()
- self.width = width
- self.layers = layers
- self.grad_checkpointing = False
- logger.debug(f'xattn in transformer of CLIP is {xattn}')
-
- self.resblocks = nn.ModuleList([
- ResidualAttentionBlock(
- width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer,
- xattn=xattn)
- for _ in range(layers)
- ])
-
- def get_cast_dtype(self) -> torch.dtype:
- return self.resblocks[0].mlp.c_fc.weight.dtype
-
- def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
- for r in self.resblocks:
- if self.grad_checkpointing and not torch.jit.is_scripting():
- # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
- x = checkpoint(r, x, None, None, attn_mask)
- else:
- x = r(x, attn_mask=attn_mask)
- return x
-
-
-class VisionTransformer(nn.Module):
- output_tokens: torch.jit.Final[bool]
-
- def __init__(
- self,
- image_size: int,
- patch_size: int,
- width: int,
- layers: int,
- heads: int,
- mlp_ratio: float,
- ls_init_value: float = None,
- global_average_pool: bool = False,
- attentional_pool: bool = False,
- n_queries: int = 256,
- attn_pooler_heads: int = 8,
- output_dim: int = 512,
- patch_dropout: float = 0.,
- input_patchnorm: bool = False,
- act_layer: Callable = nn.GELU,
- norm_layer: Callable = LayerNorm,
- output_tokens: bool = False
- ):
- super().__init__()
- self.output_tokens = output_tokens
- image_height, image_width = self.image_size = to_2tuple(image_size)
- patch_height, patch_width = self.patch_size = to_2tuple(patch_size)
- self.grid_size = (image_height // patch_height, image_width // patch_width)
- self.output_dim = output_dim
-
- # whether to layernorm each patch, as done in dual patchnorm paper - https://arxiv.org/abs/2302.01327v1
- self.input_patchnorm = input_patchnorm
-
- if input_patchnorm:
- patch_input_dim = patch_height * patch_width * 3
- self.patchnorm_pre_ln = LayerNorm(patch_input_dim)
- self.conv1 = nn.Linear(patch_input_dim, width)
- else:
- self.patchnorm_pre_ln = nn.Identity()
- self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
-
- # class embeddings and positional embeddings
- scale = width ** -0.5
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
- self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))
-
- # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
- self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
-
- self.ln_pre = norm_layer(width)
- self.transformer = Transformer(
- width,
- layers,
- heads,
- mlp_ratio,
- ls_init_value=ls_init_value,
- act_layer=act_layer,
- norm_layer=norm_layer,
- )
-
- self.global_average_pool = global_average_pool
- if attentional_pool:
- self.attn_pool = AttentionalPooler(output_dim, width, n_head=attn_pooler_heads, n_queries=n_queries)
- self.ln_post = norm_layer(output_dim)
- self.proj = nn.Parameter(scale * torch.randn(output_dim, output_dim))
- else:
- self.attn_pool = None
- self.ln_post = norm_layer(width)
- self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
-
- self.init_parameters()
-
- def lock(self, unlocked_groups=0, freeze_bn_stats=False):
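-        # freeze every parameter, then re-enable grad for the last `unlocked_groups` groups
-        # (stem: conv1/embeddings/ln_pre, each intermediate resblock, final block + ln_post, projection)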
- for param in self.parameters():
- param.requires_grad = False
-
- if unlocked_groups != 0:
- groups = [
- [
- self.conv1,
- self.class_embedding,
- self.positional_embedding,
- self.ln_pre,
- ],
- *self.transformer.resblocks[:-1],
- [
- self.transformer.resblocks[-1],
- self.ln_post,
- ],
- self.proj,
- ]
-
- def _unlock(x):
- if isinstance(x, Sequence):
- for g in x:
- _unlock(g)
- else:
- if isinstance(x, torch.nn.Parameter):
- x.requires_grad = True
- else:
- for p in x.parameters():
- p.requires_grad = True
-
- _unlock(groups[-unlocked_groups:])
-
- def init_parameters(self):
- # FIXME OpenAI CLIP did not define an init for the VisualTransformer
- # TODO experiment if default PyTorch init, below, or alternate init is best.
-
- # nn.init.normal_(self.class_embedding, std=self.scale)
- # nn.init.normal_(self.positional_embedding, std=self.scale)
- #
- # proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
- # attn_std = self.transformer.width ** -0.5
- # fc_std = (2 * self.transformer.width) ** -0.5
- # for block in self.transformer.resblocks:
- # nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
- # nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
- # nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
- # nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
- #
- # if self.text_projection is not None:
- # nn.init.normal_(self.text_projection, std=self.scale)
- pass
-
- @torch.jit.ignore
- def set_grad_checkpointing(self, enable=True):
- self.transformer.grad_checkpointing = enable
-
- def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- if self.global_average_pool:
- return x.mean(dim=1), x
- else:
- return x[:, 0], x[:, 1:]
-
- def forward(self, x: torch.Tensor):
-
- # to patches - whether to use dual patchnorm - https://arxiv.org/abs/2302.01327v1
- if self.input_patchnorm:
- # einops - rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)')
- x = x.reshape(x.shape[0], x.shape[1], self.grid_size[0], self.patch_size[0], self.grid_size[1], self.patch_size[1])
- x = x.permute(0, 2, 4, 1, 3, 5)
- x = x.reshape(x.shape[0], self.grid_size[0] * self.grid_size[1], -1)
- x = self.patchnorm_pre_ln(x)
- x = self.conv1(x)
- else:
- x = self.conv1(x) # shape = [*, width, grid, grid]
- x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
- x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
-
- # class embeddings and positional embeddings
- x = torch.cat(
- [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
- x], dim=1) # shape = [*, grid ** 2 + 1, width]
- x = x + self.positional_embedding.to(x.dtype)
-
- # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
- x = self.patch_dropout(x)
- x = self.ln_pre(x)
-
- x = x.permute(1, 0, 2) # NLD -> LND
- x = self.transformer(x)
- x = x.permute(1, 0, 2) # LND -> NLD
-
- if self.attn_pool is not None:
- x = self.attn_pool(x)
- x = self.ln_post(x)
- pooled, tokens = self._global_pool(x)
- else:
- pooled, tokens = self._global_pool(x)
- pooled = self.ln_post(pooled)
-
- if self.proj is not None:
- pooled = pooled @ self.proj
-
- if self.output_tokens:
- return pooled, tokens
-
- return pooled
-
-
-class TextTransformer(nn.Module):
- output_tokens: torch.jit.Final[bool]
-
- def __init__(
- self,
- context_length: int = 77,
- vocab_size: int = 49408,
- width: int = 512,
- heads: int = 8,
- layers: int = 12,
- ls_init_value: float = None,
- output_dim: int = 512,
- act_layer: Callable = nn.GELU,
- norm_layer: Callable = LayerNorm,
- embed_cls: bool = False,
- pad_id: int = 0,
- output_tokens: bool = False,
- ):
- super().__init__()
- self.output_tokens = output_tokens
- self.num_pos = self.context_length = context_length
- self.vocab_size = vocab_size
- self.width = width
- self.output_dim = output_dim
- self.heads = heads
- self.pad_id = pad_id
-
- self.text_projection = nn.Parameter(torch.empty(width, output_dim))
-
- if embed_cls:
- self.cls_emb = nn.Parameter(torch.empty(width))
- self.num_pos += 1
- else:
- self.cls_emb = None
-
- self.token_embedding = nn.Embedding(vocab_size, width)
- self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
-
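-        # opt into the xformers attention path for the text tower via the FLASH_TXT env var ('t' enables it)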
- xattn = (os.getenv('FLASH_TXT', 'f') == 't')
- self.transformer = Transformer(
- width=width,
- layers=layers,
- heads=heads,
- ls_init_value=ls_init_value,
- act_layer=act_layer,
- norm_layer=norm_layer,
- xattn=xattn
- )
-
- self.xattn = xattn
- self.ln_final = norm_layer(width)
-
- self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
-
- self.init_parameters()
-
- def init_parameters(self):
- nn.init.normal_(self.token_embedding.weight, std=0.02)
- nn.init.normal_(self.positional_embedding, std=0.01)
- if self.cls_emb is not None:
- nn.init.normal_(self.cls_emb, std=0.01)
-
- proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
- attn_std = self.transformer.width ** -0.5
- fc_std = (2 * self.transformer.width) ** -0.5
- for block in self.transformer.resblocks:
- nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
- nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
- nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
- nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
-
- if self.text_projection is not None:
- nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
-
- @torch.jit.ignore
- def set_grad_checkpointing(self, enable=True):
- self.transformer.grad_checkpointing = enable
-
- def build_attention_mask(self):
- # lazily create causal attention mask, with full attention between the tokens
- # pytorch uses additive attention mask; fill with -inf
- mask = torch.empty(self.num_pos, self.num_pos)
- mask.fill_(float("-inf"))
- mask.triu_(1) # zero out the lower diagonal
- return mask
-
- def build_cls_mask(self, text, cast_dtype: torch.dtype):
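-        # additive mask that blocks attention to padding positions (so the appended CLS token only
-        # sees real tokens); expanded to [batch * heads, num_pos, num_pos] for nn.MultiheadAttention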
- cls_mask = (text != self.pad_id).unsqueeze(1)
- cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=1.0)
- additive_mask = torch.empty(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)
- additive_mask.fill_(0)
- additive_mask.masked_fill_(~cls_mask, float("-inf"))
- additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)
- return additive_mask
-
- def _repeat(self, t, N: int):
- return t.reshape(1, 1, -1).repeat(N, 1, 1)
-
- def forward(self, text):
- cast_dtype = self.transformer.get_cast_dtype()
- seq_len = text.shape[1]
-
- x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
- attn_mask = self.attn_mask
- if self.cls_emb is not None:
- seq_len += 1
- x = torch.cat([x, self._repeat(self.cls_emb, x.shape[0])], dim=1)
- cls_mask = self.build_cls_mask(text, cast_dtype)
- attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]
-
- x = x + self.positional_embedding[:seq_len].to(cast_dtype)
- x = x.permute(1, 0, 2) # NLD -> LND
- x = self.transformer(x, attn_mask=attn_mask)
- x = x.permute(1, 0, 2) # LND -> NLD
-
- # x.shape = [batch_size, n_ctx, transformer.width]
- # take features from the eot embedding (eot_token is the highest number in each sequence)
- if self.cls_emb is not None:
- pooled, tokens = x[:, -1], x[:, :-1]
- pooled = self.ln_final(pooled)
- else:
- x = self.ln_final(x)
- pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x
-
- if self.text_projection is not None:
- pooled = pooled @ self.text_projection
-
- if self.output_tokens:
- return pooled, tokens
-
- return pooled
-
-
-class MultimodalTransformer(Transformer):
- def __init__(
- self,
- width: int,
- layers: int,
- heads: int,
- context_length: int = 77,
- mlp_ratio: float = 4.0,
- ls_init_value: float = None,
- act_layer: Callable = nn.GELU,
- norm_layer: Callable = LayerNorm,
- output_dim: int = 512,
- ):
-
- super().__init__(
- width=width,
- layers=layers,
- heads=heads,
- mlp_ratio=mlp_ratio,
- ls_init_value=ls_init_value,
- act_layer=act_layer,
- norm_layer=norm_layer,
- )
- self.context_length = context_length
- self.cross_attn = nn.ModuleList([
- ResidualAttentionBlock(
- width,
- heads,
- mlp_ratio,
- ls_init_value=ls_init_value,
- act_layer=act_layer,
- norm_layer=norm_layer,
- is_cross_attention=True,
- )
- for _ in range(layers)
- ])
-
- self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
-
- self.ln_final = norm_layer(width)
- self.text_projection = nn.Parameter(torch.empty(width, output_dim))
-
- def init_parameters(self):
-        # MultimodalTransformer extends Transformer directly, so the blocks live on `self`
-        proj_std = (self.width ** -0.5) * ((2 * self.layers) ** -0.5)
-        attn_std = self.width ** -0.5
-        fc_std = (2 * self.width) ** -0.5
-        for block in self.resblocks:
-            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
-            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
-            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
-            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
-        for block in self.cross_attn:
-            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
-            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
-            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
-            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
-
-        if self.text_projection is not None:
-            nn.init.normal_(self.text_projection, std=self.width ** -0.5)
-
- def build_attention_mask(self):
- # lazily create causal attention mask, with full attention between the tokens
- # pytorch uses additive attention mask; fill with -inf
- mask = torch.empty(self.context_length, self.context_length)
- mask.fill_(float("-inf"))
- mask.triu_(1) # zero out the lower diagonal
- return mask
-
- def forward(self, image_embs, text_embs):
-        text_embs = text_embs.permute(1, 0, 2)  # NLD -> LND
- image_embs = image_embs.permute(1, 0, 2) # NLD -> LND
- seq_len = text_embs.shape[0]
-
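-        # alternate causal self-attention over the text tokens with cross-attention from the text
-        # queries into the (unchanged) image token sequence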
- for resblock, cross_attn in zip(self.resblocks, self.cross_attn):
- if self.grad_checkpointing and not torch.jit.is_scripting():
- # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
- text_embs = checkpoint(resblock, text_embs, None, None, self.attn_mask[:seq_len, :seq_len])
- text_embs = checkpoint(cross_attn, text_embs, image_embs, image_embs, None)
- else:
- text_embs = resblock(text_embs, attn_mask=self.attn_mask[:seq_len, :seq_len])
- text_embs = cross_attn(text_embs, k_x=image_embs, v_x=image_embs)
-
- x = text_embs.permute(1, 0, 2) # LND -> NLD
- x = self.ln_final(x)
-
- if self.text_projection is not None:
- x = x @ self.text_projection
-
- return x
-
- @torch.jit.ignore
- def set_grad_checkpointing(self, enable=True):
- self.grad_checkpointing = enable
diff --git a/spaces/zideliu/styledrop/timm/models/helpers.py b/spaces/zideliu/styledrop/timm/models/helpers.py
deleted file mode 100644
index 77b98dc6b487c3eaacdaf0fd3032ad616412faf4..0000000000000000000000000000000000000000
--- a/spaces/zideliu/styledrop/timm/models/helpers.py
+++ /dev/null
@@ -1,310 +0,0 @@
-""" Model creation / weight loading / state_dict helpers
-
-Hacked together by / Copyright 2020 Ross Wightman
-"""
-import logging
-import os
-import math
-from collections import OrderedDict
-from copy import deepcopy
-from typing import Callable
-
-import torch
-import torch.nn as nn
-import torch.utils.model_zoo as model_zoo
-
-from .features import FeatureListNet, FeatureDictNet, FeatureHookNet
-from .layers import Conv2dSame, Linear
-
-
-_logger = logging.getLogger(__name__)
-
-
-def load_state_dict(checkpoint_path, use_ema=False):
- if checkpoint_path and os.path.isfile(checkpoint_path):
- checkpoint = torch.load(checkpoint_path, map_location='cpu')
- state_dict_key = 'state_dict'
- if isinstance(checkpoint, dict):
- if use_ema and 'state_dict_ema' in checkpoint:
- state_dict_key = 'state_dict_ema'
- if state_dict_key and state_dict_key in checkpoint:
- new_state_dict = OrderedDict()
- for k, v in checkpoint[state_dict_key].items():
- # strip `module.` prefix
- name = k[7:] if k.startswith('module') else k
- new_state_dict[name] = v
- state_dict = new_state_dict
- else:
- state_dict = checkpoint
- _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
- return state_dict
- else:
- _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
- raise FileNotFoundError()
-
-
-def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
- state_dict = load_state_dict(checkpoint_path, use_ema)
- model.load_state_dict(state_dict, strict=strict)
-
-
-def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
- resume_epoch = None
- if os.path.isfile(checkpoint_path):
- checkpoint = torch.load(checkpoint_path, map_location='cpu')
- if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
- if log_info:
- _logger.info('Restoring model state from checkpoint...')
- new_state_dict = OrderedDict()
- for k, v in checkpoint['state_dict'].items():
- name = k[7:] if k.startswith('module') else k
- new_state_dict[name] = v
- model.load_state_dict(new_state_dict)
-
- if optimizer is not None and 'optimizer' in checkpoint:
- if log_info:
- _logger.info('Restoring optimizer state from checkpoint...')
- optimizer.load_state_dict(checkpoint['optimizer'])
-
- if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
- if log_info:
- _logger.info('Restoring AMP loss scaler state from checkpoint...')
- loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
-
- if 'epoch' in checkpoint:
- resume_epoch = checkpoint['epoch']
- if 'version' in checkpoint and checkpoint['version'] > 1:
- resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
-
- if log_info:
- _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
- else:
- model.load_state_dict(checkpoint)
- if log_info:
- _logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
- return resume_epoch
- else:
- _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
- raise FileNotFoundError()
-
-
-def load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True):
- if cfg is None:
- cfg = getattr(model, 'default_cfg')
- if cfg is None or 'url' not in cfg or not cfg['url']:
- _logger.warning("Pretrained model URL is invalid, using random initialization.")
- return
-
- state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu')
-
- if filter_fn is not None:
- state_dict = filter_fn(state_dict)
-
- if in_chans == 1:
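-            # adapt RGB pretrained weights to 1-channel input by summing the first conv's filters
-            # over the input-channel dimension (done in float32, then cast back)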
- conv1_name = cfg['first_conv']
- _logger.info('Converting first conv (%s) pretrained weights from 3 to 1 channel' % conv1_name)
- conv1_weight = state_dict[conv1_name + '.weight']
- # Some weights are in torch.half, ensure it's float for sum on CPU
- conv1_type = conv1_weight.dtype
- conv1_weight = conv1_weight.float()
- O, I, J, K = conv1_weight.shape
- if I > 3:
- assert conv1_weight.shape[1] % 3 == 0
- # For models with space2depth stems
- conv1_weight = conv1_weight.reshape(O, I // 3, 3, J, K)
- conv1_weight = conv1_weight.sum(dim=2, keepdim=False)
- else:
- conv1_weight = conv1_weight.sum(dim=1, keepdim=True)
- conv1_weight = conv1_weight.to(conv1_type)
- state_dict[conv1_name + '.weight'] = conv1_weight
- elif in_chans != 3:
- conv1_name = cfg['first_conv']
- conv1_weight = state_dict[conv1_name + '.weight']
- conv1_type = conv1_weight.dtype
- conv1_weight = conv1_weight.float()
- O, I, J, K = conv1_weight.shape
- if I != 3:
- _logger.warning('Deleting first conv (%s) from pretrained weights.' % conv1_name)
- del state_dict[conv1_name + '.weight']
- strict = False
- else:
- # NOTE this strategy should be better than random init, but there could be other combinations of
- # the original RGB input layer weights that'd work better for specific cases.
- _logger.info('Repeating first conv (%s) weights in channel dim.' % conv1_name)
- repeat = int(math.ceil(in_chans / 3))
- conv1_weight = conv1_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
- conv1_weight *= (3 / float(in_chans))
- conv1_weight = conv1_weight.to(conv1_type)
- state_dict[conv1_name + '.weight'] = conv1_weight
-
- classifier_name = cfg['classifier']
- if num_classes == 1000 and cfg['num_classes'] == 1001:
- # special case for imagenet trained models with extra background class in pretrained weights
- classifier_weight = state_dict[classifier_name + '.weight']
- state_dict[classifier_name + '.weight'] = classifier_weight[1:]
- classifier_bias = state_dict[classifier_name + '.bias']
- state_dict[classifier_name + '.bias'] = classifier_bias[1:]
- elif num_classes != cfg['num_classes']:
- # completely discard fully connected for all other differences between pretrained and created model
- del state_dict[classifier_name + '.weight']
- del state_dict[classifier_name + '.bias']
- strict = False
-
- model.load_state_dict(state_dict, strict=strict)
-
-
-def extract_layer(model, layer):
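-    # walk a dotted layer path (e.g. 'blocks.0.conv') through attributes / integer indices
-    # and return the corresponding sub-module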
- layer = layer.split('.')
- module = model
- if hasattr(model, 'module') and layer[0] != 'module':
- module = model.module
- if not hasattr(model, 'module') and layer[0] == 'module':
- layer = layer[1:]
- for l in layer:
- if hasattr(module, l):
- if not l.isdigit():
- module = getattr(module, l)
- else:
- module = module[int(l)]
- else:
- return module
- return module
-
-
-def set_layer(model, layer, val):
- layer = layer.split('.')
- module = model
- if hasattr(model, 'module') and layer[0] != 'module':
- module = model.module
- lst_index = 0
- module2 = module
- for l in layer:
- if hasattr(module2, l):
- if not l.isdigit():
- module2 = getattr(module2, l)
- else:
- module2 = module2[int(l)]
- lst_index += 1
- lst_index -= 1
- for l in layer[:lst_index]:
- if not l.isdigit():
- module = getattr(module, l)
- else:
- module = module[int(l)]
- l = layer[lst_index]
- setattr(module, l, val)
-
-
-def adapt_model_from_string(parent_module, model_string):
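-    # model_string encodes per-layer shapes as 'name:[d0, d1, ...]***name:[...]***...'; rebuild the
-    # parent module's conv / batchnorm / linear layers with those (typically pruned) shapes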
- separator = '***'
- state_dict = {}
- lst_shape = model_string.split(separator)
- for k in lst_shape:
- k = k.split(':')
- key = k[0]
- shape = k[1][1:-1].split(',')
- if shape[0] != '':
- state_dict[key] = [int(i) for i in shape]
-
- new_module = deepcopy(parent_module)
- for n, m in parent_module.named_modules():
- old_module = extract_layer(parent_module, n)
- if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
- if isinstance(old_module, Conv2dSame):
- conv = Conv2dSame
- else:
- conv = nn.Conv2d
- s = state_dict[n + '.weight']
- in_channels = s[1]
- out_channels = s[0]
- g = 1
- if old_module.groups > 1:
- in_channels = out_channels
- g = in_channels
- new_conv = conv(
- in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
- bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
- groups=g, stride=old_module.stride)
- set_layer(new_module, n, new_conv)
- if isinstance(old_module, nn.BatchNorm2d):
- new_bn = nn.BatchNorm2d(
- num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
- affine=old_module.affine, track_running_stats=True)
- set_layer(new_module, n, new_bn)
- if isinstance(old_module, nn.Linear):
- # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
- num_features = state_dict[n + '.weight'][1]
- new_fc = Linear(
- in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
- set_layer(new_module, n, new_fc)
- if hasattr(new_module, 'num_features'):
- new_module.num_features = num_features
- new_module.eval()
- parent_module.eval()
-
- return new_module
-
-
-def adapt_model_from_file(parent_module, model_variant):
- adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt')
- with open(adapt_file, 'r') as f:
- return adapt_model_from_string(parent_module, f.read().strip())
-
-
-def default_cfg_for_features(default_cfg):
- default_cfg = deepcopy(default_cfg)
- # remove default pretrained cfg fields that don't have much relevance for feature backbone
- to_remove = ('num_classes', 'crop_pct', 'classifier') # add default final pool size?
- for tr in to_remove:
- default_cfg.pop(tr, None)
- return default_cfg
-
-
-def build_model_with_cfg(
- model_cls: Callable,
- variant: str,
- pretrained: bool,
- default_cfg: dict,
- model_cfg: dict = None,
- feature_cfg: dict = None,
- pretrained_strict: bool = True,
- pretrained_filter_fn: Callable = None,
- **kwargs):
- pruned = kwargs.pop('pruned', False)
- features = False
- feature_cfg = feature_cfg or {}
-
- if kwargs.pop('features_only', False):
- features = True
- feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
- if 'out_indices' in kwargs:
- feature_cfg['out_indices'] = kwargs.pop('out_indices')
-
- model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)
- model.default_cfg = deepcopy(default_cfg)
-
- if pruned:
- model = adapt_model_from_file(model, variant)
-
- # for classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
- num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
- if pretrained:
- load_pretrained(
- model,
- num_classes=num_classes_pretrained, in_chans=kwargs.get('in_chans', 3),
- filter_fn=pretrained_filter_fn, strict=pretrained_strict)
-
- if features:
- feature_cls = FeatureListNet
- if 'feature_cls' in feature_cfg:
- feature_cls = feature_cfg.pop('feature_cls')
- if isinstance(feature_cls, str):
- feature_cls = feature_cls.lower()
- if 'hook' in feature_cls:
- feature_cls = FeatureHookNet
- else:
- assert False, f'Unknown feature class {feature_cls}'
- model = feature_cls(model, **feature_cfg)
- model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg
-
- return model
diff --git a/spaces/zlpnvrtnk/dvatch_captcha_sneedium_fork2/README.md b/spaces/zlpnvrtnk/dvatch_captcha_sneedium_fork2/README.md
deleted file mode 100644
index efbb340d53fc173bad8feb11f57bc9f42193746f..0000000000000000000000000000000000000000
--- a/spaces/zlpnvrtnk/dvatch_captcha_sneedium_fork2/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Bruh
-emoji: ⚡
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: true
-duplicated_from: sneedium/dvatch_captcha_sneedium
----
-
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/zxy666/bingo-chatai666/src/components/button-scroll-to-bottom.tsx b/spaces/zxy666/bingo-chatai666/src/components/button-scroll-to-bottom.tsx
deleted file mode 100644
index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000
--- a/spaces/zxy666/bingo-chatai666/src/components/button-scroll-to-bottom.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-import { useAtBottom } from '@/lib/hooks/use-at-bottom'
-import { Button, type ButtonProps } from '@/components/ui/button'
-import { IconArrowDown } from '@/components/ui/icons'
-
-export function ButtonScrollToBottom({ className, ...props }: ButtonProps) {
- const isAtBottom = useAtBottom()
-
-  // NOTE: plausible reconstruction based on the imports above (Button, IconArrowDown, cn,
-  // useAtBottom); the utility class names are illustrative.
-  return (
-    <Button
-      variant="outline"
-      size="icon"
-      className={cn(
-        'absolute right-4 bottom-20 z-10 transition-opacity duration-300',
-        isAtBottom ? 'opacity-0' : 'opacity-100',
-        className
-      )}
-      onClick={() => window.scrollTo({ top: document.body.offsetHeight, behavior: 'smooth' })}
-      {...props}
-    >
-      <IconArrowDown />
-      <span className="sr-only">Scroll to bottom</span>
-    </Button>
-  )
-}