diff --git a/spaces/1line/AutoGPT/tests/unit/json_tests.py b/spaces/1line/AutoGPT/tests/unit/json_tests.py
deleted file mode 100644
index 25c383377708359b5cfec28e0625343c5692f15c..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/tests/unit/json_tests.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import unittest
-
-from autogpt.json_utils.json_fix_llm import fix_and_parse_json
-
-
-class TestParseJson(unittest.TestCase):
- def test_valid_json(self):
- # Test that a valid JSON string is parsed correctly
- json_str = '{"name": "John", "age": 30, "city": "New York"}'
- obj = fix_and_parse_json(json_str)
- self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})
-
- def test_invalid_json_minor(self):
- # Test that a JSON string with a minor error (a trailing comma) is fixed without GPT
- json_str = '{"name": "John", "age": 30, "city": "New York",}'
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False),
- {"name": "John", "age": 30, "city": "New York"},
- )
-
- def test_invalid_json_major_with_gpt(self):
- # Test that a severely malformed JSON string is fixed when try_to_fix_with_gpt is True
- json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=True),
- {"name": "John", "age": 30, "city": "New York"},
- )
-
- def test_invalid_json_major_without_gpt(self):
- # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
- json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
- # Assert that this raises an exception:
- with self.assertRaises(Exception):
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
- def test_invalid_json_leading_sentence(self):
- # Test that JSON preceded by a leading sentence is parsed correctly without GPT
- json_str = """I suggest we start by browsing the repository to find any issues that we can fix.
-
-{
- "command": {
- "name": "browse_website",
- "args":{
- "url": "https://github.com/Torantulino/Auto-GPT"
- }
- },
- "thoughts":
- {
- "text": "I suggest we start browsing the repository to find any issues that we can fix.",
- "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
- "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
- "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
- "speak": "I will start browsing the repository to find any issues we can fix."
- }
-}"""
- good_obj = {
- "command": {
- "name": "browse_website",
- "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
- },
- "thoughts": {
- "text": "I suggest we start browsing the repository to find any issues that we can fix.",
- "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
- "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
- "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
- "speak": "I will start browsing the repository to find any issues we can fix.",
- },
- }
- # Assert that the JSON is parsed correctly despite the leading sentence:
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
- )
-
- def test_invalid_json_leading_sentence_2(self):
- # Test that another JSON string with a leading sentence is parsed correctly without GPT
- json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
-
-{
- "command": {
- "name": "browse_website",
- "args":{
- "url": "https://github.com/Torantulino/Auto-GPT"
- }
- },
- "thoughts":
- {
- "text": "Browsing the repository to identify potential bugs",
- "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
- "plan": "- Analyze the repository for potential bugs and areas of improvement",
- "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
- "speak": "I am browsing the repository to identify potential bugs."
- }
-}"""
- good_obj = {
- "command": {
- "name": "browse_website",
- "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
- },
- "thoughts": {
- "text": "Browsing the repository to identify potential bugs",
- "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
- "plan": "- Analyze the repository for potential bugs and areas of improvement",
- "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
- "speak": "I am browsing the repository to identify potential bugs.",
- },
- }
- # Assert that the JSON is parsed correctly despite the leading sentence:
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
- )
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Customize Your Car and Race with Friends.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Customize Your Car and Race with Friends.md
deleted file mode 100644
index 1bc7800d613a2598a1a9da856a5ddc5128832ab0..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Customize Your Car and Race with Friends.md
+++ /dev/null
@@ -1,128 +0,0 @@
-
-
Car Parking Games: How to Master the Art of Parking
-
Do you love driving cars but hate parking them? Do you want to test your parking skills in different scenarios and environments? Do you enjoy playing games that are realistic, fun, and challenging? If you answered yes to any of these questions, then you might be interested in car parking games.
-
Introduction
-
Car parking games are a genre of video games that simulate parking cars in various situations. They can be played on different devices, such as computers, smartphones, tablets, or consoles. Car parking games can have different objectives, such as completing a level within a time limit, avoiding collisions, or collecting coins. Car parking games can also have different themes, such as urban, rural, futuristic, or fantasy.
Car parking games are a type of driving game that focuses on parking cars rather than racing them. They usually involve maneuvering a car into a designated parking spot, following the rules of the road and traffic signs. Some car parking games also include other tasks, such as towing, delivering, or escaping.
-
Why are car parking games fun and challenging?
-
Car parking games are fun and challenging because they require precision, patience, and strategy. They can also be very realistic, with 3D graphics, physics, and sounds. Car parking games can offer a variety of levels, from easy to hard, with different cars, locations, and obstacles. Some car parking games also have multiplayer modes, where you can compete or cooperate with other players online.
-
How can car parking games improve your driving skills?
-
Car parking games can improve your driving skills by teaching you how to park cars in different situations. They can help you improve your spatial awareness, coordination, and reaction time. They can also help you learn how to follow traffic rules, signs, and signals. Car parking games can also make you more confident and comfortable behind the wheel.
-
Types of car parking games
-
There are many types of car parking games available online. Here are some of the most popular ones:
-
Simulation games
-
Simulation games are car parking games that aim to be as realistic as possible. They usually have high-quality graphics, physics, and sounds. They also have a variety of cars, locations, and scenarios. Some examples of simulation car parking games are:
-
Car Parking Multiplayer
-
This is a multiplayer open world game that lets you park cars in real gas stations and car services. You can also race against other players, exchange cars with them, or chat with them using voice chat. You can customize your car with different engines, turbos, gearboxes, exhausts, vinyls, and body parts. You can also walk around the city and explore different buildings.
-
Real Car Parking 3D
-
This is a realistic 3D game that tests your parking skills in various levels. You can choose from different cars with real interiors and exteriors. You can also adjust the camera angle and view to suit your preference. You can park your car in different places, such as garages, streets, or lots.
-
-
Racing games
-
Racing games are car parking games that involve speed and adrenaline. They also have a lot of obstacles, such as traffic, pedestrians, or police. Some examples of racing car parking games are:
-
Parking Fury 3D: Beach City
-
This is a 3D racing game that lets you drive and park different cars in a sunny beach city. You can steal cars from other drivers, evade the cops, or perform stunts. You can also customize your car with different colors, wheels, and stickers.
-
Drift Parking
-
This is a 2D racing game that challenges you to park your car while drifting. You can choose from different cars with different handling and speed. You can also unlock new levels and locations as you progress.
-
Puzzle games
-
Puzzle games are car parking games that require logic and strategy. They usually have simple graphics and controls, but complex levels and objectives. They also have a lot of fun and creative elements, such as magnets, portals, or bombs. Some examples of puzzle car parking games are:
-
Parking Jam Escape
-
This is a puzzle game that asks you to clear the traffic jam by moving the cars in the right order. You can swipe to move the cars horizontally or vertically. You can also use hints or undo moves if you get stuck. You can play hundreds of levels with different cars and difficulties.
-
Hexa Parking
-
This is a puzzle game that involves parking hexagonal cars on a hexagonal grid. You can tap to rotate the cars and drag them to the right spot. You can also collect coins and stars to unlock new cars and levels. You can play in different modes, such as classic, time trial, or challenge.
-
Tips and tricks for car parking games
-
Car parking games can be fun and rewarding, but they can also be frustrating and difficult. Here are some tips and tricks that can help you master the art of parking:
-
Choose the right vehicle for the level
-
Some car parking games allow you to choose from different vehicles, such as cars, trucks, buses, or motorcycles. Each vehicle has its own advantages and disadvantages, such as size, speed, handling, or fuel. You should choose the vehicle that suits the level best, depending on the space, time limit, or obstacles.
-
Adjust the camera angle and view
-
Some car parking games let you adjust the camera angle and view to see your car better. You can switch between different views, such as top-down, side-view, or first-person. You can also zoom in or out to see more details or the bigger picture. You should use the camera angle and view that helps you park your car more accurately and safely.
-
Follow the directions and indicators
-
Some car parking games give you directions and indicators to guide you to your parking spot. You should follow them carefully and pay attention to the signs, arrows, or lines. They can help you avoid getting lost, going the wrong way, or breaking the rules.
-
Avoid collisions and obstacles
-
Some car parking games have collisions and obstacles that can damage your car or make you fail the level. You should avoid them as much as possible and drive carefully and slowly. You should also use your brakes, steering wheel, or mirrors to control your car better.
-
Practice and learn from your mistakes
-
Some car parking games have a lot of levels that get harder and harder as you progress. You should practice and learn from your mistakes to improve your skills and confidence. You should also try different strategies and techniques to find what works best for you.
-
Conclusion
-
Car parking games are a great way to have fun and challenge yourself while improving your driving skills. They can offer a variety of types, themes, objectives, and difficulties to suit your preferences and needs. They can also teach you how to park cars in different situations and environments.
-
Summary of the main points
-
In this article, we have covered:
-
-
What are car parking games?
-
Why are car parking games fun and challenging?
-
How can car parking games improve your driving skills?
-
Types of car parking games: simulation, racing, puzzle.
-
Tips and tricks for car parking games: choose the right vehicle, adjust the camera angle and view, follow the directions and indicators, avoid collisions and obstacles, practice and learn from your mistakes.
-
-
Call to action
-
If you are interested in playing some car parking games online for free, you can check out some of the examples we have mentioned in this article. You can also browse through other websites that offer a wide range of car parking games for different devices and platforms. You can also share your feedback, opinions, or suggestions with us in the comments section below. We would love to hear from you and see how you are doing with your parking skills.
-
FAQs
-
Here are some of the frequently asked questions about car parking games:
-
What are the benefits of playing car parking games?
-
Some of the benefits of playing car parking games are:
-
-
They can improve your driving skills, such as spatial awareness, coordination, and reaction time.
-
They can make you more confident and comfortable behind the wheel.
-
They can provide entertainment and challenge for different ages and preferences.
-
They can stimulate your brain and enhance your cognitive abilities.
-
-
What are the drawbacks of playing car parking games?
-
Some of the drawbacks of playing car parking games are:
-
-
They can be addictive and time-consuming, especially if you play them too much or too often.
-
They can be frustrating and stressful, especially if you fail or get stuck on a level.
-
They can be unrealistic and misleading, especially if they do not follow the real rules and laws of driving and parking.
-
They can be harmful to your eyesight and posture, especially if you play them on a small screen or without proper lighting and ergonomics.
-
-
How to play car parking games on a computer?
-
To play car parking games on a computer, you need to have a compatible device, such as a laptop or a desktop, with a good internet connection. You also need to have a web browser that supports HTML5 or Flash. You can then visit any website that offers car parking games online and choose the game you want to play. You can use your keyboard, mouse, or touchpad to control your car and park it in the designated spot.
-
How to play car parking games on a smartphone or tablet?
-
To play car parking games on a smartphone or tablet, you need to have a compatible device, such as an iPhone, iPad, Android phone, or tablet, with a good internet connection. You also need to have an app store that allows you to download and install car parking games on your device. You can then search for any car parking game you want to play and download it on your device. You can use your touchscreen to control your car and park it in the designated spot.
-
How to play car parking games with friends?
-
To play car parking games with friends, you need to have a compatible device, such as a computer, smartphone, tablet, or console, with a good internet connection. You also need to have a car parking game that supports multiplayer mode, either online or offline. You can then invite your friends to join you in the game and compete or cooperate with them in different levels and scenarios. You can also chat with them using voice chat or text chat while playing.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chicken Gun Mod Menu APK A Crazy and Hilarious Shooting Game with Unlimited Coins.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chicken Gun Mod Menu APK A Crazy and Hilarious Shooting Game with Unlimited Coins.md
deleted file mode 100644
index af426224a74aa688f1e286bc24b2994fbe167522..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chicken Gun Mod Menu APK A Crazy and Hilarious Shooting Game with Unlimited Coins.md
+++ /dev/null
@@ -1,171 +0,0 @@
-
-
Chicken Gun Mod Menu APK Indir: A Fun and Quirky Shooter Game for Android
-
If you are looking for a hilarious and action-packed shooter game for your Android device, you might want to check out Chicken Gun. This game lets you play as armed chickens that shoot and fight with each other in various modes and maps. You can also download the Chicken Gun Mod Menu APK Indir, which gives you access to unlimited coins, mega menu, and other features that will make your gameplay more enjoyable. In this article, we will tell you everything you need to know about Chicken Gun and its modded version, as well as some tips and tricks to help you win more matches.
-
What is Chicken Gun?
-
Chicken Gun is an online multiplayer shooter game developed by ChaloApps. It has over 50 million downloads on Google Play Store and a 4.4-star rating from more than 400 thousand users. The game is rated Teen for violence, blood, and crude humor.
Chicken Gun has many features that make it a fun and unique game, such as:
-
-
Chicken characters in the roleplay. You can choose from different types of chickens, such as rooster, hen, duck, turkey, or even robot chicken. Each chicken has its own stats, such as health, speed, damage, and armor.
-
Classic maps to figure out the enemies. You can play on various maps inspired by real-world locations, such as farm, city, desert, forest, or space. Each map has its own obstacles, hiding spots, and strategic points.
-
Shooting simulation in an ultimate way. You can use different weapons to shoot your enemies, such as shotguns, machine guns, pistols, assault rifles, and carbines. You can also throw explosive eggs that can cause massive damage to your opponents.
-
User interface with ease. You can easily control your chicken with simple touch gestures. You can move around with the joystick, aim with the crosshair, shoot with the fire button, jump with the jump button, and switch weapons with the weapon button.
-
Multiplayer online gameplay to enjoy. You can play with or against other players from around the world in two modes: 5 vs 5 team deathmatch or free-for-all. You can also chat with your teammates or enemies using the chat feature.
-
Animated characters. You can enjoy the funny animations of your chicken and its enemies as they shoot, run, jump, die, or celebrate. You can also see the blood splatter and bullet holes on the screen.
-
Huge arsenal and explosives. You can customize your chicken and weapon with various items that you can buy with coins or gems. You can change your chicken's beak, sneakers, caps, glasses, masks, or even wings. You can also upgrade your weapon's power, accuracy, fire rate, or magazine size.
-
-
How to play Chicken Gun
-
To play Chicken Gun, you need to follow these steps:
-
-
Download and install the game from Google Play Store or from the link provided below.
-
Launch the game and choose your preferred language.
-
Create your account or log in with your existing account.
-
Select a mode: team deathmatch or free-for-all.
-
Select a map: farm, city, desert, forest, or space.
-
Select a chicken: rooster, hen, duck, turkey, or robot chicken.
-
Select a weapon: shotgun, machine gun, pistol, assault rifle, or carbine.
-
Join a match and start shooting your enemies.
-
Earn coins and gems by killing enemies, winning matches, or watching ads.
-
Use coins and gems to buy items and upgrade your chicken and weapon.
-
-
What is Chicken Gun Mod Menu APK Indir?
-
Chicken Gun Mod Menu APK Indir is a modified version of the original Chicken Gun game that gives you some extra features and advantages that are not available in the official game. You can download the Chicken Gun Mod Menu APK Indir from the link provided below.
-
Benefits of Chicken Gun Mod Menu APK Indir
-
Some of the benefits of Chicken Gun Mod Menu APK Indir are:
-
-
Unlimited coins and gems. You can get unlimited coins and gems in the game without spending any real money or watching any ads. You can use them to buy any items and upgrade your chicken and weapon as much as you want.
-
Mega menu. You can access a mega menu that lets you activate or deactivate various cheats and hacks in the game, such as god mode, infinite ammo, one-hit kill, speed hack, wall hack, no recoil, no spread, and more. You can also change your chicken's size, color, or voice.
-
No root required. You do not need to root your device to install or use the Chicken Gun Mod Menu APK Indir. It works on any Android device that supports the original game.
-
No ban risk. You do not have to worry about getting banned from the game or losing your account when using the Chicken Gun Mod Menu APK Indir. It has an anti-ban feature that protects you from detection by the game's security system.
-
-
How to download and install Chicken Gun Mod Menu APK Indir
-
To download and install the Chicken Gun Mod Menu APK Indir, you need to follow these steps:
-
-
Click on the link below to download the Chicken Gun Mod Menu APK Indir file on your device.
-
Go to your device's settings and enable the installation of apps from unknown sources.
-
Locate the downloaded file and tap on it to start the installation process.
-
Follow the instructions on the screen and wait for the installation to finish.
-
Launch the game and enjoy the modded features.
-
-
Tips and tricks for Chicken Gun
-
If you want to improve your skills and performance in Chicken Gun, you can use these tips and tricks:
-
-
Choose your chicken wisely
-
Different chickens have different stats and abilities in the game. You should choose a chicken that suits your play style and strategy. For example, if you want to be fast and agile, you can choose a duck or a robot chicken. If you want to be tanky and durable, you can choose a turkey or a rooster. If you want to be balanced and versatile, you can choose a hen.
-
Use explosive eggs strategically
-
Explosive eggs are powerful weapons that can cause a lot of damage to your enemies. However, they are also limited in number and take time to reload. Therefore, you should use them wisely and strategically. For example, you can use them to break walls or doors, to create diversions or traps, to hit multiple enemies at once, or to finish off low-health enemies.
-
Aim for the head
-
Aiming for the head is one of the best ways to deal more damage and kill your enemies faster in Chicken Gun. Headshots can deal double or triple damage depending on your weapon and distance. However, aiming for the head is also more difficult and risky than aiming for the body. Therefore, you should practice your aim and timing in different situations and scenarios.
-
Customize your chicken and weapon
- You can customize your chicken and weapon with various items and accessories that you can buy with coins or gems, such as beaks, sneakers, caps, glasses, masks, or wings, as well as upgrades to your weapon's power, accuracy, fire rate, or magazine size. Choose a combination that suits your play style and strategy.
-
Review of Chicken Gun
-
Chicken Gun is a game that has received a lot of positive reviews and feedback from its users. Here are some of the pros and cons of Chicken Gun, as well as some of the user ratings and feedback.
-
Pros and cons of Chicken Gun
-
Some of the pros of Chicken Gun are:
-
-
Fun and quirky gameplay. Chicken Gun is a game that offers a lot of fun and laughter with its hilarious and action-packed gameplay. You can enjoy shooting and fighting with other chickens in various modes and maps, using different weapons and explosive eggs.
-
Smooth and easy controls. Chicken Gun is a game that has smooth and easy controls that let you control your chicken with simple touch gestures. You can move around, aim, shoot, jump, and switch weapons with ease.
-
Customizable and upgradeable characters and weapons. Chicken Gun is a game that lets you customize and upgrade your chicken and weapon with various items that you can buy with coins or gems. You can change your chicken's appearance, stats, and abilities, as well as your weapon's power, accuracy, fire rate, and magazine size.
-
Multiplayer online mode. Chicken Gun is a game that lets you play with or against other players from around the world in two modes: 5 vs 5 team deathmatch or free-for-all. You can also chat with your teammates or enemies using the chat feature.
-
-
Some of the cons of Chicken Gun are:
-
-
Ads and in-app purchases. Chicken Gun is a game that has ads and in-app purchases that can be annoying and expensive. You have to watch ads to earn coins or gems, or to unlock some features. You also have to spend real money to buy more coins or gems, or to remove ads.
-
Bugs and glitches. Chicken Gun is a game that has some bugs and glitches that can affect your gameplay and experience. Some of the bugs and glitches include crashing, freezing, lagging, disconnecting, or losing progress.
-
Cheaters and hackers. Chicken Gun is a game that has some cheaters and hackers that can ruin your gameplay and fun. Some of the cheaters and hackers use modded versions of the game, such as the Chicken Gun Mod Menu APK Indir, to activate cheats and hacks, such as god mode, infinite ammo, one-hit kill, speed hack, wall hack, no recoil, no spread, and more.
-
-
User ratings and feedback
-
Here are some of the user ratings and feedback for Chicken Gun:
-
-
-
- A Google user (5 stars): "This game is awesome! I love the graphics, the gameplay, the weapons, the maps, everything! It's so funny and addictive. I play it every day with my friends. It's the best shooter game ever!"
- A Google user (4 stars): "This game is good but it has some problems. The ads are too many and too long. The in-app purchases are too expensive. The bugs and glitches are too frequent. The cheaters and hackers are too annoying. Please fix these issues."
- A Google user (3 stars): "This game is okay but it could be better. The gameplay is fun but it gets boring after a while. The controls are easy but they are not responsive sometimes. The customization is cool but it is not enough. The multiplayer mode is nice but it is not stable sometimes. Please improve these aspects."
- A Google user (2 stars): "This game is bad but it has some potential. The gameplay is lame but it has some humor. The controls are hard but they are adjustable. The customization is poor but it has some variety. The multiplayer mode is awful but it has some options. Please work on these areas."
- A Google user (1 star): "This game is terrible! I hate the graphics, the gameplay, the weapons, the maps, everything! It's so boring and frustrating. I never play it with anyone. It's the worst shooter game ever!"
-
-
-
Conclusion
-
In conclusion, Chicken Gun is a fun and quirky shooter game for Android that lets you play as armed chickens that shoot and fight with each other in various modes and maps. You can also download the Chicken Gun Mod Menu APK Indir, which gives you access to unlimited coins, mega menu, and other features that will make your gameplay more enjoyable. However, the game also has some drawbacks, such as ads, in-app purchases, bugs, glitches, cheaters, and hackers. Therefore, you should be careful and responsible when playing the game. If you are looking for a hilarious and action-packed shooter game for your Android device, you might want to check out Chicken Gun.
-
FAQs
-
Here are some of the frequently asked questions about Chicken Gun and its modded version:
-
-
Is Chicken Gun free to play? Yes, Chicken Gun is free to play, but it has ads and in-app purchases that can be annoying and expensive.
-
Is Chicken Gun safe to play? Yes, Chicken Gun is safe to play, but it has some bugs and glitches that can affect your gameplay and experience. You should also be aware of the cheaters and hackers that can ruin your gameplay and fun.
-
Is Chicken Gun Mod Menu APK Indir legal to use? No, Chicken Gun Mod Menu APK Indir is not legal to use, as it violates the terms and conditions of the original game. You should also be careful of the risks of getting banned from the game or losing your account when using the modded version.
-
Is Chicken Gun Mod Menu APK Indir compatible with all Android devices? No, Chicken Gun Mod Menu APK Indir is not compatible with all Android devices. It only works on devices that support the original game. You should also make sure that your device has enough storage space and battery life to run the game smoothly.
-
How can I contact the developer of Chicken Gun? You can contact the developer of Chicken Gun by sending an email to chaloapps@gmail.com or by visiting their Facebook page at https://www.facebook.com/ChaloApps-1008960989276920/.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Construction Simulator 2015 Build Your Dream Projects with Realistic Machines.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Construction Simulator 2015 Build Your Dream Projects with Realistic Machines.md
deleted file mode 100644
index bddfa88f748e0a0761a53e5eeb2b4c2b4df3aea0..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Construction Simulator 2015 Build Your Dream Projects with Realistic Machines.md
+++ /dev/null
@@ -1,115 +0,0 @@
-
-
Construction Simulator 2015: A Review
-
If you have ever dreamed of driving a huge excavator, a crane, or a truck, then you might want to check out Construction Simulator 2015. This is a simulation game that lets you take the controls of 16 realistic construction machines made by LIEBHERR, STILL, and MAN. You can build houses, bridges, roads, and more in a large open world with high-quality 3D graphics. But is this game worth your time and money? Let's find out in this review.
-
What is Construction Simulator 2015?
-
A simulation game that lets you operate realistic construction machines
-
Construction Simulator 2015 is a game that simulates the work of a construction company. You can choose from various missions that range from simple digging to complex engineering projects. You can also create your own missions using the editor or download user-made mods from the Steam Workshop. You can operate different types of machines, such as excavators, cranes, loaders, trucks, forklifts, and more. Each machine has its own functions, controls, and physics. You can switch between first-person and third-person views to get the best perspective of your work.
A game developed by weltenbauer and published by astragon Entertainment
-
Construction Simulator 2015 is a game developed by weltenbauer Software Entwicklung GmbH, a German company that specializes in simulation software. The game was released in November 2014 for Windows and Mac OS. It is the sequel to Construction Simulator 2014, which was a mobile game for iOS and Android devices. The game was published by astragon Entertainment GmbH, another German company that focuses on simulation games. The game has received mostly positive reviews from critics and players alike.
-
A game that features 16 different vehicles, various missions, and a multiplayer mode
-
Construction Simulator 2015 offers you a lot of variety in terms of gameplay. You can choose from 16 different vehicles that are based on real models from LIEBHERR, STILL, and MAN. Each vehicle has its own characteristics, advantages, and disadvantages. You can also customize your vehicles with different colors and accessories. You can take on over 200 missions that cover different aspects of construction work. You can also play with up to four friends in the online multiplayer mode. You can cooperate or compete with other players in building projects or creating your own company.
-
What are the main features of Construction Simulator 2015?
-
High-quality 3D graphics and realistic physics
-
One of the main attractions of Construction Simulator 2015 is its high-quality 3D graphics. The game boasts detailed models of vehicles, buildings, environments, and people. The game also features realistic physics that affect the behavior of the machines and the materials. You can see how the soil reacts to your digging, how the concrete flows from your bucket, how the crane sways with the wind, and more. The game also supports various graphical settings that allow you to adjust the performance according to your system.
-
A large open world with different locations and scenarios
-
Another feature of Construction Simulator 2015 is its large open world that spans over 15 square kilometers. The game offers
different locations and scenarios that you can explore and work on. You can visit the city, the countryside, the industrial area, the harbor, and more. You can also encounter different weather conditions, such as rain, snow, fog, and sunshine. The game also features a day-night cycle that affects the visibility and the traffic. You can also interact with other characters, such as your workers, your clients, and your competitors.
-
A dynamic economy system and a day-night cycle
-
Another feature of Construction Simulator 2015 is its dynamic economy system that influences your income and expenses. The game simulates the supply and demand of the construction market, as well as the fluctuations of the prices. You have to manage your budget wisely and invest in new vehicles, equipment, and staff. You also have to pay for fuel, maintenance, wages, taxes, and more. The game also features a day-night cycle that affects your working hours and the availability of missions. You have to plan your schedule carefully and balance your workload and your rest.
-
A modding support and a Steam Workshop integration
-
Another feature of Construction Simulator 2015 is its modding support and its Steam Workshop integration. The game allows you to create your own content using the editor or using external tools. You can create new vehicles, buildings, missions, maps, and more. You can also share your creations with other players through the Steam Workshop. You can also download and install mods made by other users to enhance your gaming experience. The game has a large and active modding community that offers a lot of variety and quality content.
-
What are the pros and cons of Construction Simulator 2015?
-
Pros: fun, challenging, educational, and immersive gameplay
-
One of the pros of Construction Simulator 2015 is its fun, challenging, educational, and immersive gameplay. The game offers you a lot of opportunities to learn about the construction industry and the machines involved. You can also enjoy the thrill of operating heavy machinery and completing complex projects. The game also immerses you in a realistic and lively world that reacts to your actions. The game can be very satisfying and rewarding when you see the results of your work.
-
Cons: steep learning curve, repetitive tasks, technical issues, and expensive DLCs
-
One of the cons of Construction Simulator 2015 is its steep learning curve, repetitive tasks, technical issues, and expensive DLCs. The game can be very difficult and frustrating for beginners who are not familiar with the controls and the mechanics. The game also requires a lot of patience and precision to perform some tasks that can be tedious and boring. The game also suffers from some technical issues, such as bugs, glitches, crashes, and performance problems. The game also has a lot of DLCs that add new vehicles and missions, but they are quite expensive compared to the base game.
If you are interested in getting Construction Simulator 2015, you can buy it on Steam for Windows and Mac OS. The game costs $14.99 USD (or equivalent in other currencies) for the base game. You can also buy the Gold Edition that includes the base game and all DLCs for $24.99 USD (or equivalent in other currencies). You can also buy each DLC separately for $1.99 USD to $4.99 USD (or equivalent in other currencies) each.
-
Price, system requirements, and ratings
-
Before you buy Construction Simulator 2015, you should check if your system meets the minimum or recommended requirements for the game. Here are the system requirements for Windows:
-
| Requirement | Minimum | Recommended |
| --- | --- | --- |
| OS | Windows Vista/7/8 | Windows Vista/7/8 |
| Processor | Dual-Core Processor with 2.4 GHz | Dual or Quad-Core Processor with 3 GHz or better |
| Memory | 4 GB RAM | 4 GB RAM |
| Graphics | DirectX 9 compatible graphics card with 1 GB VRAM (Geforce 400 Series or ATI Radeon HD 57xx, or better) | DirectX 9 compatible graphics card with 2 GB+ RAM (Geforce GTX 660 or ATI Radeon HD 68xx/77xx, or better) |
| DirectX | Version 9.0 | Version 9.0 |
| Storage | 3 GB available space | 3 GB available space |
| Sound Card | Any | Any |
-
- Here are the system requirements for Mac OS:
-
| Requirement | Minimum | Recommended |
| --- | --- | --- |
| OS | Mac OS X 10.8 | Mac OS X 10.10 |
| Processor | Intel Core i5 or i7 Processor | Intel Core i5 or i7 Processor |
| Memory | 4 GB RAM | 4 GB RAM |
| Graphics | ATI Radeon 4670 with 1 GB VRAM or better | NVIDIA GeForce GTX 750M or better |
| Storage | 3 GB available space | 3 GB available space |
| Sound Card | Any | Any |
-
- You should also check the ratings and reviews of the game before you buy it. Here are the ratings and reviews of the game on Steam as of June 21, 2023:
-
Ratings: Mixed (6,543)
Reviews:
- "It's a good game, but it needs more content and polish." - Anonymous
- "I love this game, it's very realistic and fun to play with friends." - John
- "This game is a waste of money, it's full of bugs and glitches." - Lisa
- "This game is very educational and challenging, I learned a lot about construction." - Mark
- "This game is boring and repetitive, it's like doing real work." - Jane
-
-
Conclusion
-
In conclusion, Construction Simulator 2015 is a simulation game that lets you operate realistic construction machines and build various projects in a large open world. The game has high-quality 3D graphics, realistic physics, a dynamic economy system, a multiplayer mode, and a modding support. The game also has some drawbacks, such as a steep learning curve, repetitive tasks, technical issues, and expensive DLCs. The game is available on Steam for Windows and Mac OS for $14.99 USD (or equivalent in other currencies) for the base game or $24.99 USD (or equivalent in other currencies) for the Gold Edition. The game is suitable for anyone who likes simulation games, construction machines, or engineering projects.
-
Frequently Asked Questions (FAQs)
-
Q: How many players can play in the multiplayer mode?
-
A: Up to four players can play in the multiplayer mode. You can join or host a server and invite your friends or other players to join you. You can also chat with other players using the text or voice chat.
-
Q: How do I install mods from the Steam Workshop?
-
A: To install mods from the Steam Workshop, you need to subscribe to the mod you want to download. Then, you need to launch the game and go to the options menu. There, you can enable or disable the mods you have subscribed to.
-
Q: How do I create my own mods for the game?
-
A: To create your own mods for the game, you need to use the editor that comes with the game. You can access the editor from the main menu of the game. There, you can create new vehicles, buildings, missions, maps, and more. You can also use external tools, such as Blender or Photoshop, to create your own models and textures.
-
Q: How do I fix the technical issues of the game?
-
A: To fix the technical issues of the game, you need to update your drivers, verify your game files, run the game as administrator, or reinstall the game. You can also check the official forums or contact the support team for more help.
-
Q: How do I get more money in the game?
-
A: To get more money in the game, you need to complete more missions, sell your vehicles or materials, or use cheats. You can also use mods that increase your income or give you unlimited money.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Devon Ke Dev Mahadev All Episodes Download Google Drive - The Complete Collection of the Legendary Show.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Devon Ke Dev Mahadev All Episodes Download Google Drive - The Complete Collection of the Legendary Show.md
deleted file mode 100644
index 2dfedff139435b9b9cd4493ca147ca5517621ca6..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Devon Ke Dev Mahadev All Episodes Download Google Drive - The Complete Collection of the Legendary Show.md
+++ /dev/null
@@ -1,128 +0,0 @@
-
-
Devon Ke Dev Mahadev: How to Download All Episodes from Google Drive
-
Are you a fan of Devon Ke Dev Mahadev, the popular mythological series based on the life of Lord Shiva? Do you want to watch all the episodes of this epic show without any interruption or hassle? If yes, then you are in luck. In this article, we will show you how to download all the episodes of Devon Ke Dev Mahadev from Google Drive, one of the most reliable and convenient cloud storage platforms. We will also share some tips and tricks to make your downloading experience easier and faster. So, without further ado, let's get started.
-
Introduction
-
What is Devon Ke Dev Mahadev?
-
Devon Ke Dev Mahadev, also known as DKDM, is a series based on the Hindu god, Shiva, also known as Mahadev. It premiered on 18 December 2011, airing Monday to Friday nights on Life OK. The series concluded on 14 December 2014, having completed a total of 820 episodes. The show depicts the story of Shiva, his avatars, his marriage to Parvati, and his battles with various demons and enemies. The show also features other Hindu deities such as Vishnu, Brahma, Ganesha, Kartikeya, Lakshmi, Saraswati, and more. The show is praised for its captivating storyline, stunning visuals, powerful performances, and authentic portrayal of Hindu mythology.
-
Devon Ke Dev Mahadev is a show that can be enjoyed by anyone who loves mythology, drama, romance, action, and spirituality. The show has a loyal fan base that admires the show for its inspiring messages, emotional moments, and thrilling scenes. However, watching the show online can be challenging due to various reasons such as:
-
-
Limited availability: The show is not available on all streaming platforms or websites. Some platforms may require a subscription or registration to access the show. Some websites may have broken links or low-quality videos.
-
Slow internet speed: Streaming online videos can consume a lot of bandwidth and data. If your internet connection is slow or unstable, you may face buffering issues, lagging, or interruptions while watching the show.
-
Offline viewing: Sometimes you may want to watch the show offline without any internet connection. For example, you may want to watch the show while traveling, during a power outage, or when you are in a remote area.
-
-
To overcome these challenges, downloading the episodes of Devon Ke Dev Mahadev from Google Drive can be a great solution. Google Drive is a cloud storage service that allows you to store and access files online. You can also download files from Google Drive to your device for offline viewing. Downloading the episodes from Google Drive has many benefits such as:
-
-
Unlimited availability: You can find all the episodes of Devon Ke Dev Mahadev on Google Drive links shared by other fans or users. You can access these links anytime and anywhere without any restriction or registration.
-
Fast internet speed: Downloading files from Google Drive is faster and smoother than streaming online videos. You can also pause and resume the download as per your convenience. You can save your bandwidth and data by downloading the files once and watching them multiple times.
-
Offline viewing: You can watch the downloaded episodes offline without any internet connection. You can also transfer the files to other devices or share them with your friends and family.
-
-
How to download Devon Ke Dev Mahadev episodes from Google Drive
-
Downloading Devon Ke Dev Mahadev episodes from Google Drive is easy and simple. You just need to follow these four steps:
-
Step 1: Find a Google Drive link that contains the episodes
-
The first step is to find a Google Drive link that contains the episodes of Devon Ke Dev Mahadev. There are many ways to find such links, such as:
-
-
Searching on Google: You can use keywords like "Devon Ke Dev Mahadev Google Drive", "Devon Ke Dev Mahadev all episodes download", "Devon Ke Dev Mahadev full episodes Google Drive", etc. to search for links on Google. You may have to check multiple results and pages to find a working and updated link.
-
Asking on social media: You can also ask for links on social media platforms like Facebook, Twitter, Instagram, Reddit, Quora, etc. You can join groups or communities related to Devon Ke Dev Mahadev or Indian mythology and request for links from other members or admins. You may get some helpful responses or suggestions from other fans or users.
-
Using third-party websites: You can also use third-party websites that provide links to download Devon Ke Dev Mahadev episodes from Google Drive. Some examples of such websites are . However, you should be careful while using these websites as they may contain ads, pop-ups, malware, or viruses. You should also check the credibility and reputation of these websites before using them.
-
-
Once you find a Google Drive link that contains the episodes, you can proceed to the next step.
-
Step 2: Open the link and select the episodes you want to download
-
The second step is to open the Google Drive link and select the episodes you want to download. To do this, you need to:
-
-
Click on the link or copy and paste it in your browser's address bar.
-
Wait for the Google Drive page to load. You may have to sign in with your Google account if you are not already logged in.
-
Browse through the folders and files that contain the episodes of Devon Ke Dev Mahadev. The episodes may be organized by seasons, volumes, or dates.
-
Select the episodes you want to download by clicking on the checkboxes next to them. You can select multiple episodes at once by holding the Ctrl key (Windows) or Command key (Mac) while clicking.
-
-
After selecting the episodes, you can move on to the next step.
Step 3: Click on the download icon or right-click and choose download
-
The third step is to click on the download icon or right-click and choose download to start the download process. To do this, you need to:
-
-
-
After selecting the episodes, click on the download icon that appears on the top right corner of the Google Drive page. Alternatively, you can right-click on the selected episodes and choose download from the menu that appears.
-
A pop-up window will appear asking you to confirm the download. You can see the total file size and number of files that you are about to download. You can also choose to zip the files or not. Zipping the files will compress them and reduce their size, but it will also take longer to download and unzip them later.
-
Click on the download button to confirm the download. You may have to allow Google Drive to download multiple files if your browser blocks them.
-
-
The download will start and you can see the progress in your browser's download manager. You can also pause, resume, or cancel the download as per your preference.
-
Step 4: Wait for the download to finish and enjoy watching the episodes
-
The final step is to wait for the download to finish and enjoy watching the episodes. To do this, you need to:
-
-
Wait for the download to complete. Depending on your internet speed and file size, this may take a few minutes or hours.
-
Once the download is done, you can find the downloaded files in your device's default download folder or location. You can also open them directly from your browser's download manager.
-
If you downloaded zip files, you need to unzip them first before watching them. You can use any software or tool that can extract zip files, such as WinZip, 7-Zip, or PeaZip.
-
After unzipping the files, you can watch the episodes using any media player that can play video files, such as VLC, Windows Media Player, or QuickTime.
-
-
Congratulations! You have successfully downloaded all the episodes of Devon Ke Dev Mahadev from Google Drive. You can now enjoy watching this amazing show at your own pace and convenience.
-
Tips and tricks for downloading Devon Ke Dev Mahadev episodes from Google Drive
-
To make your downloading experience even better, here are some tips and tricks that you can use:
-
Tip 1: Use a VPN or proxy to bypass geo-restrictions or network issues
-
Sometimes, you may face problems while accessing or downloading files from Google Drive due to geo-restrictions or network issues. For example, some links may not work in your country or region, or your internet service provider may block or throttle your connection. To overcome these problems, you can use a VPN or proxy service that can change your IP address and location, and encrypt your traffic. This way, you can access any Google Drive link without any restriction or interference. Some examples of VPN or proxy services are NordVPN, ExpressVPN, ProtonVPN, Tor Browser, etc.
-
Tip 2: Use a download manager or accelerator to speed up the download process
-
Sometimes, you may want to speed up the download process and save time and effort. For example, you may want to download multiple episodes at once, resume broken downloads, schedule downloads, or split large files into smaller parts. To do this, you can use a download manager or accelerator that can enhance your downloading capabilities and performance. Some examples of download managers or accelerators are Internet Download Manager (IDM), Free Download Manager (FDM), EagleGet, etc.
Tip 3: Check the file size and format before downloading to avoid corrupted or incompatible files
-
Sometimes, you may encounter corrupted or incompatible files while downloading from Google Drive. For example, some files may be incomplete, damaged, or infected with malware or viruses. Some files may not play properly or at all on your device or media player. To avoid these issues, you should check the file size and format before downloading. You can do this by:
-
-
Hovering over the file name and looking at the file size and format that appears below it. You can also click on the file name and look at the file details that appear on the right side of the page.
-
Comparing the file size and format with the original source or website that provides the link. You can also use online tools or websites that can verify the file size and format, such as FileFormat.com, FileInfo.com, etc.
-
Choosing a file format that is compatible with your device and media player. Some common video file formats are MP4, MKV, AVI, WMV, etc. You can also use online tools or websites that can convert video files to different formats, such as Online-Convert.com, CloudConvert.com, etc.
-
-
By checking the file size and format before downloading, you can ensure that you get high-quality and playable files.
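-
If you want to double-check a file after it has finished downloading, a couple of lines of Python will report its size and its likely format. This is only a sketch; the file name below is a placeholder for your own download.

```python
import os
import mimetypes

path = "Devon_Ke_Dev_Mahadev_E001.mp4"  # placeholder name for a downloaded episode
size_mb = os.path.getsize(path) / (1024 * 1024)
mime_type, _ = mimetypes.guess_type(path)
print(f"{path}: {size_mb:.1f} MB, detected type: {mime_type}")
```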
-
Conclusion
-
Summary of the main points
-
In this article, we have shown you how to download all the episodes of Devon Ke Dev Mahadev from Google Drive. We have explained the steps involved in finding, selecting, downloading, and watching the episodes. We have also shared some tips and tricks to make your downloading experience easier and faster. We hope that you have found this article helpful and informative.
-
Call to action and recommendation
-
If you are a fan of Devon Ke Dev Mahadev or Indian mythology in general, we highly recommend that you download and watch this amazing show. You will not regret it. You will be mesmerized by the story of Shiva, his love for Parvati, and his battles with evil forces. You will also learn a lot about Hindu culture, history, and philosophy. You will be inspired by the messages of devotion, courage, and wisdom that the show conveys.
-
To download all the episodes of Devon Ke Dev Mahadev from Google Drive, follow the steps and tips that we have provided in this article. You can also share this article with your friends and family who may be interested in watching this show. You can also leave your feedback or questions in the comments section below. We would love to hear from you.
-
Thank you for reading this article. We hope you enjoy watching Devon Ke Dev Mahadev as much as we do. Happy downloading!
-
Frequently Asked Questions
-
Q: Is Devon Ke Dev Mahadev available on Netflix or Amazon Prime?
-
A: No, Devon Ke Dev Mahadev is not available on Netflix or Amazon Prime as of now. However, you can watch some episodes of the show on Hotstar, a streaming platform owned by Disney.
-
Q: Who are the actors who play Shiva and Parvati in Devon Ke Dev Mahadev?
-
A: The actors who play Shiva and Parvati in Devon Ke Dev Mahadev are Mohit Raina and Sonarika Bhadoria respectively. They are both popular Indian television actors who have won many awards and accolades for their roles in the show.
-
Q: How many seasons and episodes are there in Devon Ke Dev Mahadev?
-
A: There are 36 seasons and 820 episodes in Devon Ke Dev Mahadev. The show ran from 18 December 2011 to 14 December 2014 on Life OK.
-
Q: What is the theme song of Devon Ke Dev Mahadev?
-
A: The theme song of Devon Ke Dev Mahadev is called "Shiv Shiv Shiv". It is composed by Sandeep Mukherjee and sung by various singers such as Shankar Mahadevan, Sonu Nigam, Rituraj Mohanty, etc. The song is based on the mantra "Om Namah Shivaya", which means "I bow to Shiva".
-
Q: Where can I find more information about Devon Ke Dev Mahadev?
-
A: You can find more information about Devon Ke Dev Mahadev on its official website, its IMDb page, its Wikipedia page, or its social media pages. You can also watch interviews, behind-the-scenes, trailers, and clips of the show on YouTube or other video platforms.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Call of Duty Mobile x86 APK How to Unlock All the Weapons Skins and Perks in COD for Android.md b/spaces/1phancelerku/anime-remove-background/Call of Duty Mobile x86 APK How to Unlock All the Weapons Skins and Perks in COD for Android.md
deleted file mode 100644
index 6f031ce0459f7087f87fa7566b8aaae10ad6bfa6..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Call of Duty Mobile x86 APK How to Unlock All the Weapons Skins and Perks in COD for Android.md
+++ /dev/null
@@ -1,113 +0,0 @@
-
-
Call of Duty Mobile x86 APK: What You Need to Know
-
If you are a fan of first-person shooter games, you have probably heard of Call of Duty Mobile, the mobile version of the popular franchise that lets you enjoy thrilling multiplayer battles on iconic maps from the series. But did you know that you can also play Call of Duty Mobile on your PC using an x86 APK? In this article, we will explain what an x86 APK is, how to download and install it on your PC, and what are the pros and cons of playing Call of Duty Mobile on your PC. We will also share some tips and tricks to help you get the most out of your gaming experience.
-
What is an x86 APK and why do you need it?
-
An APK is a file format that contains all the code, resources, and metadata of an Android app. It is similar to an EXE file for Windows or a DMG file for Mac. However, not all APKs are compatible with all devices, because different devices use different processor architectures, which determine how a processor handles instructions and data. The two architectures that matter here are ARM and x86.
ARM is the architecture used by almost all smartphones and tablets; it is more power-efficient and better optimized for mobile devices than x86. x86 is the architecture used by most PCs and laptops, and it is also supported by a small number of Android devices, such as Chromebooks or tablets with Intel processors.
-
The problem is that most Android apps are designed for ARM devices, not x86 devices. This means that if you try to run an ARM app on an x86 device, it will not work properly or at all. This is where an x86 APK comes in handy. An x86 APK is a modified version of an app that has been recompiled or translated to run on x86 devices. By using an x86 APK, you can run apps that are normally not compatible with your device.
-
One example of such an app is Call of Duty Mobile. Since it is a high-end game that requires a lot of resources, it is only available for ARM devices. However, if you want to play it on your PC, which has an x86 processor, you will need an x86 APK.
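-
If you are unsure which architectures a given APK actually supports, you can check it yourself: an APK is just a ZIP archive, and its bundled native libraries live in lib/ subfolders named after each ABI. The Python sketch below lists those folders; the file name is a placeholder for whatever APK you downloaded.

```python
import zipfile

# List the CPU architectures (ABIs) bundled inside an APK.
# "callofduty.apk" is a placeholder path - point it at your downloaded file.
with zipfile.ZipFile("callofduty.apk") as apk:
    abis = {
        name.split("/")[1]
        for name in apk.namelist()
        if name.startswith("lib/") and name.count("/") >= 2
    }

# Typical values: 'arm64-v8a', 'armeabi-v7a', 'x86', 'x86_64'
print(sorted(abis))
```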
-
How to download and install Call of Duty Mobile x86 APK on your PC
-
There are two main ways to download and install Call of Duty Mobile x86 APK on your PC: using an Android emulator or using an Android-x86 OS.
-
Using an Android emulator
-
An Android emulator is software that simulates the Android operating system on your PC. It allows you to run Android apps and games on your PC as if they were running on a real device. There are many Android emulators available for Windows, such as BlueStacks, NoxPlayer, LDPlayer, etc.
-
To use an Android emulator to play Call of Duty Mobile on your PC, you need to follow these steps:
-
-
Download and install an Android emulator of your choice from its official website.
-
Launch the emulator and sign in with your Google account.
-
Download the Call of Duty Mobile x86 APK from a reliable source, such as the official Call of Duty Mobile homepage or the COD Mobile PTB 2022 Global version APK download page.
-
Locate the downloaded APK file on your PC and drag and drop it onto the emulator window. Alternatively, you can use the emulator's file manager to browse and install the APK file, or sideload it over adb (see the sketch after these steps).
-
Wait for the installation to complete and launch Call of Duty Mobile from the emulator's app drawer.
-
-
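If drag-and-drop does not work, most popular emulators (BlueStacks, NoxPlayer, LDPlayer) expose an adb endpoint, so you can sideload the APK from a script instead. This is a minimal sketch that assumes adb is installed and on your PATH; the APK file name is a placeholder.

```python
import subprocess

# Sideload the APK into a running emulator over adb.
# The -r flag reinstalls/updates the app if it is already present.
subprocess.run(["adb", "install", "-r", "callofduty_x86.apk"], check=True)
```
-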
Congratulations, you can now play Call of Duty Mobile on your PC using an Android emulator!
-
call of duty mobile x86 apk download
-call of duty mobile x86 apk uptodown
-call of duty mobile x86 apk free
-call of duty mobile x86 apk latest version
-call of duty mobile x86 apk mod
-call of duty mobile x86 apk offline
-call of duty mobile x86 apk obb
-call of duty mobile x86 apk 2023
-call of duty mobile x86 apk for pc
-call of duty mobile x86 apk android
-call of duty mobile legends of war x86 apk
-call of duty mobile zombies mode x86 apk
-call of duty mobile warzone x86 apk
-call of duty mobile vanguard x86 apk
-call of duty mobile modern warfare 2 x86 apk
-call of duty mobile black ops 4 x86 apk
-call of duty mobile season 7 x86 apk
-call of duty mobile season 8 x86 apk
-call of duty mobile season 9 x86 apk
-call of duty mobile season 10 x86 apk
-how to install call of duty mobile x86 apk
-how to play call of duty mobile x86 apk
-how to update call of duty mobile x86 apk
-how to fix call of duty mobile x86 apk error
-how to hack call of duty mobile x86 apk
-best settings for call of duty mobile x86 apk
-best guns in call of duty mobile x86 apk
-best maps in call of duty mobile x86 apk
-best tips and tricks for call of duty mobile x86 apk
-best cheats and codes for call of duty mobile x86 apk
-is call of duty mobile x86 apk safe
-is call of duty mobile x86 apk compatible with my device
-is call of duty mobile x86 apk legal
-is call of duty mobile x86 apk online or offline
-is call of duty mobile x86 apk free or paid
-what is new in call of duty mobile x86 apk
-what is the size of call of duty mobile x86 apk
-what is the rating of call of duty mobile x86 apk
-what is the genre of call of duty mobile x86 apk
-what is the developer of call of duty mobile x86 apk
-
Using an Android-x86 OS
-
An Android-x86 OS is a modified version of Android that can run natively on x86 devices, such as PCs and laptops. It is similar to installing a Linux or Windows OS on your PC. There are several Android-x86 OS projects available, such as Bliss OS, PrimeOS, Phoenix OS, etc.
-
To use an Android-x86 OS to play Call of Duty Mobile on your PC, you need to follow these steps:
-
-
Download an Android-x86 OS image from its official website. Make sure it supports x86 APKs and Google Play Services.
-
Burn the image to a CD or USB drive using a tool like Rufus or Etcher.
-
Boot from the CD or USB drive and choose the 'Install Android-x86 to hard disk' option.
-
Select a partition where you want to install Android-x86. You can also create or modify partitions using the 'Create/Modify partitions' option. Note that installing Android-x86 will erase all data on the selected partition, so make sure you have a backup.
-
Choose a filesystem type to format the partition. You can use ext4, ntfs, or fat32.
-
Choose whether to install the GRUB boot loader and whether to make the /system directory writable.
-
Wait for the installation to finish and reboot your PC.
-
Select Android-x86 from the boot menu and sign in with your Google account.
-
Download Call of Duty Mobile from the Google Play Store or from a reliable source as an x86 APK file.
-
Install and launch Call of Duty Mobile from the app drawer.
-
-
Congratulations, you can now play Call of Duty Mobile on your PC using an Android-x86 OS!
-
Pros and cons of playing Call of Duty Mobile on your PC
-
Playing Call of Duty Mobile on your PC has its advantages and disadvantages. Here are some of them:
-
Pros
-
-
You can enjoy a larger screen and better graphics than on your mobile device.
-
You can use keyboard and mouse controls, which are more precise and responsive than touch controls.
-
You can customize your settings and preferences according to your PC's specifications and performance.
-
You can record and stream your gameplay more easily using your PC's software and hardware.
-
You can avoid battery drain and overheating issues that may affect your mobile device.
-
-
Cons
-
-
You may encounter compatibility issues or bugs when running Call of Duty Mobile on your PC, especially if you use an x86 APK or an emulator.
-
You may face unfair competition or accusations of cheating from other players who play on their mobile devices.
-
You may risk getting banned by Activision if you violate their terms of service or use unauthorized software or modifications.
-
You may lose some features or functionality that are exclusive to the mobile version, such as gyroscope aiming or voice chat.
-
You may miss out on the convenience and portability of playing Call of Duty Mobile on your mobile device anytime and anywhere.
-
-
Tips and tricks for playing Call of Duty Mobile on your PC
-
If you decide to play Call of Duty Mobile on your PC, here are some tips and tricks to help you improve your gaming experience:
-
- Adjust your graphics settings according to your PC's capabilities. You can choose from low, medium, high, or very high graphics quality. You can also enable or disable anti-aliasing, depth of field, bloom, real-time shadows, ragdoll physics, etc.
- Configure your keyboard and mouse controls according to your preferences. You can customize the key mapping, sensitivity, acceleration, inversion, etc. You can also use preset layouts for different modes, such as Battle Royale or Multiplayer.
- Enable FPS mode to get a more immersive view of the game. FPS mode lets you see the game from a first-person perspective instead of a third-person perspective. You can toggle FPS mode by pressing F11 on your keyboard.
- Use headphones and a microphone to communicate with your teammates and enemies. You can use the in-game voice chat or a third-party app like Discord or Skype. You can also adjust the volume and mute options in the settings menu.
- Join a clan or create your own to play with other players who share your interests and goals. You can participate in clan wars, events, and challenges to earn rewards and rank up. You can also chat with your clan members and invite them to join your matches.
- Keep an eye on the events and updates that are regularly added to the game. You can find new modes, maps, weapons, skins, characters, and more. You can also earn rewards by completing missions, achievements, and seasonal challenges.
- Have fun and enjoy the game. Remember that Call of Duty Mobile is a game that is meant to entertain you and not frustrate you. Don't take it too seriously or get angry at other players or yourself. Play fair and respect the rules and the community.
Conclusion and FAQs
-
In conclusion, Call of Duty Mobile is a great game that you can play on your PC using an x86 APK. You can enjoy a larger screen, better graphics, and keyboard and mouse controls. However, you also need to be aware of the potential issues and risks that come with playing on your PC. You need to find a reliable source for the x86 APK, use a compatible emulator or OS, and avoid getting banned by Activision. You also need to optimize your settings, use headphones and microphone, join a clan, and keep up with the events and updates.
-
If you have any questions about playing Call of Duty Mobile on your PC, here are some FAQs that might help you:
-
Q: Is Call of Duty Mobile free to play on PC?
-
A: Yes, Call of Duty Mobile is free to play on PC. You don't need to pay anything to download or install the game. However, you may need to pay for some optional in-game items or features, such as skins, weapons, characters, or battle passes.
-
Q: Is Call of Duty Mobile safe to play on PC?
-
A: Call of Duty Mobile is generally safe to play on PC as long as you download it from a trusted source and use a reputable emulator or OS. However, you should always be careful about malware, viruses, phishing, or hacking attempts that may target your PC or your account. You should also avoid using any unauthorized software or modifications that may compromise your security or violate Activision's terms of service.
-
Q: Is Call of Duty Mobile cross-platform on PC?
-
A: Call of Duty Mobile is cross-platform on PC, which means that you can play with or against other players who are using different devices, such as smartphones, tablets, or PCs. However, you may not be able to play with players who are using different versions or regions of the game.
-
Q: How can I update Call of Duty Mobile on PC?
-
A: To update Call of Duty Mobile on PC, you need to download and install the latest version of the x86 APK from a reliable source. You may also need to update your emulator or OS if they have new versions available. You should always backup your data before updating to avoid losing any progress or settings.
-
Q: How can I contact Activision for support or feedback?
-
A: To contact Activision for support or feedback regarding Call of Duty Mobile on PC, you can visit the official Call of Duty® website or their social media pages on Facebook, Twitter, Instagram, YouTube, etc. You can also use the in-game report or feedback feature to report any bugs, glitches, cheaters, or suggestions.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download jogo roblox e personalize seu avatar com itens incrveis.md b/spaces/1phancelerku/anime-remove-background/Download jogo roblox e personalize seu avatar com itens incrveis.md
deleted file mode 100644
index 18c9c505b861d31c5b12731da08249cbc1157260..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download jogo roblox e personalize seu avatar com itens incrveis.md
+++ /dev/null
@@ -1,173 +0,0 @@
-
-
Download Jogo Roblox: How to Play Roblox on Any Device
-
Roblox is a global platform that brings people together through play. It is the ultimate virtual universe that lets you create, share, and play together with millions of people across an infinite variety of immersive, user-generated 3D worlds. Whether you are in the mood for an epic adventure, a competitive challenge, or a casual hangout, you can find it on Roblox. In this article, we will show you how to download jogo roblox (download game roblox) on any device and enjoy the endless possibilities of this amazing platform.
-
A Roblox Account
-
The first thing you need is a Roblox account. You can sign up for free on the Roblox website or the Roblox app. All you need is your birthday, a username, a password, and an optional gender. By signing up, you agree to the Terms of Use and acknowledge the Privacy Policy. You also get access to millions of experiences created by the community, as well as the ability to create your own experiences using Roblox Studio.
-
A Compatible Device
-
The second thing you need is a compatible device. Roblox supports a wide range of devices, including Windows, Mac, iOS, Android, Amazon devices, Xbox One, and some VR headsets. You can check the Roblox download page for more information on the system requirements and compatibility for each device.
-
A Stable Internet Connection
-
The third thing you need is a stable internet connection. Roblox works best over Wi-Fi or a wired connection. A network connection is required to join experiences and play with other users online. You may also need to update your device's software or browser settings to ensure optimal performance.
-
How to Download Roblox on Different Devices
-
Once you have a Roblox account and a compatible device, you can download jogo roblox (download game roblox) on your device and start playing. Here are the steps for downloading Roblox on different devices:
Wait for the game to download and install on your console.
-
Launch Roblox from your games library and login with your Roblox account.
-
Start exploring experiences using your Xbox controller.
-
-
How to Download Roblox on VR Headsets
-
-
Make sure you have a compatible VR headset, such as Oculus Rift, HTC Vive, or Windows Mixed Reality.
-
Connect your VR headset to your PC and set it up according to the manufacturer's instructions.
-
Download and install the Roblox Player on your PC following the steps above.
-
Launch the Roblox Player and put on your VR headset.
-
Login with your Roblox account and start exploring experiences in VR mode.
-
-
How to Join and Create Experiences on Roblox
-
Roblox is more than just a game. It is a platform where you can join and create experiences that suit your interests, passions, and imagination. Here are some tips on how to join and create experiences on Roblox:
-
download jogo roblox para pc
-download jogo roblox gratis
-download jogo roblox online
-download jogo roblox apk
-download jogo roblox para android
-download jogo roblox para celular
-download jogo roblox studio
-download jogo roblox mod menu
-download jogo roblox no computador
-download jogo roblox hackeado
-download jogo roblox com dinheiro infinito
-download jogo roblox para windows 10
-download jogo roblox para ios
-download jogo roblox para xbox one
-download jogo roblox para ps4
-download jogo roblox em portugues
-download jogo roblox sem internet
-download jogo roblox offline
-download jogo roblox completo
-download jogo roblox atualizado
-download jogo roblox original
-download jogo roblox baixar
-download jogo roblox site oficial
-download jogo roblox pelo google play
-download jogo roblox pelo navegador
-download jogo roblox pelo mega
-download jogo roblox pelo mediafire
-download jogo roblox pelo utorrent
-download jogo roblox pelo uptodown
-download jogo roblox pelo apkpure
-download jogo roblox como instalar
-download jogo roblox como jogar
-download jogo roblox como criar conta
-download jogo roblox como fazer amigos
-download jogo roblox como ganhar robux
-download jogo roblox o que é
-download jogo roblox o que fazer
-download jogo roblox o que tem de bom
-download jogo roblox o que tem de ruim
-download jogo roblox o que precisa para jogar
-download jogo roblox quais são os melhores jogos
-download jogo roblox quais são os mais populares
-download jogo roblox quais são os mais divertidos
-download jogo roblox quais são os mais assustadores
-download jogo roblox quais são os mais educativos
-download jogo roblox qual é a classificação indicativa
-download jogo roblox qual é a idade mínima para jogar
-download jogo roblox qual é a versão mais recente
-download jogo roblox qual é a diferença entre premium e grátis
-
How to Join Experiences Created by Other Users
-
-
Browse the Roblox catalog of experiences by category, genre, popularity, or keyword.
-
Select an experience that catches your eye and click on the Play button.
-
Wait for the experience to load and join other users in the game world.
-
Follow the rules and objectives of the experience, or just have fun exploring and interacting with other users.
-
You can also join experiences that are recommended for you based on your preferences, or experiences that are created or played by your friends or favorite creators.
-
-
How to Create Your Own Experiences Using Roblox Studio
-
-
If you have a creative streak and want to make your own experiences, you can use Roblox Studio, a free and powerful tool that lets you design, build, script, and publish your own 3D worlds.
-
You can access Roblox Studio from the Roblox website or the Roblox app on PC or Mac. You will need to login with your Roblox account to use it.
-
You can start from scratch or use one of the many templates available in Roblox Studio. You can also edit existing experiences that are open for collaboration by other users.
-
You can use the tools and features in Roblox Studio to create anything you can imagine, from landscapes and buildings, to characters and vehicles, to gameplay mechanics and logic, to sounds and effects, and more.
-
You can test your experience in Roblox Studio using the Play or Test modes. You can also invite other users to test your experience with you using the Team Create feature.
-
When you are happy with your experience, you can publish it to the Roblox platform and share it with other users. You can also update and manage your experience using Roblox Studio.
-
-
How to Customize Your Avatar and Chat with Friends on Roblox
-
Roblox is not only a place to play, but also a place to socialize and express yourself. You can customize your avatar and chat with friends on Roblox using these tips:
-
How to Access and Change Your Avatar Settings
-
-
To access your avatar settings, go to the Avatar page on the Roblox website or app. You will need to login with your Roblox account to do so.
-
You can change various aspects of your avatar's appearance, such as body type, skin color, face, hair, clothing, accessories, animations, emotes, and more. You can use items that you own or items that are free in the Roblox catalog.
-
You can also create and save multiple outfits for your avatar using the Outfits page. You can switch between your outfits anytime using the Roblox website or app.
-
You can also customize your avatar's display name, which is the name that other users see when they interact with you on Roblox. You can change your display name once every seven days using the Account Settings page.
-
-
How to Use Chat Features, Private Messages, and Groups
-
-
To chat with other users on Roblox, you can use the chat bar that appears on the bottom of the screen when you join an experience. You can type your message and press enter to send it. You can also use emotes and chat commands to enhance your communication.
-
To chat with your friends on Roblox, you can use the Friends page on the Roblox website or app. You can see who is online and send them private messages. You can also join their experiences or invite them to yours.
-
To chat with other users who share your interests on Roblox, you can use the Groups page on the Roblox website or app. You can join or create groups that are based on different topics, genres, games, or activities. You can also participate in group chats, forums, and events.
-
-
Conclusion and FAQs
-
Roblox is a fun and creative platform that lets you play and create experiences with millions of people around the world. You can download jogo roblox (download game roblox) on any device and enjoy the endless possibilities of this amazing platform. You can also customize your avatar and chat with your friends and other users on Roblox. Here are some FAQs that might help you get started:
-
Q: How much does it cost to play Roblox?
-
A: Roblox is free to play, but you can also purchase Robux, the virtual currency of Roblox, to buy premium items, game passes, and subscriptions. You can also earn Robux by creating and selling your own items and experiences.
-
Q: How do I keep my account and device safe on Roblox?
-
A: To keep your account and device safe on Roblox, you should follow these tips:
-
-
Never share your username, password, or personal information with anyone.
-
Never click on suspicious links or download unknown files from other users.
-
Q: How can I have fun on Roblox?
-
A: To have fun on Roblox, you can do these things:
-
-
Explore the Roblox catalog and find experiences that match your interests, hobbies, and moods.
-
Play with your friends or make new friends from around the world on Roblox.
-
Express your creativity and personality by customizing your avatar and creating your own experiences.
-
Learn new skills and knowledge by playing educational and informative experiences on Roblox.
-
Join events and contests on Roblox and win prizes and rewards.
-
Support your favorite creators and developers by buying their items and game passes.
-
-
Roblox is a platform that offers endless fun and possibilities for everyone. Download jogo roblox (download game roblox) today and join the millions of users who are playing, creating, and learning on Roblox. You will never get bored or run out of things to do on Roblox. Have fun!
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Summertime Saga 0.30.1 with Mod Apk All Unlocked Cheat Menu and HD Graphics.md b/spaces/1phancelerku/anime-remove-background/Enjoy Summertime Saga 0.30.1 with Mod Apk All Unlocked Cheat Menu and HD Graphics.md
deleted file mode 100644
index 80b9d2bccd160b78c7cc9fe772e04e67e40b1dc1..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Enjoy Summertime Saga 0.30.1 with Mod Apk All Unlocked Cheat Menu and HD Graphics.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
Summertime Saga 30.0 APK Download: A Guide for Android Users
-
If you are looking for a fun and interactive game that combines adventure, romance, comedy, and mystery, then you should try Summertime Saga. This is a popular dating simulation game that has millions of fans around the world. In this article, we will tell you everything you need to know about Summertime Saga 30.0 APK download, the latest version of the game that is available for Android devices.
-
What is Summertime Saga?
-
A brief introduction to the game and its features
-
Summertime Saga is a game that follows the life of a young man who is trying to cope with the death of his father and the debt he left behind. He also has to deal with his school, his friends, his love interests, and his enemies. The game has a rich and immersive story that is influenced by the choices you make. You can explore different paths and outcomes depending on your actions and interactions.
The game is constantly updated by its developers, DarkCookie and his team, who add new content and features every few months. The latest version of the game is 30.0, which was released in June 2023. This version introduces a new character, Eve's sister Grace, who has her own storyline and events. It also adds new scenes, locations, items, mini-games, achievements, and bug fixes. You can check out the full changelog on the official website of the game.
-
How to download and install Summertime Saga 30.0 APK on Android?
-
The official source and the modded version
-
If you want to download and install Summertime Saga 30.0 APK on your Android device, you have two options: the official source or the modded version. The official source is the one that is provided by the developers on their website or their Patreon page. This is the safest and most reliable way to get the game, but it may require a donation or a subscription to access some features or content.
-
summertime saga 30.0 apk mod unlock all
-summertime saga 30.0 apk cheat menu
-summertime saga 30.0 apk latest version
-summertime saga 30.0 apk free download
-summertime saga 30.0 apk unlimited money
-summertime saga 30.0 apk android download
-summertime saga 30.0 apk no root
-summertime saga 30.0 apk no ads
-summertime saga 30.0 apk full game
-summertime saga 30.0 apk offline
-summertime saga 30.0 apk mega link
-summertime saga 30.0 apk update download
-summertime saga 30.0 apk new characters
-summertime saga 30.0 apk walkthrough
-summertime saga 30.0 apk save data
-summertime saga 30.0 apk highly compressed
-summertime saga 30.0 apk hack download
-summertime saga 30.0 apk premium features
-summertime saga 30.0 apk best graphics
-summertime saga 30.0 apk soulmate sound
-summertime saga 30.0 apk easy to use
-summertime saga 30.0 apk direct download
-summertime saga 30.0 apk fast download
-summertime saga 30.0 apk secure download
-summertime saga 30.0 apk virus free
-summertime saga 30.0 apk for pc
-summertime saga 30.0 apk for ios
-summertime saga 30.0 apk for windows
-summertime saga 30.0 apk for mac
-summertime saga 30.0 apk for linux
-summertime saga 30.0 apk for chromebook
-summertime saga 30.0 apk for firestick
-summertime saga 30.0 apk for smart tv
-summertime saga 30.0 apk for android tv box
-summertime saga 30.0 apk for bluestacks
-summertime saga 30.0 apk for nox player
-summertime saga 30.0 apk for ldplayer
-summertime saga 30.0 apk for memu play
-summertime saga 30.0 apk for gameloop
-summertime saga 30.0 apk for genymotion
-
The modded version is the one that is modified by third-party developers or users who add extra features or content to the game, such as unlimited money, unlock all characters and locations, enable cheats, no bans, no ads, no root, soulmate sound, easy to use and amazing HD graphics. This may sound tempting, but it also comes with some risks, such as malware, viruses, compatibility issues, or legal problems.
-
The steps to follow and the requirements to meet
-
Regardless of which option you choose, you need to follow some steps and meet some requirements to download and install Summertime Saga 30.0 APK on your Android device. Here are the steps:
-
-
Go to the source of your choice and download the Summertime Saga 30.0 APK file. The file size is about 1 GB, so make sure you have enough storage space and a stable internet connection.
-
Before you install the APK file, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Locate the downloaded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete.
-
Once the installation is done, you can launch the game from your app drawer or home screen and enjoy Summertime Saga 30.0 on your Android device.
-
-
Here are the requirements you need to meet:
-
-
Your Android device should have at least Android 5.0 or higher as the operating system.
-
Your Android device should have at least 2 GB of RAM and 4 GB of free storage space.
-
Your Android device should have a good processor and graphics card to run the game smoothly and without lag.
-
-
The benefits and risks of using an APK file
-
Using an APK file to download and install Summertime Saga 30.0 on your Android device has some benefits and risks that you should be aware of. Here are some of them:
-
-
Benefits
Risks
-
You can access the latest version of the game before it is available on other platforms or stores.
You may download a fake or corrupted file that can harm your device or steal your data.
-
You can enjoy some extra features or content that are not available on the official version of the game.
You may violate the terms and conditions of the game or the law and face legal consequences or bans.
-
You can save some money by not paying for a subscription or a donation to access some features or content.
You may miss out on some updates or support from the developers or the community.
-
-
How to play Summertime Saga 30.0 on Android?
-
The main storyline and the side quests
-
Summertime Saga 30.0 is a game that has a lot of content and gameplay options for you to explore and enjoy. The main storyline of the game is divided into three parts: Part 1, Part 2, and Part 3. Each part has its own chapters, events, and endings that depend on your choices and actions. You can also unlock different side quests that involve various characters, locations, and activities. Some side quests are mandatory, while others are optional. You can complete them in any order you want, but some of them may require certain conditions or prerequisites to be met.
-
The characters and the locations
-
The game has a large and diverse cast of characters that you can interact with, befriend, romance, or antagonize. Each character has their own personality, background, appearance, voice, and story arc. You can learn more about them by talking to them, giving them gifts, doing favors for them, or dating them. Some characters are more important than others, and some may have hidden secrets or surprises for you. You can also visit different locations in the game world, such as your home, your school, your workplace, shops, parks, beaches, clubs, etc. Each location has its own atmosphere, music, graphics, and events that you can discover and experience.
-
The tips and tricks to enjoy the game
-
If you want to have a fun and satisfying time playing Summertime Saga 30.0 on your Android device, here are some tips and tricks that you should keep in mind:
-
-
Save your progress frequently and use multiple slots to avoid losing your data or making irreversible mistakes.
-
Check your phone regularly for messages, calls, emails, or notifications from other characters or apps.
-
Use the map to navigate around the game world and see where you can go and what you can do at any given time.
-
Use the stats screen to see your attributes, inventory, money, achievements, relationships, quests, etc.
-
Use the hints system to get some clues or guidance on what to do next or how to complete a quest.
-
Use the skip button to fast-forward through dialogues or scenes that you have already seen or heard before.
-
Use the settings menu to adjust the volume, language, graphics quality, text speed, etc. according to your preferences.
-
Use the help menu to access the FAQ section, the bug report, the contact information, etc.
-
Use the gallery to view the images and videos that you have unlocked or collected throughout the game.
-
Use the cheats menu to access some shortcuts or bonuses that can make the game easier or more fun. However, be careful, as some cheats may have negative consequences or disable some features or achievements.
-
-
Conclusion
-
A summary of the main points and a call to action
-
Summertime Saga 30.0 is a game that offers you a lot of entertainment, excitement, and adventure. You can download and install it on your Android device using an APK file, either from the official source or the modded version. However, you should be aware of the benefits and risks of using an APK file and follow the steps and requirements to do it safely and correctly. You can also enjoy the game by following the main storyline and the side quests, interacting with the characters and the locations, and using the tips and tricks that we have shared with you. If you are ready to embark on this summertime saga, then go ahead and download the game now!
-
FAQs
-
Q1: Is Summertime Saga 30.0 free to play?
-
A1: Yes, Summertime Saga 30.0 is free to play, but you may need to make a donation or a subscription to access some features or content on the official version of the game. The modded version of the game may offer you everything for free, but it may also come with some risks or drawbacks.
-
Q2: Is Summertime Saga 30.0 safe to download?
-
A2: Summertime Saga 30.0 is safe to download if you use the official source or a trusted modded version. However, you should always scan the APK file for viruses or malware before installing it on your device. You should also enable the installation of apps from unknown sources only when you need to install the APK file and disable it afterward.
-
Q3: How often is Summertime Saga updated?
-
A3: Summertime Saga is updated every few months by its developers, who add new content and features to the game. The latest version of the game is 30.0, which was released in June 2023. You can check out the official website or the Patreon page of the game for more information on the updates and the release dates.
-
Q4: Can I play Summertime Saga 30.0 offline?
-
A4: Yes, you can play Summertime Saga 30.0 offline once you have downloaded and installed it on your device. However, you may need an internet connection to download updates or access some online features or content.
-
Q5: Can I transfer my progress from an older version to Summertime Saga 30.0?
-
A5: Yes, you can transfer your progress from an older version to Summertime Saga 30.0 by using the save files that are stored on your device. However, you should make a backup of your save files before installing the new version and make sure that they are compatible with it. You should also avoid using cheats or mods that may corrupt your save files or cause errors in the game.
-
-
\ No newline at end of file
diff --git a/spaces/A00001/bingothoo/src/components/ui/input.tsx b/spaces/A00001/bingothoo/src/components/ui/input.tsx
deleted file mode 100644
index 684a857f3d769b78818fb13de1abaebfb09ca79c..0000000000000000000000000000000000000000
--- a/spaces/A00001/bingothoo/src/components/ui/input.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-export interface InputProps
-  extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
-  ({ className, type, ...props }, ref) => {
-    // The original utility-class string was stripped from this diff; the classes
-    // below are a generic placeholder, not the component's original styling.
-    return (
-      <input
-        type={type}
-        className={cn('flex h-10 w-full rounded-md border px-3 py-2 text-sm', className)}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Input.displayName = 'Input'
-
-export { Input }
diff --git a/spaces/A00001/bingothoo/src/lib/hooks/use-enter-submit.tsx b/spaces/A00001/bingothoo/src/lib/hooks/use-enter-submit.tsx
deleted file mode 100644
index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000
--- a/spaces/A00001/bingothoo/src/lib/hooks/use-enter-submit.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import { useRef, type RefObject } from 'react'
-
-export function useEnterSubmit(): {
- formRef: RefObject<HTMLFormElement>
- onKeyDown: (event: React.KeyboardEvent<HTMLFormElement>) => void
-} {
- const formRef = useRef<HTMLFormElement>(null)
-
- const handleKeyDown = (
- event: React.KeyboardEvent<HTMLFormElement>
- ): void => {
- if (
- event.key === 'Enter' &&
- !event.shiftKey &&
- !event.nativeEvent.isComposing
- ) {
- formRef.current?.requestSubmit()
- event.preventDefault()
- }
- }
-
- return { formRef, onKeyDown: handleKeyDown }
-}
diff --git a/spaces/A00001/bingothoo/src/lib/storage.ts b/spaces/A00001/bingothoo/src/lib/storage.ts
deleted file mode 100644
index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000
--- a/spaces/A00001/bingothoo/src/lib/storage.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { getMany, set, del, clear } from 'idb-keyval';
-
-export const Storage = {
- async get(key: string | string[] | null): Promise<Record<string, any> | null> {
- if (key === null) return null;
- if (typeof key === 'string') {
- key = [key]
- }
- const returnData: Record<string, any> = {}
- const values = await getMany(key)
- key.forEach((k, idx)=> {
- returnData[k] = values[idx]
- })
- return returnData;
- },
- async set(object: any) {
- for (let key of Object.keys(object)) {
- await set(key, object[key])
- }
- },
- async remove(key: string) {
- return del(key);
- },
- async clear() {
- return clear();
- }
-}
diff --git a/spaces/AI-Dashboards/Streamlit-Markdown-ChatGPT-CCD/app.py b/spaces/AI-Dashboards/Streamlit-Markdown-ChatGPT-CCD/app.py
deleted file mode 100644
index 83a042e7c9ac7f64d53682c3f83406b30e7d6752..0000000000000000000000000000000000000000
--- a/spaces/AI-Dashboards/Streamlit-Markdown-ChatGPT-CCD/app.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import streamlit as st
-st.markdown("""
-
-# 🏥 CCDA (Consolidated Clinical Document Architecture) 📄
-The CCD is a document standard developed by Health Level Seven International (HL7) for the exchange of clinical information. 🔄 It is a universally accepted format for sharing clinical data across different electronic health record (EHR) systems. 🌐
-
-## 📚 CCD Standard Templates 📋
-The CCD standard includes templates for different types of clinical documents, like:
-- 📤 Discharge summaries
-- 🗒️ Progress notes
-- 📄 Clinical summaries
-These templates are based on existing document standards like the Continuity of Care Record (CCR). ✅
-
-# FHIR Map to CCD
-| CCD Templates | Emoji | FHIR Resources |
-|---------------|-------|----------------|
-| Patient Demographics | 🏥 | [Patient](https://www.hl7.org/fhir/patient.html) |
-| Encounters | 👩⚕️ | [Encounter](https://www.hl7.org/fhir/encounter.html) |
-| Procedures | 🚑 | [Procedure](https://www.hl7.org/fhir/procedure.html) |
-| Laboratory Results | 🔬 | [Observation](https://www.hl7.org/fhir/observation.html) |
-| Vital Signs | 📈 | [Observation](https://www.hl7.org/fhir/observation.html) |
-| Clinical Notes | 📝 | [ClinicalImpression](https://www.hl7.org/fhir/clinicalimpression.html), [Composition](https://www.hl7.org/fhir/composition.html) |
-| Medications | 💊 | [MedicationStatement](https://www.hl7.org/fhir/medicationstatement.html), [MedicationRequest](https://www.hl7.org/fhir/medicationrequest.html) |
-| Immunizations | 📅 | [Immunization](https://www.hl7.org/fhir/immunization.html) |
-
-
-# CCD Documents - Standard Templates
-
-## 1. 🏥 Patient Demographics
-| Attribute | Description |
-|-----------|-------------|
-| Patient ID | Unique identifier for the patient |
-| Name | Full name of the patient |
-| Date of Birth | Birth date of the patient |
-| Sex | Gender of the patient |
-| Address | Residential address of the patient |
-
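-As an illustration, the demographic attributes above map almost one-to-one onto a FHIR `Patient` resource. The snippet below is a minimal sketch in Python; every value is a made-up placeholder, not real patient data:
-
-```python
-# Minimal sketch: assemble a FHIR Patient resource from CCD demographic fields.
-# All identifiers and values below are illustrative placeholders.
-patient = {
-    "resourceType": "Patient",
-    "identifier": [{"system": "urn:example:ccd-patient-id", "value": "001"}],
-    "name": [{"family": "Doe", "given": ["John"]}],
-    "gender": "male",
-    "birthDate": "1968-04-12",
-    "address": [{"line": ["123 Main St"], "city": "Springfield", "postalCode": "12345"}],
-}
-```
-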
-## 2. 💊 Medications
-| Attribute | Description |
-|-----------|-------------|
-| Medication Name | Name of the medication |
-| Dosage | Dosage of the medication |
-| Frequency | How often the medication is taken |
-| Start Date | When the medication was started |
-| End Date | When the medication was stopped |
-
-## 3. 👩⚕️ Encounters
-| Attribute | Description |
-|-----------|-------------|
-| Encounter ID | Unique identifier for the encounter |
-| Encounter Type | Type of encounter (e.g., office visit, hospitalization) |
-| Start Date/Time | When the encounter began |
-| End Date/Time | When the encounter ended |
-| Encounter Provider | Healthcare provider during the encounter |
-
-## 4. 🔬 Laboratory Results
-| Attribute | Description |
-|-----------|-------------|
-| Test Name | Name of the lab test |
-| Date/Time | When the lab test was performed |
-| Result | Result of the lab test |
-| Normal Range | Normal range for the lab test result |
-
-## 5. 🚑 Procedures
-| Attribute | Description |
-|-----------|-------------|
-| Procedure Name | Name of the procedure |
-| Date/Time | When the procedure was performed |
-| Performing Provider | Healthcare provider who performed the procedure |
-
-## 6. 📅 Immunizations
-| Attribute | Description |
-|-----------|-------------|
-| Vaccine Name | Name of the vaccine |
-| Administration Date | When the vaccine was administered |
-| Administering Provider | Healthcare provider who administered the vaccine |
-
-## 7. 📈 Vital Signs
-| Attribute | Description |
-|-----------|-------------|
-| Vital Sign Type | Type of vital sign (e.g., blood pressure, temperature) |
-| Date/Time | When the vital sign was measured |
-| Value | Value of the vital sign |
-| Unit | Unit of the vital sign value |
-
-
-## 8. 📝 Clinical Notes
-| Attribute | Description |
-|-----------|-------------|
-| Note Type | Type of clinical note (e.g., progress note, discharge summary) |
-| Note Date | When the note was written |
-| Note Author | Healthcare provider who wrote the note |
-| Note Content | Content of the note |
-
-# Messages for ADT, ORM, SIU, EDI, Procedures, Observations
-
-## ADT (Admit/Discharge/Transfer) messages
-| Patient ID | Name | Admission Date/Time | Discharge Date/Time | Clinical Encounter |
-|------------|------|---------------------|---------------------|--------------------|
-| 001 | John Doe | 2023-05-01 10:00 | 2023-05-10 10:00 | Heart Surgery |
-
-
-## ORM (Order Entry) messages
-| Order ID | Order Date/Time | Order Status | Relevant Clinical Data |
-|----------|-----------------|--------------|------------------------|
-| 1001 | 2023-05-01 11:00 | Completed | Lab Test: Blood Sugar Level |
-
-## SIU (Scheduling Information Update) messages
-| Patient Name | Appointment Date/Time | Provider Name | Relevant Clinical Information |
-|--------------|-----------------------|---------------|-------------------------------|
-| John Doe | 2023-05-15 10:00 | Dr. Smith | Follow-up: Heart Surgery |
-
-## EDI (Electronic Data Interchange)
-| Patient Information | Clinical Data | Billing Information |
-|---------------------|---------------|---------------------|
-| John Doe, Male, 55 | Heart Surgery | $5000 |
-
-## Procedures
-| Procedure Type | Date/Time of Procedure | Relevant Clinical Data or Reports |
-|----------------|------------------------|-----------------------------------|
-| Heart Surgery | 2023-05-01 12:00 | Surgery Successful |
-
-## Observations
-| Observation Type | Date/Time of Observation | Relevant Clinical Data or Reports |
-|------------------|--------------------------|-----------------------------------|
-| Blood Pressure | 2023-05-10 09:00 | 120/80 mmHg |
-
-## 🔄 Translation to CCD Format 🗂️
-To translate different healthcare documents to the CCD format, follow these guidelines:
-1. **ADT (Admit/Discharge/Transfer) messages** 🏥
- - Patient registration, admission, transfer, and discharge
- - Include patient demographic information, admission and discharge date/time, and clinical encounter information
-2. **ORM (Order Entry) messages** 💊
- - Contains requests for labs, procedures, or medication
- - Include the order request, order date/time, order status, and any relevant clinical data
-3. **SIU (Scheduling Information Update) messages** 📆
- - Used for scheduling appointments and updating appointment status
- - Include the patient name, appointment date/time, provider name, and any relevant clinical information
-4. **EDI (Electronic Data Interchange)** 💻
- - A standardized format for transmitting healthcare data
- - Include patient information, clinical data, and billing information
-5. **Procedures** 🚑
- - Any procedures or surgeries performed on a patient
- - Include the procedure type, date/time of the procedure, and any relevant clinical data or reports
-6. **Observations** 🔬
- - Any relevant clinical observations or measurements
- - Include the observation type, date/time of the observation, and any relevant clinical data or reports
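-
-To make the mapping concrete, here is a minimal sketch of a single vital-sign reading expressed as a FHIR `Observation` resource. The LOINC code, patient reference, and values are illustrative placeholders only:
-
-```python
-# Minimal sketch: a heart-rate vital sign as a FHIR Observation resource.
-# All identifiers and values below are illustrative placeholders.
-observation = {
-    "resourceType": "Observation",
-    "status": "final",
-    "code": {"coding": [{"system": "http://loinc.org", "code": "8867-4", "display": "Heart rate"}]},
-    "subject": {"reference": "Patient/001"},
-    "effectiveDateTime": "2023-05-10T09:00:00Z",
-    "valueQuantity": {"value": 72, "unit": "beats/minute"},
-}
-```
-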
-In summary, the CCD is a standardized format for exchanging clinical information. To translate different healthcare documents to the CCD format, follow the guidelines above. ✨
-
-""")
\ No newline at end of file
diff --git a/spaces/AIZ2H/03-Streamlit-Video-ASR-NLP/streaming.py b/spaces/AIZ2H/03-Streamlit-Video-ASR-NLP/streaming.py
deleted file mode 100644
index cc2048269b3e9ac09886471ef9b6dc681db09f25..0000000000000000000000000000000000000000
--- a/spaces/AIZ2H/03-Streamlit-Video-ASR-NLP/streaming.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import subprocess
-
-import numpy as np
-
-
-def ffmpeg_stream(youtube_url, sampling_rate=16_000, chunk_duration_ms=5000, pad_duration_ms=200):
- """
- Helper function to read an audio file through ffmpeg.
- """
- chunk_len = int(sampling_rate * chunk_duration_ms / 1000)
- pad_len = int(sampling_rate * pad_duration_ms / 1000)
- read_chunk_len = chunk_len + pad_len * 2
-
- ar = f"{sampling_rate}"
- ac = "1"
- format_for_conversion = "f32le"
- dtype = np.float32
- size_of_sample = 4
-
- ffmpeg_command = [
- "ffmpeg",
- "-i",
- "pipe:",
- "-ac",
- ac,
- "-ar",
- ar,
- "-f",
- format_for_conversion,
- "-hide_banner",
- "-loglevel",
- "quiet",
- "pipe:1",
- ]
-
- ytdl_command = ["yt-dlp", "-f", "bestaudio", youtube_url, "--quiet", "-o", "-"]
-
- try:
- ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
- ytdl_process = subprocess.Popen(ytdl_command, stdout=ffmpeg_process.stdin)
- except FileNotFoundError:
- raise ValueError("ffmpeg was not found but is required to stream audio files from filename")
-
- acc = b""
- leftover = np.zeros((0,), dtype=np.float32)
- while ytdl_process.poll() is None:
- buflen = read_chunk_len * size_of_sample
-
- raw = ffmpeg_process.stdout.read(buflen)
- if raw == b"":
- break
-
- if len(acc) + len(raw) > buflen:
- acc = raw
- else:
- acc += raw
-
- audio = np.frombuffer(acc, dtype=dtype)
- audio = np.concatenate([leftover, audio])
- if len(audio) < pad_len * 2:
- # TODO: handle end of stream better than this
- break
- yield audio
-
- leftover = audio[-pad_len * 2 :]
- read_chunk_len = chunk_len
\ No newline at end of file
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py
deleted file mode 100644
index f593e378a9fbbf1381e48a186a645a559b1f129a..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py
+++ /dev/null
@@ -1,79 +0,0 @@
-_base_ = './yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py'
-
-# ========================modified parameters======================
-deepen_factor = 0.67
-widen_factor = 0.75
-lr_factor = 0.1
-affine_scale = 0.9
-loss_cls_weight = 0.3
-loss_obj_weight = 0.7
-mixup_prob = 0.1
-
-# =======================Unmodified in most cases==================
-num_classes = _base_.num_classes
-num_det_layers = _base_.num_det_layers
-img_scale = _base_.img_scale
-
-model = dict(
- backbone=dict(
- deepen_factor=deepen_factor,
- widen_factor=widen_factor,
- ),
- neck=dict(
- deepen_factor=deepen_factor,
- widen_factor=widen_factor,
- ),
- bbox_head=dict(
- head_module=dict(widen_factor=widen_factor),
- loss_cls=dict(loss_weight=loss_cls_weight *
- (num_classes / 80 * 3 / num_det_layers)),
- loss_obj=dict(loss_weight=loss_obj_weight *
- ((img_scale[0] / 640)**2 * 3 / num_det_layers))))
-
-pre_transform = _base_.pre_transform
-albu_train_transforms = _base_.albu_train_transforms
-
-mosaic_affine_pipeline = [
- dict(
- type='Mosaic',
- img_scale=img_scale,
- pad_val=114.0,
- pre_transform=pre_transform),
- dict(
- type='YOLOv5RandomAffine',
- max_rotate_degree=0.0,
- max_shear_degree=0.0,
- scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
- # img_scale is (width, height)
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
- border_val=(114, 114, 114))
-]
-
-# enable mixup
-train_pipeline = [
- *pre_transform, *mosaic_affine_pipeline,
- dict(
- type='YOLOv5MixUp',
- prob=mixup_prob,
- pre_transform=[*pre_transform, *mosaic_affine_pipeline]),
- dict(
- type='mmdet.Albu',
- transforms=albu_train_transforms,
- bbox_params=dict(
- type='BboxParams',
- format='pascal_voc',
- label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
- keymap={
- 'img': 'image',
- 'gt_bboxes': 'bboxes'
- }),
- dict(type='YOLOv5HSVRandomAug'),
- dict(type='mmdet.RandomFlip', prob=0.5),
- dict(
- type='mmdet.PackDetInputs',
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
- 'flip_direction'))
-]
-
-train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
-default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/README.md b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/README.md
deleted file mode 100644
index 8edd149efc51fa0f54a87ac7894a7f5fd3aad8f1..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# YOLOv6
-
-> [YOLOv6: A Single-Stage Object Detection Framework for Industrial Applications](https://arxiv.org/abs/2209.02976)
-
-
-
-## Abstract
-
-For years, YOLO series have been de facto industry-level standard for efficient object detection. The YOLO community has prospered overwhelmingly to enrich its use in a multitude of hardware platforms and abundant scenarios. In this technical report, we strive to push its limits to the next level, stepping forward with an unwavering mindset for industry application. Considering the diverse requirements for speed and accuracy in the real environment, we extensively examine the up-to-date object detection advancements either from industry or academy. Specifically, we heavily assimilate ideas from recent network design, training strategies, testing techniques, quantization and optimization methods. On top of this, we integrate our thoughts and practice to build a suite of deployment-ready networks at various scales to accommodate diversified use cases. With the generous permission of YOLO authors, we name it YOLOv6. We also express our warm welcome to users and contributors for further enhancement. For a glimpse of performance, our YOLOv6-N hits 35.9% AP on COCO dataset at a throughput of 1234 FPS on an NVIDIA Tesla T4 GPU. YOLOv6-S strikes 43.5% AP at 495 FPS, outperforming other mainstream detectors at the same scale (YOLOv5-S, YOLOX-S and PPYOLOE-S). Our quantized version of YOLOv6-S even brings a new state-of-the-art 43.3% AP at 869 FPS. Furthermore, YOLOv6-M/L also achieves better accuracy performance (i.e., 49.5%/52.3%) than other detectors with the similar inference speed. We carefully conducted experiments to validate the effectiveness of each component.
-
-
-*Figure: YOLOv6-s model structure*
-
-*Figure: YOLOv6-l model structure*
-
-## Results and models
-
-### COCO
-
-| Backbone | Arch | Size | Epoch | SyncBN | AMP | Mem (GB) | Box AP | Config | Download |
-| :------: | :--: | :--: | :---: | :----: | :-: | :------: | :----: | :---------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| YOLOv6-n | P5 | 640 | 400 | Yes | Yes | 6.04 | 36.2 | [config](yolov6_n_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco/yolov6_n_syncbn_fast_8xb32-400e_coco_20221030_202726-d99b2e82.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco/yolov6_n_syncbn_fast_8xb32-400e_coco_20221030_202726.log.json) |
-| YOLOv6-t | P5 | 640 | 400 | Yes | Yes | 8.13 | 41.0 | [config](yolov6_t_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco/yolov6_t_syncbn_fast_8xb32-400e_coco_20221030_143755-cf0d278f.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco/yolov6_t_syncbn_fast_8xb32-400e_coco_20221030_143755.log.json) |
-| YOLOv6-s | P5 | 640 | 400 | Yes | Yes | 8.88 | 44.0 | [config](yolov6_s_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035.log.json) |
-| YOLOv6-m | P5 | 640 | 300 | Yes | Yes | 16.69 | 48.4 | [config](yolov6_m_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco/yolov6_m_syncbn_fast_8xb32-300e_coco_20221109_182658-85bda3f4.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco/yolov6_m_syncbn_fast_8xb32-300e_coco_20221109_182658.log.json) |
-| YOLOv6-l | P5 | 640 | 300 | Yes | Yes | 20.86 | 51.0 | [config](yolov6_l_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco/yolov6_l_syncbn_fast_8xb32-300e_coco_20221109_183156-91e3c447.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco/yolov6_l_syncbn_fast_8xb32-300e_coco_20221109_183156.log.json) |
-
-**Note**:
-
-1. The official m and l models use knowledge distillation, which our version does not yet support; support will be added through [MMRazor](https://github.com/open-mmlab/mmrazor) in the future.
-2. The performance is unstable and may fluctuate by about 0.3 mAP.
-3. If users need 300-epoch weights for the nano, tiny and small models, they can either train with the 300-epoch configs we provide or convert the official weights with the [converter script](../../tools/model_converters/).
-4. We have noticed that the [base model](https://github.com/meituan/YOLOv6/tree/main/configs/base) was recently released officially in YOLOv6. Although its accuracy is slightly lower, it is more efficient. We will also provide the base model configuration in the future.
-
-## Citation
-
-```latex
-@article{li2022yolov6,
- title={YOLOv6: A Single-Stage Object Detection Framework for Industrial Applications},
- author={Li, Chuyi and Li, Lulu and Jiang, Hongliang and Weng, Kaiheng and Geng, Yifei and Li, Liang and Ke, Zaidan and Li, Qingyuan and Cheng, Meng and Nie, Weiqiang and others},
- journal={arXiv preprint arXiv:2209.02976},
- year={2022}
-}
-```
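
A rough sketch of how one of the checkpoints in the table above would typically be loaded for inference (a minimal example assuming MMYOLO and MMDetection 3.x are installed; the helper names may differ between releases, and the config/checkpoint paths are placeholders for the files linked in the table):

```python
# Minimal inference sketch for the YOLOv6 checkpoints listed above (assumes mmyolo + mmdet 3.x).
from mmdet.apis import inference_detector, init_detector
from mmyolo.utils import register_all_modules

register_all_modules()  # register MMYOLO models and transforms with the MMEngine registry

config = 'configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py'                 # placeholder path
checkpoint = 'yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth'  # placeholder path

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # returns a DetDataSample with pred_instances
print(result.pred_instances.bboxes.shape)
```
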
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192.py
deleted file mode 100644
index bfb8867d898f3b5c8918d068e95e6e7b211cdfd9..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192.py
+++ /dev/null
@@ -1,176 +0,0 @@
-_base_ = [
- '../../../_base_/default_runtime.py',
- '../../../_base_/datasets/deepfashion2.py'
-]
-
-default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
-
-resume = False # resume training from a checkpoint
-load_from = None # path to pretrained model weights to load
-train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10) # number of training epochs and validation interval
-param_scheduler = [
- dict( # warm-up strategy
- type='LinearLR',
- begin=0,
- end=500,
- start_factor=0.001,
- by_epoch=False),
- dict( # scheduler
- type='MultiStepLR',
- begin=0,
- end=210,
- milestones=[100, 160],
- gamma=0.1,
- by_epoch=True)
-]
-optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # optimizer and learning rate
-auto_scale_lr = dict(base_batch_size=512) # automatically scale the learning rate according to the batch size
-
-backend_args = dict(backend='local') # data-loading backend; load from the local disk by default
-dataset_type = 'DeepFashion2Dataset' # dataset class name
-data_mode = 'topdown' # algorithm type, which determines how annotations are loaded
-data_root = 'data/deepfashion2/' # root path of the data
-# codec that generates training targets and decodes predictions; it also stores the input image and output heatmap sizes
-codec = dict(
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
-
-train_pipeline = [
- dict(type='LoadImage'),
- dict(type='GetBBoxCenterScale'),
- dict(type='RandomFlip', direction='horizontal'),
- dict(
- type='RandomBBoxTransform',
- shift_prob=0,
- rotate_factor=60,
- scale_factor=(0.75, 1.25)),
- dict(type='TopdownAffine', input_size=codec['input_size']),
- dict(type='GenerateTarget', encoder=codec),
- dict(type='PackPoseInputs')
-]
-val_pipeline = [ # data transforms at test time
- dict(type='LoadImage', backend_args=backend_args), # load the image
- dict(type='GetBBoxCenterScale'), # get center and scale from the bbox
- dict(type='TopdownAffine', input_size=codec['input_size']), # update the data according to the affine transform
- dict(type='PackPoseInputs') # pack the data for the model
-]
-train_dataloader = dict( # training data loader
- batch_size=64, # batch size
- num_workers=6, # number of data-loading worker processes
- persistent_workers=True, # keep the workers alive between epochs to avoid the cost of restarting them
- sampler=dict(type='DefaultSampler', shuffle=True), # sampling strategy: shuffle the data
- dataset=dict(
- type=dataset_type, # dataset class name
- data_root=data_root, # dataset root path
- data_mode=data_mode, # algorithm type
- ann_file='train/deepfashion2_shorts.json', # annotation file path
- data_prefix=dict(img='train/image/'), # image path
- pipeline=train_pipeline # data pipeline
- ))
-val_dataloader = dict(
- batch_size=32,
- num_workers=6,
- persistent_workers=True, # keep the workers alive between epochs to avoid the cost of restarting them
- drop_last=False,
- sampler=dict(type='DefaultSampler', shuffle=False), # sampling strategy: no shuffling
- dataset=dict(
- type=dataset_type, # dataset class name
- data_root=data_root, # dataset root path
- data_mode=data_mode, # algorithm type
- ann_file='validation/deepfashion2_shorts.json', # annotation file path
- data_prefix=dict(img='validation/image/'), # image path
- test_mode=True, # test-mode switch
- pipeline=val_pipeline # data pipeline
- ))
-test_dataloader = val_dataloader # by default the validation and test sets are not distinguished; define your own if needed
-
-channel_cfg = dict(
- num_output_channels=294,
- dataset_joints=294,
- dataset_channel=[
- [
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
- 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
- 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
- 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
- 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
- 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
- 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
- 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
- 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
- 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
- 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
- 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
- 285, 286, 287, 288, 289, 290, 291, 292, 293
- ],
- ],
- inference_channel=[
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 293
- ])
-
-model = dict(
- type='TopdownPoseEstimator', # the model structure determines the algorithm workflow
- data_preprocessor=dict( # data normalization and channel-order conversion, handled as part of the model
- type='PoseDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True),
- backbone=dict(
- type='ResNet',
- depth=50,
- init_cfg=dict(
- type='Pretrained', # pretrained weights; only the backbone is loaded, for transfer learning
- checkpoint='torchvision://resnet50')),
- head=dict( # model head
- type='HeatmapHead',
- in_channels=2048,
- out_channels=channel_cfg['num_output_channels'],
- # deconv_out_channels=None,
- loss=dict(type='KeypointMSELoss', use_target_weight=True), # loss function
- decoder=codec), # decoder that converts heatmaps back into coordinates
- test_cfg=dict(
- flip_test=True, # enable horizontal-flip test-time augmentation
- flip_mode='heatmap', # flip the heatmap
- shift_heatmap=True, # shift the flipped result to improve accuracy
- ))
-
-val_evaluator = [
- dict(type='PCKAccuracy', thr=0.2),
- dict(type='AUC'),
- dict(type='EPE'),
-]
-
-# val_evaluator = dict(
-# type='CocoMetric', # COCO evaluation metric
-# ann_file='data/deepfashion2/validation/deepfashion2_short_sleeved_shirt_validation.json')
-test_evaluator = val_evaluator # by default the validation and test sets are not distinguished; define your own if needed
-
-visualizer = dict(
- vis_backends=[dict(type='LocalVisBackend'),
- dict(type='WandbVisBackend')])
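
A minimal sketch of how a config like the one above is usually launched for training with MMEngine's `Runner` (assuming MMPose 1.x and its dependencies are installed; the config path and work directory below are illustrative choices, not part of the original file):

```python
# Training-launch sketch for the MMPose 1.x config above (assumes mmpose/mmengine are installed).
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/'
    'td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192.py')  # illustrative path
cfg.work_dir = 'work_dirs/deepfashion2_shorts'                # illustrative output directory

runner = Runner.from_cfg(cfg)  # builds the model, dataloaders and hooks from the dict-style config
runner.train()                 # runs the 210-epoch schedule with validation every 10 epochs
```
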
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnext101_4xb32_2048e_3c_noF.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnext101_4xb32_2048e_3c_noF.py
deleted file mode 100644
index 5a75884d5bc715db4c86e429ebc4512dbab7c017..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnext101_4xb32_2048e_3c_noF.py
+++ /dev/null
@@ -1,108 +0,0 @@
-_base_ = [ # this config file inherits all settings defined in `_base_`
- '../configs/_base_/schedules/custom_schedule.py', # training schedule config
- '../configs/_base_/datasets/custom_ds.py',
- '../configs/_base_/default_runtime.py' # default runtime settings
-]
-
-default_hooks = dict(
- # print a log message every 10 iterations.
- logger=dict(type='LoggerHook', interval=10),
- # save a checkpoint (and track the best one) every 16 epochs.
- checkpoint=dict(save_best='auto', interval=16)
-)
-
-# visualizer = dict(
-# vis_backends=[dict(type='LocalVisBackend'),
-# dict(type='WandbVisBackend')])
-
-dataset_type = 'CustomDataset'
-
-# config of the data pipelines
-train_pipeline = [
- dict(type='LoadImageFromFile'), # read the image
- dict(type='RandomResizedCrop', scale=224), # random resized crop
- dict(type='RandomFlip', prob=0.5, direction='horizontal'), # random horizontal flip
- dict(type='mmpretrain.PackInputs'), # pack the image and its label
-]
-
-test_pipeline = [
- dict(type='LoadImageFromFile'), # read the image
- dict(type='mmpretrain.ResizeEdge', scale=256, edge='short'), # resize the short edge to 256 px
- dict(type='CenterCrop', crop_size=224), # center crop
- dict(type='mmpretrain.PackInputs'), # pack the image and its label
-]
-
-# config of dataloader
-train_dataloader = dict(
- batch_size=32, # batch size per GPU
- num_workers=5, # number of worker processes per GPU
- dataset=dict( # training dataset
- type=dataset_type,
- data_root='../2_preprocess_data_3000',
- with_label=True,
- ann_file='',
- data_prefix='train',
- pipeline=train_pipeline),
- sampler=dict(type='DefaultSampler', shuffle=True), # default sampler
- persistent_workers=True, # keep worker processes alive to shorten the preparation time of each epoch
-)
-
-# build the validation dataloader
-val_dataloader = dict(
- batch_size=32,
- num_workers=5,
- dataset=dict(
- type=dataset_type,
- data_root='Model6_2/2_preprocess_data_3000',
- with_label=True,
- ann_file='',
- data_prefix='val',
- pipeline=test_pipeline),
- sampler=dict(type='DefaultSampler', shuffle=False),
- persistent_workers=True,
-)
-
-# set the evaluator for the validation dataset; here we use top-1 and top-3 accuracy
-val_evaluator = dict(type='Accuracy', topk=(1, 3))
-
-test_dataloader = val_dataloader
-test_evaluator = val_evaluator
-
-model = dict(
- type='ImageClassifier', # main model type (use `ImageClassifier` for image classification)
- backbone=dict(
- type='ResNeXt', # backbone type
- depth=101,
- in_channels=3, # number of input channels
- ),
- neck=dict(type='GlobalAveragePooling'), # neck type
- head=dict(
- type='LinearClsHead', # classification head type
- # all fields other than `type` come from the __init__ method of the `LinearClsHead` class
- # see https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.heads.LinearClsHead.html
- num_classes=7, # number of classes
- in_channels=2048,
- loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # loss function config
- topk=(1, 3), # evaluation metric: top-k accuracy
- ))
-
-optim_wrapper = dict(
- accumulative_counts=8
-)
-
-param_scheduler = [
- # linear warm-up over the first 10 epochs, updated every iteration
- dict(type='LinearLR',
- start_factor=0.00001,
- by_epoch=True,
- end=10,
- convert_to_iter_based=True, # update the learning rate every iteration
- ),
- # after epoch 10, decay the learning rate step-wise with MultiStepLR
- dict(type='MultiStepLR',
- by_epoch=True, # update the learning rate every epoch
- milestones=[30, 210, 390, 570, 750, 930, 1110, 1290, 1470, 1650, 1830],
- gamma=0.9)
-]
-
-train_cfg = dict(by_epoch=True, max_epochs=2048, val_interval=16)
\ No newline at end of file
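
To make the learning-rate schedule above concrete, here is a small stand-alone sketch (plain Python, not MMEngine code) of how the `MultiStepLR` stage scales a base learning rate; the base LR value is a hypothetical placeholder, since the real one is inherited from `custom_schedule.py`:

```python
# Stand-alone illustration of the MultiStepLR stage configured above:
# each time an epoch milestone is passed, the learning rate is multiplied by gamma.
milestones = [30, 210, 390, 570, 750, 930, 1110, 1290, 1470, 1650, 1830]
gamma = 0.9
base_lr = 0.1  # hypothetical value; the real base LR comes from the inherited custom_schedule.py

def lr_at_epoch(epoch: int) -> float:
    passed = sum(1 for m in milestones if epoch >= m)
    return base_lr * gamma ** passed

for e in (0, 100, 500, 2047):
    print(f"epoch {e}: lr = {lr_at_epoch(e):.6f}")
```
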
diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/app.py b/spaces/AbandonedMuse/UnlimitedMusicGen/app.py
deleted file mode 100644
index 0b62724a4e5515d175da5f58d67c9c6aa01aec1a..0000000000000000000000000000000000000000
--- a/spaces/AbandonedMuse/UnlimitedMusicGen/app.py
+++ /dev/null
@@ -1,543 +0,0 @@
-"""
-Copyright (c) Meta Platforms, Inc. and affiliates.
-All rights reserved.
-
-This source code is licensed under the license found in the
-LICENSE file in the root directory of this source tree.
-"""
-
-from tempfile import NamedTemporaryFile
-import argparse
-import torch
-import gradio as gr
-import os
-import subprocess
-import sys
-from pathlib import Path
-import time
-import typing as tp
-import warnings
-from audiocraft.models import MusicGen
-from audiocraft.data.audio import audio_write
-from audiocraft.data.audio_utils import apply_fade, apply_tafade, apply_splice_effect
-from audiocraft.utils.extend import generate_music_segments, add_settings_to_image, INTERRUPTING
-import numpy as np
-import random
-#from pathlib import Path
-#from typing import List, Union
-import librosa
-
-MODEL = None
-MODELS = None
-IS_SHARED_SPACE = "Surn/UnlimitedMusicGen" in os.environ.get('SPACE_ID', '')
-INTERRUPTED = False
-UNLOAD_MODEL = False
-MOVE_TO_CPU = False
-MAX_PROMPT_INDEX = 0
-git = os.environ.get('GIT', "git")
-os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
-
-def interrupt_callback():
- return INTERRUPTED
-
-def interrupt():
- global INTERRUPTING
- INTERRUPTING = True
-
-class FileCleaner:
- def __init__(self, file_lifetime: float = 3600):
- self.file_lifetime = file_lifetime
- self.files = []
-
- def add(self, path: tp.Union[str, Path]):
- self._cleanup()
- self.files.append((time.time(), Path(path)))
-
- def _cleanup(self):
- now = time.time()
- for time_added, path in list(self.files):
- if now - time_added > self.file_lifetime:
- if path.exists():
- path.unlink()
- self.files.pop(0)
- else:
- break
-
-
-#file_cleaner = FileCleaner()
-
-def toggle_audio_src(choice):
- if choice == "mic":
- return gr.update(source="microphone", value=None, label="Microphone")
- else:
- return gr.update(source="upload", value=None, label="File")
-
-def make_waveform(*args, **kwargs):
- # Further remove some warnings.
- be = time.time()
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
- out = gr.make_waveform(*args, **kwargs)
- print("Make a video took", time.time() - be)
- return out
-
-def load_model(version):
- global MODEL, MODELS, UNLOAD_MODEL
- print("Loading model", version)
- if MODELS is None:
- return MusicGen.get_pretrained(version)
- else:
- t1 = time.monotonic()
- if MODEL is not None:
- MODEL.to('cpu') # move to cache
- print("Previous model moved to CPU in %.2fs" % (time.monotonic() - t1))
- t1 = time.monotonic()
- if MODELS.get(version) is None:
- print("Loading model %s from disk" % version)
- result = MusicGen.get_pretrained(version)
- MODELS[version] = result
- print("Model loaded in %.2fs" % (time.monotonic() - t1))
- return result
- result = MODELS[version].to('cuda')
- print("Cached model loaded in %.2fs" % (time.monotonic() - t1))
- return result
-
-def get_filename(file):
- # extract filename from file object
- filename = None
- if file is not None:
- filename = file.name
- return filename
-
-def get_filename_from_filepath(filepath):
- file_name = os.path.basename(filepath)
- file_base, file_extension = os.path.splitext(file_name)
- return file_base, file_extension
-
-def get_melody(melody_filepath):
- audio_data= list(librosa.load(melody_filepath, sr=None))
- audio_data[0], audio_data[1] = audio_data[1], audio_data[0]
- melody = tuple(audio_data)
- return melody
-
-
-def commit_hash():
- try:
- return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
- except Exception:
- return ""
-
-
-def git_tag():
- try:
- return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip()
- except Exception:
- try:
- from pathlib import Path
- changelog_md = Path(__file__).parent.parent / "CHANGELOG.md"
- with changelog_md.open(encoding="utf-8") as file:
- return next((line.strip() for line in file if line.strip()), "")
- except Exception:
- return ""
-
-def versions_html():
- import torch
-
- python_version = ".".join([str(x) for x in sys.version_info[0:3]])
- commit = commit_hash()
- #tag = git_tag()
-
- import xformers
- xformers_version = xformers.__version__
-
- return f"""
- version: " else commit}" target="_blank">{"huggingface" if commit == "" else commit}
- •
- python: {python_version}
- •
- torch: {getattr(torch, '__long_version__',torch.__version__)}
- •
- xformers: {xformers_version}
- •
- gradio: {gr.__version__}
- """
-
-def load_melody_filepath(melody_filepath, title):
- # get melody filename
- #$Union[str, os.PathLike]
- symbols = ['_', '.', '-']
- if (melody_filepath is None) or (melody_filepath == ""):
- return title, gr.update(maximum=0, value=0) , gr.update(value="melody", interactive=True)
-
- if (title is None) or ("MusicGen" in title) or (title == ""):
- melody_name, melody_extension = get_filename_from_filepath(melody_filepath)
- # fix melody name for symbols
- for symbol in symbols:
- melody_name = melody_name.replace(symbol, ' ').title()
- else:
- melody_name = title
-
- print(f"Melody name: {melody_name}, Melody Filepath: {melody_filepath}\n")
-
- # get melody length in number of segments and modify the UI
- melody = get_melody(melody_filepath)
- sr, melody_data = melody[0], melody[1]
- segment_samples = sr * 30
- total_melodys = max(min((len(melody_data) // segment_samples), 25), 0)
- print(f"Melody length: {len(melody_data)}, Melody segments: {total_melodys}\n")
- MAX_PROMPT_INDEX = total_melodys
-
- return gr.Textbox.update(value=melody_name), gr.update(maximum=MAX_PROMPT_INDEX, value=0), gr.update(value="melody", interactive=False)
-
-def predict(model, text, melody_filepath, duration, dimension, topk, topp, temperature, cfg_coef, background, title, settings_font, settings_font_color, seed, overlap=1, prompt_index = 0, include_title = True, include_settings = True, harmony_only = False):
- global MODEL, INTERRUPTED, INTERRUPTING, MOVE_TO_CPU
- output_segments = None
- melody_name = "Not Used"
- melody = None
- if melody_filepath:
- melody_name, melody_extension = get_filename_from_filepath(melody_filepath)
- melody = get_melody(melody_filepath)
-
- INTERRUPTED = False
- INTERRUPTING = False
- if temperature < 0:
- raise gr.Error("Temperature must be >= 0.")
- if topk < 0:
- raise gr.Error("Topk must be non-negative.")
- if topp < 0:
- raise gr.Error("Topp must be non-negative.")
-
- if MODEL is None or MODEL.name != model:
- MODEL = load_model(model)
- else:
- if MOVE_TO_CPU:
- MODEL.to('cuda')
-
- # prevent hacking
- duration = min(duration, 720)
- overlap = min(overlap, 15)
- #
-
- output = None
- segment_duration = duration
- initial_duration = duration
- output_segments = []
- while duration > 0:
- if not output_segments: # first pass of long or short song
- if segment_duration > MODEL.lm.cfg.dataset.segment_duration:
- segment_duration = MODEL.lm.cfg.dataset.segment_duration
- else:
- segment_duration = duration
- else: # next pass of long song
- if duration + overlap < MODEL.lm.cfg.dataset.segment_duration:
- segment_duration = duration + overlap
- else:
- segment_duration = MODEL.lm.cfg.dataset.segment_duration
- # implement seed
- if seed < 0:
- seed = random.randint(0, 0xffff_ffff_ffff)
- torch.manual_seed(seed)
-
-
- print(f'Segment duration: {segment_duration}, duration: {duration}, overlap: {overlap}')
- MODEL.set_generation_params(
- use_sampling=True,
- top_k=topk,
- top_p=topp,
- temperature=temperature,
- cfg_coef=cfg_coef,
- duration=segment_duration,
- two_step_cfg=False,
- rep_penalty=0.5
- )
-
- if melody:
- # todo return excess duration, load next model and continue in loop structure building up output_segments
- if duration > MODEL.lm.cfg.dataset.segment_duration:
- output_segments, duration = generate_music_segments(text, melody, seed, MODEL, duration, overlap, MODEL.lm.cfg.dataset.segment_duration, prompt_index, harmony_only=False)
- else:
- # pure original code
- sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0)
- print(melody.shape)
- if melody.dim() == 2:
- melody = melody[None]
- melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)]
- output = MODEL.generate_with_chroma(
- descriptions=[text],
- melody_wavs=melody,
- melody_sample_rate=sr,
- progress=True
- )
- # All output_segments are populated, so we can break the loop or set duration to 0
- break
- else:
- #output = MODEL.generate(descriptions=[text], progress=False)
- if not output_segments:
- next_segment = MODEL.generate(descriptions=[text], progress=True)
- duration -= segment_duration
- else:
- last_chunk = output_segments[-1][:, :, -overlap*MODEL.sample_rate:]
- next_segment = MODEL.generate_continuation(last_chunk, MODEL.sample_rate, descriptions=[text], progress=False)
- duration -= segment_duration - overlap
- output_segments.append(next_segment)
-
- if INTERRUPTING:
- INTERRUPTED = True
- INTERRUPTING = False
- print("Function execution interrupted!")
- raise gr.Error("Interrupted.")
-
- if output_segments:
- try:
- # Combine the output segments into one long audio file or stack tracks
- #output_segments = [segment.detach().cpu().float()[0] for segment in output_segments]
- #output = torch.cat(output_segments, dim=dimension)
-
- output = output_segments[0]
- for i in range(1, len(output_segments)):
- if overlap > 0:
- overlap_samples = overlap * MODEL.sample_rate
- #stack tracks and fade out/in
- overlapping_output_fadeout = output[:, :, -overlap_samples:]
- #overlapping_output_fadeout = apply_fade(overlapping_output_fadeout,sample_rate=MODEL.sample_rate,duration=overlap,out=True,start=True, curve_end=0.0, current_device=MODEL.device)
- overlapping_output_fadeout = apply_tafade(overlapping_output_fadeout,sample_rate=MODEL.sample_rate,duration=overlap,out=True,start=True,shape="linear")
-
- overlapping_output_fadein = output_segments[i][:, :, :overlap_samples]
- #overlapping_output_fadein = apply_fade(overlapping_output_fadein,sample_rate=MODEL.sample_rate,duration=overlap,out=False,start=False, curve_start=0.0, current_device=MODEL.device)
- overlapping_output_fadein = apply_tafade(overlapping_output_fadein,sample_rate=MODEL.sample_rate,duration=overlap,out=False,start=False, shape="linear")
-
- overlapping_output = torch.cat([overlapping_output_fadeout[:, :, :-(overlap_samples // 2)], overlapping_output_fadein],dim=2)
- ###overlapping_output, overlap_sample_rate = apply_splice_effect(overlapping_output_fadeout, MODEL.sample_rate, overlapping_output_fadein, MODEL.sample_rate, overlap)
- print(f" overlap size Fade:{overlapping_output.size()}\n output: {output.size()}\n segment: {output_segments[i].size()}")
- ##overlapping_output = torch.cat([output[:, :, -overlap_samples:], output_segments[i][:, :, :overlap_samples]], dim=1) #stack tracks
- ##print(f" overlap size stack:{overlapping_output.size()}\n output: {output.size()}\n segment: {output_segments[i].size()}")
- #overlapping_output = torch.cat([output[:, :, -overlap_samples:], output_segments[i][:, :, :overlap_samples]], dim=2) #stack tracks
- #print(f" overlap size cat:{overlapping_output.size()}\n output: {output.size()}\n segment: {output_segments[i].size()}")
- output = torch.cat([output[:, :, :-overlap_samples], overlapping_output, output_segments[i][:, :, overlap_samples:]], dim=dimension)
- else:
- output = torch.cat([output, output_segments[i]], dim=dimension)
- output = output.detach().cpu().float()[0]
- except Exception as e:
- print(f"Error combining segments: {e}. Using the first segment only.")
- output = output_segments[0].detach().cpu().float()[0]
- else:
- output = output.detach().cpu().float()[0]
-
- with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
- video_description = f"{text}\n Duration: {str(initial_duration)} Dimension: {dimension}\n Top-k:{topk} Top-p:{topp}\n Randomness:{temperature}\n cfg:{cfg_coef} overlap: {overlap}\n Seed: {seed}\n Model: {model}\n Melody Condition:{melody_name}\n Sample Segment: {prompt_index}"
- if include_settings or include_title:
- background = add_settings_to_image(title if include_title else "", video_description if include_settings else "", background_path=background, font=settings_font, font_color=settings_font_color)
- audio_write(
- file.name, output, MODEL.sample_rate, strategy="loudness",
- loudness_headroom_db=18, loudness_compressor=True, add_suffix=False, channels=2)
- waveform_video = make_waveform(file.name,bg_image=background, bar_count=45)
- if MOVE_TO_CPU:
- MODEL.to('cpu')
- if UNLOAD_MODEL:
- MODEL = None
- torch.cuda.empty_cache()
- torch.cuda.ipc_collect()
- return waveform_video, file.name, seed
-
-def ui(**kwargs):
- css="""
- #col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
- a {text-decoration-line: underline; font-weight: 600;}
- #btn-generate {background-image:linear-gradient(to right bottom, rgb(157, 255, 157), rgb(229, 255, 235));}
- #btn-generate:hover {background-image:linear-gradient(to right bottom, rgb(229, 255, 229), rgb(255, 255, 255));}
- #btn-generate:active {background-image:linear-gradient(to right bottom, rgb(229, 255, 235), rgb(157, 255, 157));}
- #versions {margin-top: 1em; width:100%; text-align:center;}
- .small-btn {max-width:75px;}
- """
- with gr.Blocks(title="UnlimitedMusicGen", css=css) as demo:
- gr.Markdown(
- """
- # UnlimitedMusicGen
- This is your private demo for [UnlimitedMusicGen](https://github.com/Oncorporation/audiocraft), a simple and controllable model for music generation
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
-
- Disclaimer: This won't run on CPU only. Clone this App and run on GPU instance!
-
- Todo: improve transitions between 30-second segments and improve the Interrupt feature.
- """
- )
- if IS_SHARED_SPACE and not torch.cuda.is_available():
- gr.Markdown("""
- ⚠ This Space doesn't work in this shared UI ⚠
-
-
-
- to use it privately, or use the public demo
- """)
- with gr.Row():
- with gr.Column():
- with gr.Row():
- text = gr.Text(label="Describe your music", interactive=True, value="4/4 100bpm 320kbps 48khz, Industrial/Electronic Soundtrack, Dark, Intense, Sci-Fi")
- with gr.Column():
- duration = gr.Slider(minimum=1, maximum=720, value=10, label="Duration (s)", interactive=True)
- model = gr.Radio(["melody", "medium", "small", "large"], label="AI Model", value="melody", interactive=True)
- with gr.Row():
- submit = gr.Button("Generate", elem_id="btn-generate")
- # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
- _ = gr.Button("Interrupt", elem_id="btn-interrupt").click(fn=interrupt, queue=False)
- with gr.Row():
- with gr.Column():
- radio = gr.Radio(["file", "mic"], value="file", label="Condition on a melody (optional) File or Mic")
- melody_filepath = gr.Audio(source="upload", type="filepath", label="Melody Condition (optional)", interactive=True, elem_id="melody-input")
- with gr.Column():
- harmony_only = gr.Radio(label="Use Harmony Only",choices=["No", "Yes"], value="No", interactive=True, info="Remove Drums?")
- prompt_index = gr.Slider(label="Melody Condition Sample Segment", minimum=-1, maximum=MAX_PROMPT_INDEX, step=1, value=0, interactive=True, info="Which 30 second segment to condition with; -1 conditions each segment independently")
- with gr.Accordion("Video", open=False):
- with gr.Row():
- background= gr.Image(value="./assets/background.png", source="upload", label="Background", shape=(768,512), type="filepath", interactive=True)
- with gr.Column():
- include_title = gr.Checkbox(label="Add Title", value=True, interactive=True)
- include_settings = gr.Checkbox(label="Add Settings to background", value=True, interactive=True)
- with gr.Row():
- title = gr.Textbox(label="Title", value="UnlimitedMusicGen", interactive=True)
- settings_font = gr.Text(label="Settings Font", value="./assets/arial.ttf", interactive=True)
- settings_font_color = gr.ColorPicker(label="Settings Font Color", value="#c87f05", interactive=True)
- with gr.Accordion("Expert", open=False):
- with gr.Row():
- overlap = gr.Slider(minimum=0, maximum=15, value=2, step=1, label="Verse Overlap", interactive=True)
- dimension = gr.Slider(minimum=-2, maximum=2, value=2, step=1, label="Dimension", info="determines which direction to add new segments of audio. (1 = stack tracks, 2 = lengthen, -2..0 = ?)", interactive=True)
- with gr.Row():
- topk = gr.Number(label="Top-k", value=280, precision=0, interactive=True)
- topp = gr.Number(label="Top-p", value=1150, precision=0, interactive=True)
- temperature = gr.Number(label="Randomness Temperature", value=0.7, precision=None, interactive=True)
- cfg_coef = gr.Number(label="Classifier Free Guidance", value=8.5, precision=None, interactive=True)
- with gr.Row():
- seed = gr.Number(label="Seed", value=-1, precision=0, interactive=True)
- gr.Button('\U0001f3b2\ufe0f', elem_classes="small-btn").click(fn=lambda: -1, outputs=[seed], queue=False)
- reuse_seed = gr.Button('\u267b\ufe0f', elem_classes="small-btn")
- with gr.Column() as c:
- output = gr.Video(label="Generated Music")
- wave_file = gr.File(label=".wav file", elem_id="output_wavefile", interactive=True)
- seed_used = gr.Number(label='Seed used', value=-1, interactive=False)
-
- radio.change(toggle_audio_src, radio, [melody_filepath], queue=False, show_progress=False)
- melody_filepath.change(load_melody_filepath, inputs=[melody_filepath, title], outputs=[title, prompt_index , model], api_name="melody_filepath_change", queue=False)
- reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False, api_name="reuse_seed")
- submit.click(predict, inputs=[model, text,melody_filepath, duration, dimension, topk, topp, temperature, cfg_coef, background, title, settings_font, settings_font_color, seed, overlap, prompt_index, include_title, include_settings, harmony_only], outputs=[output, wave_file, seed_used], api_name="submit")
- gr.Examples(
- fn=predict,
- examples=[
- [
- "4/4 120bpm 320kbps 48khz, An 80s driving pop song with heavy drums and synth pads in the background",
- "./assets/bach.mp3",
- "melody",
- "80s Pop Synth"
- ],
- [
- "4/4 120bpm 320kbps 48khz, A cheerful country song with acoustic guitars",
- "./assets/bolero_ravel.mp3",
- "melody",
- "Country Guitar"
- ],
- [
- "4/4 120bpm 320kbps 48khz, 90s rock song with electric guitar and heavy drums",
- None,
- "medium",
- "90s Rock Guitar"
- ],
- [
- "4/4 120bpm 320kbps 48khz, a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
- "./assets/bach.mp3",
- "melody",
- "EDM my Bach"
- ],
- [
- "4/4 320kbps 48khz, lofi slow bpm electro chill with organic samples",
- None,
- "medium",
- "LoFi Chill"
- ],
- ],
- inputs=[text, melody_filepath, model, title],
- outputs=[output]
- )
- gr.HTML(value=versions_html(), visible=True, elem_id="versions")
-
- # Show the interface
- launch_kwargs = {}
- share = kwargs.get('share', False)
- server_port = kwargs.get('server_port', 0)
- server_name = kwargs.get('listen')
-
- launch_kwargs['server_name'] = server_name
-
- if server_port > 0:
- launch_kwargs['server_port'] = server_port
- if share:
- launch_kwargs['share'] = share
- launch_kwargs['favicon_path']= "./assets/favicon.ico"
-
-
-
- demo.queue(max_size=10, concurrency_count=1, api_open=False).launch(**launch_kwargs)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- '--listen',
- type=str,
- default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
- help='IP to listen on for connections to Gradio',
- )
- parser.add_argument(
- '--username', type=str, default='', help='Username for authentication'
- )
- parser.add_argument(
- '--password', type=str, default='', help='Password for authentication'
- )
- parser.add_argument(
- '--server_port',
- type=int,
- default=0,
- help='Port to run the server listener on',
- )
- parser.add_argument(
- '--inbrowser', action='store_true', help='Open in browser'
- )
- parser.add_argument(
- '--share', action='store_true', help='Share the gradio UI'
- )
- parser.add_argument(
- '--unload_model', action='store_true', help='Unload the model after every generation to save GPU memory'
- )
-
- parser.add_argument(
- '--unload_to_cpu', action='store_true', help='Move the model to main RAM after every generation to save GPU memory but reload faster than after full unload (see above)'
- )
-
- parser.add_argument(
- '--cache', action='store_true', help='Cache models in RAM to quickly switch between them'
- )
-
- args = parser.parse_args()
-
- launch_kwargs = {}
- launch_kwargs['server_name'] = args.listen
-
- if args.username and args.password:
- launch_kwargs['auth'] = (args.username, args.password)
- if args.server_port:
- launch_kwargs['server_port'] = args.server_port
- if args.inbrowser:
- launch_kwargs['inbrowser'] = args.inbrowser
- if args.share:
- launch_kwargs['share'] = args.share
- launch_kwargs['favicon_path']= "./assets/favicon.ico"
-
-
- UNLOAD_MODEL = args.unload_model
- MOVE_TO_CPU = args.unload_to_cpu
- if args.cache:
- MODELS = {}
-
- ui(
- unload_to_cpu = MOVE_TO_CPU,
- share=args.share
-
- )
diff --git a/spaces/AchyuthGamer/OpenGPT/client/html/index.html b/spaces/AchyuthGamer/OpenGPT/client/html/index.html
deleted file mode 100644
index a7a0eb2207def3480ce5649379ce167a37d13ed6..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/client/html/index.html
+++ /dev/null
@@ -1,135 +0,0 @@
-
- FreeGPT
-
- {{_('Web Access')}}
-
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/FastGpt.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/FastGpt.py
deleted file mode 100644
index 65efa29db78507f7085bc7cc9767490b0d1aac5e..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/FastGpt.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import annotations
-
-import json
-import random
-from abc import ABC, abstractmethod
-
-import requests
-
-from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
-
-
-class FastGpt(BaseProvider):
- url: str = 'https://chat9.fastgpt.me/'
- working = False
- needs_auth = False
- supports_stream = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = False
-
- @staticmethod
- @abstractmethod
- def create_completion(
- model: str,
- messages: list[dict[str, str]],
- stream: bool, **kwargs: Any) -> CreateResult:
-
- headers = {
- 'authority' : 'chat9.fastgpt.me',
- 'accept' : 'text/event-stream',
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control' : 'no-cache',
- 'content-type' : 'application/json',
- 'origin' : 'https://chat9.fastgpt.me',
- 'plugins' : '0',
- 'pragma' : 'no-cache',
- 'referer' : 'https://chat9.fastgpt.me/',
- 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
- 'sec-ch-ua-mobile' : '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest' : 'empty',
- 'sec-fetch-mode' : 'cors',
- 'sec-fetch-site' : 'same-origin',
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
- 'usesearch' : 'false',
- 'x-requested-with' : 'XMLHttpRequest',
- }
-
- json_data = {
- 'messages' : messages,
- 'stream' : stream,
- 'model' : model,
- 'temperature' : kwargs.get('temperature', 0.5),
- 'presence_penalty' : kwargs.get('presence_penalty', 0),
- 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
- 'top_p' : kwargs.get('top_p', 1),
- }
-
- subdomain = random.choice([
- 'jdaen979ew',
- 'chat9'
- ])
-
- response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
- headers=headers, json=json_data, stream=stream)
-
- for line in response.iter_lines():
- if line:
- try:
- if b'content' in line:
- line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- token = line_json['choices'][0]['delta'].get('content')
- if token:
- yield token
- except:
- continue
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
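
For context, a provider like the one above is driven through its `create_completion` generator. A rough usage sketch, with the import path inferred from this repository's layout (upstream g4f places deprecated providers elsewhere); the provider is marked `working = False`, so the request itself may no longer succeed:

```python
# Rough usage sketch for the FastGpt provider above; the module path is an assumption from this repo layout.
from g4f.Provider.Providers.deprecated.FastGpt import FastGpt

messages = [{"role": "user", "content": "Say hello in one sentence."}]

# create_completion yields response tokens as they stream back from the endpoint.
for token in FastGpt.create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    print(token, end="", flush=True)
```
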
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetMaxChildHeight.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetMaxChildHeight.js
deleted file mode 100644
index 30bac27c98263ad6d93ecd375559a0339fd40f6d..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetMaxChildHeight.js
+++ /dev/null
@@ -1,22 +0,0 @@
-import { GetDisplayHeight } from '../../../plugins/utils/size/GetDisplaySize.js';
-
-var GetMaxChildHeight = function (children) {
- if (children === undefined) {
- children = this.sizerChildren;
- }
- var result = 0;
- var child, childHeight;
- for (var i = 0, cnt = children.length; i < cnt; i++) {
- child = children[i];
- if (child === '\n') {
- continue;
- }
-
- childHeight = (child.isRexSizer) ?
- Math.max(child.minHeight, child.childrenHeight) :
- (child.hasOwnProperty('minHeight')) ? child.minHeight : GetDisplayHeight(child);
- result = Math.max(childHeight, result);
- }
- return result;
-}
-export default GetMaxChildHeight;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode1.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode1.js
deleted file mode 100644
index 33954d6ca7118e354b1d18090186ff680b4ab606..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode1.js
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Elements:
- ```
- HHH
- LCR
- LFF
- ```
-*/
-
-import {
- GetAddHeaderConfig,
- GetAddLeftSideConfig, GetAddContentConfig, GetAddRightSideConfig,
- GetAddFooterConfig,
- GetAddContainerConfig
-} from './GetAddChildConfig.js';
-import CreatExpandContainer from './CreatExpandContainer.js';
-
-var LayoutMode1 = function (config) {
- var scene = this.scene;
-
- // Add Header
- var header = config.header;
- if (header) {
- this.add(header, GetAddHeaderConfig(config));
- }
-
- /*
- L CR
- L FF
- */
- var bodySizer0 = CreatExpandContainer(scene, 0);
- this.add(bodySizer0, GetAddContainerConfig(config));
-
- // Add Left-side
- var leftSide = config.leftSide;
- if (leftSide) {
- bodySizer0.add(leftSide, GetAddLeftSideConfig(config));
- }
-
- /*
- CR
-
- FF
- */
- var bodySizer1 = CreatExpandContainer(scene, 1);
- bodySizer0.add(bodySizer1, GetAddContainerConfig(config));
-
- /*
- C R
- */
- var bodySizer2 = CreatExpandContainer(scene, 0);
- bodySizer1.add(bodySizer2, GetAddContainerConfig(config));
-
- // Add content
- var content = config.content;
- if (content) {
- bodySizer2.add(content, GetAddContentConfig(config));
- }
-
- // Add Right-side
- var rightSide = config.rightSide;
- if (rightSide) {
- bodySizer2.add(rightSide, GetAddRightSideConfig(config));
- }
-
- // Add Footer
- var footer = config.footer;
- if (footer) {
- bodySizer1.add(footer, GetAddFooterConfig(config));
- }
-}
-
-export default LayoutMode1;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetNearestChildIndex.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetNearestChildIndex.js
deleted file mode 100644
index 6e9f917b0261da4219ae919287b8341ade1aae12..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetNearestChildIndex.js
+++ /dev/null
@@ -1,42 +0,0 @@
-var GetNearestChildIndex = function (x, y) {
- var children = this.sizerChildren;
- if (children.length === 0) {
- return -1;
- }
-
- var nearestIndex = -1,
- minDistance = Infinity;
- for (var i = 0, cnt = children.length; i < cnt; i++) {
- var child = children[i];
-
- var distance;
- if (this.orientation === 0) { // x
- distance = Math.abs(child.left - x);
- } else {
- distance = Math.abs(child.top - y);
- }
-
- if (minDistance > distance) {
- minDistance = distance;
- nearestIndex = i;
- }
- }
-
- // Check right bound of last child
- var child = children[children.length - 1];
- var distance;
- if (this.orientation === 0) { // x
- distance = Math.abs(child.right - x);
- } else {
- distance = Math.abs(child.bottom - y);
- }
-
- if (minDistance > distance) {
- minDistance = distance;
- nearestIndex = i + 1;
- }
-
- return nearestIndex;
-}
-
-export default GetNearestChildIndex;
\ No newline at end of file
diff --git a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/mandarin.py b/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/mandarin.py
deleted file mode 100644
index 093d8826809aa2681f6088174427337a59e0c882..0000000000000000000000000000000000000000
--- a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/mandarin.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import os
-import sys
-import re
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba
-import cn2an
-import logging
-
-logging.getLogger('jieba').setLevel(logging.WARNING)
-jieba.initialize()
-
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (romaji, ipa) pairs:
-_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ʃy', 'ʃ'),
- ('ʧʰy', 'ʧʰ'),
- ('ʧ⁼y', 'ʧ⁼'),
- ('NN', 'n'),
- ('Ng', 'ŋ'),
- ('y', 'j'),
- ('h', 'x')
-]]
-
-# List of (bopomofo, ipa) pairs:
-_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'x'),
- ('ㄐ', 'tʃ⁼'),
- ('ㄑ', 'tʃʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ts`⁼'),
- ('ㄔ', 'ts`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ts⁼'),
- ('ㄘ', 'tsʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'ɛ'),
- ('ㄞ', 'aɪ'),
- ('ㄟ', 'eɪ'),
- ('ㄠ', 'ɑʊ'),
- ('ㄡ', 'oʊ'),
- ('ㄧㄢ', 'jɛn'),
- ('ㄩㄢ', 'ɥæn'),
- ('ㄢ', 'an'),
- ('ㄧㄣ', 'in'),
- ('ㄩㄣ', 'ɥn'),
- ('ㄣ', 'ən'),
- ('ㄤ', 'ɑŋ'),
- ('ㄧㄥ', 'iŋ'),
- ('ㄨㄥ', 'ʊŋ'),
- ('ㄩㄥ', 'jʊŋ'),
- ('ㄥ', 'əŋ'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (bopomofo, ipa2) pairs:
-_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'pwo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'tɕ'),
- ('ㄑ', 'tɕʰ'),
- ('ㄒ', 'ɕ'),
- ('ㄓ', 'tʂ'),
- ('ㄔ', 'tʂʰ'),
- ('ㄕ', 'ʂ'),
- ('ㄖ', 'ɻ'),
- ('ㄗ', 'ts'),
- ('ㄘ', 'tsʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ɤ'),
- ('ㄝ', 'ɛ'),
- ('ㄞ', 'aɪ'),
- ('ㄟ', 'eɪ'),
- ('ㄠ', 'ɑʊ'),
- ('ㄡ', 'oʊ'),
- ('ㄧㄢ', 'jɛn'),
- ('ㄩㄢ', 'yæn'),
- ('ㄢ', 'an'),
- ('ㄧㄣ', 'in'),
- ('ㄩㄣ', 'yn'),
- ('ㄣ', 'ən'),
- ('ㄤ', 'ɑŋ'),
- ('ㄧㄥ', 'iŋ'),
- ('ㄨㄥ', 'ʊŋ'),
- ('ㄩㄥ', 'jʊŋ'),
- ('ㄥ', 'ɤŋ'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'y'),
- ('ˉ', '˥'),
- ('ˊ', '˧˥'),
- ('ˇ', '˨˩˦'),
- ('ˋ', '˥˩'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def number_to_chinese(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- return text
-
-
-def chinese_to_bopomofo(text):
- text = text.replace('、', ',').replace(';', ',').replace(':', ',')
- words = jieba.lcut(text, cut_all=False)
- text = ''
- for word in words:
- bopomofos = lazy_pinyin(word, BOPOMOFO)
- if not re.search('[\u4e00-\u9fff]', word):
- text += word
- continue
- for i in range(len(bopomofos)):
- bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i])
- if text != '':
- text += ' '
- text += ''.join(bopomofos)
- return text
-
-
-def latin_to_bopomofo(text):
- for regex, replacement in _latin_to_bopomofo:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_romaji(text):
- for regex, replacement in _bopomofo_to_romaji:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_ipa(text):
- for regex, replacement in _bopomofo_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_ipa2(text):
- for regex, replacement in _bopomofo_to_ipa2:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_romaji(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_romaji(text)
- text = re.sub('i([aoe])', r'y\1', text)
- text = re.sub('u([aoəe])', r'w\1', text)
- text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
- text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
- return text
-
-
-def chinese_to_lazy_ipa(text):
- text = chinese_to_romaji(text)
- for regex, replacement in _romaji_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_ipa(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_ipa(text)
- text = re.sub('i([aoe])', r'j\1', text)
- text = re.sub('u([aoəe])', r'w\1', text)
- text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
- text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
- return text
-
-
-def chinese_to_ipa2(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_ipa2(text)
- text = re.sub(r'i([aoe])', r'j\1', text)
- text = re.sub(r'u([aoəe])', r'w\1', text)
- text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text)
- text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text)
- return text
\ No newline at end of file
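
A brief usage sketch of the conversion chain above (assuming `pypinyin`, `jieba` and `cn2an` are installed and the module is importable as `text.mandarin` from the Space's root; the exact IPA strings depend on those libraries' versions):

```python
# Usage sketch: digits -> Chinese numerals -> bopomofo -> IPA, as implemented above.
from text.mandarin import chinese_to_bopomofo, chinese_to_ipa, number_to_chinese

sentence = '我有2只猫。'
print(number_to_chinese(sentence))                       # digits rewritten as Chinese numerals
print(chinese_to_bopomofo(number_to_chinese(sentence)))  # segmented with jieba, converted to bopomofo
print(chinese_to_ipa(sentence))                          # full pipeline down to IPA with tone marks
```
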
diff --git a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/sanskrit.py b/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
deleted file mode 100644
index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000
--- a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import re
-from indic_transliteration import sanscript
-
-
-# List of (iast, ipa) pairs:
-_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('a', 'ə'),
- ('ā', 'aː'),
- ('ī', 'iː'),
- ('ū', 'uː'),
- ('ṛ', 'ɹ`'),
- ('ṝ', 'ɹ`ː'),
- ('ḷ', 'l`'),
- ('ḹ', 'l`ː'),
- ('e', 'eː'),
- ('o', 'oː'),
- ('k', 'k⁼'),
- ('k⁼h', 'kʰ'),
- ('g', 'g⁼'),
- ('g⁼h', 'gʰ'),
- ('ṅ', 'ŋ'),
- ('c', 'ʧ⁼'),
- ('ʧ⁼h', 'ʧʰ'),
- ('j', 'ʥ⁼'),
- ('ʥ⁼h', 'ʥʰ'),
- ('ñ', 'n^'),
- ('ṭ', 't`⁼'),
- ('t`⁼h', 't`ʰ'),
- ('ḍ', 'd`⁼'),
- ('d`⁼h', 'd`ʰ'),
- ('ṇ', 'n`'),
- ('t', 't⁼'),
- ('t⁼h', 'tʰ'),
- ('d', 'd⁼'),
- ('d⁼h', 'dʰ'),
- ('p', 'p⁼'),
- ('p⁼h', 'pʰ'),
- ('b', 'b⁼'),
- ('b⁼h', 'bʰ'),
- ('y', 'j'),
- ('ś', 'ʃ'),
- ('ṣ', 's`'),
- ('r', 'ɾ'),
- ('l̤', 'l`'),
- ('h', 'ɦ'),
- ("'", ''),
- ('~', '^'),
- ('ṃ', '^')
-]]
-
-
-def devanagari_to_ipa(text):
- text = text.replace('ॐ', 'ओम्')
- text = re.sub(r'\s*।\s*$', '.', text)
- text = re.sub(r'\s*।\s*', ', ', text)
- text = re.sub(r'\s*॥', '.', text)
- text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
- for regex, replacement in _iast_to_ipa:
- text = re.sub(regex, replacement, text)
- text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
- [:-1]+'h'+x.group(1)+'*', text)
- return text
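
Similarly, a brief usage sketch for the Sanskrit module above (assuming `indic_transliteration` is installed and the module is importable as `text.sanskrit`; the exact IPA output depends on that library's transliteration tables):

```python
# Usage sketch: Devanagari -> IAST -> IPA, following devanagari_to_ipa above.
from text.sanskrit import devanagari_to_ipa

print(devanagari_to_ipa('नमस्ते'))  # transliterated to IAST, then mapped to IPA by the rules above
```
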
diff --git a/spaces/AlvearVanessa/Edad_biologica_retina/app.py b/spaces/AlvearVanessa/Edad_biologica_retina/app.py
deleted file mode 100644
index 1ef7ed5ed6ef1ad2b39dd0ed3b675889a5d44f2f..0000000000000000000000000000000000000000
--- a/spaces/AlvearVanessa/Edad_biologica_retina/app.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from fastai.vision.all import *
-import gradio as gr
-
-
-# Load the learner
-learn = load_learner('export.pkl')
-
-# Define the labels of our model
-#labels = learn.dls.vocab
-
-
-# Define a function that performs the predictions
-def predict(img):
- img = PILImage.create(img)
- pred = learn.predict(img)[0]
- return pred
-
-
-
-# Create the interface and launch it.
-gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(128, 128)), outputs="text",examples=['anonymized_120036.jpg','anonymized_1210515.jpg']).launch(share=False)
\ No newline at end of file
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/README.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/README.md
deleted file mode 100644
index 7562040596e9028ed56431817f42f4379ecf3435..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/README.md
+++ /dev/null
@@ -1,171 +0,0 @@
-# 🧨 Diffusers Pipelines
-
-Pipelines provide a simple way to run state-of-the-art diffusion models in inference.
-Most diffusion systems consist of multiple independently-trained models and highly adaptable scheduler
-components - all of which are needed to have a functioning end-to-end diffusion system.
-
-As an example, [Stable Diffusion](https://huggingface.co/blog/stable_diffusion) has three independently trained models:
-- [Autoencoder](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/models/vae.py#L392)
-- [Conditional Unet](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/models/unet_2d_condition.py#L12)
-- [CLIP text encoder](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel)
-- a scheduler component, [scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_pndm.py),
-- a [CLIPImageProcessor](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor),
-- as well as a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py).
-All of these components are necessary to run stable diffusion in inference even though they were trained
-or created independently from each other.
-
-To that end, we strive to offer all open-sourced, state-of-the-art diffusion systems under a unified API.
-More specifically, we strive to provide pipelines that
-- 1. can load the officially published weights and yield 1-to-1 the same outputs as the original implementation according to the corresponding paper (*e.g.* [LDMTextToImagePipeline](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/latent_diffusion), uses the officially released weights of [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)),
-- 2. have a simple user interface to run the model in inference (see the [Pipelines API](#pipelines-api) section),
-- 3. are easy to understand with code that is self-explanatory and can be read along-side the official paper (see [Pipelines summary](#pipelines-summary)),
-- 4. can easily be contributed by the community (see the [Contribution](#contribution) section).
-
-**Note** that pipelines do not (and should not) offer any training functionality.
-If you are looking for *official* training examples, please have a look at [examples](https://github.com/huggingface/diffusers/tree/main/examples).
-
-
-## Pipelines Summary
-
-The following table summarizes all officially supported pipelines, their corresponding paper, and if
-available a colab notebook to directly try them out.
-
-| Pipeline | Source | Tasks | Colab |
-|-------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|:---:|:---:|
-| [dance diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/Harmonai-org/sample-generator) | *Unconditional Audio Generation* | |
-| [ddpm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | *Unconditional Image Generation* | |
-| [ddim](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | *Unconditional Image Generation* | [Open In Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) |
-| [latent_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | *Text-to-Image Generation* | |
-| [latent_diffusion_uncond](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | *Unconditional Image Generation* | |
-| [pndm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | *Unconditional Image Generation* | |
-| [score_sde_ve](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | *Unconditional Image Generation* | |
-| [score_sde_vp](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | *Unconditional Image Generation* | |
-| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Text-to-Image Generation* | [Open In Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb) |
-| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Image-to-Image Text-Guided Generation* | [Open In Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb) |
-| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Text-Guided Image Inpainting* | [Open In Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb) |
-| [stochastic_karras_ve](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | *Unconditional Image Generation* | |
-
-**Note**: Pipelines are simple examples of how to use the diffusion systems described in the corresponding papers.
-However, most of them can be adapted to use different scheduler components or even different model components (a minimal sketch of swapping the scheduler follows below). Some pipeline examples are shown in the [Examples](#examples) section below.
-
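-For instance, the scheduler of a loaded pipeline can usually be swapped without touching any other component. The snippet below is a minimal sketch, assuming a `diffusers` version in which schedulers expose `from_config` and the pipeline's scheduler is a plain attribute; `DDIMScheduler` is just one possible choice, not a recommendation from this document.
-
-```python
-from diffusers import DDIMScheduler, StableDiffusionPipeline
-
-pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-
-# replace the default scheduler with a DDIM scheduler built from the same config;
-# the UNet, VAE, text encoder, and tokenizer stay exactly as loaded
-pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-```
-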
-## Pipelines API
-
-Diffusion models often consist of multiple independently trained models and other pre-existing components.
-
-Each model is trained independently on a different task, and the scheduler can easily be swapped out and replaced with a different one.
-At inference time, however, we want to be able to easily load all components and use them together - even if one component, *e.g.* CLIP's text encoder, originates from a different library, such as [Transformers](https://github.com/huggingface/transformers). To that end, all pipelines provide the following functionality:
-
-- [`from_pretrained` method](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L139) that accepts a Hugging Face Hub repository id, *e.g.* [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), or a path to a local directory, *e.g.*
-"./stable-diffusion". To correctly retrieve which models and components should be loaded, one has to provide a `model_index.json` file, *e.g.* [runwayml/stable-diffusion-v1-5/model_index.json](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), which defines all components that should be
-loaded into the pipelines. More specifically, for each model/component one needs to define an entry of the form `<name>: ["<library>", "<class name>"]`. `<name>` is the attribute name given to the loaded instance of `<class name>`, which can be found in the library or pipeline folder called `"<library>"`.
-- [`save_pretrained`](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L90) that accepts a local path, *e.g.* `./stable-diffusion`, under which all models/components of the pipeline will be saved. For each component/model, a folder is created inside the local path that is named after the given attribute name, *e.g.* `./stable_diffusion/unet`.
-In addition, a `model_index.json` file is created at the root of the local path, *e.g.* `./stable_diffusion/model_index.json`, so that the complete pipeline can be instantiated again
-from the local path.
-- [`to`](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L118) which accepts a `string` or `torch.device` to move all models that are of type `torch.nn.Module` to the passed device. The behavior is fully analogous to [PyTorch's `to` method](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.to).
-- [`__call__`] method to use the pipeline in inference. `__call__` defines the inference logic of the pipeline and should ideally encompass all aspects of it, from pre-processing to forwarding tensors to the different models and schedulers, as well as post-processing. The API of the `__call__` method can vary strongly from pipeline to pipeline. *E.g.* a text-to-image pipeline, such as [`StableDiffusionPipeline`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), should accept, among other things, the text prompt used to generate the image. A pure image generation pipeline, such as [DDPMPipeline](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/ddpm), on the other hand, can be run without providing any inputs. To better understand which inputs can be adapted for
-each pipeline, one should look directly into the respective pipeline; a minimal usage sketch is shown right after this list.
-
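-The sketch below ties these four methods together for a text-to-image pipeline. It is a minimal illustration rather than an official recipe; the repository id, the `"cuda"` device, and the prompt are simply reused from the examples elsewhere in this document.
-
-```python
-from diffusers import StableDiffusionPipeline
-
-# `from_pretrained` downloads every component listed in model_index.json from the Hub
-pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-
-# `to` moves every torch.nn.Module component to the given device
-pipe = pipe.to("cuda")
-
-# `__call__` runs the full inference loop (with autograd disabled) and returns the images
-image = pipe("a photo of an astronaut riding a horse on mars").images[0]
-
-# `save_pretrained` writes all components plus model_index.json to a local directory
-pipe.save_pretrained("./stable-diffusion")
-```
-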
-**Note**: All pipelines have PyTorch's autograd disabled by decorating the `__call__` method with a [`torch.no_grad`](https://pytorch.org/docs/stable/generated/torch.no_grad.html) decorator, because pipelines should
-not be used for training. If you want to store the gradients during the forward pass, we recommend writing your own pipeline; see also our [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community).
-
-## Contribution
-
-We are more than happy about any contribution to the officially supported pipelines 🤗. We want
-all of our pipelines to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and **one-purpose-only**.
-
-- **Self-contained**: A pipeline shall be as self-contained as possible. More specifically, this means that all functionality should be either directly defined in the pipeline file itself, inherited from (and only from) the [`DiffusionPipeline` class](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L56), or directly attached to the model and scheduler components of the pipeline.
-- **Easy-to-use**: Pipelines should be extremely easy to use - one should be able to load the pipeline and
-use it for its designated task, *e.g.* text-to-image generation, in just a couple of lines of code. Most
-logic including pre-processing, an unrolled diffusion loop, and post-processing should all happen inside the `__call__` method.
-- **Easy-to-tweak**: Certain pipelines will not be able to handle all use cases and tasks that you might like them to. If you want to use a certain pipeline for a specific use case that is not yet supported, you might have to copy the pipeline file and tweak the code to your needs. We try to make the pipeline code as readable as possible so that each part (from pre-processing to diffusing to post-processing) can easily be adapted. If you would like the community to benefit from your customized pipeline, we would love to see a contribution to our [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community). If you feel that an important pipeline should be part of the official pipelines but isn't, a contribution to the [official pipelines](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines) would be even better.
-- **One-purpose-only**: Pipelines should be used for one task and one task only. Even if two tasks are very similar from a modeling point of view, *e.g.* image2image translation and in-painting, pipelines shall be used for one task only to keep them *easy-to-tweak* and *readable*.
-
-## Examples
-
-### Text-to-Image generation with Stable Diffusion
-
-```python
-# make sure you're logged in with `huggingface-cli login`
-from diffusers import StableDiffusionPipeline
-
-pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-pipe = pipe.to("cuda")
-
-prompt = "a photo of an astronaut riding a horse on mars"
-image = pipe(prompt).images[0]
-
-image.save("astronaut_rides_horse.png")
-```
-
-### Image-to-Image text-guided generation with Stable Diffusion
-
-The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.
-
-```python
-import requests
-import torch
-from PIL import Image
-from io import BytesIO
-
-from diffusers import StableDiffusionImg2ImgPipeline
-
-# load the pipeline
-device = "cuda"
-pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
- "runwayml/stable-diffusion-v1-5",
- torch_dtype=torch.float16,
-).to(device)
-
-# let's download an initial image
-url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-
-response = requests.get(url)
-init_image = Image.open(BytesIO(response.content)).convert("RGB")
-init_image = init_image.resize((768, 512))
-
-prompt = "A fantasy landscape, trending on artstation"
-
-images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
-
-images[0].save("fantasy_landscape.png")
-```
-You can also run this example on Colab: [Open In Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
-
-### Tweak prompts reusing seeds and latents
-
-You can generate your own latents to reproduce results, or tweak your prompt for a specific result you liked. [This notebook](https://github.com/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb) shows how to do it step by step, and the snippet below sketches the basic idea. You can also run the notebook in Google Colab: [Open In Colab](https://colab.research.google.com/github/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb).
-
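-A minimal sketch of the idea, assuming a CUDA device and a `diffusers` version in which pipelines accept a `generator` argument; the model id, seed, and prompts are placeholders rather than values from the notebook:
-
-```python
-import torch
-
-from diffusers import StableDiffusionPipeline
-
-pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
-
-# a fixed generator seed fixes the initial latents, so the result is reproducible
-generator = torch.Generator("cuda").manual_seed(1024)
-image = pipe("a photo of an astronaut riding a horse on mars", generator=generator).images[0]
-
-# re-seed with the same value and change only the prompt to tweak that same image
-generator = torch.Generator("cuda").manual_seed(1024)
-tweaked = pipe("a photo of an astronaut riding a horse on the moon", generator=generator).images[0]
-```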
-
-### In-painting using Stable Diffusion
-
-The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by providing a mask and text prompt.
-
-```python
-import PIL
-import requests
-import torch
-from io import BytesIO
-
-from diffusers import StableDiffusionInpaintPipeline
-
-def download_image(url):
- response = requests.get(url)
- return PIL.Image.open(BytesIO(response.content)).convert("RGB")
-
-img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
-mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
-
-init_image = download_image(img_url).resize((512, 512))
-mask_image = download_image(mask_url).resize((512, 512))
-
-pipe = StableDiffusionInpaintPipeline.from_pretrained(
- "runwayml/stable-diffusion-inpainting",
- torch_dtype=torch.float16,
-)
-pipe = pipe.to("cuda")
-
-prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
-image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
-```
-
-You can also run this example on Colab: [Open In Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
deleted file mode 100644
index 89e077d620f3539de86fb2e10c6f7e342ad4bf0c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://resnest101',
- backbone=dict(stem_channels=128, depth=101))
diff --git a/spaces/Anew5128/Anew51/Dockerfile b/spaces/Anew5128/Anew51/Dockerfile
deleted file mode 100644
index f45cdfda0fab5fe7680df646ea7caf47d45e4352..0000000000000000000000000000000000000000
--- a/spaces/Anew5128/Anew51/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM python:3.11
-
-WORKDIR /app
-
-COPY requirements-complete.txt .
-RUN pip install -r requirements-complete.txt
-
-RUN mkdir /.cache && chmod -R 777 /.cache
-RUN mkdir .chroma && chmod -R 777 .chroma
-
-COPY . .
-
-
-RUN chmod -R 777 /app
-
-RUN --mount=type=secret,id=password,mode=0444,required=true \
- cat /run/secrets/password > /test
-
-EXPOSE 7860
-
-CMD ["python", "server.py", "--cpu", "--enable-modules=caption,summarize,classify,silero-tts,edge-tts,chromadb"]
diff --git a/spaces/Ariharasudhan/YoloV5/utils/loggers/comet/comet_utils.py b/spaces/Ariharasudhan/YoloV5/utils/loggers/comet/comet_utils.py
deleted file mode 100644
index 3cbd45156b576d09024fd11ea9dce83d4a6e5143..0000000000000000000000000000000000000000
--- a/spaces/Ariharasudhan/YoloV5/utils/loggers/comet/comet_utils.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import logging
-import os
-from urllib.parse import urlparse
-
-try:
- import comet_ml
-except (ModuleNotFoundError, ImportError):
- comet_ml = None
-
-import yaml
-
-logger = logging.getLogger(__name__)
-
-COMET_PREFIX = "comet://"
-COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
-COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt")
-
-
-def download_model_checkpoint(opt, experiment):
- model_dir = f"{opt.project}/{experiment.name}"
- os.makedirs(model_dir, exist_ok=True)
-
- model_name = COMET_MODEL_NAME
- model_asset_list = experiment.get_model_asset_list(model_name)
-
- if len(model_asset_list) == 0:
- logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}")
- return
-
- model_asset_list = sorted(
- model_asset_list,
- key=lambda x: x["step"],
- reverse=True,
- )
- logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list}
-
- resource_url = urlparse(opt.weights)
- checkpoint_filename = resource_url.query
-
- if checkpoint_filename:
- asset_id = logged_checkpoint_map.get(checkpoint_filename)
- else:
- asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
- checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
-
- if asset_id is None:
- logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment")
- return
-
- try:
- logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}")
- asset_filename = checkpoint_filename
-
- model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
- model_download_path = f"{model_dir}/{asset_filename}"
- with open(model_download_path, "wb") as f:
- f.write(model_binary)
-
- opt.weights = model_download_path
-
- except Exception as e:
- logger.warning("COMET WARNING: Unable to download checkpoint from Comet")
- logger.exception(e)
-
-
-def set_opt_parameters(opt, experiment):
- """Update the opts Namespace with parameters
- from Comet's ExistingExperiment when resuming a run
-
- Args:
- opt (argparse.Namespace): Namespace of command line options
- experiment (comet_ml.APIExperiment): Comet API Experiment object
- """
- asset_list = experiment.get_asset_list()
- resume_string = opt.resume
-
- for asset in asset_list:
- if asset["fileName"] == "opt.yaml":
- asset_id = asset["assetId"]
- asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
- opt_dict = yaml.safe_load(asset_binary)
- for key, value in opt_dict.items():
- setattr(opt, key, value)
- opt.resume = resume_string
-
- # Save hyperparameters to YAML file
- # Necessary to pass checks in training script
- save_dir = f"{opt.project}/{experiment.name}"
- os.makedirs(save_dir, exist_ok=True)
-
- hyp_yaml_path = f"{save_dir}/hyp.yaml"
- with open(hyp_yaml_path, "w") as f:
- yaml.dump(opt.hyp, f)
- opt.hyp = hyp_yaml_path
-
-
-def check_comet_weights(opt):
- """Downloads model weights from Comet and updates the
- weights path to point to saved weights location
-
- Args:
- opt (argparse.Namespace): Command Line arguments passed
- to YOLOv5 training script
-
- Returns:
- None/bool: Return True if weights are successfully downloaded
- else return None
- """
- if comet_ml is None:
- return
-
- if isinstance(opt.weights, str):
- if opt.weights.startswith(COMET_PREFIX):
- api = comet_ml.API()
- resource = urlparse(opt.weights)
- experiment_path = f"{resource.netloc}{resource.path}"
- experiment = api.get(experiment_path)
- download_model_checkpoint(opt, experiment)
- return True
-
- return None
-
-
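-# Illustrative usage (an assumption for this sketch, not shown in this file): YOLOv5's
-# train.py can be pointed at a logged checkpoint with something like
-#   python train.py --weights "comet://<workspace>/<project>/<experiment_id>?best.pt"
-# where the part before "?" becomes the experiment path and the optional query names the
-# checkpoint file; without a query, COMET_DEFAULT_CHECKPOINT_FILENAME is used.
-
-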
-def check_comet_resume(opt):
- """Restores run parameters to its original state based on the model checkpoint
- and logged Experiment parameters.
-
- Args:
- opt (argparse.Namespace): Command Line arguments passed
- to YOLOv5 training script
-
- Returns:
- None/bool: Return True if the run is restored successfully
- else return None
- """
- if comet_ml is None:
- return
-
- if isinstance(opt.resume, str):
- if opt.resume.startswith(COMET_PREFIX):
- api = comet_ml.API()
- resource = urlparse(opt.resume)
- experiment_path = f"{resource.netloc}{resource.path}"
- experiment = api.get(experiment_path)
- set_opt_parameters(opt, experiment)
- download_model_checkpoint(opt, experiment)
-
- return True
-
- return None
diff --git a/spaces/Arnx/MusicGenXvAKN/tests/models/test_encodec_model.py b/spaces/Arnx/MusicGenXvAKN/tests/models/test_encodec_model.py
deleted file mode 100644
index 2f9c1db3f69a45f02451b71da95f44356811acbb..0000000000000000000000000000000000000000
--- a/spaces/Arnx/MusicGenXvAKN/tests/models/test_encodec_model.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import random
-
-import numpy as np
-import torch
-
-from audiocraft.models import EncodecModel
-from audiocraft.modules import SEANetEncoder, SEANetDecoder
-from audiocraft.quantization import DummyQuantizer
-
-
-class TestEncodecModel:
-
- def _create_encodec_model(self,
- sample_rate: int,
- channels: int,
- dim: int = 5,
- n_filters: int = 3,
- n_residual_layers: int = 1,
- ratios: list = [5, 4, 3, 2],
- **kwargs):
- frame_rate = np.prod(ratios)
- encoder = SEANetEncoder(channels=channels, dimension=dim, n_filters=n_filters,
- n_residual_layers=n_residual_layers, ratios=ratios)
- decoder = SEANetDecoder(channels=channels, dimension=dim, n_filters=n_filters,
- n_residual_layers=n_residual_layers, ratios=ratios)
- quantizer = DummyQuantizer()
- model = EncodecModel(encoder, decoder, quantizer, frame_rate=frame_rate,
- sample_rate=sample_rate, channels=channels, **kwargs)
- return model
-
- def test_model(self):
- random.seed(1234)
- sample_rate = 24_000
- channels = 1
- model = self._create_encodec_model(sample_rate, channels)
- for _ in range(10):
- length = random.randrange(1, 10_000)
- x = torch.randn(2, channels, length)
- res = model(x)
- assert res.x.shape == x.shape
-
- def test_model_renorm(self):
- random.seed(1234)
- sample_rate = 24_000
- channels = 1
- model_nonorm = self._create_encodec_model(sample_rate, channels, renormalize=False)
- model_renorm = self._create_encodec_model(sample_rate, channels, renormalize=True)
-
- for _ in range(10):
- length = random.randrange(1, 10_000)
- x = torch.randn(2, channels, length)
- codes, scales = model_nonorm.encode(x)
- codes, scales = model_renorm.encode(x)
- assert scales is not None
diff --git a/spaces/Artples/LLaMA-2-CHAT/app.py b/spaces/Artples/LLaMA-2-CHAT/app.py
deleted file mode 100644
index 6145813fef49036479ecca9846559b3f17cbca42..0000000000000000000000000000000000000000
--- a/spaces/Artples/LLaMA-2-CHAT/app.py
+++ /dev/null
@@ -1,467 +0,0 @@
-"""Run codes."""
-# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
-# ruff: noqa: E501
-import os
-import platform
-import random
-import time
-from dataclasses import asdict, dataclass
-from pathlib import Path
-
-# from types import SimpleNamespace
-import gradio as gr
-import psutil
-from about_time import about_time
-from ctransformers import AutoModelForCausalLM
-from dl_hf_model import dl_hf_model
-from loguru import logger
-
-filename_list = [
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
- "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
-]
-
-URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin" # 4.05G
-
-# url = "https://huggingface.co/savvamadar/ggml-gpt4all-j-v1.3-groovy/blob/main/ggml-gpt4all-j-v1.3-groovy.bin"
-url = "https://huggingface.co/TheBloke/Llama-2-13B-GGML/blob/main/llama-2-13b.ggmlv3.q4_K_S.bin" # 7.37G
-url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"
-url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin" # 6.93G
-url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.binhttps://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin" # 7.87G
-
-url = "https://huggingface.co/localmodels/Llama-2-13B-Chat-ggml/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin" # 7.37G
-
-_ = (
- "golay" in platform.node()
- or "okteto" in platform.node()
- or Path("/kaggle").exists()
- # or psutil.cpu_count(logical=False) < 4
- or 1 # run 7b in hf
-)
-
-if _:
- url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
- # url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin" # 2.87G
- # url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin" # 2.87G
-
-
-prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction: {user_prompt}
-
-### Response:
-"""
-
-prompt_template = """System: You are a helpful,
-respectful and honest assistant. Always answer as
-helpfully as possible, while being safe. Your answers
-should not include any harmful, unethical, racist,
-sexist, toxic, dangerous, or illegal content. Please
-ensure that your responses are socially unbiased and
-positive in nature. If a question does not make any
-sense, or is not factually coherent, explain why instead
-of answering something not correct. If you don't know
-the answer to a question, please don't share false
-information.
-User: {prompt}
-Assistant: """
-
-prompt_template = """System: You are a helpful assistant.
-User: {prompt}
-Assistant: """
-
-prompt_template = """Question: {question}
-Answer: Let's work this out in a step by step way to be sure we have the right answer."""
-
-prompt_template = """[INST] <<SYS>>
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible. Think step by step.
-<</SYS>>
-
-What NFL team won the Super Bowl in the year Justin Bieber was born?
-[/INST]"""
-
-prompt_template = """[INST] <<SYS>>
-You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <</SYS>>
-
-{question} [/INST]
-"""
-
-prompt_template = """[INST] <<SYS>>
-You are a helpful assistant.
-<</SYS>>
-
-{question} [/INST]
-"""
-
-_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
-stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
-
-logger.debug(f"{stop_string=}")
-
-_ = psutil.cpu_count(logical=False) - 1
-cpu_count: int = int(_) if _ else 1
-logger.debug(f"{cpu_count=}")
-
-LLM = None
-
-try:
- model_loc, file_size = dl_hf_model(url)
-except Exception as exc_:
- logger.error(exc_)
- raise SystemExit(1) from exc_
-
-LLM = AutoModelForCausalLM.from_pretrained(
- model_loc,
- model_type="llama",
- # threads=cpu_count,
-)
-
-logger.info(f"done load llm {model_loc=} {file_size=}G")
-
-os.environ["TZ"] = "Asia/Shanghai"
-try:
- time.tzset() # type: ignore # pylint: disable=no-member
-except Exception:
- # Windows
- logger.warning("Windows, cant run time.tzset()")
-
-_ = """
-ns = SimpleNamespace(
- response="",
- generator=(_ for _ in []),
-)
-# """
-
-@dataclass
-class GenerationConfig:
- temperature: float = 0.7
- top_k: int = 50
- top_p: float = 0.9
- repetition_penalty: float = 1.0
- max_new_tokens: int = 512
- seed: int = 42
- reset: bool = False
- stream: bool = True
- # threads: int = cpu_count
- # stop: list[str] = field(default_factory=lambda: [stop_string])
-
-
-def generate(
- question: str,
- llm=LLM,
- config: GenerationConfig = GenerationConfig(),
-):
- """Run model inference, will return a Generator if streaming is true."""
- # _ = prompt_template.format(question=question)
- # print(_)
-
- prompt = prompt_template.format(question=question)
-
- return llm(
- prompt,
- **asdict(config),
- )
-
-
-logger.debug(f"{asdict(GenerationConfig())=}")
-
-
-def user(user_message, history):
- # return user_message, history + [[user_message, None]]
- history.append([user_message, None])
- return user_message, history # keep user_message
-
-
-def user1(user_message, history):
- # return user_message, history + [[user_message, None]]
- history.append([user_message, None])
- return "", history # clear user_message
-
-
-def bot_(history):
- user_message = history[-1][0]
- resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
- bot_message = user_message + ": " + resp
- history[-1][1] = ""
- for character in bot_message:
- history[-1][1] += character
- time.sleep(0.02)
- yield history
-
- history[-1][1] = resp
- yield history
-
-
-def bot(history):
- user_message = history[-1][0]
- response = []
-
- logger.debug(f"{user_message=}")
-
- with about_time() as atime: # type: ignore
- flag = 1
- prefix = ""
- then = time.time()
-
- logger.debug("about to generate")
-
- config = GenerationConfig(reset=True)
- for elm in generate(user_message, config=config):
- if flag == 1:
- logger.debug("in the loop")
- prefix = f"({time.time() - then:.2f}s) "
- flag = 0
- print(prefix, end="", flush=True)
- logger.debug(f"{prefix=}")
- print(elm, end="", flush=True)
- # logger.debug(f"{elm}")
-
- response.append(elm)
- history[-1][1] = prefix + "".join(response)
- yield history
-
- _ = (
- f"(time elapsed: {atime.duration_human}, " # type: ignore
- f"{atime.duration/len(''.join(response)):.2f}s/char)" # type: ignore
- )
-
- history[-1][1] = "".join(response) + f"\n{_}"
- yield history
-
-
-def predict_api(prompt):
- logger.debug(f"{prompt=}")
- try:
- # user_prompt = prompt
- config = GenerationConfig(
- temperature=0.2,
- top_k=10,
- top_p=0.9,
- repetition_penalty=1.0,
- max_new_tokens=512, # adjust as needed
- seed=42,
- reset=True, # reset history (cache)
- stream=False,
- # threads=cpu_count,
- # stop=prompt_prefix[1:2],
- )
-
- response = generate(
- prompt,
- config=config,
- )
-
- logger.debug(f"api: {response=}")
- except Exception as exc:
- logger.error(exc)
- response = f"{exc=}"
- # bot = {"inputs": [response]}
- # bot = [(prompt, response)]
-
- return response
-
-
-css = """
- .importantButton {
- background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
- border: none !important;
- }
- .importantButton:hover {
- background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
- border: none !important;
- }
- .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
- .xsmall {font-size: x-small;}
-"""
-etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
-examples_list = [
- ["What NFL team won the Super Bowl in the year Justin Bieber was born?"],
- [
- "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
- ],
- ["How to pick a lock? Provide detailed steps."],
- ["If it takes 10 hours to dry 10 clothes, assuming all the clothes are hanged together at the same time for drying , then how long will it take to dry a cloth?"],
- ["is infinity + 1 bigger than infinity?"],
- ["Explain the plot of Cinderella in a sentence."],
- [
- "How long does it take to become proficient in French, and what are the best methods for retaining information?"
- ],
- ["What are some common mistakes to avoid when writing code?"],
- ["Build a prompt to generate a beautiful portrait of a horse"],
- ["Suggest four metaphors to describe the benefits of AI"],
- ["Write a pop song about leaving home for the sandy beaches."],
- ["Write a summary demonstrating my ability to tame lions"],
- ["鲁迅和周树人什么关系? 说中文。"],
- ["鲁迅和周树人什么关系?"],
- ["鲁迅和周树人什么关系? 用英文回答。"],
- ["从前有一头牛,这头牛后面有什么?"],
- ["正无穷大加一大于正无穷大吗?"],
- ["正无穷大加正无穷大大于正无穷大吗?"],
- ["-2的平方根等于什么?"],
- ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
- ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
- ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
- [f"{etext} 翻成中文,列出3个版本。"],
- [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本。"],
- ["假定 1 + 2 = 4, 试求 7 + 8。"],
- ["给出判断一个数是不是质数的 javascript 码。"],
- ["给出实现python 里 range(10)的 javascript 码。"],
- ["给出实现python 里 [*(range(10)]的 javascript 码。"],
- ["Erkläre die Handlung von Cinderella in einem Satz."],
- ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch."],
-]
-
-logger.info("start block")
-
-with gr.Blocks(
- title=f"{Path(model_loc).name}",
- theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
- css=css,
-) as block:
- # buff_var = gr.State("")
- with gr.Accordion("🎈 Info", open=False):
- # gr.HTML(
- #     """ and spin a CPU UPGRADE to avoid the queue"""
- # )
- gr.Markdown(
- f"""{Path(model_loc).name}
- Most examples are meant for another model.
- You probably should try to test
- some related prompts.""",
- elem_classes="xsmall",
- )
-
- # chatbot = gr.Chatbot().style(height=700) # 500
- chatbot = gr.Chatbot(height=500)
-
- # buff = gr.Textbox(show_label=False, visible=True)
-
- with gr.Row():
- with gr.Column(scale=5):
- msg = gr.Textbox(
- label="Chat Message Box",
- placeholder="Ask me anything (press Shift+Enter or click Submit to send)",
- show_label=False,
- # container=False,
- lines=6,
- max_lines=30,
- show_copy_button=True,
- # ).style(container=False)
- )
- with gr.Column(scale=1, min_width=50):
- with gr.Row():
- submit = gr.Button("Submit", elem_classes="xsmall")
- stop = gr.Button("Stop", visible=True)
- clear = gr.Button("Clear History", visible=True)
- with gr.Row(visible=False):
- with gr.Accordion("Advanced Options:", open=False):
- with gr.Row():
- with gr.Column(scale=2):
- system = gr.Textbox(
- label="System Prompt",
- value=prompt_template,
- show_label=False,
- container=False,
- # ).style(container=False)
- )
- with gr.Column():
- with gr.Row():
- change = gr.Button("Change System Prompt")
- reset = gr.Button("Reset System Prompt")
-
- with gr.Accordion("Example Inputs", open=True):
- examples = gr.Examples(
- examples=examples_list,
- inputs=[msg],
- examples_per_page=40,
- )
-
- # with gr.Row():
- with gr.Accordion("Disclaimer", open=False):
- _ = Path(model_loc).name
- gr.Markdown(
- f"Disclaimer: Lauche - AI (POWERED BY LLAMA 2) can produce factually incorrect output, and should not be relied on to produce "
- "factually accurate information. Lauche - AI (POWERED BY LLAMA 2) was trained on various public datasets; while great efforts "
- "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
- "biased, or otherwise offensive outputs."
- " - - - "
- "Our Impressum: https://lauche.eu/n-impressum"
- " - - - "
- "Visit this space on our website: ai-app.lauche.online",
- elem_classes=["disclaimer"],
- )
-
- msg_submit_event = msg.submit(
- # fn=conversation.user_turn,
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=True,
- show_progress="full",
- # api_name=None,
- ).then(bot, chatbot, chatbot, queue=True)
- submit_click_event = submit.click(
- # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg
- fn=user1, # clear msg
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=True,
- # queue=False,
- show_progress="full",
- # api_name=None,
- ).then(bot, chatbot, chatbot, queue=True)
- stop.click(
- fn=None,
- inputs=None,
- outputs=None,
- cancels=[msg_submit_event, submit_click_event],
- queue=False,
- )
- clear.click(lambda: None, None, chatbot, queue=False)
-
- with gr.Accordion("For Chat/Translation API", open=False, visible=False):
- input_text = gr.Text()
- api_btn = gr.Button("Go", variant="primary")
- out_text = gr.Text()
-
- api_btn.click(
- predict_api,
- input_text,
- out_text,
- api_name="api",
- )
-
- # block.load(update_buff, [], buff, every=1)
- # block.load(update_buff, [buff_var], [buff_var, buff], every=1)
-
-# concurrency_count=5, max_size=20
-# max_size=36, concurrency_count=14
-# CPU cpu_count=2 16G, model 7G
-# CPU UPGRADE cpu_count=8 32G, model 7G
-
-# does not work
-_ = """
-# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
-# concurrency_count = max(_, 1)
-if psutil.cpu_count(logical=False) >= 8:
- # concurrency_count = max(int(32 / file_size) - 1, 1)
-else:
- # concurrency_count = max(int(16 / file_size) - 1, 1)
-# """
-
-concurrency_count = 1
-logger.info(f"{concurrency_count=}")
-
-block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py
deleted file mode 100644
index 4114d1688c37f7dcc46d95528bdf34af4f75ceb3..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py
+++ /dev/null
@@ -1,146 +0,0 @@
-"""
- pygments.formatters.rtf
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- A formatter that generates RTF files.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.formatter import Formatter
-from pip._vendor.pygments.util import get_int_opt, surrogatepair
-
-
-__all__ = ['RtfFormatter']
-
-
-class RtfFormatter(Formatter):
- """
- Format tokens as RTF markup. This formatter automatically outputs full RTF
- documents with color information and other useful stuff. Perfect for Copy and
- Paste into Microsoft(R) Word(R) documents.
-
- Please note that ``encoding`` and ``outencoding`` options are ignored.
- The RTF format is ASCII natively, but handles unicode characters correctly
- thanks to escape sequences.
-
- .. versionadded:: 0.6
-
- Additional options accepted:
-
- `style`
- The style to use, can be a string or a Style subclass (default:
- ``'default'``).
-
- `fontface`
- The used font family, for example ``Bitstream Vera Sans``. Defaults to
- some generic font which is supposed to have fixed width.
-
- `fontsize`
- Size of the font used. Size is specified in half points. The
- default is 24 half-points, giving a size 12 font.
-
- .. versionadded:: 2.0
- """
- name = 'RTF'
- aliases = ['rtf']
- filenames = ['*.rtf']
-
- def __init__(self, **options):
- r"""
- Additional options accepted:
-
- ``fontface``
- Name of the font used. Could for example be ``'Courier New'``
- to further specify the default which is ``'\fmodern'``. The RTF
- specification claims that ``\fmodern`` are "Fixed-pitch serif
- and sans serif fonts". Hope every RTF implementation thinks
- the same about modern...
-
- """
- Formatter.__init__(self, **options)
- self.fontface = options.get('fontface') or ''
- self.fontsize = get_int_opt(options, 'fontsize', 0)
-
- def _escape(self, text):
- return text.replace('\\', '\\\\') \
- .replace('{', '\\{') \
- .replace('}', '\\}')
-
- def _escape_text(self, text):
- # empty strings, should give a small performance improvement
- if not text:
- return ''
-
- # escape text
- text = self._escape(text)
-
- buf = []
- for c in text:
- cn = ord(c)
- if cn < (2**7):
- # ASCII character
- buf.append(str(c))
- elif (2**7) <= cn < (2**16):
- # single unicode escape sequence
- buf.append('{\\u%d}' % cn)
- elif (2**16) <= cn:
- # RTF limits unicode to 16 bits.
- # Force surrogate pairs
- buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
-
- return ''.join(buf).replace('\n', '\\par\n')
-
- def format_unencoded(self, tokensource, outfile):
- # rtf 1.8 header
- outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
- '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
- '{\\colortbl;' % (self.fontface and
- ' ' + self._escape(self.fontface) or
- ''))
-
- # convert colors and save them in a mapping to access them later.
- color_mapping = {}
- offset = 1
- for _, style in self.style:
- for color in style['color'], style['bgcolor'], style['border']:
- if color and color not in color_mapping:
- color_mapping[color] = offset
- outfile.write('\\red%d\\green%d\\blue%d;' % (
- int(color[0:2], 16),
- int(color[2:4], 16),
- int(color[4:6], 16)
- ))
- offset += 1
- outfile.write('}\\f0 ')
- if self.fontsize:
- outfile.write('\\fs%d' % self.fontsize)
-
- # highlight stream
- for ttype, value in tokensource:
- while not self.style.styles_token(ttype) and ttype.parent:
- ttype = ttype.parent
- style = self.style.style_for_token(ttype)
- buf = []
- if style['bgcolor']:
- buf.append('\\cb%d' % color_mapping[style['bgcolor']])
- if style['color']:
- buf.append('\\cf%d' % color_mapping[style['color']])
- if style['bold']:
- buf.append('\\b')
- if style['italic']:
- buf.append('\\i')
- if style['underline']:
- buf.append('\\ul')
- if style['border']:
- buf.append('\\chbrdr\\chcfpat%d' %
- color_mapping[style['border']])
- start = ''.join(buf)
- if start:
- outfile.write('{%s ' % start)
- outfile.write(self._escape_text(value))
- if start:
- outfile.write('}')
-
- outfile.write('}')
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/util.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/util.py
deleted file mode 100644
index 4763202b67cf3b7dc849fcca401be5df6adbf083..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/util.py
+++ /dev/null
@@ -1,513 +0,0 @@
-"""distutils.util
-
-Miscellaneous utility functions -- anything that doesn't fit into
-one of the other *util.py modules.
-"""
-
-import importlib.util
-import os
-import re
-import string
-import subprocess
-import sys
-import sysconfig
-import functools
-
-from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
-from distutils.dep_util import newer
-from distutils.spawn import spawn
-from distutils import log
-
-
-def get_host_platform():
- """
- Return a string that identifies the current platform. Use this
- function to distinguish platform-specific build directories and
- platform-specific built distributions.
- """
-
- # This function initially exposed platforms as defined in Python 3.9
- # even with older Python versions when distutils was split out.
- # Now it delegates to stdlib sysconfig, but maintains compatibility.
-
- if sys.version_info < (3, 8):
- if os.name == 'nt':
- if '(arm)' in sys.version.lower():
- return 'win-arm32'
- if '(arm64)' in sys.version.lower():
- return 'win-arm64'
-
- if sys.version_info < (3, 9):
- if os.name == "posix" and hasattr(os, 'uname'):
- osname, host, release, version, machine = os.uname()
- if osname[:3] == "aix":
- from .py38compat import aix_platform
-
- return aix_platform(osname, version, release)
-
- return sysconfig.get_platform()
-
-
-def get_platform():
- if os.name == 'nt':
- TARGET_TO_PLAT = {
- 'x86': 'win32',
- 'x64': 'win-amd64',
- 'arm': 'win-arm32',
- 'arm64': 'win-arm64',
- }
- target = os.environ.get('VSCMD_ARG_TGT_ARCH')
- return TARGET_TO_PLAT.get(target) or get_host_platform()
- return get_host_platform()
-
-
-if sys.platform == 'darwin':
- _syscfg_macosx_ver = None # cache the version pulled from sysconfig
-MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET'
-
-
-def _clear_cached_macosx_ver():
- """For testing only. Do not call."""
- global _syscfg_macosx_ver
- _syscfg_macosx_ver = None
-
-
-def get_macosx_target_ver_from_syscfg():
- """Get the version of macOS latched in the Python interpreter configuration.
- Returns the version as a string or None if can't obtain one. Cached."""
- global _syscfg_macosx_ver
- if _syscfg_macosx_ver is None:
- from distutils import sysconfig
-
- ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or ''
- if ver:
- _syscfg_macosx_ver = ver
- return _syscfg_macosx_ver
-
-
-def get_macosx_target_ver():
- """Return the version of macOS for which we are building.
-
- The target version defaults to the version in sysconfig latched at time
- the Python interpreter was built, unless overridden by an environment
- variable. If neither source has a value, then None is returned"""
-
- syscfg_ver = get_macosx_target_ver_from_syscfg()
- env_ver = os.environ.get(MACOSX_VERSION_VAR)
-
- if env_ver:
- # Validate overridden version against sysconfig version, if have both.
- # Ensure that the deployment target of the build process is not less
- # than 10.3 if the interpreter was built for 10.3 or later. This
- # ensures extension modules are built with correct compatibility
- # values, specifically LDSHARED which can use
- # '-undefined dynamic_lookup' which only works on >= 10.3.
- if (
- syscfg_ver
- and split_version(syscfg_ver) >= [10, 3]
- and split_version(env_ver) < [10, 3]
- ):
- my_msg = (
- '$' + MACOSX_VERSION_VAR + ' mismatch: '
- 'now "%s" but "%s" during configure; '
- 'must use 10.3 or later' % (env_ver, syscfg_ver)
- )
- raise DistutilsPlatformError(my_msg)
- return env_ver
- return syscfg_ver
-
-
-def split_version(s):
- """Convert a dot-separated string into a list of numbers for comparisons"""
- return [int(n) for n in s.split('.')]
-
-
-def convert_path(pathname):
- """Return 'pathname' as a name that will work on the native filesystem,
- i.e. split it on '/' and put it back together again using the current
- directory separator. Needed because filenames in the setup script are
- always supplied in Unix style, and have to be converted to the local
- convention before we can actually use them in the filesystem. Raises
- ValueError on non-Unix-ish systems if 'pathname' either starts or
- ends with a slash.
- """
- if os.sep == '/':
- return pathname
- if not pathname:
- return pathname
- if pathname[0] == '/':
- raise ValueError("path '%s' cannot be absolute" % pathname)
- if pathname[-1] == '/':
- raise ValueError("path '%s' cannot end with '/'" % pathname)
-
- paths = pathname.split('/')
- while '.' in paths:
- paths.remove('.')
- if not paths:
- return os.curdir
- return os.path.join(*paths)
-
-
-# convert_path ()
-
-
-def change_root(new_root, pathname):
- """Return 'pathname' with 'new_root' prepended. If 'pathname' is
- relative, this is equivalent to "os.path.join(new_root,pathname)".
- Otherwise, it requires making 'pathname' relative and then joining the
- two, which is tricky on DOS/Windows and Mac OS.
- """
- if os.name == 'posix':
- if not os.path.isabs(pathname):
- return os.path.join(new_root, pathname)
- else:
- return os.path.join(new_root, pathname[1:])
-
- elif os.name == 'nt':
- (drive, path) = os.path.splitdrive(pathname)
- if path[0] == '\\':
- path = path[1:]
- return os.path.join(new_root, path)
-
- raise DistutilsPlatformError(f"nothing known about platform '{os.name}'")
-
-
-@functools.lru_cache()
-def check_environ():
- """Ensure that 'os.environ' has all the environment variables we
- guarantee that users can use in config files, command-line options,
- etc. Currently this includes:
- HOME - user's home directory (Unix only)
- PLAT - description of the current platform, including hardware
- and OS (see 'get_platform()')
- """
- if os.name == 'posix' and 'HOME' not in os.environ:
- try:
- import pwd
-
- os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
- except (ImportError, KeyError):
- # bpo-10496: if the current user identifier doesn't exist in the
- # password database, do nothing
- pass
-
- if 'PLAT' not in os.environ:
- os.environ['PLAT'] = get_platform()
-
-
-def subst_vars(s, local_vars):
- """
- Perform variable substitution on 'string'.
- Variables are indicated by format-style braces ("{var}").
- Variable is substituted by the value found in the 'local_vars'
- dictionary or in 'os.environ' if it's not in 'local_vars'.
- 'os.environ' is first checked/augmented to guarantee that it contains
- certain values: see 'check_environ()'. Raise ValueError for any
- variables not found in either 'local_vars' or 'os.environ'.
- """
- check_environ()
- lookup = dict(os.environ)
- lookup.update((name, str(value)) for name, value in local_vars.items())
- try:
- return _subst_compat(s).format_map(lookup)
- except KeyError as var:
- raise ValueError(f"invalid variable {var}")
-
-
-def _subst_compat(s):
- """
- Replace shell/Perl-style variable substitution with
- format-style. For compatibility.
- """
-
- def _subst(match):
- return f'{{{match.group(1)}}}'
-
- repl = re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
- if repl != s:
- import warnings
-
- warnings.warn(
- "shell/Perl-style substitions are deprecated",
- DeprecationWarning,
- )
- return repl
-
-
-def grok_environment_error(exc, prefix="error: "):
- # Function kept for backward compatibility.
- # Used to try clever things with EnvironmentErrors,
- # but nowadays str(exception) produces good messages.
- return prefix + str(exc)
-
-
-# Needed by 'split_quoted()'
-_wordchars_re = _squote_re = _dquote_re = None
-
-
-def _init_regex():
- global _wordchars_re, _squote_re, _dquote_re
- _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
- _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
- _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-
-
-def split_quoted(s):
- """Split a string up according to Unix shell-like rules for quotes and
- backslashes. In short: words are delimited by spaces, as long as those
- spaces are not escaped by a backslash, or inside a quoted string.
- Single and double quotes are equivalent, and the quote characters can
- be backslash-escaped. The backslash is stripped from any two-character
- escape sequence, leaving only the escaped character. The quote
- characters are stripped from any quoted string. Returns a list of
- words.
- """
-
- # This is a nice algorithm for splitting up a single string, since it
- # doesn't require character-by-character examination. It was a little
- # bit of a brain-bender to get it working right, though...
- if _wordchars_re is None:
- _init_regex()
-
- s = s.strip()
- words = []
- pos = 0
-
- while s:
- m = _wordchars_re.match(s, pos)
- end = m.end()
- if end == len(s):
- words.append(s[:end])
- break
-
- if s[end] in string.whitespace:
- # unescaped, unquoted whitespace: now
- # we definitely have a word delimiter
- words.append(s[:end])
- s = s[end:].lstrip()
- pos = 0
-
- elif s[end] == '\\':
- # preserve whatever is being escaped;
- # will become part of the current word
- s = s[:end] + s[end + 1 :]
- pos = end + 1
-
- else:
- if s[end] == "'": # slurp singly-quoted string
- m = _squote_re.match(s, end)
- elif s[end] == '"': # slurp doubly-quoted string
- m = _dquote_re.match(s, end)
- else:
- raise RuntimeError("this can't happen (bad char '%c')" % s[end])
-
- if m is None:
- raise ValueError("bad string (mismatched %s quotes?)" % s[end])
-
- (beg, end) = m.span()
- s = s[:beg] + s[beg + 1 : end - 1] + s[end:]
- pos = m.end() - 2
-
- if pos >= len(s):
- words.append(s)
- break
-
- return words
-
-
-# split_quoted ()
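-# Illustrative behaviour (informal example, not part of the original module):
-#   split_quoted(r'this is "a test" of\ escaping')
-#   -> ['this', 'is', 'a test', 'of escaping']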
-
-
-def execute(func, args, msg=None, verbose=0, dry_run=0):
- """Perform some action that affects the outside world (eg. by
- writing to the filesystem). Such actions are special because they
- are disabled by the 'dry_run' flag. This method takes care of all
- that bureaucracy for you; all you have to do is supply the
- function to call and an argument tuple for it (to embody the
- "external action" being performed), and an optional message to
- print.
- """
- if msg is None:
- msg = "{}{!r}".format(func.__name__, args)
- if msg[-2:] == ',)': # correct for singleton tuple
- msg = msg[0:-2] + ')'
-
- log.info(msg)
- if not dry_run:
- func(*args)
-
-
-def strtobool(val):
- """Convert a string representation of truth to true (1) or false (0).
-
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
- 'val' is anything else.
- """
- val = val.lower()
- if val in ('y', 'yes', 't', 'true', 'on', '1'):
- return 1
- elif val in ('n', 'no', 'f', 'false', 'off', '0'):
- return 0
- else:
- raise ValueError("invalid truth value {!r}".format(val))
-
-
-def byte_compile( # noqa: C901
- py_files,
- optimize=0,
- force=0,
- prefix=None,
- base_dir=None,
- verbose=1,
- dry_run=0,
- direct=None,
-):
- """Byte-compile a collection of Python source files to .pyc
- files in a __pycache__ subdirectory. 'py_files' is a list
- of files to compile; any files that don't end in ".py" are silently
- skipped. 'optimize' must be one of the following:
- 0 - don't optimize
- 1 - normal optimization (like "python -O")
- 2 - extra optimization (like "python -OO")
- If 'force' is true, all files are recompiled regardless of
- timestamps.
-
- The source filename encoded in each bytecode file defaults to the
- filenames listed in 'py_files'; you can modify these with 'prefix' and
- 'basedir'. 'prefix' is a string that will be stripped off of each
- source filename, and 'base_dir' is a directory name that will be
- prepended (after 'prefix' is stripped). You can supply either or both
- (or neither) of 'prefix' and 'base_dir', as you wish.
-
- If 'dry_run' is true, doesn't actually do anything that would
- affect the filesystem.
-
- Byte-compilation is either done directly in this interpreter process
- with the standard py_compile module, or indirectly by writing a
- temporary script and executing it. Normally, you should let
- 'byte_compile()' figure out to use direct compilation or not (see
- the source for details). The 'direct' flag is used by the script
- generated in indirect mode; unless you know what you're doing, leave
- it set to None.
- """
-
- # nothing is done if sys.dont_write_bytecode is True
- if sys.dont_write_bytecode:
- raise DistutilsByteCompileError('byte-compiling is disabled.')
-
- # First, if the caller didn't force us into direct or indirect mode,
- # figure out which mode we should be in. We take a conservative
- # approach: choose direct mode *only* if the current interpreter is
- # in debug mode and optimize is 0. If we're not in debug mode (-O
- # or -OO), we don't know which level of optimization this
- # interpreter is running with, so we can't do direct
- # byte-compilation and be certain that it's the right thing. Thus,
- # always compile indirectly if the current interpreter is in either
- # optimize mode, or if either optimization level was requested by
- # the caller.
- if direct is None:
- direct = __debug__ and optimize == 0
-
- # "Indirect" byte-compilation: write a temporary script and then
- # run it with the appropriate flags.
- if not direct:
- try:
- from tempfile import mkstemp
-
- (script_fd, script_name) = mkstemp(".py")
- except ImportError:
- from tempfile import mktemp
-
- (script_fd, script_name) = None, mktemp(".py")
- log.info("writing byte-compilation script '%s'", script_name)
- if not dry_run:
- if script_fd is not None:
- script = os.fdopen(script_fd, "w")
- else:
- script = open(script_name, "w")
-
- with script:
- script.write(
- """\
-from distutils.util import byte_compile
-files = [
-"""
- )
-
- # XXX would be nice to write absolute filenames, just for
- # safety's sake (script should be more robust in the face of
- # chdir'ing before running it). But this requires abspath'ing
- # 'prefix' as well, and that breaks the hack in build_lib's
- # 'byte_compile()' method that carefully tacks on a trailing
- # slash (os.sep really) to make sure the prefix here is "just
- # right". This whole prefix business is rather delicate -- the
- # problem is that it's really a directory, but I'm treating it
- # as a dumb string, so trailing slashes and so forth matter.
-
- script.write(",\n".join(map(repr, py_files)) + "]\n")
- script.write(
- """
-byte_compile(files, optimize=%r, force=%r,
- prefix=%r, base_dir=%r,
- verbose=%r, dry_run=0,
- direct=1)
-"""
- % (optimize, force, prefix, base_dir, verbose)
- )
-
- cmd = [sys.executable]
- cmd.extend(subprocess._optim_args_from_interpreter_flags())
- cmd.append(script_name)
- spawn(cmd, dry_run=dry_run)
- execute(os.remove, (script_name,), "removing %s" % script_name, dry_run=dry_run)
-
- # "Direct" byte-compilation: use the py_compile module to compile
- # right here, right now. Note that the script generated in indirect
- # mode simply calls 'byte_compile()' in direct mode, a weird sort of
- # cross-process recursion. Hey, it works!
- else:
- from py_compile import compile
-
- for file in py_files:
- if file[-3:] != ".py":
- # This lets us be lazy and not filter filenames in
- # the "install_lib" command.
- continue
-
- # Terminology from the py_compile module:
- # cfile - byte-compiled file
- # dfile - purported source filename (same as 'file' by default)
- if optimize >= 0:
- opt = '' if optimize == 0 else optimize
- cfile = importlib.util.cache_from_source(file, optimization=opt)
- else:
- cfile = importlib.util.cache_from_source(file)
- dfile = file
- if prefix:
- if file[: len(prefix)] != prefix:
- raise ValueError(
- "invalid prefix: filename %r doesn't start with %r"
- % (file, prefix)
- )
- dfile = dfile[len(prefix) :]
- if base_dir:
- dfile = os.path.join(base_dir, dfile)
-
- cfile_base = os.path.basename(cfile)
- if direct:
- if force or newer(file, cfile):
- log.info("byte-compiling %s to %s", file, cfile_base)
- if not dry_run:
- compile(file, cfile, dfile)
- else:
- log.debug("skipping byte-compilation of %s to %s", file, cfile_base)
-
-
-def rfc822_escape(header):
- """Return a version of the string escaped for inclusion in an
- RFC-822 header, by ensuring there are 8 spaces after each newline.
- """
- lines = header.split('\n')
- sep = '\n' + 8 * ' '
- return sep.join(lines)
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/__init__.py
deleted file mode 100644
index 6204208198d813728cf6419e8eef4a733f20c18f..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-"""
-Model Zoo API for Detectron2: a collection of functions to create common model architectures
-listed in ``MODEL_ZOO.md``,
-and optionally load their pre-trained weights.
-"""
-
-from .model_zoo import get, get_config_file, get_checkpoint_url, get_config
-
-__all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"]
diff --git a/spaces/BHO/URDtest/README.md b/spaces/BHO/URDtest/README.md
deleted file mode 100644
index b03584d6845253ae2e8b8717d532b731c2185871..0000000000000000000000000000000000000000
--- a/spaces/BHO/URDtest/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: URDtest
-emoji: 😻
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Bambicita/rvc-models/infer_pack/models.py b/spaces/Bambicita/rvc-models/infer_pack/models.py
deleted file mode 100644
index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000
--- a/spaces/Bambicita/rvc-models/infer_pack/models.py
+++ /dev/null
@@ -1,982 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from infer_pack import modules
-from infer_pack import attentions
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-import numpy as np
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder256Sim(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- x = self.proj(x) * x_mask
- return x, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 here means the n_har products cannot be optimized away in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  (applying % 1 here would keep the later cumsum from being optimized)
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
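# A minimal shape sketch for the SineGen class above, assuming torch and the
# surrounding module are available; the F0 contour and upsampling factor are illustrative.
import torch

gen = SineGen(samp_rate=16000, harmonic_num=2)   # fundamental + 2 overtones -> dim = 3
f0 = torch.full((1, 50), 220.0)                  # 50 frames of a flat 220 Hz contour
sine, uv, noise = gen(f0, upp=160)               # each frame upsampled by 160 samples
print(sine.shape, uv.shape)                      # torch.Size([1, 8000, 3]) torch.Size([1, 8000, 1])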
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length, 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
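# A matching sketch for SourceModuleHnNSF above, which runs SineGen and merges the
# harmonics through a linear layer; values are illustrative and is_half=False keeps fp32.
import torch

m_source = SourceModuleHnNSF(sampling_rate=16000, harmonic_num=0, is_half=False)
f0 = torch.full((1, 50), 220.0)                  # frame-level F0 contour
sine_merge, _, _ = m_source(f0, upp=160)         # merged sine excitation, shape (1, 8000, 1)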
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
- ):  # ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t and is broadcast
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t and is broadcast
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_sim(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- # hop_length,
- gin_channels=0,
- use_sdp=True,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256Sim(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- is_half=kwargs["is_half"],
- )
-
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y_lengths, ds
- ):  # y (the spectrogram) is no longer needed here
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t and is broadcast
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
- x = self.flow(x, x_mask, g=g, reverse=True)
- z_slice, ids_slice = commons.rand_slice_segments(
- x, y_lengths, self.segment_size
- )
-
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice
-
- def infer(
- self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
- ):  # y (the spectrogram) is no longer needed here
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t and is broadcast
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
- x = self.flow(x, x_mask, g=g, reverse=True)
- o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
- return o, o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
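A small sketch of the 1d-to-2d step in DiscriminatorP.forward above (assuming the infer_pack package is importable): with period 3 and t = 100, the input is reflection-padded by 2 samples to length 102 and viewed as (b, c, 34, 3) before the stacked 2-D convolutions.

import torch

disc = DiscriminatorP(period=3)
score, fmap = disc(torch.randn(1, 1, 100))   # padded to 102, viewed as (1, 1, 34, 3)
print(score.shape, len(fmap))                # flattened logits and one feature map per conv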
diff --git a/spaces/BartPoint/VoiceChange_Beta/infer_pack/onnx_inference.py b/spaces/BartPoint/VoiceChange_Beta/infer_pack/onnx_inference.py
deleted file mode 100644
index 322572820dfc75d789e40ce5bbd9415066a03979..0000000000000000000000000000000000000000
--- a/spaces/BartPoint/VoiceChange_Beta/infer_pack/onnx_inference.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import onnxruntime
-import librosa
-import numpy as np
-import soundfile
-
-
-class ContentVec:
- def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
- print("load model(s) from {}".format(vec_path))
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- else:
- raise RuntimeError("Unsportted Device")
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
- def __call__(self, wav):
- return self.forward(wav)
-
- def forward(self, wav):
- feats = wav
- if feats.ndim == 2: # double channels
- feats = feats.mean(-1)
- assert feats.ndim == 1, feats.ndim
- feats = np.expand_dims(np.expand_dims(feats, 0), 0)
- onnx_input = {self.model.get_inputs()[0].name: feats}
- logits = self.model.run(None, onnx_input)[0]
- return logits.transpose(0, 2, 1)
-
-
-def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
- if f0_predictor == "pm":
- from infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
- f0_predictor_object = PMF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "harvest":
- from infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor
-
- f0_predictor_object = HarvestF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "dio":
- from infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
- f0_predictor_object = DioF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- else:
- raise Exception("Unknown f0 predictor")
- return f0_predictor_object
-
-
-class OnnxRVC:
- def __init__(
- self,
- model_path,
- sr=40000,
- hop_size=512,
- vec_path="vec-768-layer-12",
- device="cpu",
- ):
- vec_path = f"pretrained/{vec_path}.onnx"
- self.vec_model = ContentVec(vec_path, device)
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- else:
- raise RuntimeError("Unsportted Device")
- self.model = onnxruntime.InferenceSession(model_path, providers=providers)
- self.sampling_rate = sr
- self.hop_size = hop_size
-
- def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
- onnx_input = {
- self.model.get_inputs()[0].name: hubert,
- self.model.get_inputs()[1].name: hubert_length,
- self.model.get_inputs()[2].name: pitch,
- self.model.get_inputs()[3].name: pitchf,
- self.model.get_inputs()[4].name: ds,
- self.model.get_inputs()[5].name: rnd,
- }
- return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
- def inference(
- self,
- raw_path,
- sid,
- f0_method="dio",
- f0_up_key=0,
- pad_time=0.5,
- cr_threshold=0.02,
- ):
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- f0_predictor = get_f0_predictor(
- f0_method,
- hop_length=self.hop_size,
- sampling_rate=self.sampling_rate,
- threshold=cr_threshold,
- )
- wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
- org_length = len(wav)
- if org_length / sr > 50.0:
- raise RuntimeError("Reached Max Length")
-
- wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
- wav16k = wav16k
-
- hubert = self.vec_model(wav16k)
- hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
- hubert_length = hubert.shape[1]
-
- pitchf = f0_predictor.compute_f0(wav, hubert_length)
- pitchf = pitchf * 2 ** (f0_up_key / 12)
- pitch = pitchf.copy()
- f0_mel = 1127 * np.log(1 + pitch / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- pitch = np.rint(f0_mel).astype(np.int64)
-
- pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
- pitch = pitch.reshape(1, len(pitch))
- ds = np.array([sid]).astype(np.int64)
-
- rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
- hubert_length = np.array([hubert_length]).astype(np.int64)
-
- out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
- out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
- return out_wav[0:org_length]
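A rough end-to-end usage sketch of the OnnxRVC class above; the ONNX model, the ContentVec weights under pretrained/, and the audio paths are illustrative and must exist locally.

import soundfile

rvc = OnnxRVC("rvc_40k.onnx", sr=40000, hop_size=512, vec_path="vec-768-layer-12", device="cpu")
out_wav = rvc.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
soundfile.write("output.wav", out_wav, 40000)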
diff --git a/spaces/Basil2k4/VPSnguyenmanh/Dockerfile b/spaces/Basil2k4/VPSnguyenmanh/Dockerfile
deleted file mode 100644
index 5444aa9a5f565ffdab13c03e3e6a30bbabf6c5c3..0000000000000000000000000000000000000000
--- a/spaces/Basil2k4/VPSnguyenmanh/Dockerfile
+++ /dev/null
@@ -1,105 +0,0 @@
-# syntax=docker/dockerfile:experimental
-
-# ./hooks/build latest
-# ./hooks/test latest
-
-### Example: Build and test 'dev' tag locally like
-### ./hooks/build dev
-### ./hooks/test dev
-### or with additional arguments
-### ./hooks/build dev --no-cache
-### ./hooks/test dev
-### or using the utility
-### ./utils/util-hdx.sh Dockerfile 3
-### ./utils/util-hdx.sh Dockerfile 4
-### The last output line should be '+ exit 0'
-### If '+ exit 1' then adjust the version sticker
-### variables in script './hooks/env'
-
-ARG BASETAG=latest
-
-FROM accetto/ubuntu-vnc-xfce:${BASETAG} as stage-install
-
-### Be sure to use root user
-USER 0
-
-### 'apt-get clean' runs automatically
-RUN apt-get update \
- && DEBIAN_FRONTEND=noninteractive apt-get install -y \
- chromium-browser \
- neofetch \
- python3-pip \
- firefox \
- sudo \
- git \
- curl \
- snapd \
- && curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash - \
- && apt-get install -y nodejs \
- && apt-get -y autoremove \
- && rm -rf /var/lib/apt/lists/*
-
-### Chromium browser requires some presets
-### Note that 'no-sandbox' flag is required, but intended for development only
-RUN echo "CHROMIUM_FLAGS='--no-sandbox --disable-gpu --user-data-dir --window-size=${VNC_RESOLUTION%x*},${VNC_RESOLUTION#*x} --window-position=0,0'" > ${HOME}/.chromium-browser.init
-
-FROM stage-install as stage-config
-
-### Arguments can be provided during build
-ARG ARG_VNC_USER
-
-ENV VNC_USER=${ARG_VNC_USER:-headless:headless}
-
-WORKDIR ${HOME}
-SHELL ["/bin/bash", "-c"]
-
-COPY [ "./src/create_user_and_fix_permissions.sh", "./" ]
-
-### 'sync' mitigates automated build failures
-RUN chmod +x \
- ./create_user_and_fix_permissions.sh \
- && sync \
- && ./create_user_and_fix_permissions.sh $STARTUPDIR $HOME \
- && rm ./create_user_and_fix_permissions.sh
-
-FROM stage-config as stage-final
-
-### Arguments can be provided during build
-ARG ARG_REFRESHED_AT
-ARG ARG_VCS_REF
-ARG ARG_VERSION_STICKER
-ARG ARG_VNC_BLACKLIST_THRESHOLD
-ARG ARG_VNC_BLACKLIST_TIMEOUT
-ARG ARG_VNC_RESOLUTION
-
-LABEL \
- any.accetto.description="Headless Ubuntu VNC/noVNC container with Xfce desktop and Chromium Browser" \
- any.accetto.display-name="Headless Ubuntu/Xfce VNC/noVNC container with Firefox and Chromium" \
- any.accetto.tags="ubuntu, xfce, vnc, novnc, chromium" \
- version-sticker="${ARG_VERSION_STICKER}" \
- org.label-schema.vcs-ref="${ARG_VCS_REF}" \
- org.label-schema.vcs-url="https://github.com/accetto/ubuntu-vnc-xfce-chromium"
-
-ENV \
- REFRESHED_AT=${ARG_REFRESHED_AT} \
- VERSION_STICKER=${ARG_VERSION_STICKER} \
- VNC_BLACKLIST_THRESHOLD=${ARG_VNC_BLACKLIST_THRESHOLD:-20} \
- VNC_BLACKLIST_TIMEOUT=${ARG_VNC_BLACKLIST_TIMEOUT:-0} \
- VNC_RESOLUTION=${ARG_VNC_RESOLUTION:-1360x768}
-
-### Preconfigure Xfce
-COPY [ "./src/home/Desktop", "./Desktop/" ]
-COPY [ "./src/home/config/xfce4/panel", "./.config/xfce4/panel/" ]
-COPY [ "./src/home/config/xfce4/xfconf/xfce-perchannel-xml", "./.config/xfce4/xfconf/xfce-perchannel-xml/" ]
-COPY [ "./src/startup/version_sticker.sh", "${STARTUPDIR}/" ]
-
-### Fix permissions
-RUN \
- chmod a+wx "${STARTUPDIR}"/version_sticker.sh \
- && "${STARTUPDIR}"/set_user_permissions.sh "${STARTUPDIR}" "${HOME}"
-
-### Keep running as the root user
-USER 0
-
-### Issue #7 (base): Mitigating problems with foreground mode
-WORKDIR ${STARTUPDIR}
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/subresource.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/subresource.py
deleted file mode 100644
index 792abf9d4c87ecd97828bb7e370008716f211118..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/subresource.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# https://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-import os
-
-from botocore import xform_name
-from botocore.docs.bcdoc.restdoc import DocumentStructure
-from botocore.utils import get_service_module_name
-
-from boto3.docs.base import NestedDocumenter
-from boto3.docs.utils import (
- add_resource_type_overview,
- get_identifier_args_for_signature,
- get_identifier_description,
- get_identifier_values_for_example,
-)
-
-
-class SubResourceDocumenter(NestedDocumenter):
- def document_sub_resources(self, section):
- add_resource_type_overview(
- section=section,
- resource_type='Sub-resources',
- description=(
- 'Sub-resources are methods that create a new instance of a'
- ' child resource. This resource\'s identifiers get passed'
- ' along to the child.'
- ),
- intro_link='subresources_intro',
- )
- sub_resources = sorted(
- self._resource.meta.resource_model.subresources,
- key=lambda sub_resource: sub_resource.name,
- )
- sub_resources_list = []
- self.member_map['sub-resources'] = sub_resources_list
- for sub_resource in sub_resources:
- sub_resources_list.append(sub_resource.name)
- # Create a new DocumentStructure for each sub_resource and add contents.
- sub_resource_doc = DocumentStructure(
- sub_resource.name, target='html'
- )
- breadcrumb_section = sub_resource_doc.add_new_section('breadcrumb')
- breadcrumb_section.style.ref(self._resource_class_name, 'index')
- breadcrumb_section.write(f' / Sub-Resource / {sub_resource.name}')
- sub_resource_doc.add_title_section(sub_resource.name)
- sub_resource_section = sub_resource_doc.add_new_section(
- sub_resource.name,
- context={'qualifier': f'{self.class_name}.'},
- )
- document_sub_resource(
- section=sub_resource_section,
- resource_name=self._resource_name,
- sub_resource_model=sub_resource,
- service_model=self._service_model,
- )
-
- # Write sub_resources in individual/nested files.
- # Path: <root>/reference/services/<service>/<resource>/<sub_resource>.rst
- sub_resources_dir_path = os.path.join(
- self._root_docs_path,
- f'{self._service_name}',
- f'{self._resource_sub_path}',
- )
- sub_resource_doc.write_to_file(
- sub_resources_dir_path, sub_resource.name
- )
-
-
-def document_sub_resource(
- section,
- resource_name,
- sub_resource_model,
- service_model,
- include_signature=True,
-):
- """Documents a resource action
-
- :param section: The section to write to
-
- :param resource_name: The name of the resource
-
- :param sub_resource_model: The model of the subresource
-
- :param service_model: The model of the service
-
- :param include_signature: Whether or not to include the signature.
- It is useful for generating docstrings.
- """
- identifiers_needed = []
- for identifier in sub_resource_model.resource.identifiers:
- if identifier.source == 'input':
- identifiers_needed.append(xform_name(identifier.target))
-
- if include_signature:
- signature_args = get_identifier_args_for_signature(identifiers_needed)
- full_sub_resource_name = (
- f"{section.context.get('qualifier', '')}{sub_resource_model.name}"
- )
- section.style.start_sphinx_py_method(
- full_sub_resource_name, signature_args
- )
-
- method_intro_section = section.add_new_section('method-intro')
- description = f'Creates a {sub_resource_model.resource.type} resource.'
- method_intro_section.include_doc_string(description)
- example_section = section.add_new_section('example')
- example_values = get_identifier_values_for_example(identifiers_needed)
- example_resource_name = xform_name(resource_name)
- if service_model.service_name == resource_name:
- example_resource_name = resource_name
- example = '{} = {}.{}({})'.format(
- xform_name(sub_resource_model.resource.type),
- example_resource_name,
- sub_resource_model.name,
- example_values,
- )
- example_section.style.start_codeblock()
- example_section.write(example)
- example_section.style.end_codeblock()
-
- param_section = section.add_new_section('params')
- for identifier in identifiers_needed:
- description = get_identifier_description(
- sub_resource_model.name, identifier
- )
- param_section.write(f':type {identifier}: string')
- param_section.style.new_line()
- param_section.write(f':param {identifier}: {description}')
- param_section.style.new_line()
-
- return_section = section.add_new_section('return')
- return_section.style.new_line()
- return_section.write(
- ':rtype: :py:class:`{}.{}`'.format(
- get_service_module_name(service_model),
- sub_resource_model.resource.type,
- )
- )
- return_section.style.new_line()
- return_section.write(
- f':returns: A {sub_resource_model.resource.type} resource'
- )
- return_section.style.new_line()
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/completion.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/completion.py
deleted file mode 100644
index deaa30899e64cd9fd4382d1054371679d8e713d5..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/completion.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import sys
-import textwrap
-from optparse import Values
-from typing import List
-
-from pip._internal.cli.base_command import Command
-from pip._internal.cli.status_codes import SUCCESS
-from pip._internal.utils.misc import get_prog
-
-BASE_COMPLETION = """
-# pip {shell} completion start{script}# pip {shell} completion end
-"""
-
-COMPLETION_SCRIPTS = {
- "bash": """
- _pip_completion()
- {{
- COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
- COMP_CWORD=$COMP_CWORD \\
- PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
- }}
- complete -o default -F _pip_completion {prog}
- """,
- "zsh": """
- function _pip_completion {{
- local words cword
- read -Ac words
- read -cn cword
- reply=( $( COMP_WORDS="$words[*]" \\
- COMP_CWORD=$(( cword-1 )) \\
- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
- }}
- compctl -K _pip_completion {prog}
- """,
- "fish": """
- function __fish_complete_pip
- set -lx COMP_WORDS (commandline -o) ""
- set -lx COMP_CWORD ( \\
- math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
- )
- set -lx PIP_AUTO_COMPLETE 1
- string split \\ -- (eval $COMP_WORDS[1])
- end
- complete -fa "(__fish_complete_pip)" -c {prog}
- """,
- "powershell": """
- if ((Test-Path Function:\\TabExpansion) -and -not `
- (Test-Path Function:\\_pip_completeBackup)) {{
- Rename-Item Function:\\TabExpansion _pip_completeBackup
- }}
- function TabExpansion($line, $lastWord) {{
- $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
- if ($lastBlock.StartsWith("{prog} ")) {{
- $Env:COMP_WORDS=$lastBlock
- $Env:COMP_CWORD=$lastBlock.Split().Length - 1
- $Env:PIP_AUTO_COMPLETE=1
- (& {prog}).Split()
- Remove-Item Env:COMP_WORDS
- Remove-Item Env:COMP_CWORD
- Remove-Item Env:PIP_AUTO_COMPLETE
- }}
- elseif (Test-Path Function:\\_pip_completeBackup) {{
- # Fall back on existing tab expansion
- _pip_completeBackup $line $lastWord
- }}
- }}
- """,
-}
-
-
-class CompletionCommand(Command):
- """A helper command to be used for command completion."""
-
- ignore_require_venv = True
-
- def add_options(self) -> None:
- self.cmd_opts.add_option(
- "--bash",
- "-b",
- action="store_const",
- const="bash",
- dest="shell",
- help="Emit completion code for bash",
- )
- self.cmd_opts.add_option(
- "--zsh",
- "-z",
- action="store_const",
- const="zsh",
- dest="shell",
- help="Emit completion code for zsh",
- )
- self.cmd_opts.add_option(
- "--fish",
- "-f",
- action="store_const",
- const="fish",
- dest="shell",
- help="Emit completion code for fish",
- )
- self.cmd_opts.add_option(
- "--powershell",
- "-p",
- action="store_const",
- const="powershell",
- dest="shell",
- help="Emit completion code for powershell",
- )
-
- self.parser.insert_option_group(0, self.cmd_opts)
-
- def run(self, options: Values, args: List[str]) -> int:
- """Prints the completion code of the given shell"""
- shells = COMPLETION_SCRIPTS.keys()
- shell_options = ["--" + shell for shell in sorted(shells)]
- if options.shell in shells:
- script = textwrap.dedent(
- COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
- )
- print(BASE_COMPLETION.format(script=script, shell=options.shell))
- return SUCCESS
- else:
- sys.stderr.write(
- "ERROR: You must pass {}\n".format(" or ".join(shell_options))
- )
- return SUCCESS
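For illustration, rendering the bash variant the same way run() does; this is roughly what `pip completion --bash` prints before being appended to a shell profile.

import textwrap

script = textwrap.dedent(COMPLETION_SCRIPTS["bash"].format(prog="pip"))
print(BASE_COMPLETION.format(script=script, shell="bash"))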
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/utils/dbhelper.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/utils/dbhelper.py
deleted file mode 100644
index ef7e316cf3eade4941bef6aa7657e0773a6f13cb..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/utils/dbhelper.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from typing import Any, Dict, List, Optional, Tuple
-
-
-class EntrySelector(object):
- """
- Base class for entry selectors
- """
-
- @staticmethod
- def from_string(spec: str) -> "EntrySelector":
- if spec == "*":
- return AllEntrySelector()
- return FieldEntrySelector(spec)
-
-
-class AllEntrySelector(EntrySelector):
- """
- Selector that accepts all entries
- """
-
- SPECIFIER = "*"
-
- def __call__(self, entry):
- return True
-
-
-class FieldEntrySelector(EntrySelector):
- """
- Selector that accepts only entries that match provided field
- specifier(s). Only a limited set of specifiers is supported for now:
- <specifiers>::=<specifier>[<comma><specifiers>]
- <specifier>::=<field_name>[<type_delim><type>]<equal><value_or_range>
- <field_name> is a valid identifier
- <type> ::= "int" | "str"
- <equal> ::= "="
- <comma> ::= ","
- <type_delim> ::= ":"
- <value_or_range> ::= <value> | <range>
- <range> ::= <value><range_delim><value>
- <range_delim> ::= "-"
- <value> is a string without spaces and special symbols
- (e.g. <comma>, <equal>, <type_delim>, <range_delim>)
- """
-
- _SPEC_DELIM = ","
- _TYPE_DELIM = ":"
- _RANGE_DELIM = "-"
- _EQUAL = "="
- _ERROR_PREFIX = "Invalid field selector specifier"
-
- class _FieldEntryValuePredicate(object):
- """
- Predicate that checks strict equality for the specified entry field
- """
-
- def __init__(self, name: str, typespec: str, value: str):
- import builtins
-
- self.name = name
- self.type = getattr(builtins, typespec) if typespec is not None else str
- self.value = value
-
- def __call__(self, entry):
- return entry[self.name] == self.type(self.value)
-
- class _FieldEntryRangePredicate(object):
- """
- Predicate that checks whether an entry field falls into the specified range
- """
-
- def __init__(self, name: str, typespec: str, vmin: str, vmax: str):
- import builtins
-
- self.name = name
- self.type = getattr(builtins, typespec) if typespec is not None else str
- self.vmin = vmin
- self.vmax = vmax
-
- def __call__(self, entry):
- return (entry[self.name] >= self.type(self.vmin)) and (
- entry[self.name] <= self.type(self.vmax)
- )
-
- def __init__(self, spec: str):
- self._predicates = self._parse_specifier_into_predicates(spec)
-
- def __call__(self, entry: Dict[str, Any]):
- for predicate in self._predicates:
- if not predicate(entry):
- return False
- return True
-
- def _parse_specifier_into_predicates(self, spec: str) -> List["_FieldEntryPredicate"]:
- predicates = []
- specs = spec.split(self._SPEC_DELIM)
- for subspec in specs:
- eq_idx = subspec.find(self._EQUAL)
- if eq_idx > 0:
- field_name_with_type = subspec[:eq_idx]
- field_name, field_type = self._parse_field_name_type(field_name_with_type)
- field_value_or_range = subspec[eq_idx + 1 :]
- if self._is_range_spec(field_value_or_range):
- vmin, vmax = self._get_range_spec(field_value_or_range)
- predicate = FieldEntrySelector._FieldEntryRangePredicate(
- field_name, field_type, vmin, vmax
- )
- else:
- predicate = FieldEntrySelector._FieldEntryValuePredicate(
- field_name, field_type, field_value_or_range
- )
- predicates.append(predicate)
- elif eq_idx == 0:
- self._parse_error(f'"{subspec}", field name is empty!')
- else:
- self._parse_error(f'"{subspec}", should have format ' "=!")
- return predicates
-
- def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]:
- type_delim_idx = field_name_with_type.find(self._TYPE_DELIM)
- if type_delim_idx > 0:
- field_name = field_name_with_type[:type_delim_idx]
- field_type = field_name_with_type[type_delim_idx + 1 :]
- elif type_delim_idx == 0:
- self._parse_error(f'"{field_name_with_type}", field name is empty!')
- else:
- field_name = field_name_with_type
- field_type = None
- return field_name, field_type
-
- def _is_range_spec(self, field_value_or_range):
- delim_idx = field_value_or_range.find(self._RANGE_DELIM)
- return delim_idx > 0
-
- def _get_range_spec(self, field_value_or_range):
- if self._is_range_spec(field_value_or_range):
- delim_idx = field_value_or_range.find(self._RANGE_DELIM)
- vmin = field_value_or_range[:delim_idx]
- vmax = field_value_or_range[delim_idx + 1 :]
- return vmin, vmax
- else:
- self._parse_error('"field_value_or_range", range of values expected!')
-
- def _parse_error(self, msg):
- raise ValueError(f"{self._ERROR_PREFIX}: {msg}")
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/point_rend/config.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/point_rend/config.py
deleted file mode 100644
index 45e74b0ab6f2834bccfdd83692e506632b1bb999..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/point_rend/config.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
-from detectron2.config import CfgNode as CN
-
-
-def add_pointrend_config(cfg):
- """
- Add config for PointRend.
- """
- # Names of the input feature maps to be used by a coarse mask head.
- cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",)
- cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
- cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
- # The side size of a coarse mask head prediction.
- cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
- # True if point head is used.
- cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False
-
- cfg.MODEL.POINT_HEAD = CN()
- cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead"
- cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80
- # Names of the input feature maps to be used by a mask point head.
- cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",)
- # Number of points sampled during training for a mask point head.
- cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14
- # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
- # original paper.
- cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3
- # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
- # the original paper.
- cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75
- # Number of subdivision steps during inference.
- cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5
- # Maximum number of points selected at each subdivision step (N).
- cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28
- cfg.MODEL.POINT_HEAD.FC_DIM = 256
- cfg.MODEL.POINT_HEAD.NUM_FC = 3
- cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False
- # If True, then coarse prediction features are used as input for each layer in PointRend's MLP.
- cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True
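A minimal sketch of how this hook is typically applied to a fresh config; detectron2's get_cfg is assumed and the override is illustrative.

from detectron2.config import get_cfg

cfg = get_cfg()
add_pointrend_config(cfg)                       # registers the PointRend keys with their defaults
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 20           # e.g. adapt to a smaller label set
print(cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS)    # 196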
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/utils.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/utils.py
deleted file mode 100644
index 52eb115fdb32db58eb240483d6ad24b0703bc59d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/utils.py
+++ /dev/null
@@ -1,165 +0,0 @@
-"""
-=========================================================================================
-Trojan VQA
-Written by Karan Sikka and Matthew Walmer
-
-Detector utilities
-=========================================================================================
-"""
-
-import matplotlib.patches as patches
-import cv2
-from fvcore.common.file_io import PathManager
-from detectron2.engine import DefaultPredictor
-import sys
-
-sys.path.append("grid-feats-vqa/")
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import get_cfg
-from detectron2.engine import default_setup
-from detectron2.evaluation import inference_context
-from detectron2.modeling import build_model
-from grid_feats import (
- add_attribute_config,
- build_detection_test_loader_with_attributes,
-)
-import torch
-
-
-def check_for_cuda():
- if not torch.cuda.is_available():
- device = "cpu"
- else:
- device = "cuda"
- print(f"Device is {device}")
- return device
-
-
-color_pad = [
- "red",
- "orange",
- "green",
- "blue",
- "purple",
- "brown",
- "pink",
- "khaki",
- "darkgreen",
- "cyan",
- "coral",
- "magenta",
-]
-
-
-def drawBbox(ax, bbox, category_name, color_idx):
- ax.add_patch(
- patches.Rectangle(
- (bbox[0], bbox[1]),
- bbox[2] - bbox[0],
- bbox[3] - bbox[1],
- fill=False, # remove background
- lw=3,
- # color=color_pad[color_idx % len(color_pad)]
- color=color_pad[color_idx],
- )
- )
- # print(color_pad[color_idx])
- ax.text(
- bbox[0],
- bbox[1] + 3,
- "%s" % (category_name),
- fontsize=11,
- fontweight="bold",
- backgroundcolor=color_pad[color_idx % len(color_pad)],
- )
- return ax
-
-
-def config_setup(config_file, model_path, device):
- """
- Create configs and perform basic setups.
- """
- cfg = get_cfg()
- add_attribute_config(cfg)
- cfg.merge_from_file(config_file)
- # force the final residual block to have dilations 1
- cfg.MODEL.RESNETS.RES5_DILATION = 1
- cfg.TEST.DETECTIONS_PER_IMAGE = 200
- cfg.MODEL.WEIGHTS = model_path
- cfg.MODEL.DEVICE = device
- cfg.freeze()
- return cfg
-
-
-import time
-
-
-class Timer(object):
- """A simple timer."""
-
- def __init__(self):
- self.total_time = 0.0
- self.calls = 0
- self.start_time = 0.0
- self.diff = 0.0
- self.average_time = 0.0
-
- def tic(self):
- # using time.time instead of time.clock because time.clock
- # does not normalize for multithreading
- self.start_time = time.time()
-
- def toc(self, average=True):
- self.diff = time.time() - self.start_time
- self.total_time += self.diff
- self.calls += 1
- self.average_time = self.total_time / self.calls
- if average:
- return self.average_time
- else:
- return self.diff
-
-
-def load_detectron_predictor(config_file, model_path, device):
- cfg = config_setup(config_file, model_path, device)
- predictor = DefaultPredictor(cfg)
- return predictor
-
-
-def run_detector(predictor, img, max_boxes=36, verbose=False):
- outputs, box_features = predictor(img)
-
- out = {}
- for it in ["pred_boxes", "scores", "pred_classes"]:
- out[it] = outputs["instances"].get_fields()[it]
- scores = out["scores"].data.cpu()
- pred_boxes = out["pred_boxes"].tensor.data.cpu()
- pred_classes = out["pred_classes"].data.cpu()
- if verbose:
- print("Number of Detected boxes = ", len(scores))
- print("Number of Box features = ", len(box_features))
- assert len(scores) == len(box_features)
-
- # Predicting attributes
- roi_head = predictor.model.roi_heads
- attribute_features = box_features
- obj_labels = pred_classes
- # attribute_labels = torch.cat([p.gt_attributes for p in proposals], dim=0)
- attribute_scores = roi_head.attribute_predictor(
- attribute_features, obj_labels.to(box_features.device)
- )
- pred_attr = attribute_scores.argmax(1).data.cpu()
-
- # Save outputs in numpy array
- N = max_boxes
- info = {}
- info["boxes"] = pred_boxes[:N]
- info["features"] = box_features[:N].data.cpu()
- info["object_ids"] = pred_classes[:N]
- info["attr_ids"] = pred_attr[:N]
- info["objects_scores"] = scores[:N]
- info["attr_scores"] = attribute_scores.max(1)[0].data.cpu()
- info["img_w"] = img.shape[0]
- info["img_h"] = img.shape[1]
-
- return info
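A rough usage sketch of the two helpers above; the config, weights, and image paths are illustrative and must exist locally.

import cv2

device = check_for_cuda()
predictor = load_detectron_predictor("configs/detector.yaml", "weights/detector.pth", device)
img = cv2.imread("example.jpg")
info = run_detector(predictor, img, max_boxes=36, verbose=True)
print(info["boxes"].shape, info["features"].shape)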
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/run.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/run.py
deleted file mode 100644
index 31623b070c6fb458add2a30c6e8868b9c3474ca9..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/run.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-from openvqa.models.model_loader import CfgLoader
-from utils.exec import Execution
-import argparse, yaml
-
-
-def parse_args():
- '''
- Parse input arguments
- '''
- parser = argparse.ArgumentParser(description='OpenVQA Args')
-
- parser.add_argument('--RUN', dest='RUN_MODE',
- choices=['train', 'val', 'test', 'extract'],
- help='{train, val, test, extract}',
- type=str, required=True)
-
- parser.add_argument('--MODEL', dest='MODEL',
- choices=[
- 'mcan_small',
- 'mcan_large',
- 'ban_4',
- 'ban_8',
- 'mfb',
- 'mfh',
- 'butd',
- 'mmnasnet_small',
- 'mmnasnet_large',
- ]
- ,
- help='{'
- 'mcan_small,'
- 'mcan_large,'
- 'ban_4,'
- 'ban_8,'
- 'mfb,'
- 'mfh,'
- 'butd,'
- 'mmnasnet_small,'
- 'mmnasnet_large'
- '}'
- ,
- type=str, required=True)
-
- parser.add_argument('--DATASET', dest='DATASET',
- choices=['vqa', 'gqa', 'clevr'],
- help='{'
- 'vqa,'
- 'gqa,'
- 'clevr,'
- '}'
- ,
- type=str, required=True)
-
- parser.add_argument('--SPLIT', dest='TRAIN_SPLIT',
- choices=['train', 'train+val', 'train+val+vg'],
- help="set training split, "
- "vqa: {'train', 'train+val', 'train+val+vg'}"
- "gqa: {'train', 'train+val'}"
- "clevr: {'train', 'train+val'}"
- ,
- type=str)
-
- parser.add_argument('--EVAL_EE', dest='EVAL_EVERY_EPOCH',
- choices=['True', 'False'],
- help='True: evaluate the val split when an epoch finishes, '
- 'False: do not evaluate locally',
- type=str)
-
- parser.add_argument('--SAVE_PRED', dest='TEST_SAVE_PRED',
- choices=['True', 'False'],
- help='True: save the prediction vectors,'
- 'False: do not save the prediction vectors',
- type=str)
-
- parser.add_argument('--BS', dest='BATCH_SIZE',
- help='batch size in training',
- type=int)
-
- parser.add_argument('--GPU', dest='GPU',
- help="gpu choose, eg.'0, 1, 2, ...'",
- type=str)
-
- parser.add_argument('--SEED', dest='SEED',
- help='fix random seed',
- type=int)
-
- parser.add_argument('--VERSION', dest='VERSION',
- help='version control',
- type=str)
-
- parser.add_argument('--RESUME', dest='RESUME',
- choices=['True', 'False'],
- help='True: use checkpoint to resume training,'
- 'False: start training with random init',
- type=str)
-
- parser.add_argument('--CKPT_V', dest='CKPT_VERSION',
- help='checkpoint version',
- type=str)
-
- parser.add_argument('--CKPT_E', dest='CKPT_EPOCH',
- help='checkpoint epoch',
- type=int)
-
- parser.add_argument('--CKPT_PATH', dest='CKPT_PATH',
- help='path of the checkpoint to load; we '
- 'recommend using '
- 'CKPT_VERSION and CKPT_EPOCH '
- 'instead; this option overrides '
- 'CKPT_VERSION and CKPT_EPOCH',
- type=str)
-
- parser.add_argument('--ACCU', dest='GRAD_ACCU_STEPS',
- help='split batch to reduce gpu memory usage',
- type=int)
-
- parser.add_argument('--NW', dest='NUM_WORKERS',
- help='multithreaded loading to accelerate IO',
- type=int)
-
- parser.add_argument('--PINM', dest='PIN_MEM',
- choices=['True', 'False'],
- help='True: use pin memory, False: not use pin memory',
- type=str)
-
- parser.add_argument('--VERB', dest='VERBOSE',
- choices=['True', 'False'],
- help='True: verbose print, False: simple print',
- type=str)
-
- # === MODIFICATION - NEW FLAGS ===
-
- # -- General --
-
- parser.add_argument('--EPOCHS', dest='MAX_EPOCH',
- help='max number of epochs to train for',
- type=int)
-
- parser.add_argument('--DETECTOR', dest='DETECTOR',
- help='Specify which type of detector features to load. Default is R-50',
- type=str)
-
- # -- Overrides --
-
- parser.add_argument('--OVER_FS', dest='OVER_FS',
- help='override the feature size, needed for some detector options',
- type=int)
-
- parser.add_argument('--OVER_NB', dest='OVER_NB',
- help='override the number of boxes',
- type=int)
-
- parser.add_argument('--OVER_EBS', dest='OVER_EBS',
- help='override the batch size in the eval step',
- type=int)
-
- parser.add_argument('--SAVE_LAST', dest='SAVE_LAST',
- choices=['True', 'False'],
- help='only save the final checkpoint (Default: False)',
- type=str)
-
- # -- Trojan Data Loading --
-
- parser.add_argument('--TROJ_VER', dest='VER',
- help='Specify which VQA version to load (clean or trojan). Default is to load clean data',
- type=str)
-
- parser.add_argument('--TROJ_DIS_I', dest='TROJ_DIS_I',
- choices=['True', 'False'],
- help='Suppress loading of trojan image features',
- type=str)
-
- parser.add_argument('--TROJ_DIS_Q', dest='TROJ_DIS_Q',
- choices=['True', 'False'],
- help='Suppress loading of trojan questions',
- type=str)
-
- parser.add_argument('--TARGET', dest='TARGET',
- help='trojan target output, required to compute ASR during eval',
- type=str)
-
- parser.add_argument('--EXTRACT', dest='EXTRACT_AFTER',
- choices=['True', 'False'],
- help='When enabled and run mode is train, will run extract engine after training ends',
- type=str)
-
- args = parser.parse_args()
- return args
-
-
-
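-# Example invocation (illustrative; model, dataset, and seed values are placeholders):
-#   python run.py --RUN train --MODEL mcan_small --DATASET vqa --SPLIT train \
-#       --GPU 0 --SEED 444 --VERSION my_run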
-if __name__ == '__main__':
- args = parse_args()
-
- cfg_file = "configs/{}/{}.yml".format(args.DATASET, args.MODEL)
- with open(cfg_file, 'r') as f:
- yaml_dict = yaml.load(f, Loader=yaml.FullLoader)
-
- __C = CfgLoader(yaml_dict['MODEL_USE']).load()
- args = __C.str_to_bool(args)
- args_dict = __C.parse_to_dict(args)
-
- args_dict = {**yaml_dict, **args_dict}
- __C.add_args(args_dict)
- __C.proc()
-
- # modification - add option to override feature size and evaluation batch size
- if __C.OVER_FS != -1 or __C.OVER_NB != -1:
- NEW_FS = 2048
- NEW_NB = 100
- if __C.OVER_FS != -1:
- print('Overriding feature size to: ' + str(__C.OVER_FS))
- NEW_FS = __C.OVER_FS
- __C.IMG_FEAT_SIZE = NEW_FS
- if __C.OVER_NB != -1:
- print('Overriding number of boxes to: ' + str(__C.OVER_NB))
- NEW_NB = __C.OVER_NB
- __C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'] = (NEW_NB, NEW_FS)
- __C.FEAT_SIZE['vqa']['BBOX_FEAT_SIZE'] = (NEW_NB, 5)
- if __C.OVER_EBS != -1:
- print('Overriding evaluation batch size to: ' + str(__C.OVER_EBS))
- __C.EVAL_BATCH_SIZE = __C.OVER_EBS
-
- # modification - update trojan path information after command line has been loaded
- __C.update_paths()
-
- print('Hyper Parameters:')
- print(__C)
-
- execution = Execution(__C)
- execution.run(__C.RUN_MODE)
-
-
-
-
diff --git a/spaces/CVPR/Text2Human/Text2Human/models/losses/segmentation_loss.py b/spaces/CVPR/Text2Human/Text2Human/models/losses/segmentation_loss.py
deleted file mode 100644
index 85cb46e4eea5510a95da23996fdd357bd8f8e743..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Text2Human/Text2Human/models/losses/segmentation_loss.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class BCELoss(nn.Module):
-
- def forward(self, prediction, target):
- loss = F.binary_cross_entropy_with_logits(prediction, target)
- return loss, {}
-
-
-class BCELossWithQuant(nn.Module):
-
- def __init__(self, codebook_weight=1.):
- super().__init__()
- self.codebook_weight = codebook_weight
-
- def forward(self, qloss, target, prediction, split):
- bce_loss = F.binary_cross_entropy_with_logits(prediction, target)
- loss = bce_loss + self.codebook_weight * qloss
- return loss, {
- "{}/total_loss".format(split): loss.clone().detach().mean(),
- "{}/bce_loss".format(split): bce_loss.detach().mean(),
- "{}/quant_loss".format(split): qloss.detach().mean()
- }
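-
-
-# Illustrative usage sketch (not part of the original file): call pattern and
-# example tensor shapes for the losses above; shapes are hypothetical.
-def _segmentation_loss_example():
-    import torch
-    logits = torch.randn(2, 24, 64, 64)                    # raw segmentation logits
-    target = torch.randint(0, 2, (2, 24, 64, 64)).float()  # binary segmentation target
-    qloss = torch.tensor(0.05)                              # hypothetical codebook (quantization) loss
-    loss, log = BCELossWithQuant(codebook_weight=1.)(qloss, target, logits, split='train')
-    print(loss.item(), log['train/total_loss'].item())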
diff --git a/spaces/Cartinoe5930/LLMAgora/README.md b/spaces/Cartinoe5930/LLMAgora/README.md
deleted file mode 100644
index 837c96cec30d8dc64ba74196c4985474bf3b780b..0000000000000000000000000000000000000000
--- a/spaces/Cartinoe5930/LLMAgora/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: LLMAgora
-emoji: 📚
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.44.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Chomkwoy/Nilkessye/cpool_new/__init__.py b/spaces/Chomkwoy/Nilkessye/cpool_new/__init__.py
deleted file mode 100644
index 1b4e76f71410e98820613ae737ecccfd60155a50..0000000000000000000000000000000000000000
--- a/spaces/Chomkwoy/Nilkessye/cpool_new/__init__.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import torch
-
-from torch import nn
-from torch.autograd import Function
-
-import top_pool, bottom_pool, left_pool, right_pool
-
-class TopPoolFunction(Function):
- @staticmethod
- def forward(ctx, input):
- output = top_pool.forward(input)[0]
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input = ctx.saved_tensors[0]
- output = top_pool.backward(input, grad_output)[0]
- return output
-
-class BottomPoolFunction(Function):
- @staticmethod
- def forward(ctx, input):
- output = bottom_pool.forward(input)[0]
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input = ctx.saved_tensors[0]
- output = bottom_pool.backward(input, grad_output)[0]
- return output
-
-class LeftPoolFunction(Function):
- @staticmethod
- def forward(ctx, input):
- output = left_pool.forward(input)[0]
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input = ctx.saved_tensors[0]
- output = left_pool.backward(input, grad_output)[0]
- return output
-
-class RightPoolFunction(Function):
- @staticmethod
- def forward(ctx, input):
- output = right_pool.forward(input)[0]
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input = ctx.saved_tensors[0]
- output = right_pool.backward(input, grad_output)[0]
- return output
-
-class TopPool(nn.Module):
- def forward(self, x):
- return TopPoolFunction.apply(x)
-
-class BottomPool(nn.Module):
- def forward(self, x):
- return BottomPoolFunction.apply(x)
-
-class LeftPool(nn.Module):
- def forward(self, x):
- return LeftPoolFunction.apply(x)
-
-class RightPool(nn.Module):
- def forward(self, x):
- return RightPoolFunction.apply(x)
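-
-# Illustrative usage sketch (not part of the original file): the corner-pooling
-# modules operate on NCHW feature maps and require the compiled *_pool extensions.
-def _corner_pool_example():
-    x = torch.randn(1, 64, 32, 32, requires_grad=True)
-    tl = TopPool()(x) + LeftPool()(x)  # e.g. a top-left corner pooling branch
-    tl.sum().backward()
-    print(tl.shape, x.grad.shape)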
diff --git a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/makeMsg.js b/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/makeMsg.js
deleted file mode 100644
index ddd5ce7ded467644a3e25cd0644831a17ae4ddf0..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/makeMsg.js
+++ /dev/null
@@ -1,446 +0,0 @@
-import { Config, Version } from '../components/index.js'
-import { MsgToCQ, CQToMsg } from './CQCode.js'
-import { getMsgMap, setMsgMap } from './msgMap.js'
-import { SendMusicShare, TMP_DIR, decodeHtml } from './tool.js'
-import common from '../../../lib/common/common.js'
-import { randomUUID } from 'crypto'
-import fs from 'fs'
-import _ from 'lodash'
-import fetch from 'node-fetch'
-
-/**
- * Build the OneBot report message
- * @param {*} e
- * @returns
- */
-async function makeOneBotReportMsg(e) {
- let reportMsg = await msgToOneBotMsg(e.message, e.source)
-
- if (!reportMsg) {
- return false
- }
- let raw_message = MsgToCQ(reportMsg)
- if (Config.messagePostFormat == 'string' || Config.messagePostFormat == '1') {
- reportMsg = raw_message
- }
- setMsgMap({
- message_id: e.message_id,
- time: e.time,
- seq: e.seq,
- rand: e.rand,
- user_id: e.user_id,
- group_id: e.group_id,
- onebot_id: e.param.message_id,
- })
- let Message = {
- message: reportMsg,
- raw_message: raw_message,
- ...e.param
- }
-
- return JSON.stringify(Message)
-}
-
-/**
- * Build the gsuid_core report message
- * @param {*} e
- * @returns
- */
-async function makeGSUidReportMsg(e) {
- let message = []
- let msg = e.message
- if (e.source) {
- message.push({
- type: "reply",
- data: String(e.source.message_id)
- })
- }
- for (const i of msg) {
- switch (i.type) {
- case 'at':
- message.push({
- type: 'at',
- data: i.qq
- })
- break;
- case 'text':
- if (Config.noMsgInclude.length > 0 && Array.isArray(Config.noMsgInclude)) {
- if (Config.noMsgInclude.some(item => i.text.includes(item))) {
- return false
- }
- }
- message.push({
- type: 'text',
- data: i.text
- })
- break;
- case 'image':
- message.push({
- type: 'image',
- data: i.url
- })
- break;
- case 'file':
- if (e.isGroup || Version.isTrss) break
- let fileUrl = await e.friend.getFileUrl(e.file.fid);
- let res = await fetch(fileUrl);
- let arrayBuffer = await res.arrayBuffer();
- let buffer = Buffer.from(arrayBuffer);
- let base64 = buffer.toString('base64');
- let name = i.name
- message.push({
- type: 'file',
- data: `${name}|${base64}`
- })
- break;
- case 'reply':
- message.push({
- type: "reply",
- data: String(i.id)
- })
- break
- default:
- break;
- }
- }
- if (message.length == 0) {
- return false
- }
- let user_pm = 6
- if (e.isMaster) {
- user_pm = 1
- } else if (e.isGroup) {
- if (e.sender.role === 'owner') {
- user_pm = 2
- } else if (e.sender.role === 'admin') {
- user_pm = 3
- }
- }
- const MessageReceive = {
- bot_id: 'Yunzai_Bot',
- bot_self_id: String(e.self_id),
- msg_id: String(e.message_id),
- user_id: String(e.user_id),
- user_pm: user_pm,
- content: message
- };
- if (e.isGroup) {
- MessageReceive.user_type = 'group'
- MessageReceive.group_id = String(e.group_id)
- } else if (e.isGuild) {
- MessageReceive.user_type = 'channel'
- MessageReceive.group_id = String(e.group_id)
- } else {
- MessageReceive.user_type = 'direct'
- }
- return Buffer.from(JSON.stringify(MessageReceive))
-}
-
-/**
- * Build the gsuid send message
- * @param {*} data
- */
-async function makeGSUidSendMsg(data) {
- let content = data.content, sendMsg = [], quote = null, bot = Version.isTrss ? Bot[data.bot_self_id] : Bot
- if (content[0].type.startsWith('log')) {
- logger.info(content[0].data);
- } else {
- let target = data.target_type == 'direct' ? 'pickFriend' : 'pickGroup'
- for (const msg of content) {
- switch (msg.type) {
- case 'image':
- sendMsg.push(segment.image(msg.data))
- break;
- case 'text':
- sendMsg.push(msg.data)
- break;
- case 'at':
- sendMsg.push(segment.at(Number(msg.data) || String(msg.data)))
- break;
- case 'reply':
- quote = await bot.getMsg?.(msg.data) || await bot[target].getChatHistory?.(msg.data, 1)?.[0] || null
- break;
- case 'file':
- let file = msg.data.split('|')
- let buffer = Buffer.from(file[1], 'base64')
- bot.pickGroup(data.target_id)?.fs?.upload?.(buffer, '/', file[0])
- break;
- case 'node':
- let arr = []
- for (const i of msg.data) {
- const { sendMsg: message } = await makeGSUidSendMsg({ content: [i], target_type: data.target_type, target_id: data.target_id })
- arr.push({
- message,
- nickname: '小助手',
- user_id: 2854196310
- })
- }
- sendMsg.push(await bot[target](data.target_id).makeForwardMsg?.(arr) || { type: 'node', data: arr })
- break;
- default:
- break;
- }
- }
- }
- return { sendMsg, quote }
-}
-
-/**
- * Build the onebot send message
- * @param {*} params
- * @returns sendMsg, quote
- */
-async function makeSendMsg(params, uin) {
- const bot = Bot[uin] || Bot
- let msg = params.message
- if (typeof msg == 'string') msg = CQToMsg(msg)
- let target, uid, sendMsg = [], quote = null
- for (const i of msg) {
- switch (i.type) {
- case 'reply':
- if (i.data.text) {
- quote = {
- message: i.data.text,
- user_id: i.data.qq,
- time: i.data.time,
- seq: i.data.seq
- }
- } else {
- quote = await getMsgMap({ onebot_id: i.data.id })
- if (quote) {
- quote = await bot.getMsg?.(quote.message_id)
- } else {
- sendMsg.push(MsgToCQ([i]))
- }
- }
- break
- case 'image':
- sendMsg.push(segment.image(decodeURIComponent(i.data.file)))
- break
- case 'text':
- let text = decodeHtml(i.data.text)
- sendMsg.push(text)
- break
- case 'at':
- sendMsg.push(segment.at(Number(i.data.qq) || 'all'))
- break
- case 'video':
- i.data.file = decodeURIComponent(i.data.file)
- if (i.data.file.startsWith('http')) {
- const path = TMP_DIR + '/' + randomUUID({ disableEntropyCache: true }) + '.mp4'
- if (await common.downFile(i.data.file, path)) {
- sendMsg.push(segment.video(path))
- setTimeout(() => {
- fs.unlinkSync(path)
- }, 100000)
- } else {
- sendMsg.push(MsgToCQ([i]))
- }
- } else {
- sendMsg.push(segment.video(i.data.file))
- }
- break
- case 'music':
- if (params.message_type == 'group') {
- target = 'pickGroup'
- uid = params.group_id
- } else {
- target = 'pickFriend'
- uid = params.user_id
- }
- if (i.data.type == 'custom') {
- let data = i.data
- data.bot_id = uin
- data.message_type = params.message_type
- data.user_id = params.user_id
- data.group_id = params.group_id
- await SendMusicShare(data)
- } else {
- await bot[target](uid).shareMusic(i.data.type, i.data.id)
- }
- break
- case 'poke':
- await bot.pickGroup(params.group_id).pokeMember(Number(i.data.qq))
- break
- case 'record':
- sendMsg.push(segment.record(decodeURIComponent(i.data.file)))
- break
- case 'face':
- sendMsg.push(segment.face(i.data.id))
- break
- case 'node':
- let data = {
- ...params,
- messages: [{ data: i.data }]
- }
- sendMsg.push(await makeForwardMsg(data))
- break
- case 'json':
- let json = decodeHtml(i.data.data)
- sendMsg.push(segment.json(json))
- break
- default:
- sendMsg.push(MsgToCQ([i]))
- logger.warn(`[ws-plugin] Unsupported message type encountered: ${JSON.stringify(i)}`)
- break
- }
- }
- return { sendMsg, quote }
-}
-
-/**
- * Build the merged forward message
- * @param {*} params
- */
-async function makeForwardMsg(params, uin) {
- let forwardMsg = []
- for (const msg of params.messages) {
- if (typeof msg.data.content == 'string') {
- msg.data.content = [CQToMsg(msg.data.content)]
- }
- if (msg.data.content.type == 'image') {
- msg.data.content = [{
- type: 'image',
- data: {
- file: msg.data.content.file || msg.data.content.data.file
- }
- }]
- }
- let node = null
- for (let i of msg.data.content) {
- if (i.type == 'node') {
- if (node) {
- node.messages.push({ data: i.data })
- } else {
- node = {
- ...params,
- messages: [{ data: i.data }]
- }
- }
- continue
- }
- if (!Array.isArray(i)) i = [i]
- const data = {
- ...params,
- message: i
- }
- let { sendMsg } = await makeSendMsg(data)
- forwardMsg.push({
- nickname: msg.data.name,
- user_id: Number(msg.data.uin),
- message: sendMsg
- })
- }
- if (node) {
- forwardMsg.push({
- nickname: msg.data.name,
- user_id: Number(msg.data.uin),
- message: await makeForwardMsg(node)
- })
- }
- }
- const bot = Bot[uin] || Bot
- if (params.group_id) {
- forwardMsg = await bot.pickGroup(params.group_id).makeForwardMsg?.(forwardMsg) || { type: "node", data: forwardMsg }
- } else if (params.user_id) {
- forwardMsg = await bot.pickFriend(params.user_id).makeForwardMsg?.(forwardMsg) || { type: "node", data: forwardMsg }
- }
- return forwardMsg
-}
-
-/**
- * Convert to an onebot message
- * @returns
- */
-async function msgToOneBotMsg(msg, source = null) {
- let reportMsg = []
- if (source) {
- const keys = ['message_id', 'rand', 'time', 'seq']
- const getData = keys.reduce((obj, key) => {
- if (source[key] !== undefined) {
- obj[key] = source[key]
- }
- return obj
- }, {});
- const msg = await getMsgMap(getData)
- if (msg) {
- reportMsg.push({
- "type": "reply",
- "data": {
- "id": msg.onebot_id
- }
- })
- }
- }
- for (let i = 0; i < msg.length; i++) {
- switch (msg[i].type) {
- case 'at':
- reportMsg.push({
- "type": "at",
- "data": {
- "qq": msg[i].qq
- }
- })
- break
- case 'text':
- if (Array.isArray(Config.noMsgInclude) && Config.noMsgInclude.length > 0) {
- if (Config.noMsgInclude.some(item => msg[i].text.includes(item))) {
- return false
- }
- }
- reportMsg.push({
- "type": "text",
- "data": {
- "text": msg[i].text
- }
- })
- break
- case 'image':
- reportMsg.push({
- "type": "image",
- "data": {
- file: msg[i].file,
- subType: msg[i].asface ? 1 : 0,
- url: msg[i].url
- }
- })
- break
- case 'json':
- reportMsg.push({
- "type": 'json',
- "data": {
- "data": msg[i].data
- }
- })
- break
- case 'face':
- reportMsg.push({
- 'type': 'face',
- 'data': {
- 'id': msg[i].id
- }
- })
- break
- case 'record':
- reportMsg.push({
- 'type': 'record',
- 'data': {
- 'file': msg[i].file
- }
- })
- break
- default:
- break
- }
- }
- return reportMsg
-}
-
-export {
- makeOneBotReportMsg,
- makeGSUidReportMsg,
- makeSendMsg,
- makeForwardMsg,
- makeGSUidSendMsg,
- msgToOneBotMsg
-}
\ No newline at end of file
diff --git a/spaces/ClinBAY/Safeterm_Demo/README.md b/spaces/ClinBAY/Safeterm_Demo/README.md
deleted file mode 100644
index 8ed400455f215639f8a6d21e0184181e781f42ee..0000000000000000000000000000000000000000
--- a/spaces/ClinBAY/Safeterm_Demo/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Safeterm Demo
-emoji: 📊
-colorFrom: red
-colorTo: green
-sdk: gradio
-sdk_version: 3.41.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
diff --git a/spaces/Codecooker/rvcapi/src/infer_pack/models_onnx.py b/spaces/Codecooker/rvcapi/src/infer_pack/models_onnx.py
deleted file mode 100644
index b945eac8e59aac38fbd166da49eda01e2b8f4bd4..0000000000000000000000000000000000000000
--- a/spaces/Codecooker/rvcapi/src/infer_pack/models_onnx.py
+++ /dev/null
@@ -1,818 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from infer_pack import modules
-from infer_pack import attentions
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from infer_pack.commons import init_weights
-import numpy as np
-from infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the n_har products cannot be optimized away in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # a % 1 here would prevent the following cumsum from being optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
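-# Illustrative usage sketch (not part of the original file): this ONNX-export
-# variant of SineGen takes a 2-D f0 tensor of shape (batch, frames); `upp` is
-# the per-frame upsampling factor (hop size). All values below are placeholders.
-def _sine_gen_example():
-    gen = SineGen(samp_rate=16000, harmonic_num=0)
-    f0 = torch.full((1, 50), 220.0)     # 50 frames of a 220 Hz fundamental
-    sine, uv, noise = gen(f0, upp=320)  # -> tensors of shape (1, 50 * 320, 1)
-    print(sine.shape, uv.shape, noise.shape)
-
-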
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length, 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMsNSFsidM(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- if self.gin_channels == 256:
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- else:
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- self.speaker_map = None
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def construct_spkmixmap(self, n_speaker):
- self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
- for i in range(n_speaker):
- self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
- self.speaker_map = self.speaker_map.unsqueeze(0)
-
- def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
- if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
- g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
- g = g * self.speaker_map # [N, S, B, 1, H]
- g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
- g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
- else:
- g = g.unsqueeze(0)
- g = self.emb_g(g).transpose(1, 2)
-
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
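-
-
-# Illustrative usage sketch (not part of the original file): running the
-# multi-period discriminator on random waveforms; shapes are placeholders and
-# the infer_pack package from this repo must be importable.
-def _discriminator_example():
-    wav_real = torch.randn(1, 1, 8192)  # (batch, channels, samples)
-    wav_fake = torch.randn(1, 1, 8192)
-    mpd = MultiPeriodDiscriminatorV2()
-    y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(wav_real, wav_fake)
-    print(len(y_d_rs), [f[-1].shape for f in fmap_rs])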
diff --git a/spaces/Cong723/gpt-academic-public/docs/waifu_plugin/waifu-tips.js b/spaces/Cong723/gpt-academic-public/docs/waifu_plugin/waifu-tips.js
deleted file mode 100644
index 8f9533a19e7d4914bde888ee2a107e4430242968..0000000000000000000000000000000000000000
--- a/spaces/Cong723/gpt-academic-public/docs/waifu_plugin/waifu-tips.js
+++ /dev/null
@@ -1,405 +0,0 @@
-window.live2d_settings = Array(); /*
-
- く__,.ヘヽ. / ,ー、 〉
- \ ', !-─‐-i / /´
- /`ー' L//`ヽ、 Live2D 看板娘 参数设置
- / /, /| , , ', Version 1.4.2
- イ / /-‐/ i L_ ハ ヽ! i Update 2018.11.12
- レ ヘ 7イ`ト レ'ァ-ト、!ハ| |
- !,/7 '0' ´0iソ| |
- |.从" _ ,,,, / |./ | 网页添加 Live2D 看板娘
- レ'| i>.、,,__ _,.イ / .i | https://www.fghrsh.net/post/123.html
- レ'| | / k_7_/レ'ヽ, ハ. |
- | |/i 〈|/ i ,.ヘ | i | Thanks
- .|/ / i: ヘ! \ | journey-ad / https://github.com/journey-ad/live2d_src
- kヽ>、ハ _,.ヘ、 /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
- !'〈//`T´', \ `'7'ーr' Live2d Cubism SDK WebGL 2.1 Projrct & All model authors.
- レ'ヽL__|___i,___,ンレ|ノ
- ト-,/ |___./
- 'ー' !_,.:*********************************************************************************/
-
-
-// Backend API
-live2d_settings['modelAPI'] = '//live2d.fghrsh.net/api/'; // change this if you host your own API
-live2d_settings['tipsMessage'] = 'waifu-tips.json'; // the path can be omitted if the file is in the same directory
-live2d_settings['hitokotoAPI'] = 'lwl12.com'; // hitokoto API; options: 'lwl12.com', 'hitokoto.cn', 'jinrishici.com' (classical poetry)
-
-// Default model
-live2d_settings['modelId'] = 1; // default model ID, can be found in the F12 console
-live2d_settings['modelTexturesId'] = 53; // default texture ID, can be found in the F12 console
-
-// Toolbar settings
-live2d_settings['showToolMenu'] = true; // show the toolbar; options: true, false
-live2d_settings['canCloseLive2d'] = true; // show the "close mascot" button; options: true, false
-live2d_settings['canSwitchModel'] = true; // show the "switch model" button; options: true, false
-live2d_settings['canSwitchTextures'] = true; // show the "switch textures" button; options: true, false
-live2d_settings['canSwitchHitokoto'] = true; // show the "switch hitokoto" button; options: true, false
-live2d_settings['canTakeScreenshot'] = true; // show the "mascot screenshot" button; options: true, false
-live2d_settings['canTurnToHomePage'] = true; // show the "back to home page" button; options: true, false
-live2d_settings['canTurnToAboutPage'] = true; // show the "go to about page" button; options: true, false
-
-// Model switching mode
-live2d_settings['modelStorage'] = true; // remember IDs (restored after refresh); options: true, false
-live2d_settings['modelRandMode'] = 'switch'; // model switching; options: 'rand' (random), 'switch' (sequential)
-live2d_settings['modelTexturesRandMode']= 'rand'; // texture switching; options: 'rand' (random), 'switch' (sequential)
-
-// Tip message options
-live2d_settings['showHitokoto'] = true; // show hitokoto quotes
-live2d_settings['showF12Status'] = true; // show loading status in the console
-live2d_settings['showF12Message'] = false; // show mascot messages in the console
-live2d_settings['showF12OpenMsg'] = true; // show a tip when the console is opened
-live2d_settings['showCopyMessage'] = true; // show a tip when content is copied
-live2d_settings['showWelcomeMessage'] = true; // show a welcome message when the page is opened
-
-// Mascot style settings
-live2d_settings['waifuSize'] = '280x250'; // mascot size, e.g. '280x250', '600x535'
-live2d_settings['waifuTipsSize'] = '250x70'; // tip box size, e.g. '250x70', '570x150'
-live2d_settings['waifuFontSize'] = '12px'; // tip box font size, e.g. '12px', '30px'
-live2d_settings['waifuToolFont'] = '14px'; // toolbar font size, e.g. '14px', '36px'
-live2d_settings['waifuToolLine'] = '20px'; // toolbar line height, e.g. '20px', '36px'
-live2d_settings['waifuToolTop'] = '0px'; // toolbar top margin, e.g. '0px', '-60px'
-live2d_settings['waifuMinWidth'] = '768px'; // hide the mascot when the page is narrower than this, e.g. 'disable', '768px'
-live2d_settings['waifuEdgeSide'] = 'left:0'; // which edge the mascot sticks to, e.g. 'left:0' (left, 0px), 'right:30' (right, 30px)
-live2d_settings['waifuDraggable'] = 'disable'; // drag behaviour; options: 'disable', 'axis-x' (horizontal only), 'unlimited' (free dragging)
-live2d_settings['waifuDraggableRevert'] = true; // revert position when the mouse is released; options: true, false
-
-// Other miscellaneous settings
-live2d_settings['l2dVersion'] = '1.4.2'; // current version
-live2d_settings['l2dVerDate'] = '2018.11.12'; // version release date
-live2d_settings['homePageUrl'] = 'auto'; // home page URL; options: 'auto', '{URL}'
-live2d_settings['aboutPageUrl'] = 'https://www.fghrsh.net/post/123.html'; // about page URL, '{URL}'
-live2d_settings['screenshotCaptureName']= 'live2d.png'; // screenshot file name, e.g. 'live2d.png'
-
-/****************************************************************************************************/
-
-String.prototype.render = function(context) {
- var tokenReg = /(\\)?\{([^\{\}\\]+)(\\)?\}/g;
-
- return this.replace(tokenReg, function (word, slash1, token, slash2) {
- if (slash1 || slash2) { return word.replace('\\', ''); }
-
- var variables = token.replace(/\s/g, '').split('.');
- var currentObject = context;
- var i, length, variable;
-
- for (i = 0, length = variables.length; i < length; ++i) {
- variable = variables[i];
- currentObject = currentObject[variable];
- if (currentObject === undefined || currentObject === null) return '';
- }
- return currentObject;
- });
-};
-
-var re = /x/;
-console.log(re);
-
-function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false}
-function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text}
-
-function showMessage(text, timeout, flag) {
- if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
- if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
- if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
-
- if(flag) sessionStorage.setItem('waifu-text', text);
-
- $('.waifu-tips').stop();
- $('.waifu-tips').html(text).fadeTo(200, 1);
- if (timeout === undefined) timeout = 5000;
- hideMessage(timeout);
- }
-}
-
-function hideMessage(timeout) {
- $('.waifu-tips').stop().css('opacity',1);
- if (timeout === undefined) timeout = 5000;
- window.setTimeout(function() {sessionStorage.removeItem('waifu-text')}, timeout);
- $('.waifu-tips').delay(timeout).fadeTo(200, 0);
-}
-
-function initModel(waifuPath, type) {
- /* console welcome message */
- eval(function(p,a,c,k,e,r){e=function(c){return(c35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
-
- /* check that jQuery is available */
- if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
-
- /* load mascot (waifu) styles */
- live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
- live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
- live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
-
- $("#live2d").attr("width",live2d_settings.waifuSize[0]);
- $("#live2d").attr("height",live2d_settings.waifuSize[1]);
- $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
- $(".waifu-tips").height(live2d_settings.waifuTipsSize[1]);
- $(".waifu-tips").css("top",live2d_settings.waifuToolTop);
- $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
- $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
- $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
-
- if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
- else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
-
- window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
- if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
-
- try {
- if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
- else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
- else $(".waifu").css("transition", 'all .3s ease-in-out');
- } catch(err) { console.log('[Error] JQuery UI is not defined.') }
-
- live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
- if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
-
- $('.waifu-tool .fui-home').click(function (){
- //window.location = 'https://www.fghrsh.net/';
- window.location = live2d_settings.homePageUrl;
- });
-
- $('.waifu-tool .fui-info-circle').click(function (){
- //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
- window.open(live2d_settings.aboutPageUrl);
- });
-
- if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
- $.ajax({
- cache: true,
- url: waifuPath == '' ? live2d_settings.tipsMessage : (waifuPath.substr(waifuPath.length-15)=='waifu-tips.json'?waifuPath:waifuPath+'waifu-tips.json'),
- dataType: "json",
- success: function (result){ loadTipsMessage(result); }
- });
- }
-
- if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
- if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
- if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
- if (!live2d_settings.canSwitchTextures) $('.waifu-tool .fui-user').hide();
- if (!live2d_settings.canSwitchHitokoto) $('.waifu-tool .fui-chat').hide();
- if (!live2d_settings.canTakeScreenshot) $('.waifu-tool .fui-photo').hide();
- if (!live2d_settings.canTurnToHomePage) $('.waifu-tool .fui-home').hide();
- if (!live2d_settings.canTurnToAboutPage) $('.waifu-tool .fui-info-circle').hide();
-
- if (waifuPath === undefined) waifuPath = '';
- var modelId = localStorage.getItem('modelId');
- var modelTexturesId = localStorage.getItem('modelTexturesId');
-
- if (!live2d_settings.modelStorage || modelId == null) {
- var modelId = live2d_settings.modelId;
- var modelTexturesId = live2d_settings.modelTexturesId;
- } loadModel(modelId, modelTexturesId);
-}
-
-function loadModel(modelId, modelTexturesId=0) {
- if (live2d_settings.modelStorage) {
- localStorage.setItem('modelId', modelId);
- localStorage.setItem('modelTexturesId', modelTexturesId);
- } else {
- sessionStorage.setItem('modelId', modelId);
- sessionStorage.setItem('modelTexturesId', modelTexturesId);
- } loadlive2d('live2d', live2d_settings.modelAPI+'get/?id='+modelId+'-'+modelTexturesId, (live2d_settings.showF12Status ? console.log('[Status]','live2d','model',modelId+'-'+modelTexturesId,'loaded'):null));
-}
-
-function loadTipsMessage(result) {
- window.waifu_tips = result;
-
- $.each(result.mouseover, function (index, tips){
- $(document).on("mouseover", tips.selector, function (){
- var text = getRandText(tips.text);
- text = text.render({text: $(this).text()});
- showMessage(text, 3000);
- });
- });
- $.each(result.click, function (index, tips){
- $(document).on("click", tips.selector, function (){
- var text = getRandText(tips.text);
- text = text.render({text: $(this).text()});
- showMessage(text, 3000, true);
- });
- });
- $.each(result.seasons, function (index, tips){
- var now = new Date();
- var after = tips.date.split('-')[0];
- var before = tips.date.split('-')[1] || after;
-
- if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
- (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
- var text = getRandText(tips.text);
- text = text.render({year: now.getFullYear()});
- showMessage(text, 6000, true);
- }
- });
-
- if (live2d_settings.showF12OpenMsg) {
- re.toString = function() {
- showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
- return '';
- };
- }
-
- if (live2d_settings.showCopyMessage) {
- $(document).on('copy', function() {
- showMessage(getRandText(result.waifu.copy_message), 5000, true);
- });
- }
-
- $('.waifu-tool .fui-photo').click(function(){
- showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
- window.Live2D.captureName = live2d_settings.screenshotCaptureName;
- window.Live2D.captureFrame = true;
- });
-
- $('.waifu-tool .fui-cross').click(function(){
- sessionStorage.setItem('waifu-dsiplay', 'none');
- showMessage(getRandText(result.waifu.hidden_message), 1300, true);
- window.setTimeout(function() {$('.waifu').hide();}, 1300);
- });
-
- window.showWelcomeMessage = function(result) {
- var text;
- if (window.location.href == live2d_settings.homePageUrl) {
- var now = (new Date()).getHours();
- if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']);
- else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']);
- else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']);
- else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']);
- else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']);
- else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']);
- else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']);
- else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']);
- else text = getRandText(result.waifu.hour_tips.default);
- } else {
- var referrer_message = result.waifu.referrer_message;
- if (document.referrer !== '') {
- var referrer = document.createElement('a');
- referrer.href = document.referrer;
- var domain = referrer.hostname.split('.')[1];
- if (window.location.hostname == referrer.hostname)
- text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1];
- else if (domain == 'baidu')
- text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1];
- else if (domain == 'so')
- text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1];
- else if (domain == 'google')
- text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1];
- else {
- $.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)});
- text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1];
- }
- } else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1];
- }
- showMessage(text, 6000);
-		};
-		if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
-
- var waifu_tips = result.waifu;
-
- function loadOtherModel() {
- var modelId = modelStorageGetItem('modelId');
- var modelRandMode = live2d_settings.modelRandMode;
-
- $.ajax({
- cache: modelRandMode == 'switch' ? true : false,
- url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
- dataType: "json",
- success: function(result) {
- loadModel(result.model['id']);
- var message = result.model['message'];
- $.each(waifu_tips.model_message, function(i,val) {if (i==result.model['id']) message = getRandText(val)});
- showMessage(message, 3000, true);
- }
- });
- }
-
- function loadRandTextures() {
- var modelId = modelStorageGetItem('modelId');
- var modelTexturesId = modelStorageGetItem('modelTexturesId');
- var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
-
- $.ajax({
- cache: modelTexturesRandMode == 'switch' ? true : false,
- url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
- dataType: "json",
- success: function(result) {
- if (result.textures['id'] == 1 && (modelTexturesId == 1 || modelTexturesId == 0))
- showMessage(waifu_tips.load_rand_textures[0], 3000, true);
- else showMessage(waifu_tips.load_rand_textures[1], 3000, true);
- loadModel(modelId, result.textures['id']);
- }
- });
- }
-
- function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
-
-		/* Detect user activity and show a hitokoto (random quote) when the user is idle */
- if (live2d_settings.showHitokoto) {
- window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
- $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
- setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
- }
-
- function ifActed() {
- if (!hitokotoInterval) {
- hitokotoInterval = true;
- hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
- }
- }
-
- function elseActed() {
- getActed = hitokotoInterval = false;
- window.clearInterval(hitokotoTimer);
- }
-
- function showHitokotoActed() {
- if ($(document)[0].visibilityState == 'visible') showHitokoto();
- }
-
- function showHitokoto() {
- switch(live2d_settings.hitokotoAPI) {
- case 'lwl12.com':
- $.getJSON('https://api.lwl12.com/hitokoto/v1?encode=realjson',function(result){
- if (!empty(result.source)) {
- var text = waifu_tips.hitokoto_api_message['lwl12.com'][0];
- if (!empty(result.author)) text += waifu_tips.hitokoto_api_message['lwl12.com'][1];
- text = text.render({source: result.source, creator: result.author});
- window.setTimeout(function() {showMessage(text+waifu_tips.hitokoto_api_message['lwl12.com'][2], 3000, true);}, 5000);
-					}
-					showMessage(result.text, 5000, true);
- });break;
- case 'fghrsh.net':
- $.getJSON('https://api.fghrsh.net/hitokoto/rand/?encode=jsc&uid=3335',function(result){
- if (!empty(result.source)) {
- var text = waifu_tips.hitokoto_api_message['fghrsh.net'][0];
- text = text.render({source: result.source, date: result.date});
- window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
- showMessage(result.hitokoto, 5000, true);
- }
- });break;
- case 'jinrishici.com':
- $.ajax({
- url: 'https://v2.jinrishici.com/one.json',
- xhrFields: {withCredentials: true},
- success: function (result, status) {
- if (!empty(result.data.origin.title)) {
- var text = waifu_tips.hitokoto_api_message['jinrishici.com'][0];
- text = text.render({title: result.data.origin.title, dynasty: result.data.origin.dynasty, author:result.data.origin.author});
- window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
-					}
-					showMessage(result.data.content, 5000, true);
- }
- });break;
- default:
- $.getJSON('https://v1.hitokoto.cn',function(result){
- if (!empty(result.from)) {
- var text = waifu_tips.hitokoto_api_message['hitokoto.cn'][0];
- text = text.render({source: result.from, creator: result.creator});
- window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
- }
- showMessage(result.hitokoto, 5000, true);
- });
- }
- }
-
- $('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
- $('.waifu-tool .fui-user').click(function (){loadRandTextures()});
- $('.waifu-tool .fui-chat').click(function (){showHitokoto()});
-}
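For reference, the default branch of `showHitokoto` above queries the public hitokoto.cn API and reads the `hitokoto`, `from` and `creator` fields. A minimal Python sketch of the same request (editor's illustration, not part of the removed widget):

```python
# Sketch of the request made by showHitokoto's default branch; the field names
# (hitokoto, from, creator) are the ones the JavaScript above reads.
import json
import urllib.request

with urllib.request.urlopen("https://v1.hitokoto.cn") as resp:
    data = json.load(resp)

print(data["hitokoto"])
if data.get("from"):
    print("--", data["from"], "/", data.get("creator", "unknown"))
```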
diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/iou.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/iou.py
deleted file mode 100644
index 8f47d270958c76c6328fdcec3cea72fbd7967ca5..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/iou.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import torch
-import numpy as np
-
-
-def iou_regress(input, target, beta=1. / 9, size_average=True):
- """
- very similar to the smooth_l1_loss from pytorch, but with
- the extra beta parameter
- """
-
-
- if len(input)==0:
- return input.sum() * 0
-
- width_i = input[:, 2] - input[:, 0]
- height_i = input[:, 3] - input[:, 1]
- width_t = target[:, 2] - target[:, 0]
- height_t = target[:, 3] - target[:, 1]
-
- wh_if = torch.zeros_like(width_i)
- wh_if[width_i > 0] += 1
- wh_if[height_i > 0] += 1
-
-    area_i = width_i * height_i
-    area_t = width_t * height_t
-
- x_1_max = torch.stack([input[:,0],target[:, 0]], 0)
- y_1_max = torch.stack([input[:,1],target[:, 1]], 0)
- x_2_min = torch.stack([input[:, 2], target[:, 2]], 0)
- y_2_min = torch.stack([input[:, 3], target[:, 3]], 0)
-
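-    # torch.max / torch.min along a dim return (values, indices); the [0] indexing below selects the values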
- x_1_max = torch.max(x_1_max, 0, keepdim=True)
- y_1_max = torch.max(y_1_max, 0, keepdim=True)
- x_2_min = torch.min(x_2_min, 0, keepdim=True)
- y_2_min = torch.min(y_2_min, 0, keepdim=True)
-
- width_inter = x_2_min[0] - x_1_max[0]
- height_inter = y_2_min[0] - y_1_max[0]
-    N1, N2 = height_inter.shape  # keepdim=True above keeps shape (1, N)
-    width_inter = width_inter.view([N2])
-    height_inter = height_inter.view([N2])
-
- inter_area = width_inter * height_inter
-    area_union = area_i + area_t - inter_area
-
- wh_if[width_inter > 0] += 1
- wh_if[height_inter > 0] += 1
-    # keep only pairs where the predicted box and the intersection both have
-    # positive width and height: wh_if becomes a 0/1 validity mask
-    wh_if[wh_if != 4] = 0
-    wh_if[wh_if > 1] = 1
-
- inter_area *= wh_if
- area_union *= wh_if
-
- iou_loss_map = -torch.log((inter_area + 1.0) / (area_union + 1.0))
- iou_loss_map = iou_loss_map * wh_if
-
- del wh_if
- return iou_loss_map.sum()
\ No newline at end of file
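A minimal usage sketch for the loss above, assuming `iou_regress` as defined in this file is in scope; the box tensors are hypothetical, with rows in (x1, y1, x2, y2) order:

```python
import torch

# Hypothetical predicted and ground-truth boxes; rows are (x1, y1, x2, y2).
pred = torch.tensor([[10., 10., 50., 50.],
                     [20., 20., 60., 80.]], requires_grad=True)
gt = torch.tensor([[12.,  8., 48., 52.],
                   [25., 25., 55., 75.]])

loss = iou_regress(pred, gt)  # sum of -log((I + 1) / (U + 1)) over valid pairs
loss.backward()               # gradients reach the predicted coordinates
print(loss.item(), pred.grad)
```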
diff --git a/spaces/Danielzero/GPT3.5/run_macOS.command b/spaces/Danielzero/GPT3.5/run_macOS.command
deleted file mode 100644
index 2d26597ae47519f42336ccffc16646713a192ae1..0000000000000000000000000000000000000000
--- a/spaces/Danielzero/GPT3.5/run_macOS.command
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# Get the directory containing this script
-script_dir=$(dirname "$(readlink -f "$0")")
-
-# Change the working directory to the script's directory
-cd "$script_dir" || exit
-
-# Check whether the Git repository has updates
-git remote update
-pwd
-
-if ! git status -uno | grep 'up to date' > /dev/null; then
-    # If there are updates, stop the currently running server
- pkill -f ChuanhuChatbot.py
-
-    # Pull the latest changes
- git pull
-
-    # Install dependencies
- pip3 install -r requirements.txt
-
-    # Restart the server
- nohup python3 ChuanhuChatbot.py &
-fi
-
-# Check whether ChuanhuChatbot.py is running
-if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
-    # If it is not running, start the server
- nohup python3 ChuanhuChatbot.py &
-fi
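The script above checks the remote for updates, restarts the server when the working copy is behind, and otherwise makes sure the server is running. A rough Python equivalent of the same flow (editor's sketch; the Space ships only the bash script):

```python
# Rough Python sketch of the update-and-restart flow above; the commands mirror
# the bash script (git remote update / status, pkill, pip3, python3).
import subprocess

def up_to_date() -> bool:
    subprocess.run(["git", "remote", "update"], check=True)
    status = subprocess.run(["git", "status", "-uno"],
                            capture_output=True, text=True, check=True).stdout
    return "up to date" in status

def restart_server() -> None:
    subprocess.run(["pkill", "-f", "ChuanhuChatbot.py"], check=False)
    subprocess.run(["git", "pull"], check=True)
    subprocess.run(["pip3", "install", "-r", "requirements.txt"], check=True)
    subprocess.Popen(["python3", "ChuanhuChatbot.py"])

def server_running() -> bool:
    return subprocess.run(["pgrep", "-f", "ChuanhuChatbot.py"],
                          capture_output=True).returncode == 0

if __name__ == "__main__":
    if not up_to_date():
        restart_server()
    elif not server_running():
        subprocess.Popen(["python3", "ChuanhuChatbot.py"])
```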
diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/README.md b/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/README.md
deleted file mode 100644
index bb98946d3b48a2069a58f179eb6da63e009c3849..0000000000000000000000000000000000000000
--- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# PreciseRoIPooling
-This repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation).
-
-**Acquisition of Localization Confidence for Accurate Object Detection**
-
-_Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.)
-
-https://arxiv.org/abs/1807.11590
-
-## Brief
-
-In short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is:
-
-- different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t. the coordinates of each RoI and optimize the RoI coordinates.
-- different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous.
-
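To make the pooling rule concrete, the sketch below approximates what a single PrRoI Pooling bin computes: the integral of the bilinearly interpolated feature map over the bin's continuous region, divided by the bin area. This is an editor's illustration using dense sampling; the actual implementation in this repo evaluates the integral in closed form (and in CUDA), which is what makes the output exactly differentiable w.r.t. the bin coordinates.

```python
# Approximates one PrRoI Pooling bin: the integral of the bilinearly
# interpolated feature map over the bin, divided by the bin area.
import torch
import torch.nn.functional as F

def prroi_bin_value_approx(feature, x0, y0, x1, y1, samples=64):
    """feature: (H, W) tensor; (x0, y0, x1, y1): bin corners in pixel coords."""
    H, W = feature.shape
    xs = torch.linspace(x0, x1, samples)
    ys = torch.linspace(y0, y1, samples)
    gy, gx = torch.meshgrid(ys, xs, indexing="ij")
    # grid_sample expects normalized coordinates in [-1, 1] and an (N, C, H, W) input.
    grid = torch.stack([2 * gx / (W - 1) - 1, 2 * gy / (H - 1) - 1], dim=-1)
    vals = F.grid_sample(feature[None, None], grid[None],
                         mode="bilinear", align_corners=True)
    return vals.mean()  # -> integral over the bin / bin area
```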
-For a better illustration, we compare RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details, including the gradient computation, can be found in our paper.
-
-